]> git.ipfire.org Git - thirdparty/gcc.git/commitdiff
Merge remote-tracking branch 'origin/trunk' into range-gen3-merge
authorAldy Hernandez <aldyh@gcc.gnu.org>
Thu, 22 Mar 2018 14:13:03 +0000 (14:13 +0000)
committerAldy Hernandez <aldyh@gcc.gnu.org>
Thu, 22 Mar 2018 14:13:03 +0000 (14:13 +0000)
This merge has not been tested apart from building c/c++ with
--disable-bootstrap.

get_nonzero_bits_as_range() needs to be looked at.

From-SVN: r258769

39 files changed:
1  2 
gcc/ChangeLog
gcc/Makefile.in
gcc/builtins.c
gcc/calls.c
gcc/calls.h
gcc/fold-const.c
gcc/function-tests.c
gcc/gengtype-parse.c
gcc/gengtype.c
gcc/gimple-fold.c
gcc/gimple-pretty-print.c
gcc/gimple-ssa-warn-alloca.c
gcc/gimple-ssa-warn-restrict.c
gcc/internal-fn.c
gcc/ipa-cp.c
gcc/ipa-prop.c
gcc/passes.c
gcc/range.c
gcc/range.h
gcc/selftest.h
gcc/ssa.h
gcc/tree-core.h
gcc/tree-data-ref.c
gcc/tree-scalar-evolution.c
gcc/tree-ssa-copy.c
gcc/tree-ssa-loop-niter.c
gcc/tree-ssa-phiopt.c
gcc/tree-ssa-pre.c
gcc/tree-ssa-sccvn.c
gcc/tree-ssa-sccvn.h
gcc/tree-ssa-strlen.c
gcc/tree-ssanames.c
gcc/tree-ssanames.h
gcc/tree-vect-patterns.c
gcc/tree-vrp.c
gcc/tree-vrp.h
gcc/tree.c
gcc/tree.h
gcc/vr-values.c

diff --cc gcc/ChangeLog
index c89b7f400869eb0dcc7422c205238d881037977b,53224b1895eb795fd34fcd3c3bd96fd786b79f39..a6f6443482a7ae9b2e767976fdd7f52d169b8a64
- 2017-09-15  Jackson Woodruff  <jackson.woodruff@arm.com>
++2017-08-24  Aldy Hernandez  <aldyh@redhat.com>
++
++      PR middle-end/81931
++      * tree-ssanames.c (get_nonzero_bits): Use element_precision
++      instead of TYPE_PRECISION.
++
++2017-08-22  Aldy Hernandez  <aldyh@redhat.com>
++
++      * wide-int.h (hwi_with_prec::hwi_with_prec): Sign extend.
++
 +2017-10-26  Richard Sandiford  <richard.sandiford@linaro.org>
 +
 +      * wide-int-print.cc (print_hex): Loop based on extract_uhwi.
 +      Don't print any bits outside the precision of the value.
 +      * wide-int.cc (test_printing): Add some new tests.
 +
+ 2018-03-20  David H. Gutteridge  <dhgutteridge@sympatico.ca>
+       PR target/84838
+       * Minor grammar fixes for x86 options.
+ 2018-03-20  Jakub Jelinek  <jakub@redhat.com>
+       PR debug/84875
+       * dce.c (delete_unmarked_insns): Don't remove frame related noop moves
+       holding REG_CFA_RESTORE notes, instead turn them into a USE.
+ 2018-03-20  Peter Bergner  <bergner@vnet.ibm.com>
+       PR target/83789
+       * config/rs6000/altivec.md (altivec_lvx_<mode>_2op): Delete define_insn.
+       (altivec_lvx_<mode>_1op): Likewise.
+       (altivec_stvx_<mode>_2op): Likewise.
+       (altivec_stvx_<mode>_1op): Likewise.
+       (altivec_lvx_<VM2:mode>): New define_expand.
+       (altivec_stvx_<VM2:mode>): Likewise.
+       (altivec_lvx_<VM2:mode>_2op_<P:mptrsize>): New define_insn.
+       (altivec_lvx_<VM2:mode>_1op_<P:mptrsize>): Likewise.
+       (altivec_stvx_<VM2:mode>_2op_<P:mptrsize>): Likewise.
+       (altivec_stvx_<VM2:mode>_1op_<P:mptrsize>): Likewise.
+       * config/rs6000/rs6000-p8swap.c (rs6000_gen_stvx): Use new expanders.
+       (rs6000_gen_lvx): Likewise.
+       * config/rs6000/rs6000.c (altivec_expand_lv_builtin): Likewise.
+       (altivec_expand_stv_builtin): Likewise.
+       (altivec_expand_builtin): Likewise.
+       * config/rs6000/vector.md: Likewise.
  
-       PR tree-optimization/71026
-       * match.pd: Move RDIV patterns from fold-const.c
-       * fold-const.c (distribute_real_division): Removed.
-       (fold_binary_loc): Remove calls to distribute_real_divison.
+ 2018-03-20  Richard Biener  <rguenther@suse.de>
  
- 2017-09-15  Jakub Jelinek  <jakub@redhat.com>
+       PR target/84986
+       * config/i386/i386.c (ix86_add_stmt_cost): Only cost
+       sign-conversions as zero, fall back to standard scalar_stmt
+       cost for the rest.
  
-       * doc/invoke.texi: Document -std=c++17 and -std=gnu++17 and document
-       c++1z and gnu++1z as deprecated.  Change other references to
-       -std=c++1z to -std=c++17 and -std=gnu++1z to -std=gnu++17.
-       Change -Wc++1z-compat to -Wc++17-compat.
-       * doc/cpp.texi: Document -std=c++17 defines __cplusplus 201703L.
-       * dwarf2out.c (highest_c_language): Handle C++17.
-       (gen_compile_unit_die): Likewise.
+ 2018-03-20  Martin Liska  <mliska@suse.cz>
  
- 2017-09-15  Jakub Jelinek  <jakub@redhat.com>
+       PR ipa/84825
+       * predict.c (rebuild_frequencies): Handle case when we have
+       PROFILE_ABSENT, but flag_guess_branch_prob is false.
  
-       PR rtl-optimization/82192
-       * combine.c (make_extraction): Don't look through non-paradoxical
-       SUBREGs or TRUNCATE if pos + len is or might be bigger than
-       inner's mode.
+ 2018-03-20  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-09-15  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/84990
+       * dwarf2asm.c (dw2_output_indirect_constant_1): Temporarily turn off
+       flag_section_anchors.
+       * varasm.c (use_blocks_for_decl_p): Remove hack for
+       dw2_force_const_mem.
  
-       * target.def (function_arg_offset): New hook.
-       * targhooks.h (default_function_arg_offset): Declare.
-       * targhooks.c (default_function_arg_offset): New function.
-       * function.c (locate_and_pad_parm): Use
-       targetm.calls.function_arg_offset instead of FUNCTION_ARG_OFFSET.
-       * doc/tm.texi.in (FUNCTION_ARG_OFFSET): Replace with...
-       (TARGET_FUNCTION_ARG_OFFSET): ...this.
-       * doc/tm.texi: Regenerate.
-       * config/spu/spu.h (FUNCTION_ARG_OFFSET): Delete.
-       * config/spu/spu.c (spu_function_arg_offset): New function.
-       (TARGET_FUNCTION_ARG_OFFSET): Redefine.
-       * system.h (FUNCTION_ARG_OFFSET): Poison.
+       PR target/84845
+       * config/aarch64/aarch64.md (*aarch64_reg_<mode>3_neg_mask2): Rename
+       to ...
+       (*aarch64_<optab>_reg_<mode>3_neg_mask2): ... this.  If pseudos can't
+       be created, use lowpart_subreg of operands[0] rather than operands[0]
+       itself.
+       (*aarch64_reg_<mode>3_minus_mask): Rename to ...
+       (*aarch64_ashl_reg_<mode>3_minus_mask): ... this.
+       (*aarch64_<optab>_reg_di3_mask2): Use const_int_operand predicate
+       and n constraint instead of aarch64_shift_imm_di and Usd.
+       (*aarch64_reg_<optab>_minus<mode>3): Rename to ...
+       (*aarch64_<optab>_reg_minus<mode>3): ... this.
  
- 2017-09-15  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayard  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-03-20  Sudakshina Das  <sudi.das@arm.com>
  
-       * target.def (truly_noop_truncation): New hook.
-       (mode_rep_extended): Refer to TARGET_TRULY_NOOP_TRUNCATION rather
-       than TRULY_NOOP_TRUNCATION.
-       * hooks.h (hook_bool_uint_uint_true): Declare.
-       * hooks.c (hook_bool_uint_uint_true): New function.
-       * doc/tm.texi.in (TRULY_NOOP_TRUNCATION): Replace with...
-       (TARGET_TRULY_NOOP_TRUNCATION): ...this.
-       * doc/tm.texi: Regenerate.
-       * combine.c (make_extraction): Refer to TARGET_TRULY_NOOP_TRUNCATION
-       rather than TRULY_NOOP_TRUNCATION in comments.
-       (simplify_comparison): Likewise.
-       (record_truncated_value): Likewise.
-       * expmed.c (extract_bit_field_1): Likewise.
-       (extract_split_bit_field): Likewise.
-       * convert.c (convert_to_integer_1): Use targetm.truly_noop_truncation
-       instead of TRULY_NOOP_TRUNCATION.
-       * function.c (assign_parm_setup_block): Likewise.
-       * machmode.h (TRULY_NOOP_TRUNCATION_MODES_P): Likewise.
-       * rtlhooks.c: Include target.h.
-       * config/aarch64/aarch64.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/alpha/alpha.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/arc/arc.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/arm/arm.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/avr/avr.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/bfin/bfin.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/c6x/c6x.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/cr16/cr16.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/cris/cris.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/epiphany/epiphany.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/fr30/fr30.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/frv/frv.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/ft32/ft32.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/h8300/h8300.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/i386/i386.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/ia64/ia64.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/iq2000/iq2000.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/lm32/lm32.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/m32c/m32c.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/m32r/m32r.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/m68k/m68k.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/mcore/mcore.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/microblaze/microblaze.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/mips/mips.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/mips/mips.c (mips_truly_noop_truncation): New function.
-       (TARGET_TRULY_NOOP_TRUNCATION): Redefine.
-       * config/mips/mips.md: Refer to TARGET_TRULY_NOOP_TRUNCATION
-       rather than TRULY_NOOP_TRUNCATION in comments.
-       * config/mmix/mmix.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/mn10300/mn10300.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/moxie/moxie.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/msp430/msp430.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/nds32/nds32.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/nios2/nios2.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/nvptx/nvptx.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/pa/pa.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/pdp11/pdp11.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/powerpcspe/powerpcspe.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/riscv/riscv.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/riscv/riscv.md: Refer to TARGET_TRULY_NOOP_TRUNCATION
-       rather than TRULY_NOOP_TRUNCATION in comments.
-       * config/rl78/rl78.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/rs6000/rs6000.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/rx/rx.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/s390/s390.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/sh/sh.h (MAYBE_BASE_REGISTER_RTX_P): Remove
-       TRULY_NOOP_TRUNCATION condition.
-       (MAYBE_INDEX_REGISTER_RTX_P): Likewise.
-       (TRULY_NOOP_TRUNCATION): Delete.
-       * config/sparc/sparc.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/spu/spu.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/spu/spu.c (spu_truly_noop_truncation): New function.
-       (TARGET_TRULY_NOOP_TRUNCATION): Redefine.
-       * config/stormy16/stormy16.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/tilegx/tilegx.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/tilegx/tilegx.c (tilegx_truly_noop_truncation): New fuction.
-       (TARGET_TRULY_NOOP_TRUNCATION): Redefine.
-       * config/tilegx/tilegx.md: Refer to TARGET_TRULY_NOOP_TRUNCATION
-       rather than TRULY_NOOP_TRUNCATION in comments.
-       * config/tilepro/tilepro.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/v850/v850.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/vax/vax.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/visium/visium.h (TRULY_NOOP_TRUNCATION): Delete.
-       * config/xtensa/xtensa.h (TRULY_NOOP_TRUNCATION): Delete.
-       * system.h (TRULY_NOOP_TRUNCATION): Poison.
- 2017-09-15  Christophe Lyon  <christophe.lyon@linaro.org>
-       PR target/67591
-       * config/arm/arm.md (*cmp_and): Add enabled_for_depr_it attribute.
-       (*cmp_ior): Likewise.
-       (*ior_scc_scc): Add alternative for enabled_for_depr_it attribute.
-       (*ior_scc_scc_cmp): Likewise.
-       (*and_scc_scc): Likewise.
-       (*and_scc_scc_cmp): Likewise.
- 2017-09-15  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayard  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/82989
+       * config/arm/neon.md (ashldi3_neon): Update ?s for constraints
+       to favor GPR over NEON registers.
+       (<shift>di3_neon): Likewise.
  
-       * target.def (can_change_mode_class): New hook.
-       (mode_rep_extended): Refer to it instead of CANNOT_CHANGE_MODE_CLASS.
-       (hard_regno_nregs): Likewise.
-       * hooks.h (hook_bool_mode_mode_reg_class_t_true): Declare.
-       * hooks.c (hook_bool_mode_mode_reg_class_t_true): New function.
-       * doc/tm.texi.in (CANNOT_CHANGE_MODE_CLASS): Replace with...
-       (TARGET_CAN_CHANGE_MODE_CLASS): ...this.
-       (LOAD_EXTEND_OP): Update accordingly.
-       * doc/tm.texi: Regenerate.
-       * doc/rtl.texi: Refer to TARGET_CAN_CHANGE_MODE_CLASS instead of
-       CANNOT_CHANGE_MODE_CLASS.
-       * hard-reg-set.h (REG_CANNOT_CHANGE_MODE_P): Replace with...
-       (REG_CAN_CHANGE_MODE_P): ...this new macro.
-       * combine.c (simplify_set): Update accordingly.
-       * emit-rtl.c (validate_subreg): Likewise.
-       * recog.c (general_operand): Likewise.
-       * regcprop.c (mode_change_ok): Likewise.
-       * reload1.c (choose_reload_regs): Likewise.
-       (inherit_piecemeal_p): Likewise.
-       * rtlanal.c (simplify_subreg_regno): Likewise.
-       * postreload.c (reload_cse_simplify_set): Use REG_CAN_CHANGE_MODE_P
-       instead of CANNOT_CHANGE_MODE_CLASS.
-       (reload_cse_simplify_operands): Likewise.
-       * reload.c (push_reload): Use targetm.can_change_mode_class
-       instead of CANNOT_CHANGE_MODE_CLASS.
-       (push_reload): Likewise.  Also use REG_CAN_CHANGE_MODE_P instead of
-       REG_CANNOT_CHANGE_MODE_P.
-       * config/alpha/alpha.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/alpha/alpha.c (alpha_can_change_mode_class): New function.
-       (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       * config/arm/arm.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/arm/arm.c (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       (arm_can_change_mode_class): New function.
-       * config/arm/neon.md: Refer to TARGET_CAN_CHANGE_MODE_CLASS rather
-       than CANNOT_CHANGE_MODE_CLASS in comments.
-       * config/i386/i386.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/i386/i386-protos.h (ix86_cannot_change_mode_class): Delete.
-       * config/i386/i386.c (ix86_cannot_change_mode_class): Replace with...
-       (ix86_can_change_mode_class): ...this new function, inverting the
-       sense of the return value.
-       (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       * config/ia64/ia64.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/ia64/ia64.c (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       (ia64_can_change_mode_class): New function.
-       * config/m32c/m32c.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/m32c/m32c-protos.h (m32c_cannot_change_mode_class): Delete.
-       * config/m32c/m32c.c (m32c_cannot_change_mode_class): Replace with...
-       (m32c_can_change_mode_class): ...this new function, inverting the
-       sense of the return value.
-       (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       * config/mips/mips.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/mips/mips-protos.h (mips_cannot_change_mode_class): Delete.
-       * config/mips/mips.c (mips_cannot_change_mode_class): Replace with...
-       (mips_can_change_mode_class): ...this new function, inverting the
-       sense of the return value.
-       (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       * config/msp430/msp430.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/msp430/msp430.c (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       (msp430_can_change_mode_class): New function.
-       * config/nvptx/nvptx.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/nvptx/nvptx.c (nvptx_can_change_mode_class): New function.
-       (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       * config/pa/pa32-regs.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/pa/pa64-regs.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/pa/pa-protos.h (pa_cannot_change_mode_class): Delete.
-       * config/pa/pa.c (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       (pa_cannot_change_mode_class): Replace with...
-       (pa_can_change_mode_class): ...this new function, inverting the
-       sense of the return value.
-       (pa_modes_tieable_p): Refer to TARGET_CAN_CHANGE_MODE_CLASS rather
-       than CANNOT_CHANGE_MODE_CLASS in comments.
-       * config/pdp11/pdp11.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/pdp11/pdp11-protos.h (pdp11_cannot_change_mode_class): Delete.
-       * config/pdp11/pdp11.c (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       (pdp11_cannot_change_mode_class): Replace with...
-       (pdp11_can_change_mode_class): ...this new function, inverting the
-       sense of the return value.
-       * config/powerpcspe/powerpcspe.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/powerpcspe/powerpcspe-protos.h
-       (rs6000_cannot_change_mode_class_ptr): Delete.
-       * config/powerpcspe/powerpcspe.c
-       (rs6000_cannot_change_mode_class_ptr): Delete.
-       (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       (rs6000_option_override_internal): Assign to
-       targetm.can_change_mode_class instead of
-       rs6000_cannot_change_mode_class_ptr.
-       (rs6000_cannot_change_mode_class): Replace with...
-       (rs6000_can_change_mode_class): ...this new function, inverting the
-       sense of the return value.
-       (rs6000_debug_cannot_change_mode_class): Replace with...
-       (rs6000_debug_can_change_mode_class): ...this new function.
-       * config/riscv/riscv.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/riscv/riscv.c (riscv_can_change_mode_class): New function.
-       (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       * config/rs6000/rs6000.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/rs6000/rs6000-protos.h (rs6000_cannot_change_mode_class_ptr):
-       Delete.
-       * config/rs6000/rs6000.c (rs6000_cannot_change_mode_class_ptr): Delete.
-       (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       (rs6000_option_override_internal): Assign to
-       targetm.can_change_mode_class instead of
-       rs6000_cannot_change_mode_class_ptr.
-       (rs6000_cannot_change_mode_class): Replace with...
-       (rs6000_can_change_mode_class): ...this new function, inverting the
-       sense of the return value.
-       (rs6000_debug_cannot_change_mode_class): Replace with...
-       (rs6000_debug_can_change_mode_class): ...this new function.
-       * config/s390/s390.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/s390/s390-protos.h (s390_cannot_change_mode_class): Delete.
-       * config/s390/s390.c (s390_cannot_change_mode_class): Replace with...
-       (s390_can_change_mode_class): ...this new function, inverting the
-       sense of the return value.
-       (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       * config/sh/sh.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/sh/sh-protos.h (sh_cannot_change_mode_class): Delete.
-       * config/sh/sh.c (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       (sh_cannot_change_mode_class): Replace with...
-       (sh_can_change_mode_class): ...this new function, inverting the
-       sense of the return value.
-       * config/sparc/sparc.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/sparc/sparc.c (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       (sparc_can_change_mode_class): New function.
-       * config/spu/spu.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/spu/spu.c (spu_can_change_mode_class): New function.
-       (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       * config/visium/visium.h (CANNOT_CHANGE_MODE_CLASS): Delete.
-       * config/visium/visium.c (TARGET_CAN_CHANGE_MODE_CLASS): Redefine.
-       (visium_can_change_mode_class): New function.
-       * system.h (CANNOT_CHANGE_MODE_CLASS): Poison.
- 2017-09-15  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/82217
-       * tree-ssa-sccvn.c (visit_phi): Properly handle all VN_TOP
-       but not undefined case.
- 2017-09-15  Jakub Jelinek  <jakub@redhat.com>
-       PR target/82145
-       * postreload.c (reload_cse_simplify_operands): Skip
-       NOTE_INSN_DELETED_LABEL similarly to skipping CODE_LABEL.
- 2017-09-15  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/68823
-       * graphite-scop-detection.c (build_alias_set): If we have a
-       possible dependence check whether we can handle them by just
-       looking at the DRs DR_ACCESS_FNs.
-       (build_scops): If build_alias_set fails, fail the SCOP.
- 2017-09-14  Michael Meissner  <meissner@linux.vnet.ibm.com>
-       * config/rs6000/rs6000-builtin.def (BU_FLOAT128_1_HW): New macros
-       to support float128 built-in functions that require the ISA 3.0
-       hardware.
-       (BU_FLOAT128_3_HW): Likewise.
-       (SQRTF128): Add support for the IEEE 128-bit square root and fma
-       built-in functions.
-       (FMAF128): Likewise.
-       (FMAQ): Likewise.
-       * config/rs6000/rs6000.c (rs6000_builtin_mask_calculate): Add
-       support for built-in functions that need the ISA 3.0 IEEE 128-bit
-       floating point instructions.
-       (rs6000_invalid_builtin): Likewise.
-       (rs6000_builtin_mask_names): Likewise.
-       * config/rs6000/rs6000.h (MASK_FLOAT128_HW): Likewise.
-       (RS6000_BTM_FLOAT128_HW): Likewise.
-       (RS6000_BTM_COMMON): Likewise.
-       * config/rs6000/rs6000.md (fma<mode>4_hw): Add a generator
-       function.
-       * doc/extend.texi (RS/6000 built-in functions): Document the
-       IEEE 128-bit floating point square root and fused multiply-add
-       built-in functions.
+ 2018-03-20  Tom de Vries  <tom@codesourcery.com>
  
- 2017-09-14  Pat Haugen  <pthaugen@us.ibm.com>
+       PR target/84952
+       * config/nvptx/nvptx.c (nvptx_single): Don't neuter bar.sync.
+       (nvptx_process_pars): Emit bar.sync asap and alap.
  
-       * config/rs6000/rs6000.c (rs6000_set_up_by_prologue): Make sure the TOC
-       reg (r2) isn't in the set of registers defined in the prologue.
+ 2018-03-20  Tom de Vries  <tom@codesourcery.com>
  
- 2017-09-14  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/84954
+       * config/nvptx/nvptx.c (prevent_branch_around_nothing): Also update
+       seen_label if seen_label is already set.
  
-       * tree-vectorizer.h (_loop_vec_info): Add max_vectorization_factor.
-       (LOOP_VINFO_MAX_VECT_FACTOR): New macro.
-       (LOOP_VINFO_ORIG_VECT_FACTOR): Replace with...
-       (LOOP_VINFO_ORIG_MAX_VECT_FACTOR): ...this new macro.
-       * tree-vect-data-refs.c (vect_analyze_data_ref_dependences): Update
-       accordingly.
-       * tree-vect-loop.c (_loop_vec_info::_loop_vec_info): Initialize
-       max_vectorization_factor.
-       (vect_analyze_loop_2): Set LOOP_VINFO_MAX_VECT_FACTOR.
+ 2018-03-20  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-09-14  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/84945
+       * config/i386/i386.c (fold_builtin_cpu): For features above 31
+       use __cpu_features2 variable instead of __cpu_model.__cpu_features[0].
+       Use 1U instead of 1.  Formatting fixes.
  
-       * tree-vectorizer.h (vect_min_worthwhile_factor): Delete.
-       (vect_worthwhile_without_simd_p): Declare.
-       * tree-vect-loop.c (vect_worthwhile_without_simd_p): New function.
-       (vectorizable_reduction): Use it.
-       * tree-vect-stmts.c (vectorizable_shift): Likewise.
-       (vectorizable_operation): Likewise.
+       PR c/84953
+       * builtins.c (fold_builtin_strpbrk): For strpbrk(x, "") use type
+       instead of TREE_TYPE (s1) for the return value.
  
- 2017-09-14  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-03-19  Jakub Jelinek  <jakub@redhat.com>
  
-       * tree-vectorizer.h (vect_get_num_copies): New function.
-       * tree-vect-data-refs.c (vect_get_data_access_cost): Use it.
-       * tree-vect-loop.c (vectorizable_reduction): Likewise.
-       (vectorizable_induction): Likewise.
-       (vectorizable_live_operation): Likewise.
-       * tree-vect-stmts.c (vectorizable_mask_load_store): Likewise.
-       (vectorizable_bswap): Likewise.
-       (vectorizable_call): Likewise.
-       (vectorizable_conversion): Likewise.
-       (vectorizable_assignment): Likewise.
-       (vectorizable_shift): Likewise.
-       (vectorizable_operation): Likewise.
-       (vectorizable_store): Likewise.
-       (vectorizable_load): Likewise.
-       (vectorizable_condition): Likewise.
-       (vectorizable_comparison): Likewise.
-       (vect_analyze_stmt): Pass the slp node to vectorizable_live_operation.
+       PR tree-optimization/84946
+       * gimple-ssa-store-merging.c (mem_valid_for_store_merging): Compute
+       bitsize + bitsize in poly_uint64 rather than poly_int64.
  
- 2017-09-14  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR sanitizer/78651
+       * dwarf2asm.c: Include fold-const.c.
+       (dw2_output_indirect_constant_1): Set DECL_INITIAL (decl) to ADDR_EXPR
+       of decl rather than decl itself.
  
-       * tree-vect-loop.c (vectorizable_induction): Use gimple_build instead
-       of vect_init_vector.
+       PR rtl-optimization/84643
+       * memmodel.h (enum memmodel): Add MEMMODEL_MAX enumerator.
  
- 2017-09-14  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-03-19  Maxim Ostapenko  <m.ostapenko@samsung.com>
  
-       * gimple-fold.h (gimple_build_vector_from_val): Declare, and provide
-       an inline wrapper that provides a location.
-       (gimple_build_vector): Likewise.
-       * gimple-fold.c (gimple_build_vector_from_val): New function.
-       (gimple_build_vector): Likewise.
-       * tree-vect-loop.c (get_initial_def_for_reduction): Use the new
-       functions to build the initial value.  Always return a gimple value.
-       (get_initial_defs_for_reduction): Likewise.  Only compute
-       neutral_vec once.
-       (vect_create_epilog_for_reduction): Don't call force_gimple_operand or
-       vect_init_vector on the results from get_initial_def(s)_for_reduction.
-       (vectorizable_induction): Use gimple_build_vector rather than
-       vect_init_vector.
- 2017-09-14  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR sanitizer/78651
+       * dwarf2asm.c (dw2_output_indirect_constant_1): Disable ASan before
+       calling assemble_variable.
  
-       * target.h (vec_perm_indices): New typedef.
-       (auto_vec_perm_indices): Likewise.
-       * optabs-query.h: Include target.h
-       (can_vec_perm_p): Take a vec_perm_indices *.
-       * optabs-query.c (can_vec_perm_p): Likewise.
-       (can_mult_highpart_p): Update accordingly.  Use auto_vec_perm_indices.
-       * tree-ssa-forwprop.c (simplify_vector_constructor): Likewise.
-       * tree-vect-generic.c (lower_vec_perm): Likewise.
-       * tree-vect-data-refs.c (vect_grouped_store_supported): Likewise.
-       (vect_grouped_load_supported): Likewise.
-       (vect_shift_permute_load_chain): Likewise.
-       (vect_permute_store_chain): Use auto_vec_perm_indices.
-       (vect_permute_load_chain): Likewise.
-       * fold-const.c (fold_vec_perm): Take vec_perm_indices.
-       (fold_ternary_loc): Update accordingly.  Use auto_vec_perm_indices.
-       Update uses of can_vec_perm_p.
-       * tree-vect-loop.c (calc_vec_perm_mask_for_shift): Replace the
-       mode with a number of elements.  Take a vec_perm_indices *.
-       (vect_create_epilog_for_reduction): Update accordingly.
-       Use auto_vec_perm_indices.
-       (have_whole_vector_shift): Likewise.  Update call to can_vec_perm_p.
-       * tree-vect-slp.c (vect_build_slp_tree_1): Likewise.
-       (vect_transform_slp_perm_load): Likewise.
-       (vect_schedule_slp_instance): Use auto_vec_perm_indices.
-       * tree-vectorizer.h (vect_gen_perm_mask_any): Take a vec_perm_indices.
-       (vect_gen_perm_mask_checked): Likewise.
-       * tree-vect-stmts.c (vect_gen_perm_mask_any): Take a vec_perm_indices.
-       (vect_gen_perm_mask_checked): Likewise.
-       (vectorizable_mask_load_store): Use auto_vec_perm_indices.
-       (vectorizable_store): Likewise.
-       (vectorizable_load): Likewise.
-       (perm_mask_for_reverse): Likewise.  Update call to can_vec_perm_p.
-       (vectorizable_bswap): Likewise.
+ 2018-03-19  Sudakshina Das  <sudi.das@arm.com>
  
- 2017-09-14  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/81647
+       * config/aarch64/aarch64-simd.md (vec_cmp<mode><v_int_equiv>): Modify
+       instructions for UNLT, UNLE, UNGT, UNGE, UNEQ, UNORDERED and ORDERED.
  
-       * tree.h (build_vector): Take a vec<tree> instead of a tree *.
-       * tree.c (build_vector): Likewise.
-       (build_vector_from_ctor): Update accordingly.
-       (build_vector_from_val): Likewise.
-       * gimple-fold.c (gimple_fold_stmt_to_constant_1): Likewise.
-       * tree-ssa-forwprop.c (simplify_vector_constructor): Likewise.
-       * tree-vect-generic.c (add_rshift): Likewise.
-       (expand_vector_divmod): Likewise.
-       (optimize_vector_constructor): Likewise.
-       * tree-vect-slp.c (vect_get_constant_vectors): Likewise.
-       (vect_transform_slp_perm_load): Likewise.
-       (vect_schedule_slp_instance): Likewise.
-       * tree-vect-stmts.c (vectorizable_bswap): Likewise.
-       (vectorizable_call): Likewise.
-       (vect_gen_perm_mask_any): Likewise.  Add elements in order.
-       * expmed.c (make_tree): Likewise.
-       * fold-const.c (fold_negate_expr_1): Use auto_vec<tree> when building
-       a vector passed to build_vector.
-       (fold_convert_const): Likewise.
-       (exact_inverse): Likewise.
-       (fold_ternary_loc): Likewise.
-       (fold_relational_const): Likewise.
-       (const_binop): Likewise.  Use VECTOR_CST_ELT directly when operating
-       on VECTOR_CSTs, rather than going through vec_cst_ctor_to_array.
-       (const_unop): Likewise.  Store the reduction accumulator in a
-       variable rather than an array.
-       (vec_cst_ctor_to_array): Take the number of elements as a parameter.
-       (fold_vec_perm): Update calls accordingly.  Use auto_vec<tree> for
-       the new vector, rather than constructing it after the input arrays.
-       (native_interpret_vector): Use auto_vec<tree> when building
-       a vector passed to build_vector.  Add elements in order.
-       * tree-vect-loop.c (get_initial_defs_for_reduction): Use
-       auto_vec<tree> when building a vector passed to build_vector.
-       (vect_create_epilog_for_reduction): Likewise.
-       (vectorizable_induction): Likewise.
-       (get_initial_def_for_reduction): Likewise.  Fix indentation of
-       case statements.
-       * config/sparc/sparc.c (sparc_handle_vis_mul8x16): Change n_elts
-       to a vec<tree> *.
-       (sparc_fold_builtin): Use auto_vec<tree> when building a vector
-       passed to build_vector.
- 2017-09-14  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-03-19  Jim Wilson  <jimw@sifive.com>
  
-       * tree-core.h (tree_base::u): Add an "nelts" field.
-       (tree_vector): Use VECTOR_CST_NELTS as the length.
-       * tree.c (tree_size): Likewise.
-       (make_vector): Initialize VECTOR_CST_NELTS.
-       * tree.h (VECTOR_CST_NELTS): Use the u.nelts field.
-       * cfgexpand.c (expand_debug_expr): Use VECTOR_CST_NELTS instead of
-       TYPE_VECTOR_SUBPARTS.
-       * expr.c (const_vector_mask_from_tree): Consistently use "units"
-       as the number of units, setting it from VECTOR_CST_NELTS.
-       (const_vector_from_tree): Likewise.
-       * fold-const.c (negate_expr_p): Use VECTOR_CST_NELTS instead of
-       TYPE_VECTOR_SUBPARTS for the number of elements in a VECTOR_CST.
-       (fold_negate_expr_1): Likewise.
-       (fold_convert_const): Likewise.
-       (const_binop): Likewise.  Differentiate the number of output and
-       input elements.
-       (const_unop): Likewise.
-       (fold_ternary_loc): Use VECTOR_CST_NELTS for the number of elements
-       in a VECTOR_CST, asserting that it is the same as TYPE_VECTOR_SUBPARTS
-       in cases that did the opposite.
- 2017-09-14  Richard Biener  <rguenther@suse.de>
-       * tree-ssa-sccvn.c (visit_phi): Merge undefined values similar
-       to VN_TOP.
- 2017-09-14  Eric Botcazou  <ebotcazou@adacore.com>
-       * dwarf2out.c (dwarf2out_source_line): Remove superfluous test.
- 2017-09-14  Jakub Jelinek  <jakub@redhat.com>
-       PR target/81325
-       * cfgbuild.c (find_bb_boundaries): Ignore debug insns in decisions
-       if and where to split a bb, except for splitting before debug insn
-       sequences followed by non-label real insn.  Delete debug insns
-       in between basic blocks.
-       * combine.c (make_compound_operation_int): Formatting fixes.
-       * config/alpha/elf.h (LINK_EH_SPEC): Add -static-pie support.
-       * config/alpha/linux.h (LINK_GCC_C_SEQUENCE_SPEC): Likewise.
-       * config/netbsd.h (LINK_EH_SPEC): Likewise.
-       * config/sol2.h (LINK_EH_SPEC): Likewise.
-       * config/arm/uclinux-elf.h (LINK_GCC_C_SEQUENCE_SPEC): Likewise.
-       * config/s390/linux.h (LINK_SPEC): Likewise.
-       * config/freebsd.h (LINK_EH_SPEC): Likewise.
-       * config/openbsd.h (LINK_EH_SPEC): Likewise.
-       * config/lm32/uclinux-elf.h (LINK_GCC_C_SEQUENCE_SPEC): Likewise.
-       * config/aarch64/aarch64-linux.h (LINUX_TARGET_LINK_SPEC): Likewise.
-       * config/powerpcspe/sysv4.h (LINK_EH_SPEC): Likewise.
-       * config/bfin/linux.h (LINK_GCC_C_SEQUENCE_SPEC): Likewise.
-       * config/i386/gnu-user64.h (GNU_USER_TARGET_LINK_SPEC): Fix a typo.
-       * config/i386/gnu-user.h (GNU_USER_TARGET_LINK_SPEC): Formatting fix.
- 2017-09-13  Jakub Jelinek  <jakub@redhat.com>
-       * config/rs6000/sysv4.h (STARTFILE_LINUX_SPEC): Add -static-pie
-       support.
-       (ENDFILE_LINUX_SPEC): Likewise.
-       (LINK_EH_SPEC): Likewise.
-       * config/rs6000/linux64.h (LINK_SHLIB_SPEC): Likewise.
-       (LINK_OS_LINUX_SPEC32): Likewise.
-       (LINK_OS_LINUX_SPEC64): Likewise.
-       * config/rs6000/linux.h (LINK_SHLIB_SPEC): Likewise.
-       (LINK_OS_LINUX_SPEC): Likewise.
+       PR bootstrap/84856
+       * config/riscv/riscv.c (riscv_function_arg_boundary): Use
+       PREFERRED_STACK_BOUNDARY instead of STACK_BOUNDARY.
+       (riscv_first_stack_step): Likewise.
+       (riscv_option_override): Use STACK_BOUNDARY instead of
+       MIN_STACK_BOUNDARY.
+       * config/riscv/riscv.h (STACK_BOUNDARY): Renamed from
+       MIN_STACK_BOUNDARY.
+       (BIGGEST_ALIGNMENT): Set to 128.
+       (PREFERRED_STACK_BOUNDARY): Renamed from STACK_BOUNDARY.
+       (RISCV_STACK_ALIGN): Use PREFERRED_STACK_BOUNDARY instead of
+       STACK_BOUNDARY.
  
- 2017-09-13  Martin Liska  <mliska@suse.cz>
+ 2018-03-19  Richard Biener  <rguenther@suse.de>
  
-       PR middle-end/82154
-       * stmt.c (expand_sjlj_dispatch_table): Use CASE_LOW when
-       CASE_HIGH is NULL_TREE.
+       PR tree-optimization/84933
+       * tree-vrp.c (set_and_canonicalize_value_range): Treat out-of-bound
+       values as -INF/INF when canonicalizing an ANTI_RANGE to a RANGE.
  
- 2017-09-13  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-03-19  Richard Biener  <rguenther@suse.de>
  
-       * target.def (secondary_memory_needed): New hook.
-       (secondary_reload): Refer to TARGET_SECONDARY_MEMORY_NEEDED
-       instead of SECONDARY_MEMORY_NEEDED.
-       (secondary_memory_needed_mode): Likewise.
-       * hooks.h (hook_bool_mode_reg_class_t_reg_class_t_false): Declare.
-       * hooks.c (hook_bool_mode_reg_class_t_reg_class_t_false): New function.
-       * doc/tm.texi.in (SECONDARY_MEMORY_NEEDED): Replace with...
-       (TARGET_SECONDARY_MEMORY_NEEDED): ...this.
-       (SECONDARY_MEMORY_NEEDED_RTX): Update reference accordingly.
-       * doc/tm.texi: Regenerate.
-       * config/alpha/alpha.h (SECONDARY_MEMORY_NEEDED): Delete.
-       * config/alpha/alpha.c (alpha_secondary_memory_needed): New function.
-       (TARGET_SECONDARY_MEMORY_NEEDED): Redefine.
-       * config/i386/i386.h (SECONDARY_MEMORY_NEEDED): Delete.
-       * config/i386/i386-protos.h (ix86_secondary_memory_needed): Delete.
-       * config/i386/i386.c (inline_secondary_memory_needed): Put the
-       mode argument first and change the reg_class arguments to reg_class_t.
-       (ix86_secondary_memory_needed): Likewise.  Remove the strict parameter.
-       Make static.  Update the call to inline_secondary_memory_needed.
-       (ix86_register_move_cost): Update the call to
-       inline_secondary_memory_needed.
-       (TARGET_SECONDARY_MEMORY_NEEDED): Redefine.
-       * config/ia64/ia64.h (SECONDARY_MEMORY_NEEDED): Delete commented-out
-       definition.
-       * config/ia64/ia64.c (spill_xfmode_rfmode_operand): Refer to
-       TARGET_SECONDARY_MEMORY_NEEDED rather than SECONDARY_MEMORY_NEEDED
-       in comment.
-       * config/mips/mips.h (SECONDARY_MEMORY_NEEDED): Delete.
-       * config/mips/mips-protos.h (mips_secondary_memory_needed): Delete.
-       * config/mips/mips.c (mips_secondary_memory_needed): Make static
-       and match hook interface.  Add comment from mips.h.
-       (TARGET_SECONDARY_MEMORY_NEEDED): Redefine.
-       * config/mmix/mmix.md (truncdfsf2): Refer to
-       TARGET_SECONDARY_MEMORY_NEEDED rather than SECONDARY_MEMORY_NEEDED
-       in comment.
-       * config/pa/pa-64.h (SECONDARY_MEMORY_NEEDED): Rename to...
-       (PA_SECONDARY_MEMORY_NEEDED): ...this, and put the mode argument first.
-       * config/pa/pa.c (TARGET_SECONDARY_MEMORY_NEEDED): Redefine.
-       (pa_secondary_memory_needed): New function.
-       * config/pdp11/pdp11.h (SECONDARY_MEMORY_NEEDED): Delete.
-       * config/pdp11/pdp11-protos.h (pdp11_secondary_memory_needed): Delete.
-       * config/pdp11/pdp11.c (TARGET_SECONDARY_MEMORY_NEEDED): Redefine.
-       (pdp11_secondary_memory_needed): Make static and match hook interface.
-       * config/powerpcspe/powerpcspe.h (SECONDARY_MEMORY_NEEDED): Delete.
-       * config/powerpcspe/powerpcspe-protos.h
-       (rs6000_secondary_memory_needed_ptr): Delete.
-       * config/powerpcspe/powerpcspe.c (rs6000_secondary_memory_needed_ptr):
-       Delete.
-       (TARGET_SECONDARY_MEMORY_NEEDED): Redefine.
-       (rs6000_option_override_internal): Assign to
-       targetm.secondary_memory_needed rather than
-       rs6000_secondary_memory_needed_ptr.
-       (rs6000_secondary_memory_needed): Match hook interface.
-       (rs6000_debug_secondary_memory_needed): Likewise.
-       * config/riscv/riscv.h (SECONDARY_MEMORY_NEEDED): Delete.
-       * config/riscv/riscv.c (riscv_secondary_memory_needed): New function.
-       (riscv_register_move_cost): Use it instead of SECONDARY_MEMORY_NEEDED.
-       (TARGET_SECONDARY_MEMORY_NEEDED): Redefine.
-       * config/rs6000/rs6000.h (SECONDARY_MEMORY_NEEDED): Delete.
-       * config/rs6000/rs6000-protos.h (rs6000_secondary_memory_needed_ptr):
-       Delete.
-       * config/rs6000/rs6000.c (rs6000_secondary_memory_needed_ptr): Delete.
-       (TARGET_SECONDARY_MEMORY_NEEDED): Redefine.
-       (rs6000_option_override_internal): Assign to
-       targetm.secondary_memory_needed rather than
-       rs6000_secondary_memory_needed_ptr.
-       (rs6000_secondary_memory_needed): Match hook interface.
-       (rs6000_debug_secondary_memory_needed): Likewise.
-       * config/s390/s390.h (SECONDARY_MEMORY_NEEDED): Delete.
-       * config/s390/s390.c (s390_secondary_memory_needed): New function.
-       (TARGET_SECONDARY_MEMORY_NEEDED): Redefine.
-       * config/sparc/sparc.h (SECONDARY_MEMORY_NEEDED): Delete.
-       * config/sparc/sparc.c (TARGET_SECONDARY_MEMORY_NEEDED): Redefine.
-       (sparc_secondary_memory_needed): New function.
-       * lra-constraints.c (check_and_process_move): Refer to
-       TARGET_SECONDARY_MEMORY_NEEDED rather than SECONDARY_MEMORY_NEEDED
-       in comment.
-       (curr_insn_transform): Likewise.
-       (process_alt_operands): Use targetm.secondary_memory_needed
-       instead of TARGET_SECONDARY_MEMORY_NEEDED.
-       (check_secondary_memory_needed_p): Likewise.
-       (choose_split_class): Likewise.
-       * reload.c: Unconditionally include code that was previously
-       conditional on SECONDARY_MEMORY_NEEDED.
-       (push_secondary_reload): Use targetm.secondary_memory_needed
-       instead of TARGET_SECONDARY_MEMORY_NEEDED.
-       (push_reload): Likewise.
-       * reload1.c: Unconditionally include code that was previously
-       conditional on SECONDARY_MEMORY_NEEDED.
-       (choose_reload_regs): Use targetm.secondary_memory_needed
-       instead of TARGET_SECONDARY_MEMORY_NEEDED.
-       (gen_reload): Likewise.
-       * system.h (SECONDARY_MEMORY_NEEDED): Poison.
- 2017-09-13  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR tree-optimization/84859
+       * tree-ssa-phiopt.c (single_trailing_store_in_bb): New function.
+       (cond_if_else_store_replacement): Perform sinking operation on
+       single-store BBs regardless of MAX_STORES_TO_SINK setting.
+       Generalize what a BB with a single eligible store is.
  
-       * target.def (secondary_memory_needed_mode): New hook.
-       * targhooks.c (default_secondary_memory_needed_mode): Declare.
-       * targhooks.h (default_secondary_memory_needed_mode): New function.
-       * doc/tm.texi.in (SECONDARY_MEMORY_NEEDED_MODE): Replace with...
-       (TARGET_SECONDARY_MEMORY_NEEDED_MODE): ...this.
-       * doc/tm.texi: Regenerate.
-       * lra-constraints.c (check_and_process_move): Use
-       targetm.secondary_memory_needed_mode instead of
-       TARGET_SECONDARY_MEMORY_NEEDED_MODE.
-       (curr_insn_transform): Likewise.
-       * reload.c (get_secondary_mem): Likewise.
-       * config/alpha/alpha.h (SECONDARY_MEMORY_NEEDED_MODE): Delete.
-       * config/alpha/alpha.c (alpha_secondary_memory_needed_mode): New
-       function.
-       (TARGET_SECONDARY_MEMORY_NEEDED_MODE): Redefine.
-       * config/i386/i386.h (SECONDARY_MEMORY_NEEDED_MODE): Delete.
-       * config/i386/i386.c (ix86_secondary_memory_needed_mode): New function.
-       (TARGET_SECONDARY_MEMORY_NEEDED_MODE): Redefine.
-       * config/powerpcspe/powerpcspe.h (SECONDARY_MEMORY_NEEDED_MODE):
-       Delete.
-       * config/powerpcspe/powerpcspe-protos.h
-       (rs6000_secondary_memory_needed_mode): Delete.
-       * config/powerpcspe/powerpcspe.c
-       (TARGET_SECONDARY_MEMORY_NEEDED_MODE): Redefine.
-       (rs6000_secondary_memory_needed_mode): Make static.
-       * config/rs6000/rs6000.h (SECONDARY_MEMORY_NEEDED_MODE): Delete.
-       * config/rs6000/rs6000-protos.h (rs6000_secondary_memory_needed_mode):
-       Delete.
-       * config/rs6000/rs6000.c (TARGET_SECONDARY_MEMORY_NEEDED_MODE):
-       Redefine.
-       (rs6000_secondary_memory_needed_mode): Make static.
-       * config/s390/s390.h (SECONDARY_MEMORY_NEEDED_MODE): Delete.
-       * config/s390/s390.c (s390_secondary_memory_needed_mode): New function.
-       (TARGET_SECONDARY_MEMORY_NEEDED_MODE): Redefine.
-       * config/sparc/sparc.h (SECONDARY_MEMORY_NEEDED_MODE): Delete.
-       * config/sparc/sparc.c (TARGET_SECONDARY_MEMORY_NEEDED_MODE):
-       Redefine.
-       (sparc_secondary_memory_needed_mode): New function.
-       * system.h (TARGET_SECONDARY_MEMORY_NEEDED_MODE): Poison.
+ 2018-03-19  Richard Biener  <rguenther@suse.de>
  
- 2017-09-13  Jackson Woodruff  <jackson.woodruff@arm.com>
+       PR tree-optimization/84929
+       * tree-data-ref.c (analyze_siv_subscript_cst_affine): Guard
+       chrec_is_positive against non-chrec arg.
  
-       * config/aarch64/constraints.md (Umq): New constraint.
-       * config/aarch64/aarch64-simd.md (*aarch64_simd_mov<mode>):
-       Change to use Umq.
-       (mov<mode>): Update condition.
+ 2018-03-19  Tamar Christina  <tamar.christina@arm.com>
  
- 2017-09-13  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
+       PR target/84711
+       * config/arm/arm.c (arm_can_change_mode_class): Revert r258554.
  
-       * gimple-ssa-store-merging.c (sort_by_bitpos): Compare store order
-       when bitposition is the same.
+ 2018-03-18  Martin Liska  <mliska@suse.cz>
  
- 2017-09-13  Richard Biener  <rguenther@suse.de>
+       PR rtl-optimization/84635
+       * regrename.c (build_def_use): Use matches_mode only when
+       matches >= 0.
  
-       * dwarf2out.c (output_die_symbol): Remove.
-       (output_die): Do not output a DIEs symbol.
+ 2018-03-18  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-09-13  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84913
+       * tree-vect-loop.c (vectorizable_reduction): Don't try to
+       vectorize chains of COND_EXPRs.
  
-       PR middle-end/82128
-       * gimple-fold.c (gimple_fold_call): Update SSA name in-place to
-       default-def to avoid breaking iterator update with the weird
-       interaction with cgraph_update_edges_for_call_stmt_node.
+ 2018-03-18  Chung-Ju Wu  <jasonwucj@gmail.com>
  
- 2017-09-13  Richard Biener  <rguenther@suse.de>
+       * config/nds32/nds32.h (MAX_REGS_PER_ADDRESS): Fix the value.
  
-       * tree-cfg.c (verify_gimple_assign_binary): Add verification
-       for WIDEN_SUM_EXPR, VEC_WIDEN_MULT_{HI,LO,EVEN,ODD}_EXPR,
-       VEC_PACK_{TRUNC,SAT,FIX_TRUNC}_EXPR.
-       (verify_gimple_assign_ternary): Add verification for DOT_PROD_EXPR.
+ 2018-03-18  Chung-Ju Wu  <jasonwucj@gmail.com>
  
- 2017-09-13  Kugan Vivekanandarajah  <kuganv@linaro.org>
+       * config/nds32/nds32.h (LOGICAL_OP_NON_SHORT_CIRCUIT): Define.
  
-       * config/aarch64/aarch64.c (aarch64_override_options_after_change_1):
-       Disable pc relative literal load irrespective of
-       TARGET_FIX_ERR_A53_84341 for default.
+ 2018-03-18  Chung-Ju Wu  <jasonwucj@gmail.com>
  
- 2017-09-12  Eric Botcazou  <ebotcazou@adacore.com>
+       * config/nds32/nds32.h (CLZ_DEFINED_VALUE_AT_ZERO): Define.
  
-       * config/sparc/sparc.c (output_return): Output the source location of
-       the insn in the delay slot, if any.
-       (output_sibcall): Likewise.
+ 2018-03-17  Chung-Ju Wu  <jasonwucj@gmail.com>
+           Kito Cheng  <kito.cheng@gmail.com>
  
- 2017-09-12  H.J. Lu  <hongjiu.lu@intel.com>
-       PR driver/81498
-       * common.opt (-static-pie): New alias.
-       (shared): Negate static-pie.
-       (-no-pie): Update help text.
-       (-pie): Likewise.
-       (static-pie): New option.
-       * config/gnu-user.h (GNU_USER_TARGET_STARTFILE_SPEC): Add
-       -static-pie support.
-       (GNU_USER_TARGET_ENDFILE_SPEC): Likewise.
-       (LINK_EH_SPEC): Likewise.
-       (LINK_GCC_C_SEQUENCE_SPEC): Likewise.
-       * config/i386/gnu-user.h (GNU_USER_TARGET_LINK_SPEC): Likewise.
-       * config/i386/gnu-user64.h (GNU_USER_TARGET_LINK_SPEC): Likewise.
-       * gcc.c (LINK_COMMAND_SPEC): Likewise.
-       (init_gcc_specs): Likewise.
-       (init_spec): Likewise.
-       (display_help): Update help message for -pie.
-       * doc/invoke.texi: Update -pie, -no-pie and -static.  Document
-       -static-pie.
- 2017-09-12  Wilco Dijkstra  <wdijkstr@arm.com>
-       * config/aarch64/aarch64.md (movsi_aarch64): Remove all '*'.
-       (movdi_aarch64): Likewise.
-       (movti_aarch64): Likewise.
- 2017-09-12  Simon Wright <simon@pushface.org>
-       PR target/80204
-       * config/darwin-driver.c (darwin_find_version_from_kernel): Eliminate
-       calculation of the minor version, always output as 0.
- 2017-09-12  Jakub Jelinek  <jakub@redhat.com>
-       PR target/82112
-       * config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin): For
-       ALTIVEC_BUILTIN_VEC_LD if arg1 has array type call default_conversion
-       on it early, rather than manual conversion late.  For
-       ALTIVEC_BUILTIN_VEC_ST if arg2 has array type call default_conversion
-       instead of performing manual conversion.
- 2017-09-12  Carl Love  <cel@us.ibm.com>
-       * config/rs6000/altivec.md (vec_widen_umult_even_v4si,
-       vec_widen_smult_even_v4si): Add define expands for vmuleuw, vmulesw,
-       vmulouw, vmulosw.
-       * config/rs6000/rs6000-builtin.def (VMULEUW, VMULESW, VMULOUW,
-       VMULOSW): Add definitions.
-       * config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Add
-       ALTIVEC_BUILTIN_VMULESW, ALTIVEC_BUILTIN_VMULEUW,
-       ALTIVEC_BUILTIN_VMULOSW, ALTIVEC_BUILTIN_VMULOUW entries.
-       * config/rs6000/rs6000.c (rs6000_gimple_fold_builtin,
-       builtin_function_type): Add ALTIVEC_BUILTIN_* case statements.
- 2017-09-12  James Greenhalgh  <james.greenhalgh@arm.com>
-       * config/aarch64/aarch64.md (movdi_aarch64): Set load/store
-       types correctly.
-       (movti_aarch64): Likewise.
-       (movdf_aarch64): Likewise.
-       (movtf_aarch64): Likewise.
-       (load_pairdi): Likewise.
-       (store_pairdi): Likewise.
-       (load_pairdf): Likewise.
-       (store_pairdf): Likewise.
-       (loadwb_pair<GPI:mode>_<P:mode>): Likewise.
-       (storewb_pair<GPI:mode>_<P:mode>): Likewise.
-       (ldr_got_small_<mode>): Likewise.
-       (ldr_got_small_28k_<mode>): Likewise.
-       (ldr_got_tiny): Likewise.
-       * config/aarch64/iterators.md (ldst_sz): New.
-       (ldpstp_sz): Likewise.
-       * config/aarch64/thunderx.md (thunderx_storepair): Split store_8
-       to store_16.
-       (thunderx_load): Split load_8 to load_16.
-       * config/aarch64/thunderx2t99.md (thunderx2t99_loadpair): Split
-       load_8 to load_16.
-       (thunderx2t99_storepair_basic): Split store_8 to store_16.
-       * config/arm/xgene1.md (xgene1_load_pair): Split load_8 to load_16.
-       (xgene1_store_pair): Split store_8 to store_16.
-       * config/aarch64/falkor.md (falkor_ld_3_ld): Split load_8 to load_16.
-       (falkor_st_0_st_sd): Split store_8 to store_16.
- 2017-09-12  James Greenhalgh  <james.greenhalgh@arm.com>
-       * config/arm/types.md (type): Rename load1/2/3/4 to load_4/8/12/16
-       and store1/2/3/4 to store_4/8/12/16.
-       * config/aarch64/aarch64.md: Update for rename.
-       * config/arm/arm.md: Likewise.
-       * config/arm/arm.c: Likewise.
-       * config/arm/thumb1.md: Likewise.
-       * config/arm/thumb2.md: Likewise.
-       * config/arm/vfp.md: Likewise.
-       * config/arm/arm-generic.md: Likewise.
-       * config/arm/arm1020e.md: Likewise.
-       * config/arm/arm1026ejs.md: Likewise.
-       * config/arm/arm1136jfs.md: Likewise.
-       * config/arm/arm926ejs.md: Likewise.
-       * config/arm/cortex-a15.md: Likewise.
-       * config/arm/cortex-a17.md: Likewise.
-       * config/arm/cortex-a5.md: Likewise.
-       * config/arm/cortex-a53.md: Likewise.
-       * config/arm/cortex-a57.md: Likewise.
-       * config/arm/cortex-a7.md: Likewise.
-       * config/arm/cortex-a8.md: Likewise.
-       * config/arm/cortex-a9.md: Likewise.
-       * config/arm/cortex-m4.md: Likewise.
-       * config/arm/cortex-m7.md: Likewise.
-       * config/arm/cortex-r4.md: Likewise.
-       * config/arm/exynos-m1.md: Likewise.
-       * config/arm/fa526.md: Likewise.
-       * config/arm/fa606te.md: Likewise.
-       * config/arm/fa626te.md: Likewise.
-       * config/arm/fa726te.md: Likewise.
-       * config/arm/fmp626.md: Likewise.
-       * config/arm/iwmmxt.md: Likewise.
-       * config/arm/ldmstm.md: Likewise.
-       * config/arm/marvell-pj4.md: Likewise.
-       * config/arm/xgene1.md: Likewise.
-       * config/aarch64/thunderx.md: Likewise.
-       * config/aarch64/thunderx2t99.md: Likewise.
-       * config/aarch64/falkor.md: Likewise.
- 2017-09-12  Martin Liska  <mliska@suse.cz>
-       * attribs.c (private_lookup_attribute): New function.
-       * attribs.h (private_lookup_attribute): Declared here.
-       (lookup_attribute): Called from this place.
- 2017-09-12  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/82157
-       * tree-ssa-pre.c (remove_dead_inserted_code): Do not remove
-       stmts with side-effects.
- 2017-09-12  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood <david.sherwood@arm.com>
-       * target.def (hard_regno_nregs): New hook.
-       (class_max_nregs): Refer to it instead of HARD_REGNO_NREGS.
-       * targhooks.h (default_hard_regno_nregs): Declare.
-       * targhooks.c (default_hard_regno_nregs): New function.
-       * doc/tm.texi.in (HARD_REGNO_NREGS): Replace with...
-       (TARGET_HARD_REGNO_NREGS): ...this hook.
-       (HARD_REGNO_NREGS_HAS_PADDING): Update accordingly.
-       (CLASS_MAX_NREGS): Likewise.
-       * doc/tm.texi: Regenerate.
-       * reginfo.c (init_reg_modes_target): Use targetm.hard_regno_nregs
-       instead of HARD_REGNO_NREGS.
-       * rtl.h (REG_NREGS): Refer to TARGET_HARD_REGNO_NREGS rather than
-       HARD_REGNO_NREGS in the comment.
-       * config/aarch64/aarch64.h (HARD_REGNO_NREGS): Delete.
-       * config/aarch64/aarch64-protos.h (aarch64_hard_regno_nregs): Delete.
-       * config/aarch64/aarch64.c (aarch64_hard_regno_nregs): Make static.
-       Return an unsigned int.
-       (TARGET_HARD_REGNO_NREGS): Redefine.
-       * config/alpha/alpha.h (HARD_REGNO_NREGS): Delete.
-       * config/arc/arc.h (HARD_REGNO_NREGS): Delete.
-       * config/arc/arc.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (arc_hard_regno_nregs): New function.
-       * config/arm/arm.h (HARD_REGNO_NREGS): Delete.
-       * config/arm/arm.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (arm_hard_regno_nregs): New function.
-       * config/avr/avr.h (HARD_REGNO_NREGS): Delete.
-       * config/bfin/bfin.h (HARD_REGNO_NREGS): Delete.
-       * config/bfin/bfin.c (bfin_hard_regno_nregs): New function.
-       (TARGET_HARD_REGNO_NREGS): Redefine.
-       * config/c6x/c6x.h (HARD_REGNO_NREGS): Delete.
-       * config/cr16/cr16.h (LONG_REG_P): Use targetm.hard_regno_nregs.
-       (HARD_REGNO_NREGS): Delete.
-       * config/cr16/cr16.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (cr16_hard_regno_nregs): New function.
-       (cr16_memory_move_cost): Use it instead of HARD_REGNO_NREGS.
-       * config/cris/cris.h (HARD_REGNO_NREGS): Delete.
-       * config/cris/cris.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (cris_hard_regno_nregs): New function.
-       * config/epiphany/epiphany.h (HARD_REGNO_NREGS): Delete.
-       * config/fr30/fr30.h (HARD_REGNO_NREGS): Delete.
-       (CLASS_MAX_NREGS): Use targetm.hard_regno_nregs.
-       * config/frv/frv.h (HARD_REGNO_NREGS): Delete.
-       (CLASS_MAX_NREGS): Remove outdated copy of documentation.
-       * config/frv/frv-protos.h (frv_hard_regno_nregs): Delete.
-       * config/frv/frv.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (frv_hard_regno_nregs): Make static.  Take and return an
-       unsigned int.
-       (frv_class_max_nregs): Remove outdated copy of documentation.
-       * config/ft32/ft32.h (HARD_REGNO_NREGS): Delete.
-       * config/h8300/h8300.h (HARD_REGNO_NREGS): Delete.
-       * config/h8300/h8300-protos.h (h8300_hard_regno_nregs): Delete.
-       * config/h8300/h8300.c (h8300_hard_regno_nregs): Delete.
-       * config/i386/i386.h (HARD_REGNO_NREGS): Delete.
-       * config/i386/i386.c (ix86_hard_regno_nregs): New function.
-       (TARGET_HARD_REGNO_NREGS): Redefine.
-       * config/ia64/ia64.h (HARD_REGNO_NREGS): Delete.
-       (CLASS_MAX_NREGS): Update comment.
-       * config/ia64/ia64.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (ia64_hard_regno_nregs): New function.
-       * config/iq2000/iq2000.h (HARD_REGNO_NREGS): Delete.
-       * config/lm32/lm32.h (HARD_REGNO_NREGS): Delete.
-       * config/m32c/m32c.h (HARD_REGNO_NREGS): Delete.
-       * config/m32c/m32c-protos.h (m32c_hard_regno_nregs): Delete.
-       * config/m32c/m32c.c (m32c_hard_regno_nregs_1): Take and return
-       an unsigned int.
-       (m32c_hard_regno_nregs): Likewise.  Make static.
-       (TARGET_HARD_REGNO_NREGS): Redefine.
-       * config/m32r/m32r.h (HARD_REGNO_NREGS): Delete.
-       * config/m68k/m68k.h (HARD_REGNO_NREGS): Delete.
-       * config/m68k/m68k.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (m68k_hard_regno_nregs): New function.
-       * config/mcore/mcore.h (HARD_REGNO_NREGS): Delete.
-       * config/microblaze/microblaze.h (HARD_REGNO_NREGS): Delete.
-       * config/mips/mips.h (HARD_REGNO_NREGS): Delete.
-       * config/mips/mips-protos.h (mips_hard_regno_nregs): Delete.
-       * config/mips/mips.c (mips_hard_regno_nregs): Make static.
-       Take and return an unsigned int.
-       (TARGET_HARD_REGNO_NREGS): Redefine.
-       * config/mmix/mmix.h (HARD_REGNO_NREGS): Delete.
-       (CLASS_MAX_NREGS): Use targetm.hard_regno_nregs.
-       * config/mn10300/mn10300.h (HARD_REGNO_NREGS): Delete.
-       * config/moxie/moxie.h (HARD_REGNO_NREGS): Delete.
-       * config/msp430/msp430.h (HARD_REGNO_NREGS): Delete.
-       * config/msp430/msp430-protos.h (msp430_hard_regno_nregs): Delete.
-       * config/msp430/msp430.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (msp430_hard_regno_nregs): Make static.  Take and return an
-       unsigned int.
-       * config/nds32/nds32.h (HARD_REGNO_NREGS): Delete.
-       * config/nds32/nds32-protos.h (nds32_hard_regno_nregs): Delete.
-       * config/nds32/nds32.c (nds32_hard_regno_nregs): Delete.
-       (nds32_hard_regno_mode_ok): Use targetm.hard_regno_nregs.
-       * config/nios2/nios2.h (HARD_REGNO_NREGS): Delete.
-       * config/nvptx/nvptx.h (HARD_REGNO_NREGS): Delete.
-       * config/nvptx/nvptx.c (nvptx_hard_regno_nregs): New function.
-       (TARGET_HARD_REGNO_NREGS): Redefine.
-       * config/pa/pa32-regs.h (HARD_REGNO_NREGS): Rename to...
-       (PA_HARD_REGNO_NREGS): ...this.
-       * config/pa/pa64-regs.h (HARD_REGNO_NREGS): Rename to...
-       (PA_HARD_REGNO_NREGS): ...this.
-       * config/pa/pa.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (pa_hard_regno_nregs): New function.
-       * config/pdp11/pdp11.h (HARD_REGNO_NREGS): Delete.
-       * config/pdp11/pdp11.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (pdp11_hard_regno_nregs): New function.
-       * config/powerpcspe/powerpcspe.h (HARD_REGNO_NREGS): Delete.
-       * config/powerpcspe/powerpcspe.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (rs6000_hard_regno_nregs_hook): New function.
-       * config/riscv/riscv.h (HARD_REGNO_NREGS): Delete.
-       * config/riscv/riscv-protos.h (riscv_hard_regno_nregs): Delete.
-       * config/riscv/riscv.c (riscv_hard_regno_nregs): Make static.
-       Take and return an unsigned int.  Move earlier in file.
-       (TARGET_HARD_REGNO_NREGS): Redefine.
-       * config/rl78/rl78.h (HARD_REGNO_NREGS): Delete.
-       * config/rl78/rl78-protos.h (rl78_hard_regno_nregs): Delete.
-       * config/rl78/rl78.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (rl78_hard_regno_nregs): Make static.  Take and return an
-       unsigned int.
-       * config/rs6000/rs6000.h (HARD_REGNO_NREGS): Delete.
-       * config/rs6000/rs6000.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (rs6000_hard_regno_nregs_hook): New function.
-       * config/rx/rx.h (HARD_REGNO_NREGS): Delete.
-       * config/rx/rx.c (rx_hard_regno_nregs): New function.
-       (TARGET_HARD_REGNO_NREGS): Redefine.
-       * config/s390/s390.h (HARD_REGNO_NREGS): Delete.
-       * config/s390/s390.c (REGNO_PAIR_OK): Use s390_hard_regno_nregs
-       instead of HARD_REGNO_NREGS.
-       (s390_hard_regno_nregs): New function.
-       (s390_hard_regno_mode_ok): Add comment from s390.h.
-       (TARGET_HARD_REGNO_NREGS): Redefine.
-       * config/sh/sh.h (HARD_REGNO_NREGS): Delete.
-       * config/sh/sh.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (sh_hard_regno_nregs): New function.
-       (sh_pass_in_reg_p): Use it.
-       * config/sparc/sparc.h (HARD_REGNO_NREGS): Delete.
-       * config/sparc/sparc.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (sparc_hard_regno_nregs): New function.
-       * config/spu/spu.h (HARD_REGNO_NREGS): Delete.
-       * config/spu/spu.c (spu_hard_regno_nregs): New function.
-       (spu_function_arg_advance): Use it, supplying a valid register number.
-       (TARGET_HARD_REGNO_NREGS): Redefine.
-       * config/stormy16/stormy16.h (HARD_REGNO_NREGS): Delete.
-       * config/tilegx/tilegx.h (HARD_REGNO_NREGS): Delete.
-       * config/tilepro/tilepro.h (HARD_REGNO_NREGS): Delete.
-       * config/v850/v850.h (HARD_REGNO_NREGS): Delete.
-       * config/vax/vax.h (HARD_REGNO_NREGS): Delete.
-       * config/visium/visium.h (HARD_REGNO_NREGS): Delete.
-       (CLASS_MAX_NREGS): Remove copy of old documentation.
-       * config/visium/visium.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (visium_hard_regno_nregs): New function.
-       (visium_hard_regno_mode_ok): Use it instead of HARD_REGNO_NREGS.
-       * config/xtensa/xtensa.h (HARD_REGNO_NREGS): Delete.
-       * config/xtensa/xtensa.c (TARGET_HARD_REGNO_NREGS): Redefine.
-       (xtensa_hard_regno_nregs): New function.
-       * system.h (HARD_REGNO_NREGS): Poison.
- 2017-09-12  Richard Sandiford  <richard.sandiford@linaro.org>
-       * config/arm/arm.h (THUMB_SECONDARY_INPUT_RELOAD_CLASS): Use
-       hard_regno_nregs instead of HARD_REGNO_NREGS.
-       (THUMB_SECONDARY_OUTPUT_RELOAD_CLASS): Likewise.
-       * config/c6x/c6x.c (c6x_expand_prologue): Likewise.
-       (c6x_expand_epilogue): Likewise.
-       * config/frv/frv.c (frv_alloc_temp_reg): Likewise.
-       (frv_read_iacc_argument): Likewise.
-       * config/sh/sh.c: Include regs.h.
-       (sh_print_operand): Use hard_regno_nregs instead of HARD_REGNO_NREGS.
-       (regs_used): Likewise.
-       (output_stack_adjust): Likewise.
-       * config/xtensa/xtensa.c (xtensa_copy_incoming_a7): Likewise.
-       * expmed.c: Include regs.h.
-       (store_bit_field_1): Use hard_regno_nregs instead of HARD_REGNO_NREGS.
-       * ree.c: Include regs.h.
-       (combine_reaching_defs): Use hard_regno_nregs instead of
-       HARD_REGNO_NREGS.
-       (add_removable_extension): Likewise.
- 2017-09-12  Richard Sandiford  <richard.sandiford@linaro.org>
-       * regs.h (hard_regno_nregs): Turn into a function.
-       (end_hard_regno): Update accordingly.
-       * caller-save.c (setup_save_areas): Likewise.
-       (save_call_clobbered_regs): Likewise.
-       (replace_reg_with_saved_mem): Likewise.
-       (insert_restore): Likewise.
-       (insert_save): Likewise.
-       * combine.c (can_change_dest_mode): Likewise.
-       (move_deaths): Likewise.
-       (distribute_notes): Likewise.
-       * config/mips/mips.c (mips_hard_regno_call_part_clobbered): Likewise.
-       * config/powerpcspe/powerpcspe.c (rs6000_cannot_change_mode_class)
-       (rs6000_split_multireg_move): Likewise.
-       (rs6000_register_move_cost): Likewise.
-       (rs6000_memory_move_cost): Likewise.
-       * config/rs6000/rs6000.c (rs6000_cannot_change_mode_class): Likewise.
-       (rs6000_split_multireg_move): Likewise.
-       (rs6000_register_move_cost): Likewise.
-       (rs6000_memory_move_cost): Likewise.
-       * cselib.c (cselib_reset_table): Likewise.
-       (cselib_lookup_1): Likewise.
-       * emit-rtl.c (set_mode_and_regno): Likewise.
-       * function.c (aggregate_value_p): Likewise.
-       * ira-color.c (setup_profitable_hard_regs): Likewise.
-       (check_hard_reg_p): Likewise.
-       (calculate_saved_nregs): Likewise.
-       (assign_hard_reg): Likewise.
-       (improve_allocation): Likewise.
-       (calculate_spill_cost): Likewise.
-       * ira-emit.c (modify_move_list): Likewise.
-       * ira-int.h (ira_hard_reg_set_intersection_p): Likewise.
-       (ira_hard_reg_in_set_p): Likewise.
-       * ira.c (setup_reg_mode_hard_regset): Likewise.
-       (clarify_prohibited_class_mode_regs): Likewise.
-       (check_allocation): Likewise.
-       * lra-assigns.c (find_hard_regno_for_1): Likewise.
-       (lra_setup_reg_renumber): Likewise.
-       (setup_try_hard_regno_pseudos): Likewise.
-       (spill_for): Likewise.
-       (assign_hard_regno): Likewise.
-       (setup_live_pseudos_and_spill_after_risky_transforms): Likewise.
-       * lra-constraints.c (in_class_p): Likewise.
-       (lra_constraint_offset): Likewise.
-       (simplify_operand_subreg): Likewise.
-       (lra_constraints): Likewise.
-       (split_reg): Likewise.
-       (split_if_necessary): Likewise.
-       (invariant_p): Likewise.
-       (inherit_in_ebb): Likewise.
-       * lra-lives.c (process_bb_lives): Likewise.
-       * lra-remat.c (reg_overlap_for_remat_p): Likewise.
-       (get_hard_regs): Likewise.
-       (do_remat): Likewise.
-       * lra-spills.c (assign_spill_hard_regs): Likewise.
-       * mode-switching.c (create_pre_exit): Likewise.
-       * postreload.c (reload_combine_recognize_pattern): Likewise.
-       * recog.c (peep2_find_free_register): Likewise.
-       * regcprop.c (kill_value_regno): Likewise.
-       (set_value_regno): Likewise.
-       (copy_value): Likewise.
-       (maybe_mode_change): Likewise.
-       (find_oldest_value_reg): Likewise.
-       (copyprop_hardreg_forward_1): Likewise.
-       * regrename.c (check_new_reg_p): Likewise.
-       (regrename_do_replace): Likewise.
-       * reload.c (push_reload): Likewise.
-       (combine_reloads): Likewise.
-       (find_dummy_reload): Likewise.
-       (operands_match_p): Likewise.
-       (find_reloads): Likewise.
-       (find_equiv_reg): Likewise.
-       (reload_adjust_reg_for_mode): Likewise.
-       * reload1.c (count_pseudo): Likewise.
-       (count_spilled_pseudo): Likewise.
-       (find_reg): Likewise.
-       (clear_reload_reg_in_use): Likewise.
-       (free_for_value_p): Likewise.
-       (allocate_reload_reg): Likewise.
-       (choose_reload_regs): Likewise.
-       (reload_adjust_reg_for_temp): Likewise.
-       (emit_reload_insns): Likewise.
-       (delete_output_reload): Likewise.
-       * rtlanal.c (subreg_get_info): Likewise.
-       * sched-deps.c (sched_analyze_reg): Likewise.
-       * sel-sched.c (init_regs_for_mode): Likewise.
-       (mark_unavailable_hard_regs): Likewise.
-       (choose_best_reg_1): Likewise.
-       (verify_target_availability): Likewise.
-       * valtrack.c (dead_debug_insert_temp): Likewise.
-       * var-tracking.c (track_loc_p): Likewise.
-       (emit_note_insn_var_location): Likewise.
-       * varasm.c (make_decl_rtl): Likewise.
-       * reginfo.c (choose_hard_reg_mode): Likewise.
-       (init_reg_modes_target): Refer directly to
-       this_target_regs->x_hard_regno_nregs.
- 2017-09-12  Richard Sandiford  <richard.sandiford@linaro.org>
-       * ira-costs.c (record_operand_costs): Use in_hard_reg_set_p
-       instead of hard_regno_nregs.
- 2017-09-12  Richard Sandiford  <richard.sandiford@linaro.org>
-       * config/aarch64/aarch64.c (aarch64_hard_regno_mode_ok): Use
-       end_hard_regno instead of hard_regno_nregs.
-       * config/s390/s390.c (s390_reg_clobbered_rtx): Likewise.
-       * config/sparc/sparc.h (ASM_DECLARE_REGISTER_GLOBAL): Likewise.
-       * config/visium/visium.c (visium_hard_regno_mode_ok): Likewise.
-       * ira-color.c (improve_allocation): Likewise.
-       * lra-assigns.c (find_hard_regno_for_1): Likewise.
-       * lra-lives.c (mark_regno_live): Likewise.
-       (mark_regno_dead): Likewise.
-       * lra-remat.c (operand_to_remat): Likewise.
-       * lra.c (collect_non_operand_hard_regs): Likewise.
-       * postreload.c (reload_combine_note_store): Likewise.
-       (move2add_valid_value_p): Likewise.
-       * reload.c (regno_clobbered_p): Likewise.
- 2017-09-12  Richard Sandiford  <richard.sandiford@linaro.org>
-       * config/frv/frv.c (FOR_EACH_REGNO): Use END_REGNO instead of
-       hard_regno_nregs.
-       * config/v850/v850.c (v850_reorg): Likewise.
-       * reload.c (refers_to_regno_for_reload_p): Likewise.
-       (find_equiv_reg): Likewise.
-       * reload1.c (reload_reg_reaches_end_p): Likewise.
- 2017-09-12  Richard Sandiford  <richard.sandiford@linaro.org>
-       * caller-save.c (add_used_regs): Use REG_NREGS instead of
-       hard_regno_nregs.
-       * config/aarch64/aarch64.c (aarch64_split_combinev16qi): Likewise.
-       * config/arm/arm.c (output_move_neon): Likewise.
-       (arm_attr_length_move_neon): Likewise.
-       (neon_split_vcombine): Likewise.
-       * config/c6x/c6x.c (c6x_mark_reg_read): Likewise.
-       (c6x_mark_reg_written): Likewise.
-       (c6x_dwarf_register_span): Likewise.
-       * config/i386/i386.c (ix86_save_reg): Likewise.
-       * config/ia64/ia64.c (mark_reg_gr_used_mask): Likewise.
-       (rws_access_reg): Likewise.
-       * config/s390/s390.c (s390_call_saved_register_used): Likewise.
-       * mode-switching.c (create_pre_exit): Likewise.
-       * ree.c (combine_reaching_defs): Likewise.
-       (add_removable_extension): Likewise.
-       * regcprop.c (find_oldest_value_reg): Likewise.
-       (copyprop_hardreg_forward_1): Likewise.
-       * reload.c (reload_inner_reg_of_subreg): Likewise.
-       (push_reload): Likewise.
-       (combine_reloads): Likewise.
-       (find_dummy_reload): Likewise.
-       (reload_adjust_reg_for_mode): Likewise.
-       * reload1.c (find_reload_regs): Likewise.
-       (forget_old_reloads_1): Likewise.
-       (reload_reg_free_for_value_p): Likewise.
-       (reload_adjust_reg_for_temp): Likewise.
-       (emit_reload_insns): Likewise.
-       (delete_output_reload): Likewise.
-       * sel-sched.c (choose_best_reg_1): Likewise.
-       (choose_best_pseudo_reg): Likewise.
- 2017-09-12  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood <david.sherwood@arm.com>
-       * defaults.h (SLOW_UNALIGNED_ACCESS): Delete.
-       * target.def (slow_unaligned_access): New hook.
-       * targhooks.h (default_slow_unaligned_access): Declare.
-       * targhooks.c (default_slow_unaligned_access): New function.
-       * doc/tm.texi.in (SLOW_UNALIGNED_ACCESS): Replace with...
-       (TARGET_SLOW_UNALIGNED_ACCESS): ...this.
-       * doc/tm.texi: Regenerate.
-       * config/alpha/alpha.h (SLOW_UNALIGNED_ACCESS): Delete.
-       * config/arm/arm.h (SLOW_UNALIGNED_ACCESS): Delete.
-       * config/i386/i386.h (SLOW_UNALIGNED_ACCESS): Delete commented-out
-       definition.
-       * config/powerpcspe/powerpcspe.h (SLOW_UNALIGNED_ACCESS): Delete.
-       * config/powerpcspe/powerpcspe.c (TARGET_SLOW_UNALIGNED_ACCESS):
-       Redefine.
-       (rs6000_slow_unaligned_access): New function.
-       (rs6000_emit_move): Use it instead of SLOW_UNALIGNED_ACCESS.
-       (expand_block_compare): Likewise.
-       (expand_strn_compare): Likewise.
-       (rs6000_rtx_costs): Likewise.
-       * config/riscv/riscv.h (SLOW_UNALIGNED_ACCESS): Delete.
-       (riscv_slow_unaligned_access): Likewise.
-       * config/riscv/riscv.c (riscv_slow_unaligned_access): Rename to...
-       (riscv_slow_unaligned_access_p): ...this and make static.
-       (riscv_option_override): Update accordingly.
-       (riscv_slow_unaligned_access): New function.
-       (TARGET_SLOW_UNALIGNED_ACCESS): Redefine.
-       * config/rs6000/rs6000.h (SLOW_UNALIGNED_ACCESS): Delete.
-       * config/rs6000/rs6000.c (TARGET_SLOW_UNALIGNED_ACCESS): Redefine.
-       (rs6000_slow_unaligned_access): New function.
-       (rs6000_emit_move): Use it instead of SLOW_UNALIGNED_ACCESS.
-       (rs6000_rtx_costs): Likewise.
-       * config/rs6000/rs6000-string.c (expand_block_compare)
-       (expand_strn_compare): Use targetm.slow_unaligned_access instead
-       of SLOW_UNALIGNED_ACCESS.
-       * config/tilegx/tilegx.h (SLOW_UNALIGNED_ACCESS): Delete.
-       * config/tilepro/tilepro.h (SLOW_UNALIGNED_ACCESS): Delete.
-       * calls.c (expand_call): Use targetm.slow_unaligned_access instead
-       of SLOW_UNALIGNED_ACCESS.
-       * expmed.c (simple_mem_bitfield_p): Likewise.
-       * expr.c (alignment_for_piecewise_move): Likewise.
-       (emit_group_load_1): Likewise.
-       (emit_group_store): Likewise.
-       (copy_blkmode_from_reg): Likewise.
-       (emit_push_insn): Likewise.
-       (expand_assignment): Likewise.
-       (store_field): Likewise.
-       (expand_expr_real_1): Likewise.
-       * gimple-fold.c (gimple_fold_builtin_memory_op): Likewise.
-       * lra-constraints.c (simplify_operand_subreg): Likewise.
-       * stor-layout.c (bit_field_mode_iterator::next_mode): Likewise.
-       * gimple-ssa-store-merging.c: Likewise in block comment at start
-       of file.
-       * tree-ssa-strlen.c: Include target.h.
-       (handle_builtin_memcmp): Use targetm.slow_unaligned_access instead
-       of SLOW_UNALIGNED_ACCESS.
-       * system.h (SLOW_UNALIGNED_ACCESS): Poison.
+       * config/nds32/nds32-protos.h (nds32_adjust_reg_alloc_order): Declare.
+       * config/nds32/nds32.c (nds32_reg_alloc_order_for_speed): New array.
+       (nds32_adjust_reg_alloc_order): New function.
+       * config/nds32/nds32.h (ADJUST_REG_ALLOC_ORDER): Define.
  
- 2017-09-12  Richard Sandiford  <richard.sandiford@linaro.org>
+ 2018-03-17  Kito Cheng  <kito.cheng@gmail.com>
  
-       PR rtl-optimization/82185
-       * expmed.c (emit_store_flag_int): Only test tem if it has been
-       initialized.
+       * config/nds32/nds32.c (nds32_asm_output_mi_thunk,
+       nds32_print_operand, nds32_print_operand_address): Use
+       HOST_WIDE_INT_PRINT_DEC instead.
  
- 2017-09-12  Richard Biener  <rguenther@suse.de>
+ 2018-03-17  Chung-Ju Wu  <jasonwucj@gmail.com>
  
-       PR middle-end/82149
-       * match.pd ((FTYPE) N CMP CST): Fix typo.
+       * config/nds32/nds32.c (nds32_register_priority): Modify cost.
  
- 2017-09-12  Simon Atanasyan  <simon.atanasyan@imgtec.com>
+ 2018-03-17  Jakub Jelinek  <jakub@redhat.com>
  
-       * config/mips/mips.c (mips_attribute_table): Add 'short_call'
-       attribute.
-       (mips_near_type_p): Add 'short_call' attribute as a synonym
-       for 'near'.
-       * doc/extend.texi (short_call): Document new function attribute.
- 2017-09-12  Jakub Jelinek  <jakub@redhat.com>
-       PR target/82112
-       * c-common.c (sync_resolve_size): Instead of c_dialect_cxx ()
-       assertion check that in the condition.
-       (get_atomic_generic_size): Likewise.  Before testing if parameter
-       has pointer type, if it has array type, call for C++
-       default_conversion to perform array-to-pointer conversion.
- 2017-09-12  Richard Biener  <rguenther@suse.de>
-       * tree-vect-generic.c (expand_vector_operations_1): Do nothing
-       for operations we cannot scalarize.
- 2017-09-12  Aldy Hernandez  <aldyh@redhat.com>
-       * tree-ssa-threadbackward.c (fsm_find_thread_path): Make GC
-       vectors heap vectors.  Clean up comments.
-       Make visited_bbs a reference.
-       (profitable_jump_thread_path): Make GC
-       vectors heap vectors.  Clean up comments.
-       Misc cleanups.
-       (convert_and_register_jump_thread_path): Make GC vectors heap
-       vectors.
-       (check_subpath_and_update_thread_path): Same.  Clean up comments.
-       Make visited_bbs a reference.
-       (handle_phi): Abstract common code to
-       register_jump_thread_path_if_profitable.
-       Rename VAR_BB to DEF_BB.
-       Update comments.
-       Make GC vectors heap vectors.
-       Make visited_bbs a reference.
-       (handle_assignment): Same.
-       (register_jump_thread_path_if_profitable): New.
-       (fsm_find_control_statement_thread_paths): Rename VAR_BB to
-       DEF_BB.
-       Make GC vectors heap vectors.  Clean up comments.
-       Make visited_bbs a reference.
-       (find_jump_threads_backwards): Make visited_bbs live in the stack.
-       * tree-ssa-threadupdate.c (delete_jump_thread_path): Fix typo in
-       comment.
- 2017-09-11  Max Filippov  <jcmvbkbc@gmail.com>
-       PR target/82181
-       * config/xtensa/xtensa.c (xtensa_mem_offset): Check that both
-       words of E_DImode object are reachable by xtensa_uimm8x4 access.
- 2017-09-11  Vidya Praveen  <vidyapraveen@arm.com>
-       Revert r251800 and r251799.
- 2017-09-11  Martin Jambor  <mjambor@suse.cz>
-       PR hsa/82119
-       * hsa-gen.c (gen_hsa_phi_from_gimple_phi): Process ADDR_EXPRs in
-       arguments in advance.
-       * hsa-regalloc.c (naive_process_phi): New parameter predecessors,
-       use it to find predecessor edges.
-       (naive_outof_ssa): Collect vector of predecessors.
- 2017-09-08  Jason Merrill  <jason@redhat.com>
-       PR c++/70029 - ICE with ref-qualifier and -flto
-       * langhooks.h (struct lang_hooks_for_types): Add
-       copy_lang_qualifiers.
-       * attribs.c (build_type_attribute_qual_variant): Use it.
-       * langhooks-def.h (LANG_HOOKS_COPY_LANG_QUALIFIERS): Default to
-       NULL.
-       (LANG_HOOKS_FOR_TYPES_INITIALIZER): Use it.
-       * tree.c (verify_type): Re-enable TYPE_CANONICAL main variant check.
- 2017-09-08  Eric Botcazou  <ebotcazou@adacore.com>
-       PR target/81988
-       * config/sparc/sparc.md (mulsi3): Rename into *mulsi3_sp32.
-       (*mulsi3_sp64): New instruction.
-       (mulsi3): New expander.
- 2017-09-08  Uros Bizjak  <ubizjak@gmail.com>
-       * config/alpha/alpha.c (alpha_print_operand) <case 'S'>: Remove.
- 2017-09-08  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
-       * sancov.c: Include memmodel.h.
- 2017-09-07  Eric Botcazou  <ebotcazou@adacore.com>
-       PR target/80897
-       * config/sparc/sparc.c (sparc_emit_set_symbolic_const64): Deal with too
-       large offsets.
- 2017-09-07  Carl Love  <cel@us.ibm.com>
-       * config/rs6000/vsx.md (define_insn "*stxvl"): Add missing argument to
-       the sldi instruction.
- 2017-09-07  David Edelsohn  <dje.gcc@gmail.com>
-       * sancov.c: Include tm_p.h.
- 2017-09-07  Jakub Jelinek  <jakub@redhat.com>
-       PR target/81979
-       * output.h (switch_to_other_text_partition): New declaration.
-       * varasm.c (switch_to_other_text_partition): New function.
-       * config/rs6000/rs6000.c (uses_TOC): Return 2 if
-       NOTE_INSN_SWITCH_TEXT_SECTIONS is seen before finding load_toc_* insn.
-       (rs6000_elf_declare_function_name): If uses_TOC returned 2, switch
-       to the other text partition before emitting LCL label and switch back
-       after emitting the word after it.
- 2017-09-07  Richard Biener  <rguenther@suse.de>
-       * passes.def (pass_split_crit_edges): Remove instance before PRE.
-       * tree-ssa-pre.c (pass_pre::execute): Instead manually split
-       critical edges here, after loop init.
-       (pass_data_pre): Remove PROP_no_crit_edges flags.
-       * tree-ssa-sccvn.c (vn_reference_lookup_3): Use vn_valueize
-       for valueization of call args to avoid leaking VN_TOP.
-       (visit_use): Assert we do not visit default defs.
-       (init_scc_vn): Use build_decl for VN_TOP to make name nicer.
-       Use error_mark_node to more easily detect leaking VN_TOP.
-       All default-defs are varying, not VN_TOP.  Mark them visited.
-       (run_scc_vn): Make code match comment.
- 2017-09-07  Michael Meissner  <meissner@linux.vnet.ibm.com>
-       * config/rs6000/rs6000-cpus.def (OTHER_VSX_VECTOR_MASKS): Delete
-       OPTION_MASK_FLOAT128_KEYWORD.
-       (POWERPC_MASKS): Likewise.
-       * config/rs6000/rs6000-c.c (rs6000_target_modify_macros): Delete
-       support for the -mfloat128-type option, and make -mfloat128
-       default on PowerPC Linux systems.  Define or undefine
-       __FLOAT128__ and  __FLOAT128_HARDWARE__ for the current options.
-       Define __float128 to be __ieee128 if IEEE 128-bit support is
-       enabled, or undefine it.
-       (rs6000_cpu_cpp_builtins): Delete defining __FLOAT128__ here.
-       Delete defining __FLOAT128_TYPE__.
-       * config/rs6000/rs6000.opt (x_TARGET_FLOAT128_TYPE): Delete the
-       -mfloat128-type option and make -mfloat128 default on PowerPC
-       Linux systems.
-       (TARGET_FLOAT128_TYPE): Likewise.
-       (-mfloat128-type): Likewise.
-       * config/rs6000/rs6000.c (rs6000_option_override_internal):
-       Delete the -mfloat128-type option and make -mfloat128 default on
-       PowerPC Linux systems.  Always use __ieee128 to be the keyword for
-       the IEEE 128-bit type, and map __float128 to __ieee128 if IEEE
-       128-bit floating point is enabled.  Change tests from using
-       -mfloat128-type to -mfloat128.
-       (rs6000_mangle_type): Use the correct mangling for the __float128
-       type even if normal long double is restricted to 64-bits.
-       (floatn_mode): Enable the _Float128 type by default on VSX Linux
-       systems.
-       * config/rs6000/rs6000.h (MASK_FLOAT128_TYPE): Delete.
-       (MASK_FLOAT128_KEYWORD): Define new shortcut macro.
-       (RS6000BTM_FLOAT128): Define in terms of -mfloat128, not
-       -mfloat128-type.
-       * doc/invoke.texi (RS/6000 and PowerPC Options): Update
-       documentation for -mfloat128.
- 2017-09-06  Olivier Hainque  <hainque@adacore.com>
-       * config.gcc (powerpc-wrs-vxworksspe): Now match as vxworks*spe.
- 2017-09-06  Wish Wu  <wishwu007@gmail.com>
-           Jakub Jelinek  <jakub@redhat.com>
+       PR target/84902
+       * config/i386/i386.c (initial_ix86_tune_features,
+       initial_ix86_arch_features): Use unsigned HOST_WIDE_INT rather than
+       unsigned long long.
+       (set_ix86_tune_features): Change ix86_tune_mask from unsigned int
+       to unsigned HOST_WIDE_INT, initialize to HOST_WIDE_INT_1U << ix86_tune
+       rather than 1u << ix86_tune.  Formatting fix.
+       (ix86_option_override_internal): Change ix86_arch_mask from
+       unsigned int to unsigned HOST_WIDE_INT, initialize to
+       HOST_WIDE_INT_1U << ix86_arch rather than 1u << ix86_arch.
+       (ix86_function_specific_restore): Likewise.
  
-       * asan.c (initialize_sanitizer_builtins): Add
-       BT_FN_VOID_UINT8_UINT8, BT_FN_VOID_UINT16_UINT16,
-       BT_FN_VOID_UINT32_UINT32, BT_FN_VOID_UINT64_UINT64,
-       BT_FN_VOID_FLOAT_FLOAT, BT_FN_VOID_DOUBLE_DOUBLE and
-       BT_FN_VOID_UINT64_PTR variables.
-       * builtin-types.def (BT_FN_VOID_UINT8_UINT8): New fn type.
-       (BT_FN_VOID_UINT16_UINT16): Likewise.
-       (BT_FN_VOID_UINT32_UINT32): Likewise.
-       (BT_FN_VOID_FLOAT_FLOAT): Likewise.
-       (BT_FN_VOID_DOUBLE_DOUBLE): Likewise.
-       (BT_FN_VOID_UINT64_PTR): Likewise.
-       * common.opt (flag_sanitize_coverage): New variable.
-       (fsanitize-coverage=trace-pc): Remove.
-       (fsanitize-coverage=): Add.
-       * flag-types.h (enum sanitize_coverage_code): New enum.
-       * fold-const.c (fold_range_test): Disable non-short-circuit
-       optimization if flag_sanitize_coverage.
-       (fold_truth_andor): Likewise.
-       * tree-ssa-ifcombine.c (ifcombine_ifandif): Likewise.
-       * opts.c (COVERAGE_SANITIZER_OPT): Define.
-       (coverage_sanitizer_opts): New array.
-       (get_closest_sanitizer_option): Add OPTS argument, handle also
-       OPT_fsanitize_coverage_.
-       (parse_sanitizer_options): Adjusted to also handle
-       OPT_fsanitize_coverage_.
-       (common_handle_option): Add OPT_fsanitize_coverage_.
-       * sancov.c (instrument_comparison, instrument_switch): New function.
-       (sancov_pass): Add trace-cmp support.
-       * sanitizer.def (BUILT_IN_SANITIZER_COV_TRACE_CMP1,
-       BUILT_IN_SANITIZER_COV_TRACE_CMP2, BUILT_IN_SANITIZER_COV_TRACE_CMP4,
-       BUILT_IN_SANITIZER_COV_TRACE_CMP8,
-       BUILT_IN_SANITIZER_COV_TRACE_CONST_CMP1,
-       BUILT_IN_SANITIZER_COV_TRACE_CONST_CMP2,
-       BUILT_IN_SANITIZER_COV_TRACE_CONST_CMP4,
-       BUILT_IN_SANITIZER_COV_TRACE_CONST_CMP8,
-       BUILT_IN_SANITIZER_COV_TRACE_CMPF, BUILT_IN_SANITIZER_COV_TRACE_CMPD,
-       BUILT_IN_SANITIZER_COV_TRACE_SWITCH): New builtins.
-       * doc/invoke.texi: Document -fsanitize-coverage=trace-cmp.
- 2017-09-06  Richard Earnshaw  <rearnsha@arm.com>
-       * config/arm/parsecpu.awk (fatal): Note that we've encountered an
-       error.  Only quit immediately if parsing is complete.
-       (BEGIN): Initialize fatal_err and parse_done.
-       (begin fpu, end fpu): Check number of arguments.
-       (begin arch, end arch): Likewise.
-       (begin cpu, end cpu): Likewise.
-       (cname, tune for, tune flags, architecture, fpu, option): Likewise.
-       (optalias): Likewise.
- 2017-09-06  Richard Earnshaw  <rearnsha@arm.com>
-       * config.gcc (arm*-*-*): Don't add arm-isa.h to tm_p_file.
-       * config/arm/arm-isa.h: Delete.  Move definitions to ...
-       * arm-cpus.in: ... here.  Use new feature and fgroup values.
-       * config/arm/arm.c (arm_option_override): Use lower case for feature
-       bit names.
-       * config/arm/arm.h (TARGET_HARD_FLOAT): Likewise.
-       (TARGET_VFP3, TARGET_VFP5, TARGET_FMA): Likewise.
-       * config/arm/parsecpu.awk (END): Add new command 'isa'.
-       (isa_pfx): Delete.
-       (print_isa_bits_for): New function.
-       (gen_isa): New function.
-       (gen_comm_data): Use print_isa_bits_for.
-       (define feature): New keyword.
-       (define fgroup): New keyword.
-       * config/arm/t-arm (OPTIONS_H_EXTRA): Add arm-isa.h
-       (arm-isa.h): Add rule to generate file.
-       * common/config/arm/arm-common.c: (arm_canon_arch_option): Use lower
-       case for feature bit names.
- 2017-09-06  Richard Biener  <rguenther@suse.de>
-       * tree-ssa-pre.c (NECESSARY): Remove.
-       (create_expression_by_pieces): Do not touch pass-local flags.
-       (insert_into_preds_of_block): Likewise.
-       (do_pre_regular_insertion): Likewise.
-       (eliminate_insert): Likewise.
-       (eliminate_dom_walker::before_dom_children): Likewise.
-       (fini_eliminate): Do not look at inserted_exprs.
-       (mark_operand_necessary): Remove.
-       (remove_dead_inserted_code): Replace with simple work-list
-       algorithm based on inserted_exprs and SSA uses.
-       (pass_pre::execute): Re-order fini_eliminate and
-       remove_dead_inserted_code.
+ 2018-03-16  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-09-06  Olivier Hainque  <hainque@adacore.com>
+       PR target/84899
+       * postreload.c (reload_combine_recognize_pattern): Perform
+       INTVAL addition in unsigned HOST_WIDE_INT type to avoid UB and
+       truncate_int_for_mode the result for the destination's mode.
  
-       * config/powerpcspe/vxworks.h (VXCPU_FOR_8548): Correct definition
-       for VxWorks 7.  Adjust surrounding comments.
+       PR c/84909
+       * hsa-gen.c (mem_type_for_type): Fix comment typo.
+       * tree-vect-loop-manip.c (vect_create_cond_for_niters_checks):
+       Likewise.
+       * gimple-ssa-warn-restrict.c (builtin_memref::set_base_and_offset):
+       Likewise.
  
- 2017-09-06  Richard Biener  <rguenther@suse.de>
+ 2018-03-16  Vladimir Makarov  <vmakarov@redhat.com>
  
-       * gimple-ssa-strength-reduction.c
-       (find_candidates_dom_walker::before_dom_children): Also allow
-       pointer types.
+       PR target/84876
+       * lra-assigns.c (lra_split_hard_reg_for): Don't use
+       regno_allocno_class_array and sorted_pseudos.
+       * lra-constraints.c (spill_hard_reg_in_range): Ignore hard regs in
+       insns where regno is used.
  
- 2017-09-06  Richard Biener  <rguenther@suse.de>
+ 2018-03-16  Martin Liska  <mliska@suse.cz>
  
-       PR tree-optimization/82108
-       * tree-vect-stmts.c (vectorizable_load): Fix pointer adjustment
-       for gap in the non-permutation SLP case.
+       PR ipa/84833
+       * multiple_target.c (create_dispatcher_calls): Redirect
+       reference in the symbol table.
  
- 2017-09-06  Martin Jambor  <mjambor@suse.cz>
+ 2018-03-16  Martin Liska  <mliska@suse.cz>
  
-       PR tree-optimization/82078
-       * tree-sra.c (sort_and_splice_var_accesses): Move call to
-       add_access_to_work_queue...
-       (build_accesses_from_assign): ...here.
-       (propagate_all_subaccesses): Make sure racc is the group
-       representative, if there is one.
+       PR ipa/84722
+       * multiple_target.c (create_dispatcher_calls): Redirect also
+       an alias.
  
- 2017-09-06  Jakub Jelinek  <jakub@redhat.com>
+ 2018-03-16  Jakub Jelinek  <jakub@redhat.com>
  
-       PR middle-end/82095
-       * varasm.c (categorize_decl_for_section): Use SECCAT_TBSS for TLS vars with
-       NULL DECL_INITIAL.
+       PR c++/79937
+       PR c++/82410
+       * tree.h (TARGET_EXPR_NO_ELIDE): Define.
+       * gimplify.c (gimplify_modify_expr_rhs): Don't elide TARGET_EXPRs with
+       TARGET_EXPR_NO_ELIDE flag set unless *expr_p is INIT_EXPR.
  
- 2017-09-06  Richard Biener  <rguenther@suse.de>
+ 2018-03-16  Julia Koval  <julia.koval@intel.com>
  
-       * gimple-ssa-strength-reduction.c
-       (find_candidates_dom_walker::before_dom_children): Use a
-       type and not a mode check.
+       * doc/invoke.texi (Skylake Server): Add CLWB.
+       (Cannonlake): Remove CLWB.
  
- 2017-09-06  Bernd Edlinger  <bernd.edlinger@hotmail.de>
+ 2018-03-16  Jakub Jelinek  <jakub@redhat.com>
  
-       PR target/77308
-       * config/arm/predicates.md (arm_general_adddi_operand): Create new
-       non-vfp predicate.
-       * config/arm/arm.md (*arm_adddi3, *arm_subdi3): Use new predicates.
+       PR tree-optimization/84841
+       * tree-ssa-reassoc.c (INTEGER_CONST_TYPE): Change to 1 << 4 from
+       1 << 3.
+       (FLOAT_ONE_CONST_TYPE): Define.
+       (constant_type): Return FLOAT_ONE_CONST_TYPE for -1.0 and 1.0.
+       (sort_by_operand_rank): Put entries with higher constant_type last
+       rather than first to match comments.
  
- 2017-09-05  Jeff Law  <law@redhat.com>
+ 2018-03-15  Sandra Loosemore  <sandra@codesourcery.com>
  
-       PR tree-optimization/64910
-       * tree-ssa-reassoc.c (reassociate_bb): Restrict last change to
-       cases where we have 3 or more operands.
+       * config/nios2/nios2.md (movsi_internal): Fix thinko in 
+       split predicate.
  
- 2017-09-05  Jakub Jelinek  <jakub@redhat.com>
+ 2018-03-15  Jakub Jelinek  <jakub@redhat.com>
  
-       PR middle-end/81768
-       * omp-low.c (lower_omp_for): Recompute tree invariant if
-       gimple_omp_for_initial/final is ADDR_EXPR.
+       PR c++/79085
+       * calls.c (expand_call): For TREE_ADDRESSABLE rettype ignore alignment
+       check and use address of target always.
  
-       PR middle-end/81768
-       * omp-expand.c (expand_omp_simd): Force second operands of COND_EXPR
-       into gimple val before gimplification fo the COND_EXPR.
+ 2018-03-15  H.J. Lu  <hongjiu.lu@intel.com>
+       PR target/84574
+       * config/i386/i386.c (indirect_thunk_needed): Update comments.
+       (indirect_thunk_bnd_needed): Likewise.
+       (indirect_thunks_used): Likewise.
+       (indirect_thunks_bnd_used): Likewise.
+       (indirect_return_needed): New.
+       (indirect_return_bnd_needed): Likewise.
+       (output_indirect_thunk_function): Add a bool argument for
+       function return.
+       (output_indirect_thunk_function): Don't generate alias for
+       function return thunk.
+       (ix86_code_end): Call output_indirect_thunk_function to generate
+       function return thunks.
+       (ix86_output_function_return): Set indirect_return_bnd_needed
+       and indirect_return_needed instead of indirect_thunk_bnd_needed
+       and indirect_thunk_needed.
+ 2018-03-15  Olga Makhotina  <olga.makhotina@intel.com>
+       * config/i386/sgxintrin.h (_enclv_u32): New intrinsic.
+       (__enclv_bc, __enclv_cd, __enclv_generic): New definitions.
+       (ERDINFO, ETRACKC, ELDBC, ELDUC): New leaves.
+ 2018-03-15  David Malcolm  <dmalcolm@redhat.com>
+           Paul Hua <paul.hua.gm@gmail.com>
+       PR c/84852
+       * gcc.dg/fixits-pr84852-1.c: Fix filename in dg-regexp.
+ 2018-03-15  Segher Boessenkool  <segher@kernel.crashing.org>
+       * config/rs6000/rs6000.c (abi_v4_pass_in_fpr): Add back the
+       TARGET_DOUBLE_FLOAT and TARGET_SINGLE_FLOAT conditions on the DFmode
+       resp. SFmode cases.
+ 2018-03-15  Tamar Christina  <tamar.christina@arm.com>
+       PR target/84711
+       * config/arm/arm.c (arm_can_change_mode_class): Use GET_MODE_UNIT_SIZE
+       instead of GET_MODE_SIZE when comparing Units.
+ 2018-03-15  Vladimir Mezentsev  <vladimir.mezentsev@oracle.com>
+       PR target/68256
+       * varasm.c (hash_section): Return an unchangeable hash value.
+       * config/aarch64/aarch64.c (aarch64_use_blocks_for_constant_p):
+       Return !aarch64_can_use_per_function_literal_pools_p ().
+ 2018-03-15  Jakub Jelinek  <jakub@redhat.com>
+       PR target/84860
+       * optabs.c (emit_conditional_move): Pass address of cmode's copy
+       rather than address of cmode as last argument to prepare_cmp_insn.
+ 2018-03-15  Julia Koval  <julia.koval@intel.com>
+       * config/i386/i386.c (F_AVX512VBMI2, F_GFNI, F_VPCLMULQDQ,
+       F_AVX512VNNI, F_AVX512BITALG): New.
+ 2018-03-14  John David Anglin  <danglin@gcc.gnu.org>
+       PR target/83451
+       * config/pa/pa.c (pa_emit_move_sequence):  Always emit secondary reload
+       insn for floating-point loads and stores.
+ 2018-03-14  Carl Love  <cel@us.ibm.com>
+       * config/rs6000/rs6000-c.c: Add macro definitions for
+       ALTIVEC_BUILTIN_VEC_PERMXOR.
+       * config/rs6000/rs6000.h: Add #define for vec_permxor builtin.
+       * config/rs6000/rs6000-builtin.def: Add macro expansions for VPERMXOR.
+       * config/rs6000/altivec.md (altivec_vpermxor): New define expand.
+       * config/rs6000/rs6000-p8swap.c (rtx_is_swappable_p): Add case
+       UNSPEC_VPERMXOR.
+       * config/doc/extend.texi: Add prototypes for vec_permxor.
+ 2018-03-14  David Malcolm  <dmalcolm@redhat.com>
+       PR c/84852
+       * diagnostic-show-locus.c (class layout_point): Convert m_line
+       from int to linenum_type.
+       (line_span::comparator): Use linenum "compare" function when
+       comparing line numbers.
+       (test_line_span): New function.
+       (layout_range::contains_point): Convert param "row" from int to
+       linenum_type.
+       (layout_range::intersects_line_p): Likewise.
+       (layout::will_show_line_p): Likewise.
+       (layout::print_source_line): Likewise.
+       (layout::should_print_annotation_line_p): Likewise.
+       (layout::print_annotation_line): Likewise.
+       (layout::print_leading_fixits): Likewise.
+       (layout::annotation_line_showed_range_p): Likewise.
+       (struct line_corrections): Likewise for field m_row.
+       (line_corrections::line_corrections): Likewise for param "row".
+       (layout::print_trailing_fixits): Likewise.
+       (layout::get_state_at_point): Likewise.
+       (layout::get_x_bound_for_row): Likewise.
+       (layout::print_line): Likewise.
+       (diagnostic_show_locus): Likewise for locals "last_line" and
+       "row".
+       (selftest::diagnostic_show_locus_c_tests): Call test_line_span.
+       * input.c (selftest::test_linenum_comparisons): New function.
+       (selftest::input_c_tests): Call it.
+       * selftest.c (selftest::test_assertions): Test ASSERT_GT,
+       ASSERT_GT_AT, ASSERT_LT, and ASSERT_LT_AT.
+       * selftest.h (ASSERT_GT): New macro.
+       (ASSERT_GT_AT): New macro.
+       (ASSERT_LT): New macro.
+       (ASSERT_LT_AT): New macro.
+ 2018-03-14  Segher Boessenkool  <segher@kernel.crashing.org>
+       PR rtl-optimization/84780
+       * combine.c (distribute_links): Don't make a link based on pc_rtx.
+ 2018-03-14  Martin Liska  <mliska@suse.cz>
+       * tree.c (record_node_allocation_statistics): Use
+       get_stats_node_kind.
+       (get_stats_node_kind): New function extracted from
+       record_node_allocation_statistics.
+       (free_node): Use get_stats_node_kind.
+ 2018-03-14  Richard Biener  <rguenther@suse.de>
+       * tree-ssa-pre.c (compute_antic_aux): Remove code that asserts
+       that the value-set of ANTIC_IN doesn't grow.
  
- 2017-09-05  Aldy Hernandez  <aldyh@redhat.com>
+       Revert
+       * tree-ssa-pre.c (struct bb_bitmap_sets): Add visited_with_visited_succs
+       member.
+       (BB_VISITED_WITH_VISITED_SUCCS): New define.
+       (compute_antic): Initialize BB_VISITED_WITH_VISITED_SUCCS.
+ 2018-03-14  Julia Koval  <julia.koval@intel.com>
+       * config.gcc (icelake-client, icelake-server): New.
+       (icelake): Remove.
+       * config/i386/i386.c (initial_ix86_tune_features): Extend to 64 bit.
+       (initial_ix86_arch_features): Ditto.
+       (PTA_SKYLAKE): Add SGX.
+       (PTA_ICELAKE): Remove.
+       (PTA_ICELAKE_CLIENT): New.
+       (PTA_ICELAKE_SERVER): New.
+       (ix86_option_override_internal): Split up icelake on icelake client and
+       icelake server.
+       (get_builtin_code_for_version): Ditto.
+       (fold_builtin_cpu): Ditto.
+       * config/i386/driver-i386.c (config/i386/driver-i386.c): Ditto.
+       * config/i386/i386-c.c (ix86_target_macros_internal): Ditto
+       * config/i386/i386.h (processor_type): Ditto.
+       * doc/invoke.texi: Ditto.
  
-       * tree-ssa-threadupdate.c (duplicate_thread_path): Remove unused
-       REGION_COPY argument.
-       (thread_through_all_blocks): Remove unused argument to
-       duplicate_thread_path.
+ 2018-03-14  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR sanitizer/83392
+       * sanopt.c (maybe_optimize_ubsan_ptr_ifn): Handle also
+       INTEGER_CST offset, add it together with bitpos / 8 and
+       sign extend based on POINTER_SIZE.
  
-       * config/aarch64/aarch64-protos.h (aarch64_gen_adjusted_ldpstp):
-       Take a scalar_mode rather than a machine_mode.
-       (aarch64_operands_adjust_ok_for_ldpstp): Likewise.
-       * config/aarch64/aarch64.c (aarch64_simd_container_mode): Likewise.
-       (aarch64_operands_adjust_ok_for_ldpstp): Likewise.
-       (aarch64_gen_adjusted_ldpstp): Likewise.
-       (aarch64_expand_vector_init): Use scalar_mode instead of machine_mode.
+       PR target/84844
+       Revert
+       2017-04-20  Uros Bizjak  <ubizjak@gmail.com>
  
- 2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/78090
+       * config/i386/constraints.md (Yc): New register constraint.
+       * config/i386/i386.md (*float<SWI48:mode><MODEF:mode>2_mixed):
+       Use Yc constraint for alternative 2 of operand 0.  Remove
+       preferred_for_speed attribute.
  
-       * config/aarch64/aarch64-protos.h (aarch64_is_extend_from_extract):
-       Take a scalar_int_mode instead of a machine_mode.
-       (aarch64_mask_and_shift_for_ubfiz_p): Likewise.
-       (aarch64_output_scalar_simd_mov_immediate): Likewise.
-       (aarch64_simd_scalar_immediate_valid_for_move): Likewise.
-       (aarch64_simd_attr_length_rglist): Delete.
-       * config/aarch64/aarch64.c (aarch64_is_extend_from_extract): Take
-       a scalar_int_mode instead of a machine_mode.
-       (aarch64_add_offset): Likewise.
-       (aarch64_internal_mov_immediate): Likewise
-       (aarch64_add_constant_internal): Likewise.
-       (aarch64_add_constant): Likewise.
-       (aarch64_movw_imm): Likewise.
-       (aarch64_rtx_arith_op_extract_p): Likewise.
-       (aarch64_mask_and_shift_for_ubfiz_p): Likewise.
-       (aarch64_simd_scalar_immediate_valid_for_move): Likewise.
-       Remove assert that the mode isn't a vector.
-       (aarch64_output_scalar_simd_mov_immediate): Likewise.
-       (aarch64_expand_mov_immediate): Update calls after above changes.
-       (aarch64_output_casesi): Use as_a <scalar_int_mode>.
-       (aarch64_and_bitmask_imm): Check for scalar integer modes.
-       (aarch64_move_imm): Likewise.
-       (aarch64_can_const_movi_rtx_p): Likewise.
-       (aarch64_strip_extend): Likewise.
-       (aarch64_extr_rtx_p): Likewise.
-       (aarch64_rtx_costs): Likewise, using wode_mode as the mode of
-       a CONST_INT when the mode parameter is VOIDmode.
-       (aarch64_float_const_rtx_p): Use scalar_int_mode for a temporary.
- 2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
-       * machmode.h (bitwise_mode_for_mode): Return opt_mode.
-       * stor-layout.c (bitwise_mode_for_mode): Likewise.
-       (bitwise_type_for_mode): Update accordingly.
- 2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
-       * stor-layout.h (mode_for_size_tree): Return an opt_mode.
-       * stor-layout.c (mode_for_size_tree): Likewise.
-       (mode_for_array): Update accordingly.
-       (layout_decl): Likewise.
-       (compute_record_mode): Likewise.  Only set the mode once.
- 2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
-       * target.def (get_mask_mode): Change return type to opt_mode.
-       Expand commentary.
-       * doc/tm.texi: Regenerate.
-       * targhooks.h (default_get_mask_mode): Return an opt_mode.
-       * targhooks.c (default_get_mask_mode): Likewise.
-       * config/i386/i386.c (ix86_get_mask_mode): Likewise.
-       * optabs-query.c (can_vec_mask_load_store_p): Update use of
-       targetm.get_mask_mode.
-       * tree.c (build_truth_vector_type): Likewise.
- 2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
-       * machmode.h (mode_for_vector): Return an opt_mode.
-       * stor-layout.c (mode_for_vector): Likewise.
-       (mode_for_int_vector): Update accordingly.
-       (layout_type): Likewise.
-       * config/i386/i386.c (emit_memmov): Likewise.
-       (ix86_expand_set_or_movmem): Likewise.
-       (ix86_expand_vector_init): Likewise.
-       (ix86_get_mask_mode): Likewise.
-       * config/powerpcspe/powerpcspe.c (rs6000_expand_vec_perm_const_1):
-       Likewise.
-       * config/rs6000/rs6000.c (rs6000_expand_vec_perm_const_1): Likewise.
-       * expmed.c (extract_bit_field_1): Likewise.
-       * expr.c (expand_expr_real_2): Likewise.
-       * optabs-query.c (can_vec_perm_p): Likewise.
-       (can_vec_mask_load_store_p): Likewise.
-       * optabs.c (expand_vec_perm): Likewise.
-       * targhooks.c (default_get_mask_mode): Likewise.
-       * tree-vect-stmts.c (vectorizable_store): Likewise.
-       (vectorizable_load): Likewise.
-       (get_vectype_for_scalar_type_and_size): Likewise.
- 2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
-       * machmode.h (mode_for_int_vector): New function.
-       * stor-layout.c (mode_for_int_vector): Likewise.
-       * config/aarch64/aarch64.c (aarch64_emit_approx_sqrt): Use it.
-       * config/powerpcspe/powerpcspe.c (rs6000_do_expand_vec_perm): Likewise.
-       * config/rs6000/rs6000.c (rs6000_do_expand_vec_perm): Likewise.
-       * config/s390/s390.c (s390_expand_vec_compare_cc): Likewise.
-       (s390_expand_vcond): Likewise.
- 2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
-       * machmode.h (opt_machine_mode): New type.
-       (opt_mode<T>): Allow construction from anything that can be
-       converted to a T.
-       (is_a, as_a, dyn_cast): Add overloads for opt_mode.
-       (mode_for_size): Return an opt_machine_mode.
-       * stor-layout.c (mode_for_size): Likewise.
-       (mode_for_size_tree): Update call accordingly.
-       (bitwise_mode_for_mode): Likewise.
-       (make_fract_type): Likewise.
-       (make_accum_type): Likewise.
-       * caller-save.c (replace_reg_with_saved_mem): Update call
-       accordingly.
-       * config/alpha/alpha.h (SECONDARY_MEMORY_NEEDED_MODE): Likewise.
-       * config/i386/i386.h (SECONDARY_MEMORY_NEEDED_MODE): Likewise.
-       * config/s390/s390.h (SECONDARY_MEMORY_NEEDED_MODE): Likewise.
-       * config/sparc/sparc.h (SECONDARY_MEMORY_NEEDED_MODE): Likewise.
-       * expmed.c (extract_bit_field_1): Likewise.
-       * reload.c (get_secondary_mem): Likewise.
-       * varasm.c (assemble_integer): Likewise.
-       * lower-subreg.c (simplify_subreg_concatn): Likewise.  Move
-       early-out.
- 2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
-       * machmode.h (decimal_float_mode_for_size): New function.
-       * real.h (REAL_VALUE_TO_TARGET_LONG_DOUBLE): Use float_mode_for_size.
-       (REAL_VALUE_TO_TARGET_DOUBLE): Likewise.
-       (REAL_VALUE_TO_TARGET_SINGLE): Likewise.
-       (REAL_VALUE_TO_TARGET_DECIMAL128): Use decimal_float_mode_for_size.
-       (REAL_VALUE_TO_TARGET_DECIMAL64): Likewise.
-       (REAL_VALUE_TO_TARGET_DECIMAL32): Likewise.
- 2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
-       * builtins.c (expand_builtin_powi): Use int_mode_for_size.
-       (get_builtin_sync_mode): Likewise.
-       (expand_ifn_atomic_compare_exchange): Likewise.
-       (expand_builtin_atomic_clear): Likewise.
-       (expand_builtin_atomic_test_and_set): Likewise.
-       (fold_builtin_atomic_always_lock_free): Likewise.
-       * calls.c (compute_argument_addresses): Likewise.
-       (emit_library_call_value_1): Likewise.
-       (store_one_arg): Likewise.
-       * combine.c (combine_instructions): Likewise.
-       * config/aarch64/aarch64.c (aarch64_function_value): Likewise.
-       * config/arm/arm.c (arm_function_value): Likewise.
-       (aapcs_allocate_return_reg): Likewise.
-       * config/c6x/c6x.c (c6x_expand_movmem): Likewise.
-       * config/i386/i386.c (construct_container): Likewise.
-       (ix86_gimplify_va_arg): Likewise.
-       (ix86_expand_sse_cmp): Likewise.
-       (emit_memmov): Likewise.
-       (emit_memset): Likewise.
-       (expand_small_movmem_or_setmem): Likewise.
-       (ix86_expand_pextr): Likewise.
-       (ix86_expand_pinsr): Likewise.
-       * config/lm32/lm32.c (lm32_block_move_inline): Likewise.
-       * config/microblaze/microblaze.c (microblaze_block_move_straight):
-       Likewise.
-       * config/mips/mips.c (mips_function_value_1) Likewise.
-       (mips_block_move_straight): Likewise.
-       (mips_expand_ins_as_unaligned_store): Likewise.
-       * config/powerpcspe/powerpcspe.c
-       (rs6000_darwin64_record_arg_advance_flush): Likewise.
-       (rs6000_darwin64_record_arg_flush): Likewise.
-       * config/rs6000/rs6000.c
-       (rs6000_darwin64_record_arg_advance_flush): Likewise.
-       (rs6000_darwin64_record_arg_flush): Likewise.
-       * config/sparc/sparc.c (sparc_function_arg_1): Likewise.
-       (sparc_function_value_1): Likewise.
-       * config/spu/spu.c (adjust_operand): Likewise.
-       (spu_emit_branch_or_set): Likewise.
-       (arith_immediate_p): Likewise.
-       * emit-rtl.c (gen_lowpart_common): Likewise.
-       * expr.c (expand_expr_real_1): Likewise.
-       * function.c (assign_parm_setup_block): Likewise.
-       * gimple-ssa-store-merging.c (encode_tree_to_bitpos): Likewise.
-       * reload1.c (alter_reg): Likewise.
-       * stor-layout.c (mode_for_vector): Likewise.
-       (layout_type): Likewise.
+ 2018-03-14  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84830
+       * tree-ssa-pre.c (compute_antic_aux): Intersect the new ANTIC_IN
+       with the old one to avoid oscillations.
+ 2018-03-13  Vladimir Makarov  <vmakarov@redhat.com>
+       PR target/83712
+       * lra-assigns.c (find_all_spills_for): Ignore uninteresting
+       pseudos.
+       (assign_by_spills): Return a flag of reload assignment failure.
+       Do not process the reload assignment failures.  Do not spill other
+       reload pseudos if they have the same reg class.  Update n if
+       necessary.
+       (lra_assign): Add a return arg.  Set up from the result of
+       assign_by_spills call.
+       (find_reload_regno_insns, lra_split_hard_reg_for): New functions.
+       * lra-constraints.c (split_reg): Add a new arg.  Use it instead of
+       usage_insns if it is not NULL.
+       (spill_hard_reg_in_range): New function.
+       (split_if_necessary, inherit_in_ebb): Pass a new arg to split_reg.
+       * lra-int.h (spill_hard_reg_in_range, lra_split_hard_reg_for): New
+       function prototypes.
+       (lra_assign): Change prototype.
+       * lra.c (lra): Add code to deal with fails by splitting hard reg
+       live ranges.
+ 2018-03-01  Palmer Dabbelt  <palmer@sifive.com>
+       * config/riscv/riscv.opt (mrelax): New option.
+       * config/riscv/riscv.c (riscv_file_start): Emit ".option
+       "norelax" when riscv_mrelax is disabled.
+       * doc/invoke.texi (RISC-V): Document "-mrelax" and "-mno-relax".
+ 2018-03-13  Aaron Sawdey  <acsawdey@linux.vnet.ibm.com>
+       PR target/84743
+       * config/rs6000/rs6000.c (rs6000_reassociation_width): Disable parallel
+       reassociation for int modes.
+ 2018-03-13  Richard Sandiford  <richard.sandiford@linaro.org>
+       * tree-vect-loop-manip.c (vect_maybe_permute_loop_masks):
+       Reverse the choice between VEC_UNPACK_LO_EXPR and VEC_UNPACK_HI_EXPR
+       for big-endian.
+       * config/aarch64/iterators.md (hi_lanes_optab): New int attribute.
+       * config/aarch64/aarch64-sve.md
+       (*aarch64_sve_<perm_insn><perm_hilo><mode>): Rename to...
+       (aarch64_sve_<perm_insn><perm_hilo><mode>): ...this.
+       (*extend<mode><Vwide>2): Rename to...
+       (aarch64_sve_extend<mode><Vwide>2): ...this.
+       (vec_unpack<su>_<perm_hilo>_<mode>): Turn into a define_expand,
+       renaming the old pattern to...
+       (aarch64_sve_punpk<perm_hilo>_<mode>): ...this.  Only define
+       unsigned packs.
+       (vec_unpack<su>_<perm_hilo>_<SVE_BHSI:mode>): Turn into a
+       define_expand, renaming the old pattern to...
+       (aarch64_sve_<su>unpk<perm_hilo>_<SVE_BHSI:mode>): ...this.
+       (*vec_unpacku_<perm_hilo>_<mode>_no_convert): Delete.
+       (vec_unpacks_<perm_hilo>_<mode>): Take BYTES_BIG_ENDIAN into
+       account when deciding which SVE instruction the optab should use.
+       (vec_unpack<su_optab>_float_<perm_hilo>_vnx4si): Likewise.
+ 2018-03-13  Richard Sandiford  <richard.sandiford@linaro.org>
+       * config/aarch64/aarch64.md (V4_REGNUM, V8_REGNUM, V12_REGNUM)
+       (V20_REGNUM, V24_REGNUM, V28_REGNUM, P1_REGNUM, P2_REGNUM, P3_REGNUM)
+       (P4_REGNUM, P5_REGNUM, P6_REGNUM, P8_REGNUM, P9_REGNUM, P10_REGNUM)
+       (P11_REGNUM, P12_REGNUM, P13_REGNUM, P14_REGNUM): New define_constants.
+       (tlsdesc_small_<mode>): Turn a define_expand and use
+       tlsdesc_small_sve_<mode> for SVE.  Rename original define_insn to...
+       (tlsdesc_small_advsimd_<mode>): ...this.
+       (tlsdesc_small_sve_<mode>): New pattern.
+ 2018-03-13  Richard Sandiford  <richard.sandiford@linaro.org>
+       * config/aarch64/iterators.md (UNSPEC_SMUL_HIGHPART)
+       (UNSPEC_UMUL_HIGHPART): New constants.
+       (MUL_HIGHPART): New int iterator.
+       (su): Handle UNSPEC_SMUL_HIGHPART and UNSPEC_UMUL_HIGHPART.
+       * config/aarch64/aarch64-sve.md (<su>mul<mode>3_highpart): New
+       define_expand.
+       (*<su>mul<mode>3_highpart): New define_insn.
+ 2018-03-13  Eric Botcazou  <ebotcazou@adacore.com>
+       PR lto/84805
+       * ipa-devirt.c (odr_subtypes_equivalent_p): Do not get the ODR type of
+       incomplete types.
+ 2018-03-13  Martin Liska  <mliska@suse.cz>
+       PR ipa/84658.
+       * ipa-icf.c (sem_item_optimizer::sem_item_optimizer): Initialize new
+       vector.
+       (sem_item_optimizer::~sem_item_optimizer): Release it.
+       (sem_item_optimizer::merge_classes): Register variable aliases.
+       (sem_item_optimizer::fixup_pt_set): New function.
+       (sem_item_optimizer::fixup_points_to_sets): Likewise.
+       * ipa-icf.h: Declare new variables and functions.
+ 2018-03-13  Jakub Jelinek  <jakub@redhat.com>
+       PR middle-end/84834
+       * match.pd ((A & C) != 0 ? D : 0): Use INTEGER_CST@2 instead of
+       integer_pow2p@2 and test integer_pow2p in condition.
+       (A < 0 ? C : 0): Similarly for @1.
+       PR middle-end/84831
+       * stmt.c (parse_output_constraint): If the CONSTRAINT_LEN (*p, p)
+       characters starting at p contain '\0' character, don't look beyond
+       that.
+       PR target/84827
+       * config/i386/i386.md (round<mode>2): For 387 fancy math, disable
+       pattern if -ftrapping-math -fno-fp-int-builtin-inexact.
+       PR target/84828
+       * reg-stack.c (change_stack): Change update_end var from int to
+       rtx_insn *, if non-NULL don't update just BB_END (current_block), but
+       also call set_block_for_insn on the newly added insns and rescan.
+       PR target/84786
+       * config/i386/sse.md (sse2_loadhpd): Use Yv constraint rather than v
+       on the last operand.
+       PR c++/84704
+       * tree.c (stabilize_reference_1): Return save_expr (e) for
+       STATEMENT_LIST even if it doesn't have side-effects.
+ 2018-03-12  Jonathan Wakely  <jwakely@redhat.com>
+       * doc/invoke.texi (-mclflushopt): Fix spelling of option.
+ 2018-03-12  Renlin Li  <renlin.li@arm.com>
+       * config/aarch64/aarch64.md (movhf_aarch64): Fix mode argument to
+       aarch64_output_scalar_simd_mov_immediate.
+ 2018-03-12  Martin Sebor  <msebor@redhat.com>
+       PR tree-optimization/83456
+       * gimple-fold.c (gimple_fold_builtin_memory_op): Avoid warning
+       for perfectly overlapping calls to memcpy.
+       (gimple_fold_builtin_memory_chk): Same.
+       (gimple_fold_builtin_strcpy): Handle no-warning.
+       (gimple_fold_builtin_stxcpy_chk): Same.
+       * tree-ssa-strlen.c (maybe_diag_stxncpy_trunc): Handle no-warning.
+ 2018-03-12  Segher Boessenkool  <segher@kernel.crashing.org>
+       * config/rs6000/rs6000.c (abi_v4_pass_in_fpr): Add bool "named"
+       parameter.  Use it for SFmode.
+       (rs6000_function_arg_advance_1): Adjust.
+       (rs6000_function_arg): Adjust.
+       (rs6000_gimplify_va_arg): Pass false for that new parameter.
+ 2018-03-12  Segher Boessenkool  <segher@kernel.crashing.org>
  
- 2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR rtl-optimization/84169
+       PR rtl-optimization/84780
+       * combine.c (can_combine_p): Check for a 2-insn combination whether
+       the destination register is used between the two insns, too.
  
-       * config/spu/spu.c (exp2_immediate_p): Use int_mode_for_mode.
-       (spu_convert_move): Likewise.
-       * lower-subreg.c (resolve_simple_move): Likewise.
+ 2018-03-12  Richard Biener  <rguenther@suse.de>
  
- 2017-09-05  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+       PR tree-optimization/84803
+       * tree-if-conv.c (ifcvt_memrefs_wont_trap): Don't do anything
+       for refs DR analysis didn't process.
  
-       PR target/81833
-       * config/rs6000/altivec.md (altivec_vsum2sws): Convert from a
-       define_insn to a define_expand.
-       (altivec_vsum2sws_direct): New define_insn.
-       (altivec_vsumsws): Convert from a define_insn to a define_expand.
+ 2018-03-12  Richard Biener  <rguenther@suse.de>
  
- 2017-09-05  Wilco Dijkstra  <wdijkstr@arm.com>
+       PR tree-optimization/84777
+       * tree-ssa-loop-ch.c (should_duplicate_loop_header_p): For
+       force-vectorize loops ignore whether we are optimizing for size.
  
-       * config/arm/arm.c (arm_option_params_internal): Improve setting of
-       max_insns_skipped.
+ 2018-03-12  Chung-Ju Wu  <jasonwucj@gmail.com>
  
- 2017-09-05  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/nds32/nds32.c (nds32_md_asm_adjust): New function.
+       (TARGET_MD_ASM_ADJUST): Define.
  
-       PR target/59501
-       PR target/81624
-       PR target/81769
-       * config/i386/i386.c (ix86_finalize_stack_frame_flags): Don't
-       realign stack if stack alignment needed is less than incoming
-       stack boundary.
+ 2018-03-12  Monk Chiang  <sh.chiang04@gmail.com>
+           Kito Cheng  <kito.cheng@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
  
- 2017-09-05  Marek Polacek  <polacek@redhat.com>
+       * config/nds32/nds32.c (nds32_compute_stack_frame,
+       nds32_emit_stack_push_multiple, nds32_emit_stack_pop_multiple,
+       nds32_emit_stack_v3push, nds32_emit_stack_v3pop,
+       nds32_emit_adjust_frame, nds32_expand_prologue, nds32_expand_epilogue,
+       nds32_expand_prologue_v3push, nds32_expand_epilogue_v3pop): Refine.
+       * config/nds32/nds32.h (NDS32_FIRST_CALLEE_SAVE_GPR_REGNUM,
+       NDS32_LAST_CALLEE_SAVE_GPR_REGNUM, NDS32_V3PUSH_AVAILABLE_P): New.
+       * config/nds32/nds32.md (prologue, epilogue): Use macro
+       NDS32_V3PUSH_AVAILABLE_P to do checking.
  
-       PR sanitizer/82072
-       * convert.c (convert_to_integer_1) <case NEGATE_EXPR>: Move the ubsan
-       check earlier.
+ 2018-03-11  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-09-05  Wilco Dijkstra  <wdijkstr@arm.com>
+       PR debug/58150
+       * dwarf2out.c (gen_enumeration_type_die): Don't guard adding
+       DW_AT_declaration for ENUM_IS_OPAQUE on -gdwarf-4 or -gno-strict-dwarf,
+       but on TYPE_SIZE.  Don't do anything for ENUM_IS_OPAQUE if not creating
+       a new die.  Don't set TREE_ASM_WRITTEN if ENUM_IS_OPAQUE.  Guard
+       addition of most attributes on !orig_type_die or the attribute not
+       being present already.  Assert TYPE_VALUES is NULL for ENUM_IS_OPAQUE.
  
-       * explow.c (get_dynamic_stack_size): Improve dynamic alignment.
+ 2018-03-11  Kito Cheng  <kito.cheng@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
  
- 2017-09-05  Richard Biener  <rguenther@suse.de>
+       * config/nds32/nds32.c (nds32_cpu_cpp_builtins): Modify to define
+       __NDS32_VH__ macro.
+       * config/nds32/nds32.opt (mvh): New option.
  
-       PR tree-optimization/82084
-       * fold-const.c (can_native_encode_string_p): Handle wide characters.
+ 2018-03-11  Kito Cheng  <kito.cheng@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
  
- 2017-09-05  Richard Biener  <rguenther@suse.de>
+       * config/nds32/nds32-protos.h (nds32_cpu_cpp_builtins): Declare
+       function.
+       * config/nds32/nds32.c (nds32_cpu_cpp_builtins): New function.
+       * config/nds32/nds32.h (TARGET_CPU_CPP_BUILTINS): Modify its
+       definition.
  
-       PR tree-optimization/82102
-       * tree-ssa-pre.c (fini_eliminate): Check if lhs is NULL.
+ 2018-03-11  Kito Cheng  <kito.cheng@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
  
- 2017-09-05  Martin Liska  <mliska@suse.cz>
+       * config/nds32/nds32-memory-manipulation.c (nds32_expand_strlen): New
+       function.
+       * config/nds32/nds32-multiple.md (strlensi): New pattern.
+       * config/nds32/nds32-protos.h (nds32_expand_strlen): Declare function.
+ 2018-03-11  Monk Chiang  <sh.chiang04@gmail.com>
+           Kito Cheng  <kito.cheng@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
+       * config/nds32/constants.md (unspec_element): Add UNSPEC_FFB,
+       UNSPEC_FFMISM and UNSPEC_FLMISM.
+       * config/nds32/nds32-intrinsic.c (bdesc_2arg): Add builtin description
+       for ffb, ffmism and flmism.
+       * config/nds32/nds32-intrinsic.md (unspec_ffb): Define new pattern.
+       (unspec_ffmism): Ditto.
+       (unspec_flmism): Ditto.
+       (nds32_expand_builtin_impl): Check if string extension is available.
+       * config/nds32/nds32.h (nds32_builtins): Add NDS32_BUILTIN_FFB,
+       NDS32_BUILTIN_FFMISM and NDS32_BUILTIN_FLMISM.
+ 2018-03-10  Vladimir Makarov  <vmakarov@redhat.com>
+       Reverting patch:
+       2018-03-09  Vladimir Makarov  <vmakarov@redhat.com>
+       PR target/83712
+       * lra-assigns.c (assign_by_spills): Return a flag of reload
+       assignment failure.  Do not process the reload assignment
+       failures.  Do not spill other reload pseudos if they have the same
+       reg class.
+       (lra_assign): Add a return arg.  Set up from the result of
+       assign_by_spills call.
+       (find_reload_regno_insns, lra_split_hard_reg_for): New functions.
+       * lra-constraints.c (split_reg): Add a new arg.  Use it instead of
+       usage_insns if it is not NULL.
+       (spill_hard_reg_in_range): New function.
+       (split_if_necessary, inherit_in_ebb): Pass a new arg to split_reg.
+       * lra-int.h (spill_hard_reg_in_range, lra_split_hard_reg_for): New
+       function prototypes.
+       (lra_assign): Change prototype.
+       * lra.c (lra): Add code to deal with fails by splitting hard reg
+       live ranges.
+ 2018-03-10  H.J. Lu  <hongjiu.lu@intel.com>
+       PR target/84807
+       * config/i386/i386.opt: Replace Enforcment with Enforcement.
+ 2018-03-10  Alexandre Oliva <aoliva@redhat.com>
+       PR debug/84620
+       * dwarf2out.h (dw_val_class): Add dw_val_class_symview.
+       (dw_val_node): Add val_symbolic_view.
+       * dwarf2out.c (dw_line_info_table): Add symviews_since_reset.
+       (symview_upper_bound): New.
+       (new_line_info_table): Initialize symviews_since_reset.
+       (dwarf2out_source_line): Count symviews_since_reset and set
+       symview_upper_bound.
+       (dw_val_equal_p): Handle symview.
+       (add_AT_symview): New.
+       (print_dw_val): Handle symview.
+       (attr_checksum, attr_checksum_ordered): Likewise.
+       (same_dw_val_p, size_of_die): Likewise.
+       (value_format, output_die): Likewise.
+       (add_high_low_attributes): Use add_AT_symview for entry_view.
+       (dwarf2out_finish): Reset symview_upper_bound, clear
+       zero_view_p.
+ 2018-03-09  Peter Bergner  <bergner@vnet.ibm.com>
+       PR target/83969
+       * config/rs6000/rs6000.c (rs6000_offsettable_memref_p): New prototype.
+       Add strict argument and use it.
+       (rs6000_split_multireg_move): Update for new strict argument.
+       (mem_operand_gpr): Disallow all non-offsettable addresses.
+       * config/rs6000/rs6000.md (*movdi_internal64): Use YZ constraint.
+ 2018-03-09  Jakub Jelinek  <jakub@redhat.com>
+       PR target/84772
+       * config/rs6000/rs6000.c (rs6000_gimplify_va_arg): Mark va_arg_tmp
+       temporary TREE_ADDRESSABLE before gimplification of BUILT_IN_MEMCPY.
+       * config/powerpcspe/powerpcspe.c (rs6000_gimplify_va_arg): Likewise.
+       PR c++/84767
+       * tree-inline.c (copy_tree_body_r): For INDIRECT_REF of a remapped
+       decl, use remap_type if we want to use the type.
+ 2018-03-09  Martin Sebor  <msebor@redhat.com>
+       PR tree-optimization/84526
+       * gimple-ssa-warn-restrict.c (builtin_memref::set_base_and_offset):
+       Remove dead code.
+       (builtin_access::generic_overlap): Be prepared to handle non-array
+       base objects.
+ 2018-03-09  Alexandre Oliva <aoliva@redhat.com>
+       PR rtl-optimization/84682
+       * lra-constraints.c (process_address_1): Check is_address flag
+       for address constraints.
+       (process_alt_operands): Likewise.
+       * lra.c (lra_set_insn_recog_data): Pass asm operand locs to
+       preprocess_constraints.
+       * recog.h (preprocess_constraints): Add oploc parameter.
+       Adjust callers.
+       * recog.c (preprocess_constraints): Test address_operand for
+       CT_ADDRESS constraints.
+ 2018-03-09  Vladimir Makarov  <vmakarov@redhat.com>
+       PR target/83712
+       * lra-assigns.c (assign_by_spills): Return a flag of reload
+       assignment failure.  Do not process the reload assignment
+       failures.  Do not spill other reload pseudos if they have the same
+       reg class.
+       (lra_assign): Add a return arg.  Set up from the result of
+       assign_by_spills call.
+       (find_reload_regno_insns, lra_split_hard_reg_for): New functions.
+       * lra-constraints.c (split_reg): Add a new arg.  Use it instead of
+       usage_insns if it is not NULL.
+       (spill_hard_reg_in_range): New function.
+       (split_if_necessary, inherit_in_ebb): Pass a new arg to split_reg.
+       * lra-int.h (spill_hard_reg_in_range, lra_split_hard_reg_for): New
+       function prototypes.
+       (lra_assign): Change prototype.
+       * lra.c (lra): Add code to deal with fails by splitting hard reg
+       live ranges.
+ 2018-03-09  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
+       PR target/83193
+       * common/config/arm/arm-common.c (arm_parse_arch_option_name):
+       Accept complain bool parameter.  Only emit errors if it is true.
+       (arm_parse_cpu_option_name): Likewise.
+       (arm_target_thumb_only): Adjust callers of the above.
+       * config/arm/arm-protos.h (arm_parse_cpu_option_name): Adjust
+       prototype to take a default true bool parameter.
+       (arm_parse_arch_option_name): Likewise.
  
-       PR tree-optimization/82032
-       * tree-cfg.c (generate_range_test): New function.
-       * tree-cfg.h (generate_range_test): Declared here.
-       * tree-cfgcleanup.c (convert_single_case_switch): New function.
-       (cleanup_control_expr_graph): Use it.
-       * tree-switch-conversion.c (try_switch_expansion): Remove
-       assert.
-       (emit_case_nodes): Use generate_range_test.
+ 2018-03-09  David Malcolm  <dmalcolm@redhat.com>
+           Francois-Xavier Coudert  <fxcoudert@gcc.gnu.org>
  
- 2017-09-04  Uros Bizjak  <ubizjak@gmail.com>
+       PR jit/64089
+       PR jit/84288
+       * Makefile.in (LD_VERSION_SCRIPT_OPTION, LD_SONAME_OPTION): New.
+       * configure: Regenerate.
+       * configure.ac ("linker --version-script option"): New.
+       ("linker soname option"): New.
  
-       PR target/82098
-       * config/i386/i386.md (*<btsc><mode>_mask): Add
-       TARGET_USE_BT to insn constraint.
-       (*btr<mode>_mask): Ditto.
+ 2018-03-09  Richard Biener  <rguenther@suse.de>
  
- 2017-09-04  Wilco Dijkstra  <wdijkstr@arm.com>
+       PR tree-optimization/84775
+       * tree-if-conv.c (add_bb_predicate_gimplified_stmts): Delink
+       immediate uses of predicate stmts and mark them modified.
  
-       * config/arm/arm.c (arm_legitimate_index_p): Add comment.
-       (thumb2_legitimate_index_p): Use correct range for DI/DF mode.
+       Revert
+       PR tree-optimization/84178
+       * tree-if-conv.c (combine_blocks): Move insert_gimplified_predicates
+       to caller.
+       (version_loop_for_if_conversion): Delay update_ssa call.
+       (tree_if_conversion): Delay update_ssa until after predicate
+       insertion.
  
- 2017-09-04  Bernd Edlinger  <bernd.edlinger@hotmail.de>
+ 2018-03-09  Eric Botcazou  <ebotcazou@adacore.com>
  
-       PR target/77308
-       * config/arm/arm.md (*arm_adddi3, *arm_subdi3): Split early except for
-       TARGET_NEON and TARGET_IWMMXT.
-       (anddi3, iordi3, xordi3, one_cmpldi2): Split while expanding except for
-       TARGET_NEON and TARGET_IWMMXT.
-       (*one_cmpldi2_insn): Moved the body of one_cmpldi2 here.
+       PR target/84763
+       * config/i386/winnt.c (i386_pe_seh_cold_init): Use small pre-allocation
+       when the function accesses prior frames.
  
- 2017-09-04  Uros Bizjak  <ubizjak@gmail.com>
+ 2018-03-08  Jakub Jelinek  <jakub@redhat.com>
  
-       * config/i386/i386-protos.h (ix86_tls_address_pattern_p) New prototype.
-       (ix86_rewrite_tls_address): Ditto.
-       * config/i386/i386.c (ix86_tls_address_pattern_p) New function.
-       (ix86_rewrite_tls_address_1): Ditto.
-       (ix86_rewrite_tls_address): Ditto.
-       * config/i386/predicates.md (tls_address_pattern): New predicate.
-       * config/i386/i386.md (TLS address splitter): New splitter.
+       PR debug/84456
+       * dwarf2out.c (dw_loc_list): If list && loc_list->first->next, call
+       gen_llsym, otherwise call maybe_gen_llsym.
  
- 2017-09-04  Richard Biener  <rguenther@suse.de>
+       PR inline-asm/84742
+       * recog.c (asm_operand_ok): Return 0 if multi-character constraint
+       has ',' character inside of it.
  
-       PR tree-optimization/82084
-       * fold-const.h (can_native_encode_string_p): Declare.
-       * fold-const.c (can_native_encode_string_p): Factor out from ...
-       (native_encode_string): ... here.
-       * tree-vect-stmts.c (vectorizable_store): Call it to avoid
-       vectorizing stores from constants we later cannot handle.
+ 2018-03-08  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
  
- 2017-09-04  Marek Polacek  <polacek@redhat.com>
+       PR target/84748
+       * config/aarch64/aarch64.md (*compare_cstore<mode>_insn): Mark pattern
+       as clobbering CC_REGNUM.
  
-       PR c/81783
-       * doc/invoke.texi: Update -Wtautological-compare documentation.
+ 2018-03-08  Richard Biener  <rguenther@suse.de>
  
- 2017-09-04  Jeff Law  <law@redhat.com>
+       PR middle-end/84552
+       * tree-scalar-evolution.c: Include tree-into-ssa.h.
+       (follow_copies_to_constant): Do not follow SSA names registered
+       for update.
  
-       PR tree-optimization/64910
-       * tree-ssa-reassoc.c (reassociate_bb): For bitwise binary ops,
-       swap the first and last operand if the last is a constant.
+ 2018-03-08  Richard Biener  <rguenther@suse.de>
  
- 2017-09-04  Marek Polacek  <polacek@redhat.com>
+       PR tree-optimization/84178
+       * tree-if-conv.c (combine_blocks): Move insert_gimplified_predicates
+       to caller.
+       (version_loop_for_if_conversion): Delay update_ssa call.
+       (tree_if_conversion): Delay update_ssa until after predicate
+       insertion.
  
-       PR sanitizer/82072
-       * convert.c (do_narrow): When sanitizing signed integer overflows,
-       bail out for signed types.
-       (convert_to_integer_1) <case NEGATE_EXPR>: Likewise.
+ 2018-03-08  David Malcolm  <dmalcolm@redhat.com>
  
- 2017-09-04  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84178
+       * tree-if-conv.c (release_bb_predicate): Remove the
+       assertion that the stmts have NULL use_ops.
+       Discard the statements, asserting that they haven't
+       yet been added to a BB.
  
-       PR tree-optimization/82060
-       * tree-ssa-pre.c (eliminate_dom_walker::before_dom_children):
-       Move devirtualization after stmt folding and before EH/AB/noreturn
-       cleanup to get the stmt refs canonicalized.  Use a bool instead
-       of gimple_modified_p since that doesn't work for NOPs.  Schedule
-       NOPs generated by folding for removal.
+ 2018-03-08  Richard Biener  <rguenther@suse.de>
  
- 2017-09-04  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR tree-optimization/84746
+       * tree-ssa-pre.c (find_leader_in_sets): Deal with SET1 being NULL.
+       (phi_translate): Pass in destination ANTIC_OUT set.
+       (phi_translate_1): Likewise.  For a simplified result lookup
+       a leader in ANTIC_OUT and AVAIL_OUT, not the ANTIC_IN sets.
+       (phi_translate_set): Adjust.
+       (do_pre_regular_insertion): Likewise.
+       (do_pre_partial_partial_insertion): Likewise.
+ 2018-03-08  Martin Liska  <mliska@suse.cz>
+       PR gcov-profile/84735
+       * doc/gcov.texi: Document usage of profile files.
+       * gcov-io.h: Document changes in the format.
+ 2018-03-08  Alexandre Oliva <aoliva@redhat.com>
+       PR debug/84404
+       PR debug/84408
+       * dwarf2out.c (struct dw_line_info_table): Update comments for
+       view == -1.
+       (FORCE_RESET_NEXT_VIEW): New.
+       (FORCE_RESETTING_VIEW_P): New.
+       (RESETTING_VIEW_P): Check for -1 too.
+       (ZERO_VIEW_P): Likewise.
+       (new_line_info_table): Force-reset next view.
+       (dwarf2out_begin_function): Likewise.
+       (dwarf2out_source_line): Simplify zero_view_p initialization.
+       Test FORCE_RESETTING_VIEW_P and RESETTING_VIEW_P instead of
+       view directly.  Omit view when omitting .loc at line 0.
+ 2018-03-08  Jakub Jelinek  <jakub@redhat.com>
+       PR tree-optimization/84740
+       * tree-switch-conversion.c (process_switch): Call build_constructors
+       only if info.phi_count is non-zero.
+       PR tree-optimization/84739
+       * tree-tailcall.c (find_tail_calls): Check call arguments against
+       DECL_ARGUMENTS (current_function_decl) rather than
+       DECL_ARGUMENTS (func) when checking for tail recursion.
+ 2018-03-07  Jakub Jelinek  <jakub@redhat.com>
+       * doc/contrib.texi: Add entries for Martin Liska, David Malcolm,
+       Marek Polacek, extend Vladimir Makarov's, Jonathan Wakely's and
+       Volker Reichelt's entry and add entries for people that perform
+       GCC fuzzy testing and report numerous bugs.
+ 2018-03-07  Segher Boessenkool  <segher@kernel.crashing.org>
+       PR target/82411
+       * config/rs6000/rs6000.c (rs6000_elf_in_small_data_p): Don't put
+       readonly data in sdata, if that is disabled.
+       * config/rs6000/sysv4.opt (mreadonly-in-sdata): New option.
+       * doc/invoke.texi (RS/6000 and PowerPC Options): Document
+       -mreadonly-in-sdata option.
+ 2018-03-07  Martin Sebor  <msebor@redhat.com>
+       PR tree-optimization/84468
+       * tree-ssa-strlen.c (maybe_diag_stxncpy_trunc): Consider successor
+       basic block when looking for nul assignment.
+ 2018-03-07  Eric Botcazou  <ebotcazou@adacore.com>
+       PR target/84277
+       * except.h (output_function_exception_table): Adjust prototype.
+       * except.c (output_function_exception_table): Remove FNNAME parameter
+       and add SECTION parameter.  Output one part of the table at a time.
+       * final.c (final_scan_insn_1) <NOTE_INSN_SWITCH_TEXT_SECTIONS>: Output
+       the first part of the exception table and emit unwind directives.
+       * config/i386/i386-protos.h (i386_pe_end_cold_function): Declare.
+       (i386_pe_seh_cold_init): Likewise.
+       * config/i386/cygming.h (ASM_DECLARE_COLD_FUNCTION_NAME): New macro.
+       (ASM_DECLARE_COLD_FUNCTION_SIZE): Likewise.
+       * config/i386/i386.c (x86_expand_epilogue): Fix wording in comment.
+       (ix86_output_call_insn): Emit a nop in one more case for SEH.
+       * config/i386/winnt.c: Include except.h.
+       (struct seh_frame_state): Add reg_offset, after_prologue and
+       in_cold_section fields.
+       (i386_pe_seh_end_prologue): Set seh->after_prologue.
+       (i386_pe_seh_cold_init): New function.
+       (i386_pe_seh_fini): Add COLD parameter and bail out if it is not equal
+       to seh->in_cold_section.
+       (seh_emit_push): Record the offset of the push.
+       (seh_emit_save): Record the offset of the save.
+       (i386_pe_seh_unwind_emit): Deal with NOTE_INSN_SWITCH_TEXT_SECTIONS.
+       Test seh->after_prologue to disregard the epilogue.
+       (i386_pe_end_function): Pass FALSE to i386_pe_seh_fini.
+       (i386_pe_end_cold_function): New function.
+ 2018-03-07  Jakub Jelinek  <jakub@redhat.com>
+       PR fortran/84565
+       * config/aarch64/predicates.md (aarch64_simd_reg_or_zero): Use
+       aarch64_simd_or_scalar_imm_zero rather than aarch64_simd_imm_zero.
+       PR c++/84704
+       * gimple-expr.c (create_tmp_var_raw): Set DECL_NAMELESS flag
+       on tmp_var.
+       * tree-pretty-print.c (dump_decl_name): For TDF_COMPARE_DEBUG,
+       don't print names of DECL_NAMELESS DECL_IGNORED_P decls.
+       PR middle-end/84723
+       * multiple_target.c: Include tree-inline.h and intl.h.
+       (expand_target_clones): Diagnose and fail if node->definition and
+       !tree_versionable_function_p (node->decl).
+ 2018-03-06  John David Anglin  <danglin@gcc.gnu.org>
+       * config/pa/pa.h (ASM_GENERATE_INTERNAL_LABEL): Revise to use
+       sprint_ul.
+       (ASM_OUTPUT_ADDR_VEC_ELT): Revise for above change.
+       (ASM_OUTPUT_ADDR_DIFF_ELT): Likewise.
+       * config/pa/pa64-hpux.h (ASM_GENERATE_INTERNAL_LABEL): Revise as above.
+ 2018-03-06  Jakub Jelinek  <jakub@redhat.com>
+       PR target/84710
+       * combine.c (try_combine): Use reg_or_subregno instead of handling
+       just paradoxical SUBREGs and REGs.
+ 2018-03-06  Claudiu Zissulescu  <claziss@synopsys.com>
+        * config/arc/arc.c (arc_finalize_pic): Remove function.
+        (arc_must_save_register): We use single base PIC register, remove
+        checks to save/restore the PIC register.
+        (arc_expand_prologue): Likewise.
+        * config/arc/arc-protos.h (arc_set_default_type_attributes):
+        Remove.
+        (arc_verify_short): Likewise.
+        (arc_attr_type): Likewise.
+        * config/arc/arc.c (arc_set_default_type_attributes): Remove.
+        (walk_stores): Likewise.
+        (arc_address_cost): Make it static.
+        (arc_verify_short): Likewise.
+        (branch_dest): Likewise.
+        (arc_attr_type): Likewise.
+        * config/arc/arc.c (TARGET_ADJUST_INSN_LENGTH): Remove.
+        (TARGET_INSN_LENGTH_PARAMETERS): Likewise.
+        (arc_final_prescan_insn): Remove inserting the nops due to
+        hardware hazards.  It is done in reorg step.
+        (insn_length_variant_t): Remove.
+        (insn_length_parameters_t): Likewise.
+        (arc_insn_length_parameters): Likewise.
+        (arc_get_insn_variants): Likewise.
+        * config/arc/arc.h (TARGET_UPSIZE_DBR): Remove.
+ 2018-03-06  Jakub Jelinek  <jakub@redhat.com>
+       PR inline-asm/84683
+       * reg-stack.c (move_for_stack_reg): If any_malformed_asm, avoid
+       assertion failure.
+       PR tree-optimization/84687
+       * omp-simd-clone.c (simd_clone_create): Clear DECL_BUILT_IN_CLASS
+       on new_node->decl.
+       * match.pd (pow(C,x)*expN(y) -> expN(logN(C)*x+y)): New optimization.
+ 2018-03-05  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+       * config/rs6000/rs6000-builtin.def (rs6000_speculation_barrier):
+       Rename to ppc_speculation_barrier.
+       * config/rs6000/rs6000.c (rs6000_init_builtins): Rename builtin to
+       __builtin_ppc_speculation_barrier.
+ 2018-03-05  Jakub Jelinek  <jakub@redhat.com>
+       PR target/84700
+       * combine.c (combine_simplify_rtx): Don't try to simplify if
+       if_then_else_cond returned non-NULL, but either true_rtx or false_rtx
+       are equal to x.
+ 2018-03-05  Segher Boessenkool  <segher@kernel.crashing.org>
+       * config/rs6000/rs6000.c (rs6000_loop_align): Don't align tiny loops
+       to 32 bytes when compiling for POWER9.
+ 2018-03-05  Jakub Jelinek  <jakub@redhat.com>
+       PR target/84564
+       * config/i386/i386.c (ix86_function_ok_for_sibcall): Check for
+       regparm >= 3 with no arg reg available also for calls with
+       flag_force_indirect_call.  Pass decl to ix86_function_regparm.
+       PR target/84524
+       * config/i386/sse.md (*<code><mode>3): Replace <mask_prefix3> with
+       orig,vex.
+       (*<plusminus_insn><mode>3): Likewise.  Remove <mask_operand3> uses.
+ 2018-03-05  Peter Bergner  <bergner@vnet.ibm.com>
+       PR target/84264
+       * config/rs6000/vector.md (mov<mode>): Disallow altivec memory operands.
+ 2018-03-05  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84486
+       * tree-ssa-pre.c (create_expression_by_pieces): Remove dead code.
+       When inserting a __builtin_assume_aligned call set the LHS
+       SSA name alignment info accordingly.
+ 2018-03-05  Wilco Dijkstra  <wdijkstr@arm.com>
+       PR tree-optimization/84114
+       * config/aarch64/aarch64.c (aarch64_reassociation_width)
+       Avoid reassociation of FLOAT_MODE addition.
+ 2018-03-05  Olga Makhotina  <olga.makhotina@intel.com>
+       * common/config/i386/i386-common.c (OPTION_MASK_ISA_PCONFIG_SET,
+       OPTION_MASK_ISA_PCONFIG_UNSET, OPTION_MASK_ISA_WBNOINVD_SET,
+       OPTION_MASK_ISA_WBNOINVD_UNSET): New definitions.
+       (ix86_handle_option): Handle -mpconfig and -mwbnoinvd.
+       * config.gcc (pconfigintrin.h, wbnoinvdintrin.h) : Add headers.
+       * config/i386/cpuid.h (bit_PCONFIG, bit_WBNOINVD): New.
+       * config/i386/driver-i386.c (host_detect_local_cpu): Detect -mpconfig
+       and -mwbnoinvd.
+       * config/i386/i386-builtin.def (__builtin_ia32_wbnoinvd,
+       __builtin_ia32_wbinvd): New builtins.
+       (SPECIAL_ARGS2): New.
+       * config/i386/i386-c.c (__WBNOINVD__, __PCONFIG__): New.
+       (SPECIAL_ARGS2): New.
+       * config/i386/i386.c (ix86_target_string): Add -mpconfig and -mwbnoinvd.
+       (ix86_valid_target_attribute_inner_p): Ditto.
+       (ix86_init_mmx_sse_builtins): Add special_args2.
+       * config/i386/i386.h (TARGET_PCONFIG, TARGET_PCONFIG_P, TARGET_WBNOINVD,
+       TARGET_WBNOINVD_P): New.
+       * config/i386/i386.md (UNSPECV_WBINVD, UNSPECV_WBNOINVD): New.
+       (define_insn "wbinvd", define_insn "wbnoinvd"): New.
+       * config/i386/i386.opt: Add -mpconfig and -mwbnoinvd.
+       * config/i386/immintrin.h (_wbinvd): New intrinsic.
+       * config/i386/pconfigintrin.h: New file.
+       * config/i386/wbnoinvdintrin.h: Ditto.
+       * config/i386/x86intrin.h: Add headers pconfigintrin.h and wbnoinvdintrin.h.
+       * doc/invoke.texi (-mpconfig, -mwbnoinvd): New.
+ 2018-03-05  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84670
+       * tree-ssa-pre.c (struct bb_bitmap_sets): Add visited_with_visited_succs
+       member.
+       (BB_VISITED_WITH_VISITED_SUCCS): New define.
+       (compute_antic): Initialize BB_VISITED_WITH_VISITED_SUCCS.
+       (compute_antic_aux): Only assert the number of values in ANTIC_IN
+       doesn't grow if all successors (recursively) were visited at least
+       once.
+ 2018-03-05  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84650
+       * tree-ssa-loop-im.c (pass_lim::execute): Reset the SCEV cache
+       if executed in the loop pipeline.
+ 2018-03-05  Sandra Loosemore  <sandra@codesourcery.com>
+       * doc/configfiles.texi (Configuration Files): Move info about
+       conditionalizing $target-protos.h to...
+       * doc/sourcebuild.texi (Back End): Here.  Explain how $target.h
+       differs from $target-protos.h.
+ 2018-03-05  Kito Cheng  <kito.cheng@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
+       * config/nds32/nds32-protos.h (nds32_expand_setmem): Declare.
+       * config/nds32/nds32-multiple.md (setmemsi): Define.
+       * config/nds32/nds32-memory-manipulation.c
+       (nds32_gen_dup_4_byte_to_word_value): New.
+       (emit_setmem_word_loop): New.
+       (emit_setmem_byte_loop): New.
+       (nds32_expand_setmem_loop): New.
+       (nds32_expand_setmem_loop_v3m): New.
+       (nds32_expand_setmem_unroll): New.
+       (nds32_expand_setmem): New.
+ 2018-03-04  Kito Cheng  <kito.cheng@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
+       * config/nds32/nds32-memory-manipulation.c
+       (nds32_emit_load_store): New.
+       (nds32_emit_post_inc_load_store): New.
+       (nds32_emit_mem_move): New.
+       (nds32_emit_mem_move_block): New.
+       (nds32_expand_movmemsi_loop_unknown_size): New.
+       (nds32_expand_movmemsi_loop_known_size): New.
+       (nds32_expand_movmemsi_loop): New.
+       (nds32_expand_movmemsi_unroll): New.
+       (nds32_expand_movmemqi): Rename ...
+       (nds32_expand_movmemsi): ... to this.
+       * config/nds32/nds32-multiple.md (movmemqi): Rename ...
+       (movmemsi): ... to this.
+       * config/nds32/nds32-protos.h (nds32_expand_movmemqi): Rename ...
+       (nds32_expand_movmemsi): ... to this.
+ 2018-03-04  Kito Cheng  <kito.cheng@gmail.com>
+           Monk Chiang  <sh.chiang04@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
+       * config/nds32/nds32-protos.h
+       (nds32_expand_load_multiple): New arguments.
+       (nds32_expand_store_multiple): Ditto.
+       (nds32_valid_multiple_load_store): Rename ...
+       (nds32_valid_multiple_load_store_p): ... to this.
+       * config/nds32/nds32-memory-manipulation.c
+       (nds32_expand_load_multiple): Refine implementation.
+       (nds32_expand_store_multiple): Ditto.
+       * config/nds32/nds32-multiple.md
+       (load_multiple): Update nds32_expand_load_multiple interface.
+       (store_multiple): Update nds32_expand_store_multiple interface.
+       * config/nds32/nds32-predicates.c
+       (nds32_valid_multiple_load_store): Rename ...
+       (nds32_valid_multiple_load_store_p): ... to this and refine
+       implementation.
+       * config/nds32/predicates.md
+       (nds32_load_multiple_and_update_address_operation): New predicate.
+       (nds32_store_multiple_and_update_address_operation): New predicate.
+ 2018-03-04  Kito Cheng  <kito.cheng@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
+       * config/nds32/nds32.md (type): Add load_multiple and store_multiple.
+       (combo): New attribute.
+       * config/nds32/nds32-multiple.md: Refine patterns with new attributes.
+ 2018-03-03  Chung-Ju Wu  <jasonwucj@gmail.com>
+       * config/nds32/nds32.opt: Change -mcmodel= default value.
+ 2018-03-03  Kito Cheng  <kito.cheng@gmail.com>
+           Monk Chiang  <sh.chiang04@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
+       * config/nds32/constants.md (unspec_element): New enum.
+       * config/nds32/constraints.md (Umw): New constraint.
+       * config/nds32/nds32-intrinsic.c: Add more builtin functions.
+       * config/nds32/nds32-intrinsic.md: Likewise.
+       * config/nds32/nds32-md-auxiliary.c (nds32_regno_to_enable4): New.
+       (nds32_valid_smw_lwm_base_p): New.
+       (nds32_output_smw_single_word): New.
+       (nds32_output_lmw_single_word): New.
+       (nds32_expand_unaligned_load): New.
+       (nds32_expand_unaligned_store): New.
+       * config/nds32/nds32-protos.h (nds32_valid_smw_lwm_base_p): Declare.
+       (nds32_output_smw_single_word): Declare.
+       (nds32_output_lmw_single_word): Declare.
+       (nds32_expand_unaligned_load): Declare.
+       (nds32_expand_unaligned_store): Declare.
+       * config/nds32/nds32.h (nds32_builtins): Add NDS32_BUILTIN_UALOAD_HW,
+       NDS32_BUILTIN_UALOAD_W, NDS32_BUILTIN_UALOAD_DW,
+       NDS32_BUILTIN_UASTORE_HW, NDS32_BUILTIN_UASTORE_W,
+       NDS32_BUILTIN_UASTORE_DW.
+       * config/nds32/predicates.md (nds32_lmw_smw_base_operand): New
+       predicate.
  
-       * coretypes.h (pad_direction): New enum.
-       * defaults.h (DEFAULT_FUNCTION_ARG_PADDING): Delete.
-       (FUNCTION_ARG_PADDING): Likewise.
-       * target.def (function_arg_padding): New hook.
-       * targhooks.h (default_function_arg_padding): Declare.
-       * targhooks.c (default_function_arg_padding): New function.
-       * doc/tm.texi.in (FUNCTION_ARG_PADDING): Replace with...
-       (TARGET_FUNCTION_ARG_PADDING): ...this.
-       * doc/tm.texi: Regenerate.
-       * calls.c (store_unaligned_arguments_into_pseudos): Use pad_direction
-       instead of direction.
-       (compute_argument_addresses): Likewise.
-       (load_register_parameters): Likewise.
-       (emit_library_call_value_1): Likewise.
-       (store_one_arg): Use targetm.calls.function_arg_padding instead
-       of FUNCTION_ARG_PADDING.
-       (must_pass_in_stack_var_size_or_pad): Likewise.
-       * expr.c (emit_group_load_1): Use pad_direction instead of direction.
-       (emit_group_store): Likewise.
-       (emit_single_push_insn_1): Use targetm.calls.function_arg_padding
-       instead of FUNCTION_ARG_PADDING.
-       (emit_push_insn): Likewise, and propagate enum change throughout
-       function.
-       * function.h (direction): Delete.
-       (locate_and_pad_arg_data::where_pad): Use pad_direction instead
-       of direction.
-       * function.c (assign_parm_find_stack_rtl): Likewise.
-       (assign_parm_setup_block_p): Likewise.
-       (assign_parm_setup_block): Likewise.
-       (gimplify_parameters): Likewise.
-       (locate_and_pad_parm): Use targetm.calls.function_arg_padding
-       instead of FUNCTION_ARG_PADDING, and propagate enum change throughout
-       function.
-       * config/aarch64/aarch64.h (FUNCTION_ARG_PADDING): Delete.
-       (BLOCK_REG_PADDING): Use pad_direction instead of direction.
-       * config/aarch64/aarch64-protos.h (aarch64_pad_arg_upward): Delete.
-       * config/aarch64/aarch64.c (aarch64_pad_arg_upward): Replace with...
-       (aarch64_function_arg_padding): ...this new function.
-       (aarch64_gimplify_va_arg_expr): Use pad_direction instead of direction.
-       (TARGET_FUNCTION_ARG_PADDING): Redefine.
-       * config/arm/arm.h (FUNCTION_ARG_PADDING): Delete.
-       (BLOCK_REG_PADDING): Use pad_direction instead of direction.
-       * config/arm/arm-protos.h (arm_pad_arg_upward): Delete.
-       * config/arm/arm.c (TARGET_FUNCTION_ARG_PADDING): Redefine.
-       (arm_pad_arg_upward): Replace with...
-       (arm_function_arg_padding): ...this new function.
-       * config/c6x/c6x.h (BLOCK_REG_PADDING): Use pad_direction instead
-       of direction.
-       * config/ia64/hpux.h (FUNCTION_ARG_PADDING): Delete.
-       * config/ia64/ia64-protos.h (ia64_hpux_function_arg_padding): Delete.
-       * config/ia64/ia64.c (TARGET_FUNCTION_ARG_PADDING): Redefine.
-       (ia64_hpux_function_arg_padding): Replace with...
-       (ia64_function_arg_padding): ...this new function.  Use pad_direction
-       instead of direction.  Check for TARGET_HPUX.
-       * config/iq2000/iq2000.h (FUNCTION_ARG_PADDING): Delete.
-       * config/iq2000/iq2000.c (TARGET_FUNCTION_ARG_PADDING): Redefine.
-       (iq2000_function_arg_padding): New function.
-       * config/mips/mips-protos.h (mips_pad_arg_upward): Delete.
-       * config/mips/mips.c (mips_pad_arg_upward): Replace with...
-       (mips_function_arg_padding): ...this new function.
-       (mips_pad_reg_upward): Update accordingly.
-       (TARGET_FUNCTION_ARG_PADDING): Redefine.
-       * config/mips/mips.h (PAD_VARARGS_DOWN): Use
-       targetm.calls.function_arg_padding.
-       (FUNCTION_ARG_PADDING): Delete.
-       (BLOCK_REG_PADDING): Use pad_direction instead of direction.
-       * config/nios2/nios2.h (FUNCTION_ARG_PADDING): Delete.
-       (PAD_VARARGS_DOWN): Use targetm.calls.function_arg_padding.
-       * config/nios2/nios2-protos.h (nios2_function_arg_padding): Delete.
-       (nios2_block_reg_padding): Return pad_direction instead of direction.
-       * config/nios2/nios2.c (nios2_block_reg_padding): Return pad_direction
-       instead of direction.
-       (nios2_function_arg_padding): Likewise.  Make static.
-       (TARGET_FUNCTION_ARG_PADDING): Redefine.
-       * config/pa/pa.h (FUNCTION_ARG_PADDING): Delete.
-       (BLOCK_REG_PADDING): Use targetm.calls.function_arg_padding.
-       * config/pa/pa-protos.h (pa_function_arg_padding): Delete.
-       * config/pa/pa.c (TARGET_FUNCTION_ARG_PADDING): Redefine.
-       (pa_function_arg_padding): Make static.  Return pad_direction instead
-       of direction.
-       * config/powerpcspe/powerpcspe.h (FUNCTION_ARG_PADDING): Delete.
-       (PAD_VARARGS_DOWN): Use targetm.calls.function_arg_padding.
-       * config/powerpcspe/aix.h (BLOCK_REG_PADDING): Use pad_direction
-       instead of direction.  Use targetm.calls.function_arg_padding.
-       * config/powerpcspe/darwin.h (BLOCK_REG_PADDING): Likewise.
-       * config/powerpcspe/freebsd64.h (BLOCK_REG_PADDING): Likewise.
-       * config/powerpcspe/linux64.h (BLOCK_REG_PADDING): Likewise.
-       * config/powerpcspe/powerpcspe-protos.h (function_arg_padding): Delete.
-       * config/powerpcspe/powerpcspe.c (TARGET_FUNCTION_ARG_PADDING):
-       Redefine.
-       (function_arg_padding): Rename to...
-       (rs6000_function_arg_padding): ...this.  Make static.  Return
-       pad_direction instead of direction.
-       (rs6000_return_in_msb): Use rs6000_function_arg_padding.
-       * config/rs6000/rs6000.h (FUNCTION_ARG_PADDING): Delete.
-       (PAD_VARARGS_DOWN): Use targetm.calls.function_arg_padding.
-       * config/rs6000/aix.h (BLOCK_REG_PADDING): Use pad_direction
-       instead of direction.  Use targetm.calls.function_arg_padding.
-       * config/rs6000/darwin.h (BLOCK_REG_PADDING): Likewise.
-       * config/rs6000/freebsd64.h (BLOCK_REG_PADDING): Likewise.
-       * config/rs6000/linux64.h (BLOCK_REG_PADDING): Likewise.
-       * config/rs6000/rs6000-protos.h (function_arg_padding): Delete.
-       * config/rs6000/rs6000.c (TARGET_FUNCTION_ARG_PADDING): Redefine.
-       (function_arg_padding): Rename to...
-       (rs6000_function_arg_padding): ...this.  Make static.  Return
-       pad_direction instead of direction.
-       (rs6000_return_in_msb): Use rs6000_function_arg_padding.
-       * config/s390/s390.h (FUNCTION_ARG_PADDING): Delete.
-       * config/s390/s390.c (s390_function_arg_padding): New function.
-       (TARGET_FUNCTION_ARG_PADDING): Redefine.
-       * config/sparc/sparc.h (FUNCTION_ARG_PADDING): Delete.
-       * config/sparc/sparc-protos.h (function_arg_padding): Delete.
-       * config/sparc/sparc.c (TARGET_FUNCTION_ARG_PADDING): Redefine.
-       (function_arg_padding): Rename to...
-       (sparc_function_arg_padding): ...this.  Make static.  Return
-       pad_direction instead of direction.
-       * config/spu/spu.h (FUNCTION_ARG_PADDING): Delete.
-       * config/spu/spu.c (spu_function_arg_padding): New function.
-       (TARGET_FUNCTION_ARG_PADDING): Redefine.
-       * system.h (FUNCTION_ARG_PADDING): Poison.
- 2017-09-04  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-03-03  Monk Chiang  <sh.chiang04@gmail.com>
+           Kito Cheng  <kito.cheng@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
+       * config/nds32/nds32-intrinsic.c
+       (nds32_expand_builtin_null_ftype_reg): Delete.
+       (nds32_expand_builtin_reg_ftype_imm): Ditto.
+       (nds32_expand_builtin_null_ftype_reg_imm): Ditto.
+       (nds32_read_argument): New.
+       (nds32_legitimize_target): Ditto.
+       (nds32_legitimize_argument): Ditto.
+       (nds32_check_constant_argument): Ditto.
+       (nds32_expand_unop_builtin): Ditto.
+       (nds32_expand_unopimm_builtin): Ditto.
+       (nds32_expand_binop_builtin): Ditto.
+       (nds32_builtin_decl_impl): Ditto.
+       (builtin_description): Ditto.
+       (nds32_expand_builtin_impl): Rewrite with new infrastructure.
+       (nds32_init_builtins_impl): Ditto.
+       * config/nds32/nds32.c (TARGET_BUILTIN_DECL): Define.
+       (nds32_builtin_decl): New.
+       * config/nds32/nds32.h (nds32_builtins): Add NDS32_BUILTIN_COUNT.
+       * config/nds32/nds32-protos.h (nds32_builtin_decl_impl): Declare.
+ 2018-03-02  Jeff Law  <law@redhat.com>
+       * reorg.c (stop_search_p): Handle DEBUG_INSN.
+       (redundant_insn, fill_simple_delay_slots): Likewise.
+       (fill_slots_from_thread): Likewise.
+       * resource.c (mark_referenced_resources): Likewise.
+       (mark_set_resources, find_dead_or_set_registers): Likewise.
+ 2018-03-02  Jakub Jelinek  <jakub@redhat.com>
+       * substring-locations.h (format_warning_va): Formatting fix for
+       ATTRIBUTE_GCC_DIAG.
+       (format_warning_at_substring): Fix up ATTRIBUTE_GCC_DIAG second
+       argument.
+       (format_warning_n_va, format_warning_at_substring_n): New prototypes.
+       * substring-locations.c: Include intl.h.
+       (format_warning_va): Turned into small wrapper around
+       format_warning_n_va, renamed to ...
+       (format_warning_n_va): ... this, add N and PLURAL_GMSGID arguments,
+       rename GMSGID to SINGULAR_GMSGID, if SINGULAR_GMSGID != PLURAL_GMSGID,
+       use ngettext.
+       (format_warning_at_substring_n): New function.
+       * gimple-ssa-sprintf.c: Remove GCC diagnostic ignored pragma.
+       (fmtwarn): Add ATTRIBUTE_GCC_DIAG.  Turn into a copy of
+       format_warning_at_substring with just a shorter name instead of
+       const function pointer.
+       (fmtwarn_n): New function.
+       (maybe_warn, format_directive, parse_directive): Use fmtwarn_n where
+       appropriate, get rid of all the fmtstr temporaries, move conditionals
+       with G_() wrapped string literals directly into fmtwarn arguments,
+       cast dir.len to (int), formatting fixes.
+ 2018-03-02  Thomas Schwinge  <thomas@codesourcery.com>
+       * doc/invoke.texi: Remove "Cilk Plus" references.
+ 2018-03-02  Jakub Jelinek  <jakub@redhat.com>
+           Richard Biener  <rguenther@suse.de>
  
-       * target.def (modes_tieable_p): New hook.
-       * doc/tm.texi (MODES_TIEABLE_P): Replace with...
-       (TARGET_MODES_TIEABLE_P): ...this.
-       * doc/tm.texi.in: Regenerate.
-       * hooks.h (hook_bool_mode_mode_true): Declare.
-       * hooks.c (hook_bool_mode_mode_true): New function.
-       * combine.c (subst): Use targetm.modes_tieable_p instead of
-       MODES_TIEABLE_P.
-       * dse.c (find_shift_sequence): Likewise.
-       * expmed.c (extract_low_bits): Likewise.
-       * lower-subreg.c: Include target.h.
-       (find_decomposable_subregs): Use targetm.modes_tieable_p instead of
-       MODES_TIEABLE_P.
-       * rtlanal.c (rtx_cost): Likewise.
-       * config/aarch64/aarch64.h (MODES_TIEABLE_P): Delete.
-       * config/aarch64/aarch64-protos.h (aarch64_modes_tieable_p): Delete.
-       * config/aarch64/aarch64.c (aarch64_modes_tieable_p): Make static.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/alpha/alpha.h (MODES_TIEABLE_P): Delete.
-       * config/alpha/alpha.c (alpha_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/arc/arc.h (MODES_TIEABLE_P): Delete.
-       * config/arc/arc.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (arc_modes_tieable_p): New function.
-       * config/arm/arm.h (MODES_TIEABLE_P): Delete.
-       * config/arm/arm-protos.h (arm_modes_tieable_p): Delete.
-       * config/arm/arm.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (arm_modes_tieable_p): Make static.
-       * config/avr/avr.h (MODES_TIEABLE_P): Delete.
-       * config/bfin/bfin.h (MODES_TIEABLE_P): Delete.
-       * config/bfin/bfin.c (bfin_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/c6x/c6x.h (MODES_TIEABLE_P): Delete.
-       * config/c6x/c6x.c (c6x_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/cr16/cr16.h (MODES_TIEABLE_P): Delete.
-       * config/cr16/cr16.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (cr16_modes_tieable_p): New function.
-       * config/cris/cris.h (MODES_TIEABLE_P): Delete.
-       * config/epiphany/epiphany.h (MODES_TIEABLE_P): Delete.
-       * config/fr30/fr30.h (MODES_TIEABLE_P): Delete.
-       (TRULY_NOOP_TRUNCATION): Update comment.
-       * config/frv/frv.h (MODES_TIEABLE_P): Delete.
-       (TRULY_NOOP_TRUNCATION): Update comment.
-       * config/frv/frv.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (frv_modes_tieable_p): New function.
-       * config/ft32/ft32.h (MODES_TIEABLE_P): Delete.
-       * config/h8300/h8300.h (MODES_TIEABLE_P): Delete.
-       * config/h8300/h8300.c (h8300_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/i386/i386.h (MODES_TIEABLE_P): Delete.
-       * config/i386/i386-protos.h (ix86_modes_tieable_p): Delete.
-       * config/i386/i386.c (ix86_modes_tieable_p): Make static.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/ia64/ia64.h (MODES_TIEABLE_P): Delete.
-       * config/ia64/ia64.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (ia64_modes_tieable_p): New function.
-       * config/iq2000/iq2000.h (MODES_TIEABLE_P): Delete.
-       * config/iq2000/iq2000.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (iq2000_modes_tieable_p): New function.
-       * config/lm32/lm32.h (MODES_TIEABLE_P): Delete.
-       * config/lm32/lm32.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (lm32_modes_tieable_p): New function.
-       * config/m32c/m32c.h (MODES_TIEABLE_P): Delete.
-       * config/m32c/m32c-protos.h (m32c_modes_tieable_p): Delete.
-       * config/m32c/m32c.c (m32c_modes_tieable_p): Make static.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/m32r/m32r.h (MODES_TIEABLE_P): Delete.
-       * config/m32r/m32r.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (m32r_modes_tieable_p): New function.
-       * config/m68k/m68k.h (MODES_TIEABLE_P): Delete.
-       * config/m68k/m68k.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (m68k_modes_tieable_p): New function.
-       * config/mcore/mcore.h (MODES_TIEABLE_P): Delete.
-       * config/mcore/mcore.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (mcore_modes_tieable_p): New function.
-       * config/microblaze/microblaze.h (MODES_TIEABLE_P): Delete.
-       * config/microblaze/microblaze.c (microblaze_modes_tieable_p): New
-       function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/mips/mips.h (MODES_TIEABLE_P): Delete.
-       * config/mips/mips-protos.h (mips_modes_tieable_p): Delete.
-       * config/mips/mips.c (mips_modes_tieable_p): Make static.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/mmix/mmix.h (MODES_TIEABLE_P): Delete.
-       * config/mn10300/mn10300.h (MODES_TIEABLE_P): Delete.
-       * config/mn10300/mn10300-protos.h (mn10300_modes_tieable): Delete.
-       * config/mn10300/mn10300.c (mn10300_modes_tieable): Rename to...
-       (mn10300_modes_tieable_p): ...this and make static.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/moxie/moxie.h (MODES_TIEABLE_P): Delete.
-       * config/msp430/msp430.h (MODES_TIEABLE_P): Delete.
-       * config/msp430/msp430-protos.h (msp430_modes_tieable_p): Delete.
-       * config/msp430/msp430.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (msp430_modes_tieable_p): Make static.
-       * config/nds32/nds32.h (MODES_TIEABLE_P): Delete.
-       * config/nds32/nds32.c (nds32_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/nios2/nios2.h (MODES_TIEABLE_P): Delete.
-       * config/nvptx/nvptx.h (MODES_TIEABLE_P): Delete.
-       * config/nvptx/nvptx.c (nvptx_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/pa/pa.h (MODES_TIEABLE_P): Delete.
-       * config/pa/pa-protos.h (pa_modes_tieable_p): Delete.
-       * config/pa/pa.c (pa_modes_tieable_p): Make static.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/pdp11/pdp11.h (MODES_TIEABLE_P): Delete.
-       * config/pdp11/pdp11.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (pdp11_modes_tieable_p): New function.
-       * config/powerpcspe/powerpcspe.h (MODES_TIEABLE_P): Delete.
-       * config/powerpcspe/powerpcspe.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (rs6000_modes_tieable_p): New function.
-       (rs6000_debug_reg_global): Use it instead of MODES_TIEABLE_P.
-       * config/powerpcspe/powerpcspe.md: Update comment.
-       * config/riscv/riscv.h (MODES_TIEABLE_P): Delete.
-       * config/riscv/riscv.c (riscv_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/rl78/rl78.h (MODES_TIEABLE_P): Delete.
-       * config/rl78/rl78.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (rl78_modes_tieable_p): New function.
-       * config/rs6000/rs6000.h (MODES_TIEABLE_P): Delete.
-       * config/rs6000/rs6000.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (rs6000_modes_tieable_p): New function.
-       (rs6000_debug_reg_global): Use it instead of MODES_TIEABLE_P.
-       * config/rs6000/rs6000.md: Update comment.
-       * config/rx/rx.h (MODES_TIEABLE_P): Delete.
-       * config/rx/rx.c (rx_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/s390/s390.h (MODES_TIEABLE_P): Delete.
-       * config/s390/s390.c (s390_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/sh/sh.h (MODES_TIEABLE_P): Delete.
-       * config/sh/sh.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (sh_modes_tieable_p): New function.
-       * config/sparc/sparc.h (MODES_TIEABLE_P): Delete.
-       * config/sparc/sparc-protos.h (sparc_modes_tieable_p): Delete.
-       * config/sparc/sparc.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (sparc_modes_tieable_p): Make static.
-       * config/spu/spu.h (MODES_TIEABLE_P): Delete.
-       * config/spu/spu.c (spu_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/stormy16/stormy16.h (MODES_TIEABLE_P): Delete.
-       * config/stormy16/stormy16.c (xstormy16_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/tilegx/tilegx.h (MODES_TIEABLE_P): Delete.
-       * config/tilepro/tilepro.h (MODES_TIEABLE_P): Delete.
-       * config/v850/v850.h (MODES_TIEABLE_P): Delete.
-       * config/v850/v850.c (v850_modes_tieable_p): New function.
-       (TARGET_MODES_TIEABLE_P): Redefine.
-       * config/vax/vax.h (MODES_TIEABLE_P): Delete.
-       * config/visium/visium.h (MODES_TIEABLE_P): Delete.
-       * config/visium/visium.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (visium_modes_tieable_p): New function.
-       * config/xtensa/xtensa.h (MODES_TIEABLE_P): Delete.
-       * config/xtensa/xtensa.c (TARGET_MODES_TIEABLE_P): Redefine.
-       (xtensa_modes_tieable_p): New function.
-       * system.h (MODES_TIEABLE_P): Poison.
- 2017-09-04  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR ipa/84628
+       * expr.c (expand_expr_real_1) <case CALL_EXPR>: Don't emit diagnostics
+       for error or warning attributes if CALL_FROM_THUNK_P is set.
+       Formatting fixes.
  
-       * target.def (hard_regno_mode_ok): New hook.
-       * doc/tm.texi (HARD_REGNO_MODE_OK): Replace with...
-       (TARGET_HARD_REGNO_MODE_OK): ...this.
-       * doc/tm.texi.in: Regenerate.
-       * hooks.h (hook_bool_uint_mode_true): Declare.
-       * hooks.c (hook_bool_uint_mode_true): New function.
-       * doc/md.texi: Refer to targetm.hard_regno_mode_ok instead of
-       HARD_REGNO_MODE_OK.
-       * genpreds.c (write_insn_preds_c): Add an include of target.h.
-       * alias.c (init_alias_target): Use targetm.hard_regno_mode_ok
-       instead of HARD_REGNO_MODE_OK.
-       * caller-save.c: Include target.h.
-       (reg_save_code): Use targetm.hard_regno_mode_ok instead of
-       HARD_REGNO_MODE_OK.
-       * combine.c (can_combine_p): Likewise.
-       (combinable_i3pat): Likewise.
-       (can_change_dest_mode): Likewise.
-       * expr.c (init_expr_target): Likewise.
-       (convert_move): Likewise.
-       (convert_modes): Likewise.
-       * ira.c (setup_prohibited_class_mode_regs): Likewise.
-       (setup_prohibited_mode_move_regs): Likewise.
-       * ira.h (target_ira): Likewise.
-       * lra-assigns.c (find_hard_regno_for_1): Likewise.
-       * lra-constraints.c (process_alt_operands): Likewise.
-       (split_reg): Likewise.
-       * recog.c (peep2_find_free_register): Likewise.
-       * ree.c (combine_reaching_defs): Likewise.
-       * regcprop.c (maybe_mode_change): Likewise.
-       * reginfo.c (init_reg_sets_1): Likewise.
-       (choose_hard_reg_mode): Likewise.
-       (simplifiable_subregs): Likewise.
-       * regrename.c (check_new_reg_p): Likewise.
-       * reload.c (find_valid_class): Likewise.
-       (find_valid_class_1): Likewise.
-       (reload_inner_reg_of_subreg): Likewise.
-       (push_reload): Likewise.
-       (combine_reloads): Likewise.
-       (find_dummy_reload): Likewise.
-       (find_reloads): Likewise.
-       * reload1.c (find_reg): Likewise.
-       (set_reload_reg): Likewise.
-       (allocate_reload_reg): Likewise.
-       (choose_reload_regs): Likewise.
-       (reload_adjust_reg_for_temp): Likewise.
-       * rtlanal.c (subreg_size_offset_from_lsb): Likewise.
-       (simplify_subreg_regno): Likewise.
-       * sel-sched.c (init_regs_for_mode): Likewise.
-       * varasm.c (make_decl_rtl): Likewise.
-       * config/aarch64/aarch64.h (HARD_REGNO_MODE_OK): Delete.
-       (MODES_TIEABLE_P): Use targetm.hard_regno_mode_ok instead of
-       HARD_REGNO_MODE_OK.
-       * config/aarch64/aarch64-protos.h (aarch64_hard_regno_mode_ok): Delete.
-       * config/aarch64/aarch64.c (aarch64_hard_regno_mode_ok): Make static.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/alpha/alpha.h (HARD_REGNO_MODE_OK): Delete.
-       * config/alpha/alpha.c (alpha_hard_regno_mode_ok): New function.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/arc/arc.h (arc_hard_regno_mode_ok): Delete.
-       (arc_mode_class): Delete.
-       (HARD_REGNO_MODE_OK): Delete.
-       * config/arc/arc.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (arc_hard_regno_mode_ok): Rename old array to...
-       (arc_hard_regno_mode_ok_modes): ...this.
-       (arc_conditional_register_usage): Update accordingly.
-       (arc_mode_class): Make static.
-       (arc_hard_regno_mode_ok): New function.
-       * config/arm/arm.h (HARD_REGNO_MODE_OK): Delete.
-       * config/arm/arm-protos.h (arm_hard_regno_mode_ok): Delete.
-       * config/arm/arm.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (arm_hard_regno_mode_ok): Make static.
-       * config/arm/arm.md (movdi): Use targetm.hard_regno_mode_ok instead of
-       HARD_REGNO_MODE_OK.
-       * config/avr/avr-protos.h (avr_hard_regno_mode_ok): Delete.
-       * config/avr/avr.h (HARD_REGNO_MODE_OK): Delete.
-       * config/avr/avr.c (avr_hard_regno_mode_ok): Make static and
-       return a bool.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/bfin/bfin-protos.h (hard_regno_mode_ok): Delete.
-       * config/bfin/bfin.h (HARD_REGNO_MODE_OK): Delete.
-       * config/bfin/bfin.c (hard_regno_mode_ok): Rename to...
-       (bfin_hard_regno_mode_ok): ...this.  Make static and return a bool.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/bfin/predicates.md (valid_reg_operand): Use
-       targetm.hard_regno_mode_ok instead of HARD_REGNO_MODE_OK.
-       * config/c6x/c6x.h (HARD_REGNO_MODE_OK): Delete.
-       * config/c6x/c6x.c (c6x_hard_regno_mode_ok): New function.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/cr16/cr16.h (HARD_REGNO_MODE_OK): Delete.
-       * config/cr16/cr16-protos.h (cr16_hard_regno_mode_ok): Delete.
-       * config/cr16/cr16.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (cr16_hard_regno_mode_ok): Make static and return a bool.
-       * config/cris/cris.h (HARD_REGNO_MODE_OK): Delete.
-       * config/cris/cris.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (cris_hard_regno_mode_ok): New function.
-       * config/epiphany/epiphany.h (epiphany_hard_regno_mode_ok): Delete.
-       (epiphany_mode_class): Delete.
-       (HARD_REGNO_MODE_OK): Delete.
-       * config/epiphany/epiphany-protos.h (hard_regno_mode_ok): Delete.
-       * config/epiphany/epiphany.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (hard_regno_mode_ok): Rename to...
-       (epiphany_hard_regno_mode_ok): ...this.  Make static and return a bool.
-       * config/fr30/fr30.h (HARD_REGNO_MODE_OK): Delete.
-       * config/fr30/fr30.md: Refer to targetm.hard_regno_mode_ok instead of
-       HARD_REGNO_MODE_OK.
-       * config/frv/frv.h (HARD_REGNO_MODE_OK): Delete.
-       * config/frv/frv-protos.h (frv_hard_regno_mode_ok): Delete.
-       * config/frv/frv.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (frv_hard_regno_mode_ok): Make static and return a bool.
-       * config/frv/frv.md: Refer to targetm.hard_regno_mode_ok instead of
-       HARD_REGNO_MODE_OK.
-       * config/ft32/ft32.h (HARD_REGNO_MODE_OK): Delete.
-       * config/h8300/h8300.h (HARD_REGNO_MODE_OK): Delete.
-       * config/h8300/h8300-protos.h (h8300_hard_regno_mode_ok): Delete.
-       * config/h8300/h8300.c (h8300_hard_regno_mode_ok): Make static
-       and return a bool.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/i386/i386.h (HARD_REGNO_MODE_OK): Delete.
-       * config/i386/i386-protos.h (ix86_hard_regno_mode_ok): Delete.
-       * config/i386/i386.c (ix86_hard_regno_mode_ok): Make static and
-       return a bool.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/ia64/ia64.h (HARD_REGNO_MODE_OK): Delete.
-       * config/ia64/ia64.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (ia64_hard_regno_mode_ok): New function.
-       * config/iq2000/iq2000.h (HARD_REGNO_MODE_OK): Delete.
-       * config/iq2000/iq2000.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (iq2000_hard_regno_mode_ok): New function.
-       * config/lm32/lm32.h (HARD_REGNO_MODE_OK): Delete.
-       * config/lm32/lm32.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (lm32_hard_regno_mode_ok): New function.
-       * config/m32c/m32c.h (HARD_REGNO_MODE_OK): Delete.
-       * config/m32c/m32c-protos.h (m32c_hard_regno_ok): Delete.
-       * config/m32c/m32c.c (class_can_hold_mode): Use m32c_hard_regno_mode_ok
-       instead of HARD_REGNO_MODE_OK.
-       (m32c_hard_regno_ok): Rename to...
-       (m32c_hard_regno_mode_ok): ...this.  Make static and return a bool.
-       (m32c_cannot_change_mode_class): Update accordingly.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/m32r/m32r.h (m32r_hard_regno_mode_ok): Delete.
-       (m32r_mode_class): Delete.
-       (HARD_REGNO_MODE_OK): Delete.
-       * config/m32r/m32r.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (m32r_hard_regno_mode_ok): Rename to...
-       (m32r_hard_regno_modes): ...this.
-       (m32r_mode_class): Make static.
-       (m32r_hard_regno_mode_ok): New function.
-       * config/m68k/m68k.h (HARD_REGNO_MODE_OK): Delete.
-       * config/m68k/m68k-protos.h (m68k_regno_mode_ok): Delete.
-       * config/m68k/m68k.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (m68k_hard_regno_mode_ok): Make static.
-       * config/mcore/mcore.h (HARD_REGNO_MODE_OK): Delete.
-       * config/mcore/mcore.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (mcore_hard_regno_mode_ok): New function.
-       * config/microblaze/microblaze.h (microblaze_hard_regno_mode_ok)
-       (HARD_REGNO_MODE_OK): Delete.
-       * config/microblaze/microblaze.c (microblaze_hard_regno_mode_ok):
-       Rename to...
-       (microblaze_hard_regno_mode_ok_p): ...this and make static.
-       (microblaze_hard_regno_mode_ok): New function.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/mips/mips.h (HARD_REGNO_MODE_OK): Delete.
-       (mips_hard_regno_mode_ok): Delete.
-       * config/mips/mips.c (mips_hard_regno_mode_ok): Rename to...
-       (mips_hard_regno_mode_ok_p): ...this and make static.
-       (mips_hard_regno_mode_ok_p): Rename to...
-       (mips_hard_regno_mode_ok_uncached): ...this.
-       (mips_hard_regno_mode_ok): New function.
-       (mips_class_max_nregs): Use mips_hard_regno_mode_ok instead
-       of HARD_REGNO_MODE_OK.
-       (mips_option_override): Update after above name changes.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/mmix/mmix.h (HARD_REGNO_MODE_OK): Delete.
-       * config/mn10300/mn10300.h (HARD_REGNO_MODE_OK): Delete.
-       * config/mn10300/mn10300-protos.h (mn10300_hard_regno_mode_ok): Delete.
-       * config/mn10300/mn10300.c (mn10300_hard_regno_mode_ok): Make static.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/moxie/moxie.h (HARD_REGNO_MODE_OK): Delete.
-       * config/msp430/msp430.h (HARD_REGNO_MODE_OK): Delete.
-       * config/msp430/msp430-protos.h (msp430_hard_regno_mode_ok): Delete.
-       * config/msp430/msp430.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (msp430_hard_regno_mode_ok): Make static and return a bool.
-       * config/nds32/nds32.h (HARD_REGNO_MODE_OK): Delete.
-       * config/nds32/nds32-protos.h (nds32_hard_regno_mode_ok): Delete.
-       * config/nds32/nds32.c (nds32_hard_regno_mode_ok): Make static
-       and return a bool.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/nios2/nios2.h (HARD_REGNO_MODE_OK): Delete.
-       * config/nvptx/nvptx.h (HARD_REGNO_MODE_OK): Delete.
-       * config/pa/pa.h (MODES_TIEABLE_P): Update commentary.
-       * config/pa/pa32-regs.h (HARD_REGNO_MODE_OK): Rename to...
-       (PA_HARD_REGNO_MODE_OK): ...this
-       * config/pa/pa64-regs.h (HARD_REGNO_MODE_OK): Rename to...
-       (PA_HARD_REGNO_MODE_OK): ...this.
-       * config/pa/pa.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (pa_hard_regno_mode_ok): New function.
-       * config/pdp11/pdp11.h (HARD_REGNO_MODE_OK): Delete.
-       * config/pdp11/pdp11.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (pdp11_hard_regno_mode_ok): New function.
-       * config/powerpcspe/powerpcspe.h (HARD_REGNO_MODE_OK): Delete.
-       * config/powerpcspe/powerpcspe-protos.h (rs6000_hard_regno_mode_ok_p):
-       Delete.
-       * config/powerpcspe/powerpcspe.c (rs6000_hard_regno_mode_ok_p):
-       Make static.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (rs6000_hard_regno_mode_ok): Rename to...
-       (rs6000_hard_regno_mode_ok_uncached): ...this.
-       (rs6000_init_hard_regno_mode_ok): Update accordingly.
-       (rs6000_hard_regno_mode_ok): New function.
-       * config/riscv/riscv.h (HARD_REGNO_MODE_OK): Delete.
-       * config/riscv/riscv-protos.h (riscv_hard_regno_mode_ok_p): Delete.
-       * config/riscv/riscv.c (riscv_hard_regno_mode_ok_p): Rename to...
-       (riscv_hard_regno_mode_ok): ...this and make static.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/rl78/rl78.h (HARD_REGNO_MODE_OK): Delete.
-       * config/rl78/rl78-protos.h (rl78_hard_regno_mode_ok): Delete.
-       * config/rl78/rl78.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (rl78_hard_regno_mode_ok): Make static and return bool.
-       * config/rs6000/rs6000.h (HARD_REGNO_MODE_OK): Delete.
-       * config/rs6000/rs6000-protos.h (rs6000_hard_regno_mode_ok_p):
-       Delete.
-       * config/rs6000/rs6000.c (rs6000_hard_regno_mode_ok_p): Make static.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (rs6000_hard_regno_mode_ok): Rename to...
-       (rs6000_hard_regno_mode_ok_uncached): ...this.
-       (rs6000_init_hard_regno_mode_ok): Update accordingly.
-       (rs6000_hard_regno_mode_ok): New function.
-       * config/rx/rx.h (HARD_REGNO_MODE_OK): Delete.
-       * config/rx/rx.c (rx_hard_regno_mode_ok): New function.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/s390/s390.h (HARD_REGNO_MODE_OK): Delete.
-       * config/s390/s390-protos.h (s390_hard_regno_mode_ok): Delete.
-       * config/s390/s390.c (s390_hard_regno_mode_ok): Make static.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/sh/sh.h (HARD_REGNO_MODE_OK): Delete.
-       * config/sh/sh-protos.h (sh_hard_regno_mode_ok): Delete.
-       * config/sh/sh.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (sh_hard_regno_mode_ok): Make static.
-       * config/sparc/constraints.md: Refer to targetm.hard_regno_mode_ok
-       instead of HARD_REGNO_MODE_OK.
-       * config/sparc/sparc.h (hard_regno_mode_classes): Delete.
-       (sparc_mode_class): Delete.
-       (HARD_REGNO_MODE_OK): Delete.
-       * config/sparc/sparc.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (hard_regno_mode_classes): Make static.
-       (sparc_mode_class): Likewise.
-       (sparc_hard_regno_mode_ok): New function.
-       * config/spu/spu.h (HARD_REGNO_MODE_OK): Delete.
-       * config/stormy16/stormy16.h (HARD_REGNO_MODE_OK): Delete.
-       * config/stormy16/stormy16.c (xstormy16_hard_regno_mode_ok): New
-       function.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/tilegx/tilegx.h (HARD_REGNO_MODE_OK): Delete.
-       * config/tilepro/tilepro.h (HARD_REGNO_MODE_OK): Delete.
-       * config/v850/v850.h (HARD_REGNO_MODE_OK): Delete.
-       * config/v850/v850.c (v850_hard_regno_mode_ok): New function.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       * config/vax/vax.h (HARD_REGNO_MODE_OK): Delete.
-       * config/visium/visium.h (HARD_REGNO_MODE_OK): Delete.
-       * config/visium/visium.c (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (visium_hard_regno_mode_ok): New function.
-       * config/visium/visium.md: Refer to targetm.hard_regno_mode_ok
-       instead of HARD_REGNO_MODE_OK.
-       * config/xtensa/xtensa.h (xtensa_hard_regno_mode_ok): Delete.
-       (HARD_REGNO_MODE_OK): Delete.
-       * config/xtensa/xtensa.c (xtensa_hard_regno_mode_ok): Rename to...
-       (xtensa_hard_regno_mode_ok_p): ...this and make static.
-       (xtensa_option_override): Update accordingly.
-       (TARGET_HARD_REGNO_MODE_OK): Redefine.
-       (xtensa_hard_regno_mode_ok): New function.
-       * system.h (HARD_REGNO_MODE_OK): Poison.
- 2017-09-04  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-03-02  Jakub Jelinek  <jakub@redhat.com>
  
-       * target.def (hard_regno_call_part_clobbered): New hook.
-       * doc/tm.texi.in (HARD_REGNO_CALL_PART_CLOBBERED): Replace with...
-       (TARGET_HARD_REGNO_CALL_PART_CLOBBERED): ...this hook.
-       * doc/tm.texi: Regenerate.
-       * hooks.h (hook_bool_uint_mode_false): Declare.
-       * hooks.c (hook_bool_uint_mode_false): New function.
-       * regs.h (HARD_REGNO_CALL_PART_CLOBBERED): Delete.
-       * cselib.c (cselib_process_insn): Use
-       targetm.hard_regno_call_part_clobbered instead of
-       HARD_REGNO_CALL_PART_CLOBBERED.
-       * ira-conflicts.c (ira_build_conflicts): Likewise.
-       * ira-costs.c (ira_tune_allocno_costs): Likewise.
-       * lra-constraints.c (need_for_call_save_p): Likewise.
-       * lra-lives.c: Include target.h.
-       (check_pseudos_live_through_calls): Use
-       targetm.hard_regno_call_part_clobbered instead of
-       HARD_REGNO_CALL_PART_CLOBBERED.
-       * regcprop.c: Include target.h.
-       (copyprop_hardreg_forward_1): Use
-       targetm.hard_regno_call_part_clobbered instead of
-       HARD_REGNO_CALL_PART_CLOBBERED.
-       * reginfo.c (choose_hard_reg_mode): Likewise.
-       * regrename.c (check_new_reg_p): Likewise.
-       * reload.c (find_equiv_reg): Likewise.
-       * reload1.c (emit_reload_insns): Likewise.
-       * sched-deps.c (deps_analyze_insn): Likewise.
-       * sel-sched.c (init_regs_for_mode): Likewise.
-       (mark_unavailable_hard_regs): Likewise.
-       * targhooks.c (default_dwarf_frame_reg_mode): Likewise.
-       * config/aarch64/aarch64.h (HARD_REGNO_CALL_PART_CLOBBERED): Delete.
-       * config/aarch64/aarch64.c (aarch64_hard_regno_call_part_clobbered):
-       New function.
-       (TARGET_HARD_REGNO_CALL_PART_CLOBBERED): Redefine.
-       * config/avr/avr.h (HARD_REGNO_CALL_PART_CLOBBERED): Delete.
-       * config/avr/avr-protos.h (avr_hard_regno_call_part_clobbered):
-       Delete.
-       * config/avr/avr.c (avr_hard_regno_call_part_clobbered): Make static
-       and return a bool.
-       (TARGET_HARD_REGNO_CALL_PART_CLOBBERED): Redefine.
-       * config/i386/i386.h (HARD_REGNO_CALL_PART_CLOBBERED): Delete.
-       * config/i386/i386.c (ix86_hard_regno_call_part_clobbered): New
-       function.
-       (TARGET_HARD_REGNO_CALL_PART_CLOBBERED): Redefine.
-       * config/mips/mips.h (HARD_REGNO_CALL_PART_CLOBBERED): Delete.
-       * config/mips/mips.c (mips_hard_regno_call_part_clobbered): New
-       function.
-       (TARGET_HARD_REGNO_CALL_PART_CLOBBERED): Redefine.
-       * config/powerpcspe/powerpcspe.h (HARD_REGNO_CALL_PART_CLOBBERED):
-       Delete.
-       * config/powerpcspe/powerpcspe.c
-       (rs6000_hard_regno_call_part_clobbered): New function.
-       (TARGET_HARD_REGNO_CALL_PART_CLOBBERED): Redefine.
-       * config/rs6000/rs6000.h (HARD_REGNO_CALL_PART_CLOBBERED): Delete.
-       * config/rs6000/rs6000.c (rs6000_hard_regno_call_part_clobbered):
-       New function.
-       (TARGET_HARD_REGNO_CALL_PART_CLOBBERED): Redefine.
-       * config/s390/s390.h (HARD_REGNO_CALL_PART_CLOBBERED): Delete.
-       * config/s390/s390.c (s390_hard_regno_call_part_clobbered): New
-       function.
-       (TARGET_HARD_REGNO_CALL_PART_CLOBBERED): Redefine.
-       * config/sh/sh.h (HARD_REGNO_CALL_PART_CLOBBERED): Delete.
-       * system.h (HARD_REGNO_CALL_PART_CLOBBERED): Poison.
+       PR target/56540
+       * config/pa/pa.h (TARGET_CPU_CPP_BUILTINS): Predefine
+       __SIZEOF_FLOAT128__ macro if HPUX_LONG_DOUBLE_LIBRARY.
  
- 2017-09-04  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/56540
+       * config/ia64/ia64.h (TARGET_CPU_CPP_BUILTINS): Predefine
+       __SIZEOF_{FPREG,FLOAT{80,128}}__ macros.
  
-       * rtl.h (subreg_memory_offset): Declare.
-       * emit-rtl.c (subreg_memory_offset): New function.
-       * expmed.c (store_bit_field_1): Use it.
-       * expr.c (undefined_operand_subword_p): Likewise.
-       * simplify-rtx.c (simplify_subreg): Likewise.
+       * predict.c (test_prediction_value_range): Use PROB_UNINITIALIZED
+       instead of -1U in last predictors element's probability member.
  
- 2017-09-04  Alexander Monakov  <amonakov@ispras.ru>
+ 2018-03-02  Eric Botcazou  <ebotcazou@adacore.com>
  
-       PR rtl-optimization/57448
-       PR target/67458
-       PR target/81316
-       * optabs.c (expand_atomic_load): Place compiler memory barriers if
-       using atomic_load pattern.
-       (expand_atomic_store): Likewise.
+       PR ipa/83983
+       * ipa-devirt.c (odr_subtypes_equivalent_p): Get the ODR type of both
+       arguments if they are comparable.
  
- 2017-09-04  Jakub Jelinek  <jakub@redhat.com>
-       PR sanitizer/81981
-       * gimple-fold.c (gimple_fold_call): Optimize away useless UBSAN_PTR
-       and UBSAN_BOUNDS internal calls.  Clean up IFN_UBSAN_OBJECT_SIZE
-       handling.  Use replace_call_with_value with NULL instead of
-       gsi_replace, unlink_stmt_vdef and release_defs.
-       * gdbhooks.py (OptMachineModePrinter.to_string): Use 8 spaces
-       instead of tab.
-       * lra-remat.c (reg_overlap_for_remat_p): Fix a pasto.
- 2017-09-04  Richard Sandiford  <richard.sandiford@linaro.org>
-       PR bootstrap/82045
-       * rtl.h (emit_library_call_value_1): Declare.
-       (emit_library_call): Replace declaration with a series of overloads.
-       Remove the parameter count argument.
-       (emit_library_call_value): Likewise.
-       * calls.c (emit_library_call_value_1): Make global.  Replace varargs
-       with an "rtx_mode_t *".
-       (emit_library_call_value): Delete.
-       (emit_library_call): Likewise.
-       * asan.c (asan_emit_stack_protection): Update calls accordingly.
-       (asan_emit_allocas_unpoison): Likewise.
-       * builtins.c (expand_builtin_powi): Likewise.
-       (expand_asan_emit_allocas_unpoison): Likewise.
-       * cfgexpand.c (expand_main_function): Likewise.
-       * config/aarch64/aarch64.c (aarch64_trampoline_init): Likewise.
-       * config/aarch64/aarch64.h (PROFILE_HOOK): Likewise.
-       * config/alpha/alpha.c (alpha_trampoline_init): Likewise.
-       * config/arm/arm.c (arm_trampoline_init): Likewise.
-       (arm_call_tls_get_addr): Likewise.
-       (arm_expand_divmod_libfunc): Likewise.
-       * config/bfin/bfin.md (umulsi3_highpart): Likewise.
-       (smulsi3_highpart): Likewise.
-       * config/c6x/c6x.c (c6x_initialize_trampoline): Likewise.
-       (c6x_expand_compare): Likewise.
-       (c6x_expand_movmem): Likewise.
-       * config/frv/frv.c (frv_trampoline_init): Likewise.
-       * config/i386/i386.c (ix86_trampoline_init): Likewise.
-       (ix86_expand_divmod_libfunc): Likewise.
-       * config/ia64/ia64.c (ia64_expand_tls_address): Likewise.
-       (ia64_expand_compare): Likewise.
-       (ia64_profile_hook): Likewise.
-       * config/ia64/ia64.md (save_stack_nonlocal): Likewise.
-       (nonlocal_goto): Likewise.
-       (restore_stack_nonlocal): Likewise.
-       * config/m32r/m32r.c (block_move_call): Likewise.
-       (m32r_trampoline_init): Likewise.
-       * config/m68k/linux.h (FINALIZE_TRAMPOLINE): Likewise.
-       * config/m68k/m68k.c (m68k_call_tls_get_addr): Likewise.
-       (m68k_call_m68k_read_tp): Likewise.
-       * config/microblaze/microblaze.c (microblaze_call_tls_get_addr)
-       (microblaze_expand_divide): Likewise.
-       * config/mips/mips.h (mips_args): Likewise.
-       * config/mips/sdemtk.h (mips_sync_icache): Likewise.
-       (MIPS_ICACHE_SYNC): Likewise.
-       * config/nios2/nios2.c (nios2_emit_expensive_div): Likewise.
-       (nios2_trampoline_init): Likewise.
-       * config/pa/pa.c (hppa_tls_call): Likewise.
-       (pa_trampoline_init): Likewise.
-       * config/pa/pa.md (canonicalize_funcptr_for_compare): Likewise.
-       * config/powerpcspe/powerpcspe.c (rs6000_legitimize_tls_address)
-       (expand_strn_compare): Likewise.
-       (rs6000_generate_compare): Likewise.
-       (rs6000_expand_float128_convert): Likewise.
-       (output_profile_hook): Likewise.
-       (rs6000_trampoline_init): Likewise.
-       * config/powerpcspe/powerpcspe.md (neg<mode>2): Likewise.
-       * config/riscv/riscv.h (PROFILE_HOOK): Likewise.
-       * config/rs6000/rs6000-string.c (expand_strn_compare): Likewise.
-       * config/rs6000/rs6000.c (rs6000_legitimize_tls_address): Likewise.
-       (rs6000_generate_compare): Likewise.
-       (rs6000_expand_float128_convert): Likewise.
-       (output_profile_hook): Likewise.
-       (rs6000_trampoline_init): Likewise.
-       * config/rs6000/rs6000.md (neg<mode>2): Likewise.
-       * config/sh/sh.c (sh_trampoline_init): Likewise.
-       * config/sparc/sparc.c (emit_soft_tfmode_libcall): Likewise.
-       (sparc_emit_float_lib_cmp): Likewise.
-       (sparc32_initialize_trampoline): Likewise.
-       (sparc64_initialize_trampoline): Likewise.
-       (sparc_profile_hook): Likewise.
-       * config/spu/spu.c (ea_load_store): Likewise.
-       * config/spu/spu.md (floatunssidf2): Likewise.
-       * config/tilegx/tilegx.c (tilegx_trampoline_init): Likewise.
-       * config/tilepro/tilepro.c (tilepro_trampoline_init): Likewise.
-       * config/visium/visium.c (expand_block_move_4): Likewise.
-       (expand_block_move_2): Likewise.
-       (expand_block_move_1): Likewise.
-       (expand_block_set_4): Likewise.
-       (expand_block_set_2): Likewise.
-       (expand_block_set_1): Likewise.
-       (visium_trampoline_init): Likewise.
-       (visium_profile_hook): Likewise.
-       * config/xtensa/xtensa.c (xtensa_expand_nonlocal_goto): Likewise.
-       (xtensa_setup_frame_addresses): Likewise.
-       (xtensa_trampoline_init): Likewise.
-       * except.c (sjlj_emit_function_enter): Likewise.
-       (sjlj_emit_function_exit): Likewise.
-       * explow.c (allocate_dynamic_stack_space): Likewise.
-       (probe_stack_range): Likewise.
-       * expr.c (convert_mode_scalar): Likewise.
-       * optabs.c (expand_binop): Likewise.
-       (expand_twoval_binop_libfunc): Likewise.
-       (expand_unop): Likewise.
-       (prepare_cmp_insn): Likewise.
-       (prepare_float_lib_cmp): Likewise.
-       (expand_float): Likewise.
-       (expand_fix): Likewise.
-       (expand_fixed_convert): Likewise.
-       (maybe_emit_sync_lock_test_and_set): Likewise.
-       (expand_atomic_compare_and_swap): Likewise.
-       (expand_mem_thread_fence): Likewise.
-       (expand_atomic_fetch_op): Likewise.
- 2017-09-03  Gerald Pfeifer  <gerald@pfeifer.com>
-       * doc/generic.texi (OpenACC): Adjust URL.
-       * doc/invoke.texi (C Dialect Options): Ditto.
- 2017-09-03  Uros Bizjak  <ubizjak@gmail.com>
-       * config/i386/i386.md (*bt<mode>): Use nonimmediate_operand
-       predicate for operand 1.  Add (m,<S>) constraint.
-       (*jcc_bt<mode>): Use nonimmediate_operand predicate for operand 1.
-       Prevent memory operand 1 with register operand 2.
- 2017-09-01  Segher Boessenkool  <segher@kernel.crashing.org>
-       PR rtl-optimization/82024
-       * combine.c (try_combine): If the combination result is a PARALLEL,
-       and we only need to retain the SET in there that would be placed
-       at I2, check that we can place that at I3 instead, before doing so.
- 2017-09-01  Jakub Jelinek  <jakub@redhat.com>
-       PR target/81766
-       * config/i386/i386.c (ix86_init_large_pic_reg): Return label
-       instead of void.
-       (ix86_init_pic_reg): Remember label from ix86_init_large_pic_reg,
-       if non-NULL and preceded by NOTE_INSN_BASIC_BLOCK, swap the note
-       and label.
- 2017-09-01  Joerg Sonnenberger  <joerg@bec.de>
-           Jeff Law  <law@redhat.com>
+ 2018-03-02  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       * varasm.c (bss_initializer_p): Do not put constants into .bss
-       (categorize_decl_for_section): Handle bss_initializer_p returning
-       false when DECL_INITIAL is NULL.
- 2017-09-01  Andreas Krebbel  <krebbel@linux.vnet.ibm.com>
-       PR target/82012
-       * config/s390/s390.c (s390_can_inline_p): New function.
- 2017-09-01  Jeff Law  <law@redhat.com>
-       PR tree-optimization/82052
-       * tree-ssa-scopedtables.c (avail_exprs_stack::lookup_avail_expr):
-       Always initialize the returned slot after a hash table miss
-       when INSERT is true.
- 2017-09-01  Alexander Monakov  <amonakov@ispras.ru>
-       * config/s390/s390.md (mem_signal_fence): Remove.
-       * doc/md.texi (mem_signal_fence): Remove.
-       * optabs.c (expand_mem_signal_fence): Remove uses of mem_signal_fence.
-       Update comments.
-       * target-insns.def (mem_signal_fence): Remove.
- 2017-09-01  Jakub Jelinek  <jakub@redhat.com>
-       PR sanitizer/81902
-       * doc/invoke.texi: Document -fsanitize=pointer-overflow.
-       PR sanitizer/81923
-       * asan.c (create_odr_indicator): Strip name encoding from assembler
-       name before appending it after __odr_asan_.
- 2017-09-01  Martin Liska  <mliska@suse.cz>
-       PR tree-optimization/82059
-       * gimple-ssa-isolate-paths.c (isolate_path): Add profile and
-       frequency only when an edge is redirected.
- 2017-09-01  Claudiu Zissulescu  <claziss@synopsys.com>
-       * config/arc/arc-c.c (__ARC_LPC_WIDTH__): Add builtin define.
-       * config/arc/arc.c (ARC_MAX_LOOP_LENGTH): Define.
-       (arc_conditional_register_usage): Remove ARC600 lp_count
-       exception.
-       (arc_file_start): Emit Tag_ARC_CPU_variation.
-       (arc_can_use_doloop_p): New conditions to use ZOLs.
-       (hwloop_fail): New function.
-       (hwloop_optimize): Likewise.
-       (hwloop_pattern_reg): Likewise.
-       (arc_doloop_hooks): New struct, to be used with reorg_loops.
-       (arc_reorg_loops): New function, calls reorg_loops.
-       (arc_reorg): Call arc_reorg_loops.  Remove old ZOL handling.
-       (arc600_corereg_hazard): Remove ZOL checking, case handled by
-       hwloop_optimize.
-       (arc_loop_hazard): Remove function, functionality moved into
-       hwloop_optimize.
-       (arc_hazard): Remove arc_loop_hazard call.
-       (arc_adjust_insn_length): Remove ZOL handling, functionality moved
-       into hwloop_optimize.
-       (arc_label_align): Remove ZOL handling.
-       * config/arc/arc.h (LOOP_ALIGN): Changed to 0.
-       * config/arc/arc.md (doloop_begin): Remove pattern.
-       (doloop_begin_i): Likewise.
-       (doloop_end_i): Likewise.
-       (doloop_fallback): Likewise.
-       (doloop_fallback_m): Likewise.
-       (doloop_end): Reimplement expand.
-       (arc_lp): New pattern for LP instruction.
-       (loop_end): New pattern.
-       (loop_fail): Likewise.
-       (decrement_and_branch_until_zero): Likewise.
-       * config/arc/arc.opt (mlpc-width): New option.
-       * doc/invoke.texi (mlpc-width): Document option.
- 2017-09-01  Claudiu Zissulescu  <claziss@synopsys.com>
-       * config/arc/arc.c (arc_ifcvt): Remove use of merge_blocks call.
-       (arc_ccfsm_advance): Fix checking for delay slots.
-       (arc_reorg): Add rtl dump after each call to arc_ifcvt.
- 2017-09-01  Claudiu Zissulescu  <claziss@synopsys.com>
-       * config/arc/arc.md (movqi_insn): Add stores to save constant long
-       immediates.
-       (movhi_insn): Update store instruction constraint which are saving
-       6-bit short immediates.
-       (movsi_insn): Consider also short scaled load operations.
-       (zero_extendhisi2_i): Use Usd constraint instead of T.
-       (extendhisi2_i): Add q constraint.
-       (arc_clzsi2): Add type and length attributes.
-       (arc_ctzsi2): Likewise.
-       * config/arc/constraints.md (Usc): Update constraint, the
-       assembler can parse two relocations for a single instruction.
- 2017-09-01  Claudiu Zissulescu  <claziss@synopsys.com>
-       * config/arc/arc.c (arc_use_anchors_for_symbol_p): New function.
-       (TARGET_USE_ANCHORS_FOR_SYMBOL_P): Define.
- 2017-08-31  Olivier Hainque  <hainque@adacore.com>
-       * config.gcc (powerpc-wrs-vxworks|vxworksae|vxworksmils): Now
-       match as powerpc-wrs-vxworks*.
- 2017-08-31  James Greenhalgh  <james.greenhalgh@arm.com>
-       * config/aarch64/aarch64-simd.md (aarch64_mla_elt_merge<mode>): Fix
-       register constraint for by-element operand.
-       (aarch64_mls_elt_merge<mode>): Likewise.
- 2017-08-31  Claudiu Zissulescu  <claziss@synopsys.com>
-       * config/arc/arc.c (arc_can_follow_jump): Check for short
-       branches.
- 2017-08-31  Claudiu Zissulescu  <claziss@synopsys.com>
-       * config.gcc: Use g.opt for arc.
-       * config/arc/arc.c (LEGITIMATE_SCALED_ADDRESS_P): Deleted,
-       functionality moved to ...
-       (legitimate_scaled_address_p): New function, ...here.
-       (LEGITIMATE_SMALL_DATA_OFFSET_P): New define.
-       (LEGITIMATE_SMALL_DATA_ADDRESS_P): Use the above define.
-       (legitimate_offset_address_p): Delete TARGET_NO_SDATA_SET
-       condition.
-       (arc_override_options): Handle G option.
-       (arc_output_pic_addr_const): Correct function definition.
-       (arc_legitimate_address_p): Use legitimate_scaled_address_p.
-       (arc_decl_anon_ns_mem_p): Delete.
-       (arc_in_small_data_p): Overhaul this function to take into
-       consideration the value given via G option.
-       (arc_rewrite_small_data_1): Renamed and corrected old
-       arc_rewrite_small_data function.
-       (arc_rewrite_small_data): New function.
-       (small_data_pattern): Don't use pic_offset_table_rtx.
-       * config/arc/arc.h (CC1_SPEC): Recognize G option.
-       * config/arc/simdext.md (movmisalignv2hi): Use
-       prepare_move_operands function.
-       (mov*): Likewise.
-       (movmisalign*): Likewise.
-       * doc/invoke.texi (ARC options): Document -G option.
- 2017-08-31  Claudiu Zissulescu  <claziss@synopsys.com>
-       * config/arc/arc-protos.h (compact_sda_memory_operand): Update
-       prototype.
-       * config/arc/arc.c (arc_print_operand): Output scaled address for
-       sdata whenever possible.
-       (arc_in_small_data_p): Allow sdata for 64bit datum when double
-       load/stores are available.
-       (compact_sda_memory_operand): Check for the alignment required by
-       code density instructions.
-       * config/arc/arc.md (movsi_insn): Use newly introduced Us0
-       constraint.
-       * config/arc/constraints.md (Usd): Update constraint.
-       (Us0): New constraint.
-       (Usc): Update constraint.
- 2017-08-31  Richard Biener  <rguenther@suse.de>
-       PR middle-end/82054
-       * dwarf2out.c (dwarf2out_early_global_decl): Process each
-       function only once.
- 2017-08-31  Tamar Christina  <tamar.christina@arm.com>
-       * config/aarch64/aarch64-builtins.c (aarch64_init_simd_builtins):
-       Resize type_signature.
- 2017-08-31  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR tree-optimization/84634
+       * tree-vect-stmts.c (vectorizable_store, vectorizable_load): Replace
+       masks and masked_loop_p with a single loop_masks, making sure it's
+       null for bb vectorization.
  
-       * config/aarch64/aarch64.c (aarch64_base_register_rtx_p): Only allow
-       subregs whose inner modes can be stored in GPRs.
-       (aarch64_classify_index): Likewise.
+ 2018-03-02  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-08-31  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * tree-vect-data-refs.c (vect_analyze_data_ref_dependence)
+       (vect_analyze_data_ref_access): Use loop->safe_len rather than
+       loop->force_vectorize to check whether there is no alias.
  
-       * config/aarch64/iterators.md (V_cmp_result): Rename to...
-       (V_INT_EQUIV): ...this.
-       (v_cmp_result): Rename to...
-       (v_int_equiv): ...this.
-       * config/aarch64/aarch64.md (xorsign<mode>3): Update accordingly.
-       * config/aarch64/aarch64-simd.md (xorsign<mode>3): Likewise.
-       (copysign<mode>3): Likewise.
-       (aarch64_simd_bsl<mode>_internal): Likewise.
-       (aarch64_simd_bsl<mode>): Likewise.
-       (vec_cmp<mode><mode>): Likewise.
-       (vcond<mode><mode>): Likewise.
-       (vcond<v_cmp_mixed><mode>): Likewise.
-       (vcondu<mode><v_cmp_mixed>): Likewise.
-       (aarch64_cm<optab><mode>): Likewise.
-       (aarch64_cmtst<mode>): Likewise.
-       (aarch64_fac<optab><mode>): Likewise.
-       (vec_perm_const<mode>): Likewise.
-       (vcond_mask_<mode><v_cmp_result>): Rename to...
-       (vcond_mask_<mode><v_int_equiv>): ...this.
-       (vec_cmp<mode><v_cmp_result>): Rename to...
-       (vec_cmp<mode><v_int_equiv>): ...this.
- 2017-08-31  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-03-02  Jakub Jelinek  <jakub@redhat.com>
  
-       * config/aarch64/aarch64-modes.def: Remove 32-, 48- and 64-byte
-       vector modes.
-       * config/aarch64/iterators.md (VRL2, VRL3, VRL4): Delete.
-       * config/aarch64/aarch64.md (UNSPEC_LD2_DREG, UNSPEC_LD3_DREG)
-       (UNSPEC_LD4_DREG): New unspecs.
-       * config/aarch64/aarch64-simd.md (aarch64_ld2<mode>_dreg_le)
-       (aarch64_ld2<mode>_dreg_be): Replace with...
-       (aarch64_ld2<mode>_dreg): ...this pattern and use the new DREG
-       unspec.
-       (aarch64_ld3<mode>_dreg_le)
-       (aarch64_ld3<mode>_dreg_be): Replace with...
-       (aarch64_ld3<mode>_dreg): ...this pattern and use the new DREG
-       unspec.
-       (aarch64_ld4<mode>_dreg_le)
-       (aarch64_ld4<mode>_dreg_be): Replace with...
-       (aarch64_ld4<mode>_dreg): ...this pattern and use the new DREG
-       unspec.
- 2017-08-30  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
-       PR tree-optimization/81987
-       * gimple-ssa-strength-reduction.c (insert_initializers): Don't
-       insert an initializer in a location not dominated by the stride
-       definition.
+       PR target/84614
+       * rtl.h (prev_real_nondebug_insn, next_real_nondebug_insn): New
+       prototypes.
+       * emit-rtl.c (next_real_insn, prev_real_insn): Fix up function
+       comments.
+       (next_real_nondebug_insn, prev_real_nondebug_insn): New functions.
+       * cfgcleanup.c (try_head_merge_bb): Use prev_real_nondebug_insn
+       instead of a loop around prev_real_insn.
+       * combine.c (move_deaths): Use prev_real_nondebug_insn instead of
+       prev_real_insn.
+       PR inline-asm/84625
+       * config/i386/i386.c (ix86_print_operand): Use conditional
+       output_operand_lossage instead of gcc_assert if CONST_VECTOR is not
+       zero vector.
+ 2018-03-02  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84427
+       * tree-ssa-pre.c (bitmap_remove_expr_from_set): Remove.
+       (bitmap_set_subtract_values): Rewrite to handle multiple
+       exprs per value.
+       (clean): Likewise.
+       (prune_clobbered_mems): Likewise.
+       (phi_translate): Take edge instead of pred/phiblock.
+       (phi_translate_1): Likewise.
+       (phi_translate_set): Likewise.  Insert all translated
+       exprs for a value into the set, keeping possibly multiple
+       expressions per value.
+       (compute_antic_aux): Adjust for phi_translate changes.
+       When intersecting union the expressions and prune those
+       not in the final value set, keeping possibly multiple
+       expressions per value.  Do not use value-insertion
+       for unioning ANTIC_OUT U EXP_GEN - TMP_GEN but merge
+       all expressions.  Add verification that the value-sets
+       only shrink during iteration.
+       (compute_partial_antic_aux): Adjust for the phi_translate changes.
+       (do_pre_regular_insertion): Likewise.
+       (do_pre_partial_partial_insertion): Likewise.
  
- 2017-08-30  Eric Botcazou  <ebotcazou@adacore.com>
+ 2018-03-02  Richard Biener  <rguenther@suse.de>
  
-       * tree-eh.c (lower_try_finally_switch): Set the location of the finally
-       on the entire header of the finally block in the fallthru case.
+       PR target/82005
+       * config/darwin.c (saved_debug_info_level): New static global.
+       (darwin_asm_lto_start): Disable debug info generation for LTO out.
+       (darwin_asm_lto_end): Restore debug info generation settings.
  
- 2017-08-30  Eric Botcazou  <ebotcazou@adacore.com>
+ 2018-03-01  Martin Liska  <mliska@suse.cz>
  
-       * varasm.c (decode_addr_const): Deal with INDIRECT_REF <INTEGER_CST>.
+       PR sanitizer/82484
+       * sanopt.c (sanitize_rewrite_addressable_params): Do not handle
+       volatile arguments.
  
- 2017-08-30  Pat Haugen  <pthaugen@us.ibm.com>
+ 2018-03-01  Richard Biener  <rguenther@suse.de>
  
-       * config/rs6000/rs6000.c (rs6000_emit_prologue_move_from_cr): Rename from
-       rs6000_emit_move_from_cr and call renamed function.
-       (rs6000_emit_prologue): Call renamed functions.
-       * config/rs6000/rs6000.md (prologue_movesi_from_cr): Rename from
-       movesi_from_cr, remove volatile CRs.
+       PR debug/84645
+       * dwarf2out.c (gen_variable_die): Properly handle late VLA
+       type annotation with LTO when debug was disabled at compile-time.
  
- 2017-08-30  Jon Beniston  <jon@beniston.com>
-           Richard Biener  <rguenther@suse.de>
+ 2018-03-01  Matthew Fortune  <mfortune@gmail.com>
  
-       * tree-vect-patterns.c (vect_pattern_recog_1): Use VECTOR_TYPE_P instead
-       of VECTOR_MODE_P check.
-       * tree-vect-stmts.c (get_vectype_for_scalar_type_and_size): Allow single
-       element vector types.
+       * config/mips/mips.c (mips_final_prescan_insn): Fix incorrect
+       XINT with INTVAL.
+       (mips_final_postscan_insn): Likewise.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
+ 2018-03-01  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       * df.h (df_read_modify_subreg_p): Remove in favor of...
-       * rtl.h (read_modify_subreg_p): ...this new function.  Take a
-       const_rtx instead of an rtx.
-       * cprop.c (local_cprop_find_used_regs): Update accordingly.
-       * df-problems.c (df_word_lr_mark_ref): Likewise.
-       * ira-lives.c (mark_pseudo_reg_live): Likewise.
-       (mark_pseudo_reg_dead): Likewise.
-       (mark_ref_dead): Likewise.
-       * reginfo.c (init_subregs_of_mode): Likewise.
-       * sched-deps.c (sched_analyze_1): Likewise.
-       * df-scan.c (df_def_record_1): Likewise.
-       (df_uses_record): Likewise.
-       (df_read_modify_subreg_p): Remove in favor of...
-       * rtlanal.c (read_modify_subreg_p): ...this new function.  Take a
-       const_rtx instead of an rtx.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR rtl-optimization/84528
+       * alias.c (init_alias_target): Add commentary.
+       (init_alias_analysis): Only give HARD_FRAME_POINTER_REGNUM
+       a unique base value if the frame pointer is not eliminated
+       to the stack pointer.
  
-       * rtl.h (partial_subreg_p): New function.
-       * caller-save.c (save_call_clobbered_regs): Use it.
-       * calls.c (expand_call): Likewise.
-       * combine.c (combinable_i3pat): Likewise.
-       (simplify_set): Likewise.
-       (make_extraction): Likewise.
-       (make_compound_operation_int): Likewise.
-       (gen_lowpart_or_truncate): Likewise.
-       (force_to_mode): Likewise.
-       (make_field_assignment): Likewise.
-       (reg_truncated_to_mode): Likewise.
-       (record_truncated_value): Likewise.
-       (move_deaths): Likewise.
-       * cse.c (record_jump_cond): Likewise.
-       (cse_insn): Likewise.
-       * cselib.c (cselib_lookup_1): Likewise.
-       * expmed.c (extract_bit_field_using_extv): Likewise.
-       * function.c (assign_parm_setup_reg): Likewise.
-       * ifcvt.c (noce_convert_multiple_sets): Likewise.
-       * ira-build.c (create_insn_allocnos): Likewise.
-       * lra-coalesce.c (merge_pseudos): Likewise.
-       * lra-constraints.c (match_reload): Likewise.
-       (simplify_operand_subreg): Likewise.
-       (curr_insn_transform): Likewise.
-       * lra-lives.c (process_bb_lives): Likewise.
-       * lra.c (new_insn_reg): Likewise.
-       (lra_substitute_pseudo): Likewise.
-       * regcprop.c (mode_change_ok): Likewise.
-       (maybe_mode_change): Likewise.
-       (copyprop_hardreg_forward_1): Likewise.
-       * reload.c (push_reload): Likewise.
-       (find_reloads): Likewise.
-       (find_reloads_subreg_address): Likewise.
-       * reload1.c (alter_reg): Likewise.
-       (eliminate_regs_1): Likewise.
-       * simplify-rtx.c (simplify_unary_operation_1): Likewise.
+ 2018-03-01  Tom de Vries  <tom@codesourcery.com>
  
- 2017-08-30  David Edelsohn  <dje.gcc@gmail.com>
+       PR rtl-optimization/83327
+       * lra-int.h (hard_regs_spilled_into): Declare.
+       * lra.c (hard_regs_spilled_into): Define.
+       (init_reg_info): Init hard_regs_spilled_into.
+       * lra-spills.c (assign_spill_hard_regs): Update hard_regs_spilled_into.
+       * lra-lives.c (make_hard_regno_born, make_hard_regno_dead)
+       (process_bb_lives): Handle hard_regs_spilled_into.
+       (lra_create_live_ranges_1): Before doing liveness propagation, clear
+       regs in all_hard_regs_bitmap if set in hard_regs_spilled_into.
  
-       * config/rs6000/rs6000.c (rs6000_expand_binop_builtin): Revert
-       back to if statements, including unpack.
+ 2018-02-28  David Edelsohn  <dje.gcc@gmail.com>
  
- 2017-08-30  Martin Liska  <mliska@suse.cz>
+       * config.gcc (powerpc-ibm-aix7.1.*): New stanza.
+       (powerpc-ibm-aix[789]*): Default to AIX 7.2.
+       * config/rs6000/aix71.h (TARGET_DEFAULT): Revert to Power4 ISA.
+       * config/rs6000/aix72.h: New file.
  
-       PR inline-asm/82001
-       * ipa-icf-gimple.c (func_checker::compare_tree_list_operand):
-       Rename to ...
-       (func_checker::compare_asm_inputs_outputs): ... this function.
-       (func_checker::compare_gimple_asm): Use the function to compare
-       also ASM constrains.
-       * ipa-icf-gimple.h: Rename the function.
+ 2018-02-28  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * gimple-ssa-warn-restrict.c (maybe_diag_overlap): Use warning_n
+       instead of warning_at with conditional singular and plural messages
+       where possible.
  
-       * coretypes.h (complex_mode): New type.
-       * gdbhooks.py (build_pretty_printer): Handle it.
-       * machmode.h (complex_mode): New class.
-       (complex_mode::includes_p): New function.
-       (is_complex_int_mode): Likewise.
-       (is_complex_float_mode): Likewise.
-       * genmodes.c (get_mode_class): Handle complex mode classes.
-       * function.c (expand_function_end): Use is_complex_int_mode.
+       PR target/52991
+       * stor-layout.c (update_alignment_for_field): For
+       targetm.ms_bitfield_layout_p (rli->t), if !is_bitfield
+       && !DECL_PACKED (field), do the alignment update, just use
+       only desired_align instead of MAX (type_align, desired_align)
+       as the alignment.
+       (place_field): Don't do known_align < desired_align handling
+       early if targetm.ms_bitfield_layout_p (rli->t) and rli->prev_field
+       is non-NULL, instead do it after rli->prev_field handling and
+       only if not within a bitfield word.  For DECL_PACKED (field)
+       use type_align of BITS_PER_UNIT.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-28  Eric Botcazou  <ebotcazou@adacore.com>
  
-       * coretypes.h (scalar_mode_pod): New typedef.
-       * gdbhooks.py (build_pretty_printer): Handle it.
-       * machmode.h (gt_ggc_mx, gt_pch_nx): New functions.
-       * fixed-value.h (fixed_value::mode): Change type to scalar_mode_pod.
-       * fold-const.c (fold_convert_const_int_from_fixed): Use scalar_mode.
-       * tree-streamer-in.c (unpack_ts_fixed_cst_value_fields): Use
-       as_a <scalar_mode>.
+       * config/aarch64/aarch64.c (aarch64_emit_probe_stack_range): Remove
+       superfluous parentheses and trailing spaces.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-28  Richard Biener  <rguenther@suse.de>
  
-       * machmode.h (mode_for_vector): Take a scalar_mode instead
-       of a machine_mode.
-       * stor-layout.c (mode_for_vector): Likewise.
-       * explow.c (promote_mode): Use as_a <scalar_mode>.
-       * sdbout.c (sdbout_parms): Use is_a <scalar_mode>.
+       PR tree-optimization/84584
+       * graphite-scop-detection.c (scop_detection::add_scop): Discard
+       SCoPs with fake exit edge.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-28  Martin Liska  <mliska@suse.cz>
  
-       * target.def (preferred_simd_mode): Take a scalar_mode
-       instead of a machine_mode.
-       * targhooks.h (default_preferred_simd_mode): Likewise.
-       * targhooks.c (default_preferred_simd_mode): Likewise.
-       * config/arc/arc.c (arc_preferred_simd_mode): Likewise.
-       * config/arm/arm.c (arm_preferred_simd_mode): Likewise.
-       * config/c6x/c6x.c (c6x_preferred_simd_mode): Likewise.
-       * config/epiphany/epiphany.c (epiphany_preferred_simd_mode): Likewise.
-       * config/i386/i386.c (ix86_preferred_simd_mode): Likewise.
-       * config/mips/mips.c (mips_preferred_simd_mode): Likewise.
-       * config/nvptx/nvptx.c (nvptx_preferred_simd_mode): Likewise.
-       * config/powerpcspe/powerpcspe.c (rs6000_preferred_simd_mode):
-       Likewise.
-       * config/rs6000/rs6000.c (rs6000_preferred_simd_mode): Likewise.
-       * config/s390/s390.c (s390_preferred_simd_mode): Likewise.
-       * config/sparc/sparc.c (sparc_preferred_simd_mode): Likewise.
-       * config/aarch64/aarch64.c (aarch64_preferred_simd_mode): Likewise.
-       (aarch64_simd_scalar_immediate_valid_for_move): Update accordingly.
-       * doc/tm.texi: Regenerate.
-       * optabs-query.c (can_vec_mask_load_store_p): Return false for
-       non-scalar modes.
+       PR testsuite/84597
+       * timevar.c (timer::print): Fix format to properly print 100%
+       values.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-28  Richard Biener  <rguenther@suse.de>
+       PR middle-end/84607
+       * genmatch.c (capture_info::walk_match): Do not mark
+       captured expressions without operands as expr_p given
+       they act more like predicates and should be subject to
+       "lost tail" side-effect preserving.
+ 2018-02-28  Alexandre Oliva  <aoliva@redhat.com>
+       PR rtl-optimization/81611
+       * auto-inc-dec.c (attempt_change): Move dead note from
+       mem_insn if it's the next use of regno
+       (find_address): Take address use of reg holding
+       non-incremented value.  Add parm to limit search to the named
+       reg only.
+       (merge_in_block): Attempt to use a mem insn that is the next
+       use of the original regno.
+ 2018-02-27  Martin Sebor  <msebor@redhat.com>
+       PR c++/83871
+       * gcc/doc/invoke.texi (-Wmissing-attributes): New option.
+       * gcc/print-tree.c (print_node): Handle DECL_UNINLINABLE.
+ 2018-02-27  Martin Sebor  <msebor@redhat.com>
+       PR translation/84207
+       * diagnostic-core.h (warning_n, error_n, inform_n): Change
+       n argument to unsigned HOST_WIDE_INT.
+       * diagnostic.c (warning_n, error_n, inform_n): Ditto.
+       (diagnostic_n_impl): Ditto.  Handle arguments in excess of LONG_MAX.
+       * gimple-ssa-sprintf.c (format_directive): Simplify inform_n call.
+       * tree-ssa-strlen.c (maybe_diag_stxncpy_trunc): Use warning_n.
+ 2018-02-27  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84512
+       * tree-vect-loop.c (vect_compute_single_scalar_iteration_cost):
+       Do not use the estimate returned from record_stmt_cost for
+       the scalar iteration cost but sum properly using add_stmt_cost.
+ 2018-02-27  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84466
+       * graphite-scop-detection.c (scop_detection::stmt_simple_for_scop_p):
+       Adjust last change to less strictly validate use operands.
+ 2018-02-27  Martin Liska  <mliska@suse.cz>
+       PR gcov-profile/84548
+       * gcov.c (process_file): Allow partial overlap and consider it
+       also as group functions.
+       (output_lines): Properly calculate range of lines for a group.
+ 2018-02-27  Martin Liska  <mliska@suse.cz>
+       * timevar.c (timer::print_row): Remove 'usr', 'sys', 'wall' and
+       'ggc' suffixes.  Change first column width.
+       (timer::print): Fix formatting of the column.
+ 2018-02-27  Alexandre Oliva  <aoliva@redhat.com>
+       * tree-ssa-live.c (remove_unused_scope_block_p): Do not
+       preserve inline entry blocks for the sake of debug inline
+       entry point markers alone.
+       (remove_unused_locals): Suggest in comments a better place to
+       force the preservation of inline entry blocks that are
+       otherwise unused, but do not preserve them.
+ 2018-02-26  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/i386/i386.c (ix86_output_indirect_jmp): Update comments.
+ 2018-02-26  H.J. Lu  <hongjiu.lu@intel.com>
+       PR target/84039
+       * config/i386/constraints.md (Bs): Replace
+       ix86_indirect_branch_register with
+       TARGET_INDIRECT_BRANCH_REGISTER.
+       (Bw): Likewise.
+       * config/i386/i386.md (indirect_jump): Likewise.
+       (tablejump): Likewise.
+       (*sibcall_memory): Likewise.
+       (*sibcall_value_memory): Likewise.
+       Peepholes of indirect call and jump via memory: Likewise.
+       (*sibcall_GOT_32): Disallowed for TARGET_INDIRECT_BRANCH_REGISTER.
+       (*sibcall_value_GOT_32): Likewise.
+       * config/i386/predicates.md (indirect_branch_operand): Likewise.
+       (GOT_memory_operand): Likewise.
+       (call_insn_operand): Likewise.
+       (sibcall_insn_operand): Likewise.
+       (GOT32_symbol_operand): Likewise.
+       * config/i386/i386.h (TARGET_INDIRECT_BRANCH_REGISTER): New.
+ 2018-02-26  Eric Botcazou  <ebotcazou@adacore.com>
+       PR rtl-optimization/83496
+       * reorg.c (steal_delay_list_from_target): Change REDUNDANT array from
+       booleans to RTXes.  Call fix_reg_dead_note on every non-null element.
+       (steal_delay_list_from_fallthrough): Call fix_reg_dead_note on a
+       redundant insn, if any.
+       (relax_delay_slots): Likewise.
+       (update_reg_unused_notes): Rename REDUNDANT_INSN to OTHER_INSN.
+ 2018-02-26  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR tree-optimization/83965
+       * tree-vect-patterns.c (vect_reassociating_reduction_p): Assume
+       that grouped statements are part of a reduction chain.  Return
+       true if the statement is not marked as a reduction itself but
+       is part of a group.
+       (vect_recog_dot_prod_pattern): Don't check whether the statement
+       is part of a group here.
+       (vect_recog_sad_pattern): Likewise.
+       (vect_recog_widen_sum_pattern): Likewise.
+ 2018-02-26  Eric Botcazou  <ebotcazou@adacore.com>
+       PR debug/84545
+       * final.c (rest_of_clean_state): Also look for calls inside sequences.
+ 2018-02-26  H.J. Lu  <hongjiu.lu@intel.com>
+       PR target/84530
+       * config/i386/i386-protos.h (ix86_output_indirect_jmp): Remove
+       the bool argument.
+       (ix86_output_indirect_function_return): New prototype.
+       (ix86_split_simple_return_pop_internal): Likewise.
+       * config/i386/i386.c (indirect_return_via_cx): New.
+       (indirect_return_via_cx_bnd): Likewise.
+       (indirect_thunk_name): Handle return via CX_REG.
+       (output_indirect_thunk_function): Create alias for
+       __x86_return_thunk_[re]cx and __x86_return_thunk_[re]cx_bnd.
+       (ix86_output_indirect_jmp): Remove the bool argument.
+       (ix86_output_indirect_function_return): New function.
+       (ix86_split_simple_return_pop_internal): Likewise.
+       * config/i386/i386.md (*indirect_jump): Don't pass false
+       to ix86_output_indirect_jmp.
+       (*tablejump_1): Likewise.
+       (simple_return_pop_internal): Change it to define_insn_and_split.
+       Call ix86_split_simple_return_pop_internal to split it for
+       -mfunction-return=.
+       (simple_return_indirect_internal): Call
+       ix86_output_indirect_function_return instead of
+       ix86_output_indirect_jmp.
+ 2018-02-26  Jakub Jelinek  <jakub@redhat.com>
+       PR bootstrap/84405
+       * vec.h (vec_default_construct): For BROKEN_VALUE_INITIALIZATION use
+       memset and value initialization afterwards.
+ 2018-02-26  Christophe Lyon  <christophe.lyon@linaro.org>
+       * Makefile.in (lto-wrapper): Use ALL_LINKERFLAGS.
+ 2018-02-26  Ramana Radhakrishnan  <ramana.radhakrishnan@arm.com>
+         PR target/84521
+       * common/config/aarch64/aarch64-common.c
+       (aarch_option_optimization_table[]): Switch
+       off -fomit-frame-pointer.
+ 2018-02-26  Kito Cheng  <kito.cheng@gmail.com>
+           Chung-Ju Wu  <jasonwucj@gmail.com>
+       * config/nds32/nds32-multiple.md (load_multiple): Disallow
+       volatile memory.
+       (store_multiple): Ditto.
+ 2018-02-26  Kito Cheng  <kito.cheng@gmail.com>
+       * config.gcc: Add --with-cpu support for nds32 target.
+       * config/nds32/nds32-opts.h (nds32_cpu_type): New.
+       * config/nds32/nds32.opt: Add -mcpu= option.
+ 2018-02-25  Segher Boessenkool  <segher@kernel.crashing.org>
+       * config/rs6000/rs6000.opt (mvrsave=no, mvrsave=yes, isel=no,
+       isel=yes): Warn for these deprecated options.
+ 2018-02-23  David Edelsohn  <dje.gcc@gmail.com>
+       * config/rs6000/aix71.h (TARGET_DEFAULT): Change to
+       ISA_2_5_MASKS_EMBEDDED.
+ 2018-02-23  Jakub Jelinek  <jakub@redhat.com>
+       * ipa-prop.c (ipa_vr_ggc_hash_traits::hash): Hash p->min and
+       p->max as pointers rather than using iterative_hash_expr.
+ 2018-02-23  Carl Love  <cel@us.ibm.com>
+       * config/rs6000/rs6000-builtin.def: Change VSIGNED2 and VUNSIGNED2
+       macro expansions from BU_VSX_2 to BU_P8V_VSX_2 and BU_VSX_OVERLOAD_2 to
+       BU_P8V_OVERLOAD_2.
+       * config/rs6000/rs6000-c.c: Change VSX_BUILTIN_VEC_VSIGNED2 to
+       P8V_BUILTIN_VEC_VSIGNED2.  Change VSX_BUILTIN_VEC_VUNSIGNED2 to
+       P8V_BUILTIN_VEC_VUNSIGNED2.
+ 2018-02-22  Vladimir Makarov  <vmakarov@redhat.com>
+       PR target/81572
+       * lra-int.h (LRA_UNKNOWN_ALT, LRA_NON_CLOBBERED_ALT): New macros.
+       * lra.c (lra_set_insn_recog_data, lra_update_insn_recog_data): Use
+       LRA_UNKNOWN_ALT.
+       * lra-constraints.c (curr_insn_transform): Set up
+       LRA_NON_CLOBBERED_ALT for moves processed on the fast path.  Use
+       LRA_UNKNOWN_ALT.
+       (remove_inheritance_pseudos): Use LRA_UNKNOWN_ALT.
+       * lra-eliminations.c (spill_pseudos): Ditto.
+       (process_insn_for_elimination): Ditto.
+       * lra-lives.c (reg_early_clobber_p): Use the new macros.
+       * lra-spills.c (spill_pseudos): Use LRA_UNKNOWN_ALT and
+       LRA_NON_CLOBBERED_ALT.
+ 2018-02-22  Martin Sebor  <msebor@redhat.com>
+       PR tree-optimization/84480
+       * gimple-fold.c (gimple_fold_builtin_strcpy): Move warnings
+       to maybe_diag_stxncpy_trunc.  Call it.
+       * tree-ssa-strlen.c (maybe_diag_stxncpy_trunc): Integrate warnings
+       from gimple_fold_builtin_strcpy.  Print inlining stack.
+       (handle_builtin_stxncpy): Print inlining stack.
+       * tree-ssa-strlen.h (maybe_diag_stxncpy_trunc): Declare.
+ 2018-02-22  H.J. Lu  <hongjiu.lu@intel.com>
+       PR target/84176
+       * config/i386/i386.c (ix86_set_indirect_branch_type): Issue an
+       error when -mindirect-branch=thunk-extern, -fcf-protection=branch
+       and -fcheck-pointer-bounds are used together.
+       (indirect_thunk_prefix): New enum.
+       (indirect_thunk_need_prefix): New function.
+       (indirect_thunk_name): Replace need_bnd_p with need_prefix.  Use
+       "_nt" instead of "_bnd" for NOTRACK prefix.
+       (output_indirect_thunk): Replace need_bnd_p with need_prefix.
+       (output_indirect_thunk_function): Likewise.
+       (): Likewise.
+       (ix86_code_end): Update output_indirect_thunk_function calls.
+       (ix86_output_indirect_branch_via_reg): Replace
+       ix86_bnd_prefixed_insn_p with indirect_thunk_need_prefix.
+       (ix86_output_indirect_branch_via_push): Likewise.
+       (ix86_output_function_return): Likewise.
+       * doc/invoke.texi: Document -mindirect-branch=thunk-extern is
+       incompatible with -fcf-protection=branch and
+       -fcheck-pointer-bounds.
  
-       * target.def (scalar_mode_supported_p): Take a scalar_mode
-       instead of a machine_mode.
-       * targhooks.h (default_scalar_mode_supported_p): Likewise.
-       * targhooks.c (default_scalar_mode_supported_p): Likewise.
-       * config/aarch64/aarch64.c (aarch64_scalar_mode_supported_p): Likewise.
-       * config/alpha/alpha.c (alpha_scalar_mode_supported_p): Likewise.
-       * config/arm/arm.c (arm_scalar_mode_supported_p): Likewise.
-       * config/avr/avr.c (avr_scalar_mode_supported_p): Likewise.
-       * config/c6x/c6x.c (c6x_scalar_mode_supported_p): Likewise.
-       * config/i386/i386.c (ix86_scalar_mode_supported_p): Likewise.
-       * config/ia64/ia64.c (ia64_scalar_mode_supported_p): Likewise.
-       * config/mips/mips.c (mips_scalar_mode_supported_p): Likewise.
-       * config/msp430/msp430.c (msp430_scalar_mode_supported_p): Likewise.
-       * config/pa/pa.c (pa_scalar_mode_supported_p): Likewise.
-       * config/pdp11/pdp11.c (pdp11_scalar_mode_supported_p): Likewise.
-       * config/powerpcspe/powerpcspe.c (rs6000_scalar_mode_supported_p):
-       Likewise.
-       * config/rs6000/rs6000.c (rs6000_scalar_mode_supported_p): Likewise.
-       * config/s390/s390.c (s390_scalar_mode_supported_p): Likewise.
-       * config/spu/spu.c (spu_scalar_mode_supported_p): Likewise.
-       * config/tilegx/tilegx.c (tilegx_scalar_mode_supported_p): Likewise.
-       * config/tilepro/tilepro.c (tilepro_scalar_mode_supported_p):
-       Likewise.
-       * doc/tm.texi: Regenerate.
+ 2018-02-22  Steve Ellcey  <sellcey@cavium.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/83335
+       * config/aarch64/aarch64.c (aarch64_print_address_internal):
+       Change gcc_assert call to output_operand_lossage.
  
-       * coretypes.h (opt_scalar_mode): New typedef.
-       * gdbhooks.py (build_pretty_printers): Handle it.
-       * machmode.h (mode_iterator::get_2xwider): Add overload for
-       opt_mode<T>.
-       * emit-rtl.c (init_emit_once): Use opt_scalar_mode when iterating
-       over scalar modes.
-       * expr.c (convert_mode_scalar): Likewise.
-       * omp-low.c (omp_clause_aligned_alignment): Likewise.
-       * optabs.c (expand_float): Likewise.
-       (expand_fix): Likewise.
-       * tree-vect-stmts.c (vectorizable_conversion): Likewise.
+ 2018-02-22  Steve Ellcey  <sellcey@cavium.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * doc/extend.texi (__builtin_extend_pointer): Document builtin.
  
-       * optabs.c (expand_float): Explicitly check for scalars before
-       using a branching expansion.
-       (expand_fix): Likewise.
+ 2018-02-22  DJ Delorie  <dj@redhat.com>
+           Sebastian Perta  <sebastian.perta@renesas.com>
+           Oleg Endo  <olegendo@gcc.gnu.org>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/rx/rx.c (rx_rtx_costs): New function.
+       (TARGET_RTX_COSTS): Override to use rx_rtx_costs.
  
-       * expr.c (convert_mode): Split scalar handling out into...
-       (convert_mode_scalar): ...this new function.  Treat the modes
-       as scalar_modes.
+ 2018-02-22  Thomas Preud'homme  <thomas.preudhomme@arm.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/arm/t-multilib: Map Armv8-R to Armv7 multilibs.
  
-       * omp-expand.c (expand_omp_atomic): Use is_int_mode, is_float_mode
-       and scalar_mode.
-       * tree-vect-stmts.c (get_vectype_for_scalar_type_and_size): Likewise.
+ 2018-02-22  Martin Liska  <mliska@suse.cz>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR driver/83193
+       * common/config/arm/arm-common.c (arm_print_hint_for_cpu_option):
+       Add "native" as a possible value.
  
-       * fixed-value.h (fixed_from_double_int): Take a scalar_mode
-       rather than a machine_mode.
-       (fixed_from_string): Likewise.
-       (fixed_convert): Likewise.
-       (fixed_convert_from_int): Likewise.
-       (fixed_convert_from_real): Likewise.
-       (real_convert_from_fixed): Likewise.
-       * fixed-value.c (fixed_from_double_int): Likewise.
-       (fixed_from_string): Likewise.
-       (fixed_convert): Likewise.
-       (fixed_convert_from_int): Likewise.
-       (fixed_convert_from_real): Likewise.
-       (real_convert_from_fixed): Likewise.
-       * config/avr/avr.c (avr_out_round): Use as_a <scalar_mode>.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-22  Martin Liska  <mliska@suse.cz>
  
-       * emit-rtl.c (immed_double_const): Use is_a <scalar_mode> instead
-       of separate mode class checks.  Do not allow vector modes here.
-       (immed_wide_int_const): Use as_a <scalar_mode>.
-       * explow.c (trunc_int_for_mode): Likewise.
-       * rtl.h (wi::int_traits<rtx_mode_t>::get_precision): Likewise.
-       (wi::shwi): Likewise.
-       (wi::min_value): Likewise.
-       (wi::max_value): Likewise.
-       * dwarf2out.c (loc_descriptor): Likewise.
-       * simplify-rtx.c (simplify_immed_subreg): Fix rtx_mode_t argument
-       for CONST_WIDE_INT.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR driver/83193
+       * config/i386/i386.c (ix86_option_override_internal):
+       Add "native" as a possible value for -march and -mtune.
+ 2018-02-22  Jakub Jelinek  <jakub@redhat.com>
+       PR target/84502
+       * stor-layout.c (finalize_type_size): Propagate TYPE_EMPTY_P flag
+       to all type variants.
+       PR tree-optimization/84503
+       * gimple-ssa-store-merging.c (merged_store_group::merge_into): Compute
+       width as info->bitpos + info->bitsize - start.
+       (merged_store_group::merge_overlapping): Simplify width computation.
+       (check_no_overlap): New function.
+       (imm_store_chain_info::try_coalesce_bswap): Compute expected
+       start + width and last_order of the group, fail if check_no_overlap
+       fails.
+       (imm_store_chain_info::coalesce_immediate_stores): Don't merge info
+       to group if check_no_overlap fails.
+ 2018-02-21  Segher Boessenkool  <segher@kernel.crashing.org>
+       * config/rs6000/altivec.md: Delete constraint arguments to
+       define_expand, define_split, and define_peephole2, and in
+       define_insn_and_split if always unused.
+       * config/rs6000/darwin.md: Ditto.
+       * config/rs6000/dfp.md: Ditto.
+       * config/rs6000/rs6000.md: Ditto.
+       * config/rs6000/sync.md: Ditto.
+       * config/rs6000/vector.md: Ditto.
+       * config/rs6000/vsx.md: Ditto.
  
-       * tree.h (SCALAR_TYPE_MODE): New macro.
-       * expr.c (expand_expr_addr_expr_1): Use it.
-       (expand_expr_real_2): Likewise.
-       * fold-const.c (fold_convert_const_fixed_from_fixed): Likewise.
-       (fold_convert_const_fixed_from_int): Likewise.
-       (fold_convert_const_fixed_from_real): Likewise.
-       (native_encode_fixed): Likewise
-       (native_encode_complex): Likewise
-       (native_encode_vector): Likewise.
-       (native_interpret_fixed): Likewise.
-       (native_interpret_real): Likewise.
-       (native_interpret_complex): Likewise.
-       (native_interpret_vector): Likewise.
-       * omp-simd-clone.c (simd_clone_adjust_return_type): Likewise.
-       (simd_clone_adjust_argument_types): Likewise.
-       (simd_clone_init_simd_arrays): Likewise.
-       (simd_clone_adjust): Likewise.
-       * stor-layout.c (layout_type): Likewise.
-       * tree.c (build_minus_one_cst): Likewise.
-       * tree-cfg.c (verify_gimple_assign_ternary): Likewise.
-       * tree-inline.c (estimate_move_cost): Likewise.
-       * tree-ssa-math-opts.c (convert_plusminus_to_widen): Likewise.
-       * tree-vect-loop.c (vect_create_epilog_for_reduction): Likewise.
-       (vectorizable_reduction): Likewise.
-       * tree-vect-patterns.c (vect_recog_widen_mult_pattern): Likewise.
-       (vect_recog_mixed_size_cond_pattern): Likewise.
-       (check_bool_pattern): Likewise.
-       (adjust_bool_pattern): Likewise.
-       (search_type_for_mask_1): Likewise.
-       * tree-vect-slp.c (vect_schedule_slp_instance): Likewise.
-       * tree-vect-stmts.c (vectorizable_conversion): Likewise.
-       (vectorizable_load): Likewise.
-       (vectorizable_store): Likewise.
-       * ubsan.c (ubsan_encode_value): Likewise.
-       * varasm.c (output_constant): Likewise.
+ 2018-02-21  Segher Boessenkool  <segher@kernel.crashing.org>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/rs6000/altivec.md: Write output control strings as braced
+       blocks instead of double-quoted strings.
+       * config/rs6000/darwin.md: Ditto.
+       * config/rs6000/rs6000.md: Ditto.
+       * config/rs6000/vector.md: Ditto.
+       * config/rs6000/vsx.md: Ditto.
  
-       * coretypes.h (scalar_mode): New class.
-       * machmode.h (scalar_mode): Likewise.
-       (scalar_mode::includes_p): New function.
-       (mode_to_inner): Return a scalar_mode rather than a machine_mode.
-       * gdbhooks.py (build_pretty_printers): Handle scalar_mode.
-       * genmodes.c (get_mode_class): Handle remaining scalar modes.
-       * cfgexpand.c (expand_debug_expr): Use scalar_mode.
-       * expmed.c (store_bit_field_1): Likewise.
-       (extract_bit_field_1): Likewise.
-       * expr.c (write_complex_part): Likewise.
-       (read_complex_part): Likewise.
-       (emit_move_complex_push): Likewise.
-       (expand_expr_real_2): Likewise.
-       * function.c (assign_parm_setup_reg): Likewise.
-       (assign_parms_unsplit_complex): Likewise.
-       * optabs.c (expand_binop): Likewise.
-       * rtlanal.c (subreg_get_info): Likewise.
-       * simplify-rtx.c (simplify_immed_subreg): Likewise.
-       * varasm.c (output_constant_pool_2): Likewise.
+ 2018-02-21  Jason Merrill  <jason@redhat.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR c++/84314 - ICE with templates and fastcall attribute.
+       * attribs.c (build_type_attribute_qual_variant): Remove assert.
  
-       * expmed.c (extract_high_half): Use scalar_int_mode and remove
-       assertion.
-       (expmed_mult_highpart_optab): Likewise.
-       (expmed_mult_highpart): Likewise.
+ 2018-02-21  Jan Hubicka  <hubicka@ucw.cz>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * ipa-cp.c (determine_versionability): Fix comment typos.
  
-       * builtins.h (builtin_strncpy_read_str): Take a scalar_int_mode
-       instead of a machine_mode.
-       (builtin_memset_read_str): Likewise.
-       * builtins.c (c_readstr): Likewise.
-       (builtin_memcpy_read_str): Likewise.
-       (builtin_strncpy_read_str): Likewise.
-       (builtin_memset_read_str): Likewise.
-       (builtin_memset_gen_str): Likewise.
-       (expand_builtin_signbit): Use scalar_int_mode for local variables.
-       * cfgexpand.c (convert_debug_memory_address): Take a scalar_int_mode
-       instead of a machine_mode.
-       * combine.c (simplify_if_then_else): Use scalar_int_mode for local
-       variables.
-       (make_extraction): Likewise.
-       (try_widen_shift_mode): Take and return scalar_int_modes instead
-       of machine_modes.
-       * config/aarch64/aarch64.c (aarch64_libgcc_cmp_return_mode): Return
-       a scalar_int_mode instead of a machine_mode.
-       * config/avr/avr.c (avr_addr_space_address_mode): Likewise.
-       (avr_addr_space_pointer_mode): Likewise.
-       * config/cr16/cr16.c (cr16_unwind_word_mode): Likewise.
-       * config/msp430/msp430.c (msp430_addr_space_pointer_mode): Likewise.
-       (msp430_unwind_word_mode): Likewise.
-       * config/spu/spu.c (spu_unwind_word_mode): Likewise.
-       (spu_addr_space_pointer_mode): Likewise.
-       (spu_addr_space_address_mode): Likewise.
-       (spu_libgcc_cmp_return_mode): Likewise.
-       (spu_libgcc_shift_count_mode): Likewise.
-       * config/rl78/rl78.c (rl78_addr_space_address_mode): Likewise.
-       (rl78_addr_space_pointer_mode): Likewise.
-       (fl78_unwind_word_mode): Likewise.
-       (rl78_valid_pointer_mode): Take a scalar_int_mode instead of a
-       machine_mode.
-       * config/alpha/alpha.c (vms_valid_pointer_mode): Likewise.
-       * config/ia64/ia64.c (ia64_vms_valid_pointer_mode): Likewise.
-       * config/mips/mips.c (mips_mode_rep_extended): Likewise.
-       (mips_valid_pointer_mode): Likewise.
-       * config/tilegx/tilegx.c (tilegx_mode_rep_extended): Likewise.
-       * config/ft32/ft32.c (ft32_valid_pointer_mode): Likewise.
-       (ft32_addr_space_pointer_mode): Return a scalar_int_mode instead
-       of a machine_mode.
-       (ft32_addr_space_address_mode): Likewise.
-       * config/m32c/m32c.c (m32c_valid_pointer_mode): Take a
-       scalar_int_mode instead of a machine_mode.
-       (m32c_addr_space_pointer_mode): Return a scalar_int_mode instead
-       of a machine_mode.
-       (m32c_addr_space_address_mode): Likewise.
-       * config/powerpcspe/powerpcspe.c (rs6000_abi_word_mode): Likewise.
-       (rs6000_eh_return_filter_mode): Likewise.
-       * config/rs6000/rs6000.c (rs6000_abi_word_mode): Likewise.
-       (rs6000_eh_return_filter_mode): Likewise.
-       * config/s390/s390.c (s390_libgcc_cmp_return_mode): Likewise.
-       (s390_libgcc_shift_count_mode): Likewise.
-       (s390_unwind_word_mode): Likewise.
-       (s390_valid_pointer_mode): Take a scalar_int_mode rather than a
-       machine_mode.
-       * target.def (mode_rep_extended): Likewise.
-       (valid_pointer_mode): Likewise.
-       (addr_space.valid_pointer_mode): Likewise.
-       (eh_return_filter_mode): Return a scalar_int_mode rather than
-       a machine_mode.
-       (libgcc_cmp_return_mode): Likewise.
-       (libgcc_shift_count_mode): Likewise.
-       (unwind_word_mode): Likewise.
-       (addr_space.pointer_mode): Likewise.
-       (addr_space.address_mode): Likewise.
-       * doc/tm.texi: Regenerate.
-       * dojump.c (prefer_and_bit_test): Take a scalar_int_mode rather than
-       a machine_mode.
-       (do_jump): Use scalar_int_mode for local variables.
-       * dwarf2cfi.c (init_return_column_size): Take a scalar_int_mode
-       rather than a machine_mode.
-       * dwarf2out.c (convert_descriptor_to_mode): Likewise.
-       (scompare_loc_descriptor_wide): Likewise.
-       (scompare_loc_descriptor_narrow): Likewise.
-       * emit-rtl.c (adjust_address_1): Use scalar_int_mode for local
-       variables.
-       * except.c (sjlj_emit_dispatch_table): Likewise.
-       (expand_builtin_eh_copy_values): Likewise.
-       * explow.c (convert_memory_address_addr_space_1): Likewise.
-       Take a scalar_int_mode rather than a machine_mode.
-       (convert_memory_address_addr_space): Take a scalar_int_mode rather
-       than a machine_mode.
-       (memory_address_addr_space): Use scalar_int_mode for local variables.
-       * expmed.h (expand_mult_highpart_adjust): Take a scalar_int_mode
-       rather than a machine_mode.
-       * expmed.c (mask_rtx): Likewise.
-       (init_expmed_one_conv): Likewise.
-       (expand_mult_highpart_adjust): Likewise.
-       (extract_high_half): Likewise.
-       (expmed_mult_highpart_optab): Likewise.
-       (expmed_mult_highpart): Likewise.
-       (expand_smod_pow2): Likewise.
-       (expand_sdiv_pow2): Likewise.
-       (emit_store_flag_int): Likewise.
-       (adjust_bit_field_mem_for_reg): Use scalar_int_mode for local
-       variables.
-       (extract_low_bits): Likewise.
-       * expr.h (by_pieces_constfn): Take a scalar_int_mode rather than
-       a machine_mode.
-       * expr.c (pieces_addr::adjust):  Likewise.
-       (can_store_by_pieces): Likewise.
-       (store_by_pieces): Likewise.
-       (clear_by_pieces_1): Likewise.
-       (expand_expr_addr_expr_1): Likewise.
-       (expand_expr_addr_expr): Use scalar_int_mode for local variables.
-       (expand_expr_real_1): Likewise.
-       (try_casesi): Likewise.
-       * final.c (shorten_branches): Likewise.
-       * fold-const.c (fold_convert_const_int_from_fixed): Change the
-       type of "mode" to machine_mode.
-       * internal-fn.c (expand_arith_overflow_result_store): Take a
-       scalar_int_mode rather than a machine_mode.
-       (expand_mul_overflow): Use scalar_int_mode for local variables.
-       * loop-doloop.c (doloop_modify): Likewise.
-       (doloop_optimize): Likewise.
-       * optabs.c (expand_subword_shift): Take a scalar_int_mode rather
-       than a machine_mode.
-       (expand_doubleword_shift_condmove): Likewise.
-       (expand_doubleword_shift): Likewise.
-       (expand_doubleword_clz): Likewise.
-       (expand_doubleword_popcount): Likewise.
-       (expand_doubleword_parity): Likewise.
-       (expand_absneg_bit): Use scalar_int_mode for local variables.
-       (prepare_float_lib_cmp): Likewise.
-       * rtl.h (convert_memory_address_addr_space_1): Take a scalar_int_mode
-       rather than a machine_mode.
-       (convert_memory_address_addr_space): Likewise.
-       (get_mode_bounds): Likewise.
-       (get_address_mode): Return a scalar_int_mode rather than a
-       machine_mode.
-       * rtlanal.c (get_address_mode): Likewise.
-       * stor-layout.c (get_mode_bounds): Take a scalar_int_mode rather
-       than a machine_mode.
-       * targhooks.c (default_mode_rep_extended): Likewise.
-       (default_valid_pointer_mode): Likewise.
-       (default_addr_space_valid_pointer_mode): Likewise.
-       (default_eh_return_filter_mode): Return a scalar_int_mode rather
-       than a machine_mode.
-       (default_libgcc_cmp_return_mode): Likewise.
-       (default_libgcc_shift_count_mode): Likewise.
-       (default_unwind_word_mode): Likewise.
-       (default_addr_space_pointer_mode): Likewise.
-       (default_addr_space_address_mode): Likewise.
-       * targhooks.h (default_eh_return_filter_mode): Likewise.
-       (default_libgcc_cmp_return_mode): Likewise.
-       (default_libgcc_shift_count_mode): Likewise.
-       (default_unwind_word_mode): Likewise.
-       (default_addr_space_pointer_mode): Likewise.
-       (default_addr_space_address_mode): Likewise.
-       (default_mode_rep_extended): Take a scalar_int_mode rather than
-       a machine_mode.
-       (default_valid_pointer_mode): Likewise.
-       (default_addr_space_valid_pointer_mode): Likewise.
-       * tree-ssa-address.c (addr_for_mem_ref): Use scalar_int_mode for
-       local variables.
-       * tree-ssa-loop-ivopts.c (get_shiftadd_cost): Take a scalar_int_mode
-       rather than a machine_mode.
-       * tree-switch-conversion.c (array_value_type): Use scalar_int_mode
-       for local variables.
-       * tree-vrp.c (simplify_float_conversion_using_ranges): Likewise.
-       * var-tracking.c (use_narrower_mode): Take a scalar_int_mode rather
-       than a machine_mode.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-21  Jan Hubicka  <hubicka@ucw.cz>
  
-       * dojump.c (do_jump_by_parts_greater_rtx): Change the type of
-       the mode argument to scalar_int_mode.
-       (do_jump_by_parts_zero_rtx): Likewise.
-       (do_jump_by_parts_equality_rtx): Likewise.
-       (do_jump_by_parts_greater): Take a mode argument.
-       (do_jump_by_parts_equality): Likewise.
-       (do_jump_1): Update calls accordingly.
+       PR c/84229
+       * ipa-cp.c (determine_versionability): Do not version functions calling
+       va_arg_pack.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-21  Martin Liska  <mliska@suse.cz>
  
-       * is-a.h (safe_dyn_cast): New function.
-       * rtl.h (rtx_jump_table_data::get_data_mode): New function.
-       (jump_table_for_label): Likewise.
-       * final.c (final_addr_vec_align): Take an rtx_jump_table_data *
-       instead of an rtx_insn *.
-       (shorten_branches): Use dyn_cast instead of LABEL_P and
-       JUMP_TABLE_DATA_P.  Use jump_table_for_label and
-       rtx_jump_table_data::get_data_mode.
-       (final_scan_insn): Likewise.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR driver/83193
+       * config/aarch64/aarch64.c (aarch64_print_hint_for_core_or_arch):
+       Add "native" as a possible value.
+       * config/aarch64/aarch64.h (HAVE_LOCAL_CPU_DETECT):  Define
+       the macro when native cpu detection is available.
  
-       * combine.c (try_combine): Use is_a <scalar_int_mode> when
-       trying to combine a full-register integer set with a subreg
-       integer set.
+ 2018-02-21  Martin Liska  <mliska@suse.cz>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR driver/83193
+       * common/config/arm/arm-common.c (arm_print_hint_for_arch_option):
+       Add "native" as a possible value.
+       * config/arm/arm.h (HAVE_LOCAL_CPU_DETECT): Define the macro
+       when native cpu detection is available.
  
-       * expr.c (expand_expr_addr_expr): Add a new_tmode local variable
-       that is always either address_mode or pointer_mode.
+ 2018-02-21  Jakub Jelinek  <jakub@redhat.com>
+           Martin Sebor  <msebor@redhat.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR tree-optimization/84478
+       * gimple-fold.h (get_range_strlen): Add a bool argument defaulted to
+       false.
+       * gimple-fold.c (get_range_strlen): Make minlen const and assume it
+       can't be NULL.  Change FUZZY from bool to int, for 1 add PHI/COND_EXPR
+       support which is conservatively correct, for 2 only stay conservative
+       for maxlen.  Formatting and comment capitalization fixes.  Add STRICT
+       argument to the 2 argument get_range_strlen, adjust 6 arg
+       get_range_strlen caller and clear minmaxlen[0] and [1] if it returned
+       false.
+       (get_maxval_strlen): Adjust 6 arg get_range_strlen caller.
+       (gimple_fold_builtin_strlen): Pass true as last argument to
+       get_range_strlen.
+ 2018-02-20  Martin Sebor  <msebor@redhat.com>
+       PR middle-end/84095
+       * gimple-ssa-warn-restrict.c (builtin_memref::extend_offset_range): New.
+       (builtin_memref::set_base_and_offset): Same.  Handle inner references.
+       (builtin_memref::builtin_memref): Factor out parts into
+       set_base_and_offset and call it.
+ 2018-02-20  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR middle-end/84406
+       * optabs-query.c (find_widening_optab_handler_and_mode): If from_mode
+       is a scalar_int_mode, assert that to_mode is a scalar_int_mode with
+       greater precision.  If to_mode is a MODE_PARTIAL_INT, stop the
+       search at the associated MODE_INT.
+ 2018-02-20  Jeff Law  <law@redhat.com>
+       PR middle-end/82123
+       PR tree-optimization/81592
+       PR middle-end/79257
+       * gimple-ssa-sprintf.c (format_integer): Query EVRP range analyzer
+       for range data rather than using global data.
+       * gimple-ssa-sprintf.c (get_int_range): Query EVRP range analyzer for
+       range data rather than using global data.
+       * gimple-ssa-sprintf.c (get_int_range): Accept vr_values parameter
+       pass it to children as needed.
+       (struct directive::fmtresult): Similarly.
+       (struct directive::set_width): Similarly.
+       (struct directive::set_precision): Similarly.
+       (format_integer, format_directive, parse_directive): Similarly.
+       (format_none): Accept unnamed vr_values parameter.
+       (format_percent, format_floating, format_character): Similarly.
+       (format_string, format_plain): Similarly.
+       * gimple-ssa-sprintf.c (sprintf_dom_walker::handle_gimple_call): Query
+       the EVRP range analyzer for range data rather than using global data.
+       * gimple-ssa-sprintf.c: Include alloc-pool.h, vr-values.h and 
+       gimple-ssa-evrp-analyze.h
+       (class sprintf_dom_walker): Add after_dom_children member function.
+       Add evrp_range_analyzer member.
+       (sprintf_dom_walker::before_dom_children): Call into the EVRP
+       range analyzer as needed.
+       (sprintf_dom_walker::after_dom_children): New member function.
+       * gimple-ssa-evrp-analyze.c (evrp_range_analyzer::enter): Do nothing
+       if not optimizing.
+       (evrp_range_analyzer::record_ranges_from_stmt): Likewise.
+       (evrp_range_analyzer::pop_to_marker): Likewise.
+ 2018-02-20  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR tree-optimization/84419
+       * internal-fn.c (expand_call_mem_ref): Create a TARGET_MEM_REF
+       with the required type if its current type is compatible but
+       different.
+ 2018-02-20  Jakub Jelinek  <jakub@redhat.com>
+       PR middle-end/82004
+       * match.pd (pow(C,x) -> exp(log(C)*x)): Delay all folding until
+       after vectorization.
+ 2018-02-20  Martin Liska  <mliska@suse.cz>
+       PR driver/83193
+       * config/aarch64/aarch64.c (aarch64_print_hint_for_core_or_arch): Print
+       possible values if we don't have a hint.
+ 2018-02-20  Martin Liska  <mliska@suse.cz>
+       PR c/84310
+       PR target/79747
+       * final.c (shorten_branches): Build align_tab array with one
+       more element.
+       * opts.c (finish_options): Add alignment option limit check.
+       (MAX_CODE_ALIGN): Likewise.
+       (MAX_CODE_ALIGN_VALUE): Likewise.
+       * doc/invoke.texi: Document maximum allowed option value for
+       all -falign-* options.
+ 2018-02-19  Jakub Jelinek  <jakub@redhat.com>
+       PR target/84146
+       * reg-notes.def (REG_CALL_ARG_LOCATION): New reg note.
+       * insn-notes.def (NOTE_INSN_CALL_ARG_LOCATION): Remove.
+       * var-tracking.c (emit_note_insn_var_location): Remove all references
+       to NOTE_INSN_CALL_ARG_LOCATION.
+       (emit_notes_in_bb): Emit arguments as REG_CALL_ARG_LOCATION note on
+       the CALL_INSN rather than separate NOTE_INSN_CALL_ARG_LOCATION note.
+       Use copy_rtx_if_shared.
+       * dwarf2out.c (gen_subprogram_die): Use XEXP with 0 instead of
+       NOTE_VAR_LOCATION on ca_loc->call_arg_loc_note.
+       (dwarf2out_var_location): Remove handling of
+       NOTE_INSN_CALL_ARG_LOCATION, instead handle REG_CALL_ARG_LOCATION note
+       on call_insn.
+       * final.c (final_scan_insn): Remove all references to
+       NOTE_INSN_CALL_ARG_LOCATION.
+       (rest_of_clean_state): Likewise.  Remove REG_CALL_ARG_LOCATION notes
+       before dumping final insns.
+       * except.c (emit_note_eh_region_end): Remove all references to
+       NOTE_INSN_CALL_ARG_LOCATION.
+       * config/alpha/alpha.c (alpha_pad_function_end): Likewise.
+       * config/c6x/c6x.c (c6x_gen_bundles): Likewise.
+       * config/arc/arc.c (hwloop_optimize): Likewise.
+       * config/arm/arm.c (create_fix_barrier): Likewise.
+       * config/s390/s390.c (s390_chunkify_start): Likewise.
+       * config/sh/sh.c (find_barrier): Likewise.
+       * config/i386/i386.c (rest_of_insert_endbranch,
+       ix86_seh_fixup_eh_fallthru): Likewise.
+       * config/xtensa/xtensa.c (hwloop_optimize): Likewise.
+       * config/iq2000/iq2000.c (final_prescan_insn): Likewise.
+       * config/frv/frv.c (frv_function_prologue): Likewise.
+       * emit-rtl.c (try_split): Likewise.  Copy over REG_CALL_ARG_LOCATION
+       reg note.
+       (note_outside_basic_block_p): Remove all references to
+       NOTE_INSN_CALL_ARG_LOCATION.
+       * gengtype.c (adjust_field_rtx_def): Likewise.
+       * print-rtl.c (rtx_writer::print_rtx_operand_code_0, print_insn):
+       Likewise.
+       * jump.c (cleanup_barriers, delete_related_insns): Likewise.
+       * cfgrtl.c (force_nonfallthru_and_redirect): Likewise.
  
-       * expr.c (expand_expr_real_2): Use word_mode instead of innermode
-       when the two are known to be equal.
+       PR c++/84444
+       * builtins.c (builtin_mathfn_code): Don't check if CALL_EXPR_FN (t)
+       is ADDR_EXPR.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR tree-optimization/84452
+       * tree-vect-patterns.c (vect_recog_pow_pattern): Don't call
+       expand_simd_clones if targetm.simd_clone.compute_vecsize_and_simdlen
+       is NULL.
  
-       * simplify-rtx.c (simplify_const_unary_operation): Use
-       is_a <scalar_int_mode> instead of checking for a nonzero
-       precision.  Forcibly convert op_mode to a scalar_int_mode
-       in that case.  More clearly differentiate the operand and
-       result modes and use the former when deciding what the value
-       of a count-bits operation should be.  Use is_int_mode instead
-       of checking for a MODE_INT.  Remove redundant check for whether
-       this mode has a zero precision.
+ 2018-02-19  Martin Liska  <mliska@suse.cz>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR sanitizer/82183
+       * passes.def: Put pass_sancov_O0 before pass_lower_switch with -O0.
  
-       * optabs.c (widen_leading): Change the type of the mode argument
-       to scalar_int_mode.  Use opt_scalar_int_mode for the mode iterator.
-       (widen_bswap): Likewise.
-       (expand_parity): Likewise.
-       (expand_ctz): Change the type of the mode argument to scalar_int_mode.
-       (expand_ffs): Likewise.
-       (expand_unop): Check for scalar integer modes before calling the
-       above routines.
+ 2018-02-19  Martin Liska  <mliska@suse.cz>
+           Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR tree-optimization/82491
+       * gimple-fold.c (get_base_constructor): Make earlier bail out
+       to prevent ubsan.
  
-       * expr.c (const_scalar_mask_from_tree): Add a mode argument.
-       Expand commentary.
-       (expand_expr_real_1): Update call accordingly.
+ 2018-02-19  Carl Love  <cel@us.ibm.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/rs6000/rs6000-builtin.def: Change NEG macro expansions from
+       BU_ALTIVEC_A to BU_P8V_AV_1 and BU_ALTIVEC_OVERLOAD_1 to
+       BU_P8V_OVERLOAD_1.
+       * config/rs6000/rs6000-c.c: Change ALTIVEC_BUILTIN_VEC_NEG to
+       P8V_BUILTIN_VEC_NEG.
  
-       * expmed.c (store_bit_field_using_insv): Add op0_mode and
-       value_mode arguments.  Use scalar_int_mode internally.
-       (store_bit_field_1): Rename the new integer mode from imode
-       to op0_mode and use it instead of GET_MODE (op0).  Update calls
-       to store_split_bit_field, store_bit_field_using_insv and
-       store_fixed_bit_field.
-       (store_fixed_bit_field): Add op0_mode and value_mode arguments.
-       Use scalar_int_mode internally.  Use a bit count rather than a mode
-       when calculating the largest bit size for get_best_mode.
-       Update calls to store_split_bit_field and store_fixed_bit_field_1.
-       (store_fixed_bit_field_1): Add mode and value_mode arguments.
-       Remove assertion that OP0 has a scalar integer mode.
-       (store_split_bit_field): Add op0_mode and value_mode arguments.
-       Update calls to extract_fixed_bit_field.
-       (extract_bit_field_using_extv): Add an op0_mode argument.
-       Use scalar_int_mode internally.
-       (extract_bit_field_1): Rename the new integer mode from imode to
-       op0_mode and use it instead of GET_MODE (op0).  Update calls to
-       extract_split_bit_field, extract_bit_field_using_extv and
-       extract_fixed_bit_field.
-       (extract_fixed_bit_field): Add an op0_mode argument.  Update calls
-       to extract_split_bit_field and extract_fixed_bit_field_1.
-       (extract_fixed_bit_field_1): Add a mode argument.  Remove assertion
-       that OP0 has a scalar integer mode.  Use as_a <scalar_int_mode>
-       on the target mode.
-       (extract_split_bit_field): Add an op0_mode argument.  Update call
-       to extract_fixed_bit_field.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-19  Sebastian Perta  <sebastian.perta@renesas.com>
  
-       * cse.c (cse_insn): Use opt_scalar_int_mode for the mode iterator.
-       * explow.c (hard_function_value): Likewise.
-       * expmed.c (extract_fixed_bit_field_1): Likewise.  Move the
-       convert_to_mode call outside the loop.
-       * expr.c (alignment_for_piecewise_move): Use opt_scalar_int_mode
-       for the mode iterator.  Require the mode specified by max_pieces
-       to exist.
-       (emit_block_move_via_movmem): Use opt_scalar_int_mode for the
-       mode iterator.
-       (copy_blkmode_to_reg): Likewise.
-       (set_storage_via_setmem): Likewise.
-       * optabs.c (prepare_cmp_insn): Likewise.
-       * rtlanal.c (init_num_sign_bit_copies_in_rep): Likewise.
-       * stor-layout.c (finish_bitfield_representative): Likewise.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/rl78/rl78.md (movdf): New define expand.
  
-       * rtl.h (subreg_unpromoted_mode, subreg_promoted_mode): New functions.
-       * expr.c (convert_move): Use them.
-       (convert_modes): Likewise.
-       (store_expr_with_bounds): Likewise.
+ 2018-02-19  Martin Liska  <mliska@suse.cz>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR other/80589
+       * doc/invoke.texi: Fix typo.
+       * params.def (PARAM_MAX_LOOP_HEADER_INSNS): Likewise.
  
-       * rtl.h (rtl_hooks::reg_nonzero_bits): Add a scalar_int_mode
-       parameter for the mode of "x".  Remove the "known_x", "known_mode"
-       and "known_ret" arguments.  Change the type of the mode argument
-       to scalar_int_mode.
-       (rtl_hooks::reg_num_sign_bit_copies): Likewise.
-       * combine.c (reg_nonzero_bits_for_combine): Update accordingly.
-       (reg_num_sign_bit_copies_for_combine): Likewise.
-       * rtlanal.c (nonzero_bits1): Likewise.
-       (num_sign_bit_copies1): Likewise.
-       * rtlhooks-def.h (reg_nonzero_bits_general): Likewise.
-       (reg_num_sign_bit_copies_general): Likewise.
-       * rtlhooks.c (reg_num_sign_bit_copies_general): Likewise.
-       (reg_nonzero_bits_general): Likewise.
+ 2018-02-18  Segher Boessenkool  <segher@kernel.crashing.org>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/rs6000/rs6000.c (rs6000_option_override_internal): Don't
+       handle rs6000_single_float and rs6000_double_float specially for
+       e500 family CPUs.
  
-       * rtlanal.c (num_sign_bit_copies): Handle VOIDmode here rather
-       than in subroutines.  Return 1 for non-integer modes.
-       (cached_num_sign_bit_copies): Change the type of the mode parameter
-       to scalar_int_mode.
-       (num_sign_bit_copies1): Likewise.  Remove early exit for other mode
-       classes.  Handle CONST_INT_P first and then check whether X also
-       has a scalar integer mode.  Check the same thing for inner registers
-       of a SUBREG and for values that are being extended or truncated.
+ 2018-02-16  Jeff Law  <law@redhat.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/rx/rx.c (add_pop_cfi_notes): New function.
+       (pop_regs): Use it.
  
-       * rtlanal.c (nonzero_bits): Handle VOIDmode here rather than
-       in subroutines.  Return the mode mask for non-integer modes.
-       (cached_nonzero_bits): Change the type of the mode parameter
-       to scalar_int_mode.
-       (nonzero_bits1): Likewise.  Remove early exit for other mode
-       classes.  Handle CONST_INT_P first and then check whether X
-       also has a scalar integer mode.
+ 2018-02-16  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR ipa/84425
+       * ipa-inline.c (inline_small_functions): Fix a typo.
  
-       * expr.c (widest_int_mode_for_size): Make the comment match the code.
-       Return a scalar_int_mode and assert that the size is greater than
-       one byte.
-       (by_pieces_ninsns): Update accordingly and remove VOIDmode handling.
-       (op_by_pieces_d::op_by_pieces_d): Likewise.
-       (op_by_pieces_d::run): Likewise.
-       (can_store_by_pieces): Likewise.
+ 2018-02-16  Nathan Sidwell  <nathan@acm.org>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * doc/extend.texi (Backwards Compatibility): Americanize 'behaviour'.
  
-       * combine.c (extract_left_shift): Add a mode argument and update
-       recursive calls.
-       (make_compound_operation_int): Change the type of the mode parameter
-       to scalar_int_mode and update the call to extract_left_shift.
+ 2018-02-16  Carl Love  <cel@us.ibm.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/rs6000/rs6000-builtin.def: Add BU_P8V_VSX_2 macro definition.
+       Change FLOAT2 expansions from BU_VSX_2 to BU_P8V_VSX_2 and
+       from BU_VSX_OVERLOAD_2 to BU_P8V_OVERLOAD_2.
+       * config/rs6000/rs6000-c.c: Changed macro VSX_BUILTIN_VEC_FLOAT2
+       expansion to P8V_BUILTIN_VEC_FLOAT2.
  
-       * combine.c (simplify_and_const_int): Change the type of the mode
-       parameter to scalar_int_mode.
-       (simplify_and_const_int_1): Likewise.  Update recursive call.
+ 2018-02-16  Vladimir Makarov  <vmakarov@redhat.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR rtl-optimization/70023
+       * lra-constraints.c (inherit_in_ebb): Take hard reg mode of
+       src_regno into account.
  
-       * combine.c (simplify_compare_const): Check that the mode is a
-       scalar_int_mode (rather than VOIDmode) before testing its
-       precision.
-       (simplify_comparison): Move COMPARISON_P handling out of the
-       loop and restrict the latter part of the loop to scalar_int_modes.
-       Check is_a <scalar_int_mode> before calling HWI_COMPUTABLE_MODE_P
-       and when considering SUBREG_REGs.  Use is_int_mode instead of
-       checking GET_MODE_CLASS against MODE_INT.
+ 2018-02-16  Carl Love  <cel@us.ibm.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/rs6000/altivec.h: Remove vec_vextract4b and vec_vinsert4b.
+       * config/rs6000/rs6000-builtin.def: Remove macro expansion for
+       VEXTRACT4B, VINSERT4B, VINSERT4B_DI and VEXTRACT4B.
+       * config/rs6000/rs6000.c: Remove case statements for
+       P9V_BUILTIN_VEXTRACT4B, P9V_BUILTIN_VEC_VEXTRACT4B,
+       P9V_BUILTIN_VINSERT4B, P9V_BUILTIN_VINSERT4B_DI,
+       and P9V_BUILTIN_VEC_VINSERT4B.
+       * config/rs6000/rs6000-c.c (altivec_expand_builtin): Remove entries for
+       P9V_BUILTIN_VEC_VEXTRACT4B and P9V_BUILTIN_VEC_VINSERT4B.
+       * config/rs6000/vsx.md:
+       * doc/extend.texi: Remove vec_vextract4b, non ABI definitions for
+       vec_insert4b.
  
-       * combine.c (try_widen_shift_mode): Move check for equal modes to...
-       (simplify_shift_const_1): ...here.  Use scalar_int_mode for
-       shift_unit_mode and for modes involved in scalar shifts.
+ 2018-02-16  Carl Love  <cel@us.ibm.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/rs6000/altivec.h: Add builtin names vec_extract4b
+       vec_insert4b.
+       * config/rs6000/rs6000-builtin.def: Add INSERT4B and EXTRACT4B
+       definitions.
+       * config/rs6000/rs6000-c.c: Add the definitions for
+       P9V_BUILTIN_VEC_EXTRACT4B and P9V_BUILTIN_VEC_INSERT4B.
+       * config/rs6000/rs6000.c (altivec_expand_builtin): Add
+       P9V_BUILTIN_EXTRACT4B and P9V_BUILTIN_INSERT4B case statements.
+       * config/rs6000/vsx.md: Add define_insn extract4b.  Add define_expand
+       definition for insert4b and define insn *insert3b_internal.
+       * doc/extend.texi: Add documentation for vec_extract4b.
  
-       * combine.c (force_int_to_mode): New function, split out from...
-       (force_to_mode): ...here.  Keep xmode up-to-date and use it
-       instead of GET_MODE (x).
+ 2018-02-16  Nathan Sidwell  <nathan@acm.org>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * doc/extend.texi (Backwards Compatibility): Mention friend
+       injection.  Note for-scope is deprecated.
+       * doc/invoke.texi (-ffriend-injection): Deprecate.
  
-       * optabs-query.h (extraction_insn::struct_mode): Change type to
-       opt_scalar_int_mode and update comment.
-       (extraction_insn::field_mode): Change type to scalar_int_mode.
-       (extraction_insn::pos_mode): Likewise.
-       * combine.c (make_extraction): Update accordingly.
-       * optabs-query.c (get_traditional_extraction_insn): Likewise.
-       (get_optab_extraction_insn): Likewise.
-       * recog.c (simplify_while_replacing): Likewise.
-       * expmed.c (narrow_bit_field_mem): Change the type of the mode
-       parameter to opt_scalar_int_mode.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-16  Segher Boessenkool  <segher@kernel.crashing.org>
  
-       * machmode.h (bit_field_mode_iterator::next_mode): Take a pointer
-       to a scalar_int_mode instead of a machine_mode.
-       (bit_field_mode_iterator::m_mode): Change type to opt_scalar_int_mode.
-       (get_best_mode): Return a boolean and use a pointer argument to store
-       the selected mode.  Replace the limit mode parameter with a bit limit.
-       * expmed.c (adjust_bit_field_mem_for_reg): Use scalar_int_mode
-       for the values returned by bit_field_mode_iterator::next_mode.
-       (store_bit_field): Update call to get_best_mode.
-       (store_fixed_bit_field): Likewise.
-       (extract_fixed_bit_field): Likewise.
-       * expr.c (optimize_bitfield_assignment_op): Likewise.
-       * fold-const.c (optimize_bit_field_compare): Likewise.
-       (fold_truth_andor_1): Likewise.
-       * stor-layout.c (bit_field_mode_iterator::next_mode): As above.
-       Update for new type of m_mode.
-       (get_best_mode): As above.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * combine.c (try_combine): When adjusting LOG_LINKS for the destination
+       that moved to I2, also allow destinations that are a paradoxical
+       subreg (instead of a normal reg).
  
-       * expmed.c (strict_volatile_bitfield_p): Change the type of fieldmode
-       to scalar_int_mode.  Remove check for SCALAR_INT_MODE_P.
-       (store_bit_field): Check is_a <scalar_int_mode> before calling
-       strict_volatile_bitfield_p.
-       (extract_bit_field): Likewise.
+ 2018-02-16  Oleg Endo  <olegendo@gcc.gnu.org>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/83831
+       * config/rx/rx.c (rx_fuse_in_memory_bitop): Convert shift operand
+       to QImode.
  
-       * target.def (cstore_mode): Return a scalar_int_mode.
-       * doc/tm.texi: Regenerate.
-       * config/sparc/sparc.c (sparc_cstore_mode): Return a scalar_int_mode.
-       * targhooks.h (default_cstore_mode): Likewise.
-       * targhooks.c (default_cstore_mode): Likewise, using a forced
-       conversion.
-       * expmed.c (emit_cstore): Expect the target of the cstore to be
-       a scalar_int_mode.
+ 2018-02-16  Richard Biener  <rguenther@suse.de>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR tree-optimization/84037
+       PR tree-optimization/84016
+       PR target/82862
+       * config/i386/i386.c (ix86_builtin_vectorization_cost):
+       Adjust vec_construct for the fact we need additional higher latency
+       128bit inserts for AVX256 and AVX512 vector builds.
+       (ix86_add_stmt_cost): Scale vector construction cost for
+       elementwise loads.
  
-       * cfgloop.h (rtx_iv): Change type of extend_mode and mode to
-       scalar_int_mode.
-       (niter_desc): Likewise mode.
-       (iv_analyze): Add a mode parameter.
-       (biv_p): Likewise.
-       (iv_analyze_expr): Pass the mode parameter before the rtx it describes
-       and change its type to scalar_int_mode.
-       * loop-iv.c: Update commentary at head of file.
-       (iv_constant): Pass the mode parameter before the rtx it describes
-       and change its type to scalar_int_mode.  Remove VOIDmode handling.
-       (iv_subreg): Change the type of the mode parameter to scalar_int_mode.
-       (iv_extend): Likewise.
-       (shorten_into_mode): Likewise.
-       (iv_add): Use scalar_int_mode.
-       (iv_mult): Likewise.
-       (iv_shift): Likewise.
-       (canonicalize_iv_subregs): Likewise.
-       (get_biv_step_1): Pass the outer_mode parameter before the rtx
-       it describes and change its type to scalar_int_mode.  Also change
-       the type of the returned inner_mode to scalar_int_mode.
-       (get_biv_step): Likewise, turning outer_mode from a pointer
-       into a direct parameter.  Update call to get_biv_step_1.
-       (iv_analyze_biv): Add an outer_mode parameter.  Update calls to
-       iv_constant and get_biv_step.
-       (iv_analyze_expr): Pass the mode parameter before the rtx it describes
-       and change its type to scalar_int_mode.  Don't initialise iv->mode
-       to VOIDmode and remove later checks for its still being VOIDmode.
-       Update calls to iv_analyze_op and iv_analyze_expr.  Check
-       is_a <scalar_int_mode> when changing the mode under consideration.
-       (iv_analyze_def): Ignore registers that don't have a scalar_int_mode.
-       Update call to iv_analyze_expr.
-       (iv_analyze_op): Add a mode parameter.  Reject subregs whose
-       inner register is not also a scalar_int_mode.  Update call to
-       iv_analyze_biv.
-       (iv_analyze): Add a mode parameter.  Update call to iv_analyze_op.
-       (biv_p): Add a mode parameter.  Update call to iv_analyze_biv.
-       (iv_number_of_iterations): Use is_a <scalar_int_mode> instead of
-       separate mode class checks.  Update calls to iv_analyze.  Remove
-       fix-up of VOIDmodes after iv_analyze_biv.
-       * loop-unroll.c (analyze_iv_to_split_insn): Reject registers that
-       don't have a scalar_int_mode.  Update call to biv_p.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-16  Richard Biener  <rguenther@suse.de>
  
-       * cfgexpand.c (convert_debug_memory_address): Use
-       as_a <scalar_int_mode>.
-       * combine.c (expand_compound_operation): Likewise.
-       (make_extraction): Likewise.
-       (change_zero_ext): Likewise.
-       (simplify_comparison): Likewise.
-       * cse.c (cse_insn): Likewise.
-       * dwarf2out.c (minmax_loc_descriptor): Likewise.
-       (mem_loc_descriptor): Likewise.
-       (loc_descriptor): Likewise.
-       * expmed.c (init_expmed_one_mode): Likewise.
-       (synth_mult): Likewise.
-       (emit_store_flag_1): Likewise.
-       (expand_divmod): Likewise.  Use HWI_COMPUTABLE_MODE_P instead
-       of a comparison with size.
-       * expr.c (expand_assignment): Use as_a <scalar_int_mode>.
-       (reduce_to_bit_field_precision): Likewise.
-       * function.c (expand_function_end): Likewise.
-       * internal-fn.c (expand_arith_overflow_result_store): Likewise.
-       * loop-doloop.c (doloop_modify): Likewise.
-       * optabs.c (expand_binop): Likewise.
-       (expand_unop): Likewise.
-       (expand_copysign_absneg): Likewise.
-       (prepare_cmp_insn): Likewise.
-       (maybe_legitimize_operand): Likewise.
-       * recog.c (const_scalar_int_operand): Likewise.
-       * rtlanal.c (get_address_mode): Likewise.
-       * simplify-rtx.c (simplify_unary_operation_1): Likewise.
-       (simplify_cond_clz_ctz): Likewise.
-       * tree-nested.c (get_nl_goto_field): Likewise.
-       * tree.c (build_vector_type_for_mode): Likewise.
-       * var-tracking.c (use_narrower_mode): Likewise.
+       PR tree-optimization/84417
+       * tree-ssa.c (non_rewritable_mem_ref_base): Properly constrain
+       the MEM_REF offset when conversion to BIT_FIELD_REF is desired.
+       (non_rewritable_lvalue_p): Likewise, use poly-ints.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-16  Martin Liska  <mliska@suse.cz>
  
-       * tree.h (SCALAR_INT_TYPE_MODE): New macro.
-       * builtins.c (expand_builtin_signbit): Use it.
-       * cfgexpand.c (expand_debug_expr): Likewise.
-       * dojump.c (do_jump): Likewise.
-       (do_compare_and_jump): Likewise.
-       * dwarf2cfi.c (expand_builtin_init_dwarf_reg_sizes): Likewise.
-       * expmed.c (make_tree): Likewise.
-       * expr.c (expand_expr_real_2): Likewise.
-       (expand_expr_real_1): Likewise.
-       (try_casesi): Likewise.
-       * fold-const-call.c (fold_const_call_ss): Likewise.
-       * fold-const.c (unextend): Likewise.
-       (extract_muldiv_1): Likewise.
-       (fold_single_bit_test): Likewise.
-       (native_encode_int): Likewise.
-       (native_encode_string): Likewise.
-       (native_interpret_int): Likewise.
-       * gimple-fold.c (gimple_fold_builtin_memset): Likewise.
-       * internal-fn.c (expand_addsub_overflow): Likewise.
-       (expand_neg_overflow): Likewise.
-       (expand_mul_overflow): Likewise.
-       (expand_arith_overflow): Likewise.
-       * match.pd: Likewise.
-       * stor-layout.c (layout_type): Likewise.
-       * tree-cfg.c (verify_gimple_assign_ternary): Likewise.
-       * tree-ssa-math-opts.c (convert_mult_to_widen): Likewise.
-       * tree-ssanames.c (get_range_info): Likewise.
-       * tree-switch-conversion.c (array_value_type): Likewise.
-       * tree-vect-patterns.c (vect_recog_rotate_pattern): Likewise.
-       (vect_recog_divmod_pattern): Likewise.
-       (vect_recog_mixed_size_cond_pattern): Likewise.
-       * tree-vrp.c (extract_range_basic): Likewise.
-       (simplify_float_conversion_using_ranges): Likewise.
-       * tree.c (int_fits_type_p): Likewise.
-       * ubsan.c (instrument_bool_enum_load): Likewise.
-       * varasm.c (mergeable_string_section): Likewise.
-       (narrowing_initializer_constant_valid_p): Likewise.
-       (output_constant): Likewise.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR sanitizer/84307
+       * internal-fn.def (ASAN_CHECK): Set proper flags.
+       (ASAN_MARK): Likewise.
  
-       * machmode.h (NARROWEST_INT_MODE): New macro.
-       * expr.c (alignment_for_piecewise_move): Use it instead of
-       GET_CLASS_NARROWEST_MODE (MODE_INT).
-       (push_block): Likewise.
-       * stor-layout.c (bit_field_mode_iterator::bit_field_mode_iterator):
-       Likewise.
-       * tree-vrp.c (simplify_float_conversion_using_ranges): Likewise.
+ 2018-02-16  Julia Koval  <julia.koval@intel.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/i386/i386.c (ix86_option_override_internal): Remove PTA_CLWB
+       from PTA_CANNONLAKE.
  
-       * postreload.c (move2add_valid_value_p): Change the type of the
-       mode parameter to scalar_int_mode.
-       (move2add_use_add2_insn): Add a mode parameter and use it instead
-       of GET_MODE (reg).
-       (move2add_use_add3_insn): Likewise.
-       (reload_cse_move2add): Update accordingly.
+ 2018-02-16  Jakub Jelinek  <jakub@redhat.com>
+       PR target/84272
+       * config/aarch64/cortex-a57-fma-steering.c (fma_forest::merge_forest):
+       Use ++iter rather than iter++ for std::list iterators.
+       (func_fma_steering::dfs): Likewise.  Don't delete nodes right away,
+       defer deleting them until all nodes in the forest are processed.  Do
+       free even leaf nodes.  Change to_process into auto_vec.
+       PR bootstrap/84405
+       * system.h (BROKEN_VALUE_INITIALIZATION): Define for GCC < 4.3.
+       * vec.h (vec_default_construct): Use memset instead of placement new
+       if BROKEN_VALUE_INITIALIZATION is defined.
+       * hash-table.h (hash_table<Descriptor, Allocator>::empty_slow): Use
+       memset instead of value initialization if BROKEN_VALUE_INITIALIZATION
+       is defined.
+       PR rtl-optimization/83723
+       * lra-int.h (lra_substitute_pseudo): Add DEBUG_P argument.
+       * lra.c (lra_substitute_pseudo): Likewise.  If true, use
+       gen_rtx_raw_SUBREG instead of gen_rtx_SUBREG.  Pass DEBUG_P to
+       recursive calls.
+       (lra_substitute_pseudo_within_insn): Adjust lra_substitute_pseudo
+       callers.
+       * lra-constraints.c (inherit_reload_reg, split_reg): Likewise.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-16  Eric Botcazou  <ebotcazou@adacore.com>
  
-       * expr.c (expand_expr_real_2): Use scalar_int_mode for the
-       double-word mode.
-       * lower-subreg.c (resolve_shift_zext): Use is_a <scalar_int_mode>.
-       * optabs.c (expand_unop): Likewise.
+       PR rtl-optimization/81443
+       * rtlanal.c (num_sign_bit_copies1) <SUBREG>: Do not propagate results
+       from inner REGs to paradoxical SUBREGs.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-16  Richard Biener  <rguenther@suse.de>
  
-       * dwarf2out.c (typed_binop): Change mode parameter to scalar_int_mode.
-       (clz_loc_descriptor): Likewise.  Remove SCALAR_INT_MODE_P check.
-       (popcount_loc_descriptor): Likewise.
-       (bswap_loc_descriptor): Likewise.
-       (rotate_loc_descriptor): Likewise.
-       (mem_loc_descriptor): Add is_a <scalar_int_mode> checks before
-       calling the functions above.
+       PR tree-optimization/84399
+       * graphite-scop-detection.c (scop_detection::stmt_simple_for_scop_p):
+       For operands we can analyze at their definition make sure we can
+       analyze them at each use as well.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-16  Richard Biener  <rguenther@suse.de>
  
-       * combine.c (sign_extend_short_imm): Add is_a <scalar_int_mode>
-       checks.
-       (try_combine): Likewise.
-       (simplify_if_then_else): Likewise.
-       * cse.c (cse_insn): Likewise.
-       * dwarf2out.c (mem_loc_descriptor): Likewise.
-       * emit-rtl.c (gen_lowpart_common): Likewise.
-       * simplify-rtx.c (simplify_truncation): Likewise.
-       (simplify_binary_operation_1): Likewise.
-       (simplify_const_relational_operation): Likewise.
-       (simplify_ternary_operation): Likewise.
-       * tree-ssa-loop-ivopts.c (force_expr_to_var_cost): Likewise.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR tree-optimization/84190
+       * tree-ssa.c (non_rewritable_mem_ref_base): Do not touch
+       volatile accesses if the decl isn't volatile.
  
-       * cse.c (cse_insn): Add is_a <scalar_int_mode> checks.
-       * reload.c (push_reload): Likewise.
-       (find_reloads): Likewise.
+ 2018-02-15  Jason Merrill  <jason@redhat.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR c++/84314 - ICE with templates and fastcall attribute.
+       * attribs.c (build_type_attribute_qual_variant): Don't clobber
+       TYPE_CANONICAL on an existing type.
  
-       * combine.c (find_split_point): Add is_a <scalar_int_mode> checks.
-       (make_compound_operation_int): Likewise.
-       (change_zero_ext): Likewise.
-       * expr.c (convert_move): Likewise.
-       (convert_modes): Likewise.
-       * fwprop.c (forward_propagate_subreg): Likewise.
-       * loop-iv.c (get_biv_step_1): Likewise.
-       * optabs.c (widen_operand): Likewise.
-       * postreload.c (move2add_valid_value_p): Likewise.
-       * recog.c (simplify_while_replacing): Likewise.
-       * simplify-rtx.c (simplify_unary_operation_1): Likewise.
-       (simplify_binary_operation_1): Likewise.  Remove redundant
-       mode equality check.
+ 2018-02-15  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR tree-optimization/84383
+       * tree-ssa-strlen.c (maybe_diag_stxncpy_trunc): Don't look at
+       dstoff nor call operand_equal_p if dstbase is NULL.
  
-       * combine.c (combine_simplify_rtx): Add checks for
-       is_a <scalar_int_mode>.
-       (simplify_if_then_else): Likewise.
-       (make_field_assignment): Likewise.
-       (simplify_comparison): Likewise.
-       * ifcvt.c (noce_try_bitop): Likewise.
-       * loop-invariant.c (canonicalize_address_mult): Likewise.
-       * simplify-rtx.c (simplify_unary_operation_1): Likewise.
+       PR tree-optimization/84334
+       * match.pd ((A +- CST1) +- CST2 -> A + CST3): If A is
+       also a CONSTANT_CLASS_P, punt.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-14  Jim Wilson  <jimw@sifive.com>
  
-       * gimple-fold.c (gimple_fold_builtin_memory_op): Use
-       is_a <scalar_int_mode> instead of != BLKmode.
+       * config/riscv/riscv.c (riscv_first_stack_step): Move locals after
+       first SMALL_OPERAND check.  New local min_second_step.  Move assert
+       to where locals are set.  Add TARGET_RVC support.
+       * config/riscv/riscv.h (C_SxSP_BITS, SWSP_REACH, SDSP_REACH): New.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-14  Indu Bhagat  <indu.bhagat@oracle.com>
  
-       * cfgexpand.c (expand_debug_expr): Use is_a <scalar_int_mode>
-       instead of != VOIDmode.
-       * combine.c (if_then_else_cond): Likewise.
-       (change_zero_ext): Likewise.
-       * dwarf2out.c (mem_loc_descriptor): Likewise.
-       (loc_descriptor): Likewise.
-       * rtlanal.c (canonicalize_condition): Likewise.
-       * simplify-rtx.c (simplify_relational_operation_1): Likewise.
+       * doc/invoke.texi: Correct -Wformat-overflow code sample.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-14  Martin Sebor  <msebor@redhat.com>
  
-       * simplify-rtx.c (simplify_binary_operation_1): Use
-       is_a <scalar_int_mode> instead of !VECTOR_MODE_P.
+       PR tree-optimization/83698
+       * gimple-ssa-warn-restrict.c (builtin_memref::builtin_memref): For
+       arrays constrain the offset range to their bounds.
+       (builtin_access::strcat_overlap): Adjust the bounds of overlap offset.
+       (builtin_access::overlap): Avoid setting the size of overlap if it's
+       already been set.
+       (maybe_diag_overlap): Also consider arrays when deciding what values
+       of offsets to include in diagnostics.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-14  Martin Sebor  <msebor@redhat.com>
  
-       * wide-int.h (int_traits<unsigned char>) New class.
-       (int_traits<unsigned short>) Likewise.
-       * cfgexpand.c (expand_debug_expr): Use is_a <scalar_int_mode>.
-       Use GET_MODE_UNIT_PRECISION and remove redundant test for
-       SCALAR_INT_MODE_P.
-       * combine.c (set_nonzero_bits_and_sign_copies): Use
-       is_a <scalar_int_mode>.
-       (find_split_point): Likewise.
-       (combine_simplify_rtx): Likewise.
-       (simplify_logical): Likewise.
-       (expand_compound_operation): Likewise.
-       (expand_field_assignment): Likewise.
-       (make_compound_operation): Likewise.
-       (extended_count): Likewise.
-       (change_zero_ext): Likewise.
-       (simplify_comparison): Likewise.
-       * dwarf2out.c (scompare_loc_descriptor): Likewise.
-       (ucompare_loc_descriptor): Likewise.
-       (minmax_loc_descriptor): Likewise.
-       (mem_loc_descriptor): Likewise.
-       (loc_descriptor): Likewise.
-       * expmed.c (init_expmed_one_mode): Likewise.
-       * lra-constraints.c (lra_constraint_offset): Likewise.
-       * optabs.c (prepare_libcall_arg): Likewise.
-       * postreload.c (move2add_note_store): Likewise.
-       * reload.c (operands_match_p): Likewise.
-       * rtl.h (load_extend_op): Likewise.
-       * rtlhooks.c (gen_lowpart_general): Likewise.
-       * simplify-rtx.c (simplify_truncation): Likewise.
-       (simplify_unary_operation_1): Likewise.
-       (simplify_binary_operation_1): Likewise.
-       (simplify_const_binary_operation): Likewise.
-       (simplify_const_relational_operation): Likewise.
-       (simplify_subreg): Likewise.
-       * stor-layout.c (bitwise_mode_for_mode): Likewise.
-       * var-tracking.c (adjust_mems): Likewise.
-       (prepare_call_arguments): Likewise.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR c/84108
+       * attribs.c (diag_attr_exclusions): Consider the exclusion(s)
+       that correspond to the kind of a declaration.
  
-       * machmode.h (is_int_mode): New function.
-       * combine.c (find_split_point): Use it.
-       (combine_simplify_rtx): Likewise.
-       (simplify_if_then_else): Likewise.
-       (simplify_set): Likewise.
-       (simplify_shift_const_1): Likewise.
-       (simplify_comparison): Likewise.
-       * config/aarch64/aarch64.c (aarch64_rtx_costs): Likewise.
-       * cse.c (notreg_cost): Likewise.
-       (cse_insn): Likewise.
-       * cselib.c (cselib_lookup_1): Likewise.
-       * dojump.c (do_jump_1): Likewise.
-       (do_compare_rtx_and_jump): Likewise.
-       * dse.c (get_call_args): Likewise.
-       * dwarf2out.c (rtl_for_decl_init): Likewise.
-       (native_encode_initializer): Likewise.
-       * expmed.c (emit_store_flag_1): Likewise.
-       (emit_store_flag): Likewise.
-       * expr.c (convert_modes): Likewise.
-       (store_field): Likewise.
-       (expand_expr_real_1): Likewise.
-       * fold-const.c (fold_read_from_constant_string): Likewise.
-       * gimple-ssa-sprintf.c (get_format_string): Likewise.
-       * optabs-libfuncs.c (gen_int_libfunc): Likewise.
-       * optabs.c (expand_binop): Likewise.
-       (expand_unop): Likewise.
-       (expand_abs_nojump): Likewise.
-       (expand_one_cmpl_abs_nojump): Likewise.
-       * simplify-rtx.c (mode_signbit_p): Likewise.
-       (val_signbit_p): Likewise.
-       (val_signbit_known_set_p): Likewise.
-       (val_signbit_known_clear_p): Likewise.
-       (simplify_relational_operation_1): Likewise.
-       * tree.c (vector_type_mode): Likewise.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-14  John David Anglin  <danglin@gcc.gnu.org>
  
-       * machmode.h (smallest_mode_for_size): Fix formatting.
-       (smallest_int_mode_for_size): New function.
-       * cfgexpand.c (expand_debug_expr): Use smallest_int_mode_for_size
-       instead of smallest_mode_for_size.
-       * combine.c (make_extraction): Likewise.
-       * config/arc/arc.c (arc_expand_movmem): Likewise.
-       * config/arm/arm.c (arm_expand_divmod_libfunc): Likewise.
-       * config/i386/i386.c (ix86_get_mask_mode): Likewise.
-       * config/s390/s390.c (s390_expand_insv): Likewise.
-       * config/sparc/sparc.c (assign_int_registers): Likewise.
-       * config/spu/spu.c (spu_function_value): Likewise.
-       (spu_function_arg): Likewise.
-       * coverage.c (get_gcov_type): Likewise.
-       (get_gcov_unsigned_t): Likewise.
-       * dse.c (find_shift_sequence): Likewise.
-       * expmed.c (store_bit_field_1): Likewise.
-       * expr.c (convert_move): Likewise.
-       (store_field): Likewise.
-       * internal-fn.c (expand_arith_overflow): Likewise.
-       * optabs-query.c (get_best_extraction_insn): Likewise.
-       * optabs.c (expand_twoval_binop_libfunc): Likewise.
-       * stor-layout.c (layout_type): Likewise.
-       (initialize_sizetypes): Likewise.
-       * targhooks.c (default_get_mask_mode): Likewise.
-       * tree-ssa-loop-manip.c (canonicalize_loop_ivs): Likewise.
+       PR target/83984
+       * config/pa/pa.md: Load address of PIC label using the linkage table
+       if the label is nonlocal.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-14  Kelvin Nilsen  <kelvin@gcc.gnu.org>
  
-       * machmode.h (opt_mode::else_blk): New function.
-       (int_mode_for_mode): Declare.
-       * stor-layout.c (int_mode_for_mode): Return an opt_scalar_int_mode.
-       * builtins.c (expand_builtin_signbit): Adjust for new int_mode_for_mode
-       return type.
-       * cfgexpand.c (expand_debug_expr): Likewise.
-       * combine.c (gen_lowpart_or_truncate): Likewise.
-       (gen_lowpart_for_combine): Likewise.
-       * config/aarch64/aarch64.c (aarch64_emit_approx_sqrt): Likewise.
-       * config/avr/avr.c (avr_to_int_mode): Likewise.
-       (avr_out_plus_1): Likewise.
-       (avr_out_plus): Likewise.
-       (avr_out_round): Likewise.
-       * config/i386/i386.c (ix86_split_to_parts): Likewise.
-       * config/s390/s390.c (s390_expand_vec_compare_cc): Likewise.
-       (s390_expand_vcond): Likewise.
-       * config/spu/spu.c (spu_split_immediate): Likewise.
-       (spu_expand_mov): Likewise.
-       * dse.c (get_stored_val): Likewise.
-       * expmed.c (store_bit_field_1): Likewise.
-       (convert_extracted_bit_field): Use int_mode_for_mode instead of
-       int_mode_for_size.
-       (extract_bit_field_1): Adjust for new int_mode_for_mode return type.
-       (extract_low_bits): Likewise.
-       * expr.c (emit_group_load_1): Likewise.  Separate out the BLKmode
-       handling rather than repeating the check.
-       (emit_group_store): Likewise.
-       (emit_move_via_integer): Adjust for new int_mode_for_mode return type.
-       * optabs.c (expand_absneg_bit): Likewise.
-       (expand_copysign_absneg): Likewise.
-       (expand_copysign_bit): Likewise.
-       * tree-if-conv.c (ifcvt_can_use_mask_load_store): Likewise.
-       * tree-vect-slp.c (vect_transform_slp_perm_load): Likewise.
-       * tree-vect-stmts.c (vect_gen_perm_mask_any): Likewise.
-       * var-tracking.c (prepare_call_arguments):  Likewise.
-       * config/powerpcspe/powerpcspe.c (rs6000_do_expand_vec_perm): Use
-       int_mode_for_mode instead of mode_for_size.
-       * config/rs6000/rs6000.c (rs6000_do_expand_vec_perm): Likewise.
+       * config/rs6000/rs6000.c (rs6000_option_override_internal): Issue
+       warning message if user requests -maltivec=be.
+       * doc/invoke.texi: Document deprecation of -maltivec=be.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-14  Will Schmidt  <will_schmidt@vnet.ibm.com>
  
-       * machmode.h (int_mode_for_size): New function.
-       * builtins.c (set_builtin_user_assembler_name): Use int_mode_for_size
-       instead of mode_for_size.
-       * calls.c (save_fixed_argument_area): Likewise.  Make use of BLKmode
-       explicit.
-       * combine.c (expand_field_assignment): Use int_mode_for_size
-       instead of mode_for_size.
-       (make_extraction): Likewise.
-       (simplify_shift_const_1): Likewise.
-       (simplify_comparison): Likewise.
-       * dojump.c (do_jump): Likewise.
-       * dwarf2out.c (mem_loc_descriptor): Likewise.
-       * emit-rtl.c (init_derived_machine_modes): Likewise.
-       * expmed.c (flip_storage_order): Likewise.
-       (convert_extracted_bit_field): Likewise.
-       * expr.c (copy_blkmode_from_reg): Likewise.
-       * graphite-isl-ast-to-gimple.c (max_mode_int_precision): Likewise.
-       * internal-fn.c (expand_mul_overflow): Likewise.
-       * lower-subreg.c (simple_move): Likewise.
-       * optabs-libfuncs.c (init_optabs): Likewise.
-       * simplify-rtx.c (simplify_unary_operation_1): Likewise.
-       * tree.c (vector_type_mode): Likewise.
-       * tree-ssa-strlen.c (handle_builtin_memcmp): Likewise.
-       * tree-vect-data-refs.c (vect_lanes_optab_supported_p): Likewise.
-       * tree-vect-generic.c (expand_vector_parallel): Likewise.
-       * tree-vect-stmts.c (vectorizable_load): Likewise.
-       (vectorizable_store): Likewise.
+       PR target/84220
+       * config/rs6000/rs6000-c.c: Update definitions for
+       ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VEC_SLDW,
+       VEC_XXSLDWI and ALTIVEC_BUILTIN_VEC_XXPERMDI builtins.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-14  Igor Tsimbalist  <igor.v.tsimbalist@intel.com>
  
-       * coretypes.h (pod_mode): New type.
-       (scalar_int_mode_pod): New typedef.
-       * machmode.h (pod_mode): New class.
-       (int_n_data_t::m): Change type to scalar_int_mode_pod.
-       * genmodes.c (emit_mode_int_n): Update accordingly.
-       * lower-subreg.h (target_lower_subreg): Change type to
-       scalar_int_mode_pod.
-       * gdbhooks.py (build_pretty_printer): Handle pod_mode and
-       scalar_int_mode_pod.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/84239
+       * config/i386/cetintrin.h: Remove _rdssp[d|q] and
+       add _get_ssp intrinsics. Remove argument from
+       __builtin_ia32_rdssp[d|q].
+       * config/i386/i386-builtin-types.def: Add UINT_FTYPE_VOID.
+       * config/i386/i386-builtin.def: Remove argument from
+       __builtin_ia32_rdssp[d|q].
+       * config/i386/i386.c: Use UINT_FTYPE_VOID. Use
+       ix86_expand_special_args_builtin for _rdssp[d|q].
+       * config/i386/i386.md: Remove argument from rdssp[si|di] insn.
+       Clear register before usage.
+       * doc/extend.texi: Remove argument from __builtin_ia32_rdssp[d|q].
+       Add documentation for new _get_ssp and _inc_ssp intrinsics.
  
-       * config/powerpcspe/powerpcspe.h (rs6000_pmode): Change type from
-       machine_mode to scalar_int_mode.
-       * config/powerpcspe/powerpcspe.c (rs6000_pmode): Likewise.
-       (rs6000_option_override_internal): Remove cast to int.
-       * config/rs6000/rs6000.h (rs6000_pmode): Change type from
-       machine_mode to scalar_int_mode.
-       * config/rs6000/rs6000.c (rs6000_pmode): Likewise.
-       (rs6000_option_override_internal): Remove cast to int.
-       * config/s390/s390.h (Pmode): Remove cast to machine_mode.
-       * config/epiphany/epiphany.h (RTX_OK_FOR_OFFSET_P): Add cast
-       to machine_mode.
-       * config/s390/s390.c (s390_expand_builtin): Likewise.
-       * coretypes.h (scalar_int_mode): New type.
-       (opt_scalar_int_mode): New typedef.
-       * machmode.h (scalar_int_mode): New class.
-       (scalar_int_mode::includes_p): New function.
-       (byte_mode): Change type to scalar_int_mode.
-       (word_mode): Likewise.
-       (ptr_mode): Likewise.
-       * emit-rtl.c (byte_mode): Likewise.
-       (word_mode): Likewise.
-       (ptr_mode): Likewise.
-       (init_derived_machine_modes): Update accordingly.
-       * genmodes.c (get_mode_class): Return scalar_int_mode for MODE_INT
-       and MODE_PARTIAL_INT.
-       * gdbhooks.py (build_pretty_printer): Handle scalar_int_mode and
-       opt_scalar_int_mode.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-14  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       * target.def (libgcc_floating_mode_supported_p): Take a
-       scalar_float_mode.
-       * doc/tm.texi: Regenerate.
-       * targhooks.h (default_libgcc_floating_mode_supported_p): Take a
-       scalar_float_mode.
-       * targhooks.c (default_libgcc_floating_mode_supported_p): Likewise.
-       * config/aarch64/aarch64.c (aarch64_libgcc_floating_mode_supported_p):
-       Likewise.
+       PR tree-optimization/84357
+       * tree-data-ref.c (object_address_invariant_in_loop_p): Check
+       operand 1 of an ARRAY_REF too.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-14  Oleg Endo  <olegendo@gcc.gnu.org>
  
-       * target.def (default_floatn_mode): Return an opt_scalar_float_mode.
-       * doc/tm.texi: Regenerate.
-       * config/arm/arm.c (arm_floatn_mode): Return an opt_scalar_float_mode.
-       * config/powerpcspe/powerpcspe.c (rs6000_floatn_mode): Likewise.
-       * config/rs6000/rs6000.c (rs6000_floatn_mode): Likewise.
-       * targhooks.h (default_floatn_mode): Likewise.
-       * targhooks.c (default_floatn_mode): Likewise.
-       * tree.c (build_common_tree_nodes): Update accordingly.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/83831
+       * config/rx/rx-protos.h (rx_reg_dead_or_unused_after_insn,
+       rx_copy_reg_dead_or_unused_notes, rx_fuse_in_memory_bitop): New
+       declarations.
+       (set_of_reg): New struct.
+       (rx_find_set_of_reg, rx_find_use_of_reg): New functions.
+       * config/rx/rx.c (rx_reg_dead_or_unused_after_insn,
+       rx_copy_reg_dead_or_unused_notes, rx_fuse_in_memory_bitop): New
+       functions.
+       * config/rx/rx.md (andsi3, iorsi3, xorsi3): Convert to insn_and_split.
+       Split into bitclr, bitset, bitinvert patterns if appropriate.
+       (*bitset, *bitinvert, *bitclr): Convert to named insn_and_split and
+       use rx_fuse_in_memory_bitop.
+       (*bitset_in_memory, *bitinvert_in_memory, *bitclr_in_memory): Convert
+       to named insn, correct maximum insn length.
+ 2018-02-14  Jozef Lawrynowicz  <jozefl.gcc@gmail.com>
+       PR target/79242
+       * machmode.def: Define a complex mode for PARTIAL_INT.
+       * genmodes.c (complex_class): Return MODE_COMPLEX_INT for
+       MODE_PARTIAL_INT.
+       * doc/rtl.texi: Document CPSImode.
+       * config/msp430/msp430.c (msp430_hard_regno_nregs): Add CPSImode
+       handling.
+       (msp430_hard_regno_nregs_with_padding): Likewise.
+ 2018-02-13  Peter Bergner  <bergner@vnet.ibm.com>
+       PR target/84279
+       * config/rs6000/rs6000.c (mem_operand_gpr): Disallow altivec addresses.
+ 2018-02-13  Segher Boessenkool  <segher@kernel.crashing.org>
+       PR rtl-optimization/84169
+       * combine.c (try_combine): New variable split_i2i3.  Set it to true if
+       we generated a parallel as new i3 and we split that to new i2 and i3
+       instructions.  Handle split_i2i3 similar to swap_i2i3: scan the
+       LOG_LINKs of i3 to see which of those need to link to i2 now.  Link
+       those to i2, not i1.  Partially rewrite this scan code.
+ 2018-02-13  Jakub Jelinek  <jakub@redhat.com>
+       PR c/82210
+       * stor-layout.c (place_field): For variable length fields, adjust
+       offset_align afterwards not just based on the field's alignment,
+       but also on the size.
+       PR middle-end/84309
+       * match.pd (pow(C,x) -> exp(log(C)*x)): Use exp2s and log2s instead
+       of exps and logs in the use_exp2 case.
+ 2018-02-13  Jeff Law  <law@redhat.com>
+       * config/rl78/rl78.c (rl78_attribute_table): Fix terminator and
+       entry for "vector".
+       * config/rl78/rl78.c (rl78_handle_func_attribute): Mark
+       ARGS as unused.
+ 2018-02-13  Alexandre Oliva  <aoliva@redhat.com>
+       PR debug/84342
+       PR debug/84319
+       * common.opt (gas-loc-support, gas-locview-support): New.
+       (ginline-points, ginternal-reset-location-views): New.
+       * doc/invoke.texi: Document them.  Use @itemx where intended.
+       (gvariable-location-views): Adjust.
+       * target.def (reset_location_view): New.
+       * doc/tm.texi.in (DWARF2_ASM_VIEW_DEBUG_INFO): New.
+       (TARGET_RESET_LOCATION_VIEW): New.
+       * doc/tm.texi: Rebuilt.
+       * dwarf2out.c (dwarf2out_default_as_loc_support): New.
+       (dwarf2out_default_as_locview_support): New.
+       (output_asm_line_debug_info): Use option variables.
+       (dwarf2out_maybe_output_loclist_view_pair): Likewise.
+       (output_loc_list): Likewise.
+       (add_high_low_attributes): Check option variables.
+       Don't output entry view attribute in strict mode.
+       (gen_inlined_subroutine_die): Check option variables.
+       (dwarf2out_inline_entry): Likewise.
+       (init_sections_and_labels): Likewise.
+       (dwarf2out_early_finish): Likewise.
+       (maybe_reset_location_view): New, from...
+       (dwarf2out_var_location): ... here.  Call it.
+       * debug.h (dwarf2out_default_as_loc_support): Declare.
+       (dwarf2out_default_as_locview_support): Declare.
+       * hooks.c (hook_int_rtx_insn_0): New.
+       * hooks.h (hook_int_rtx_insn_0): Declare.
+       * toplev.c (process_options): Take -gas-loc-support and
+       -gas-locview-support from dwarf2out.  Enable
+       -gvariable-location-views by default only with locview
+       assembler support.  Enable -ginternal-reset-location-views by
+       default only if the target defines the corresponding hook.
+       Enable -ginline-points by default if location views are
+       enabled; force it disabled if statement frontiers are
+       disabled.
+       * tree-inline.c (expand_call_inline): Check option variables.
+       * tree-ssa-live.c (remove_unused_scope_block_p): Likewise.
+ 2018-02-13  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR tree-optimization/84321
+       * tree-vrp.c (intersect_range_with_nonzero_bits): Fix VR_ANTI_RANGE
+       handling.  Also check whether the anti-range contains any values
+       that satisfy the mask; switch to a VR_RANGE if not.
+ 2018-02-13  Paolo Bonzini  <bonzini@gnu.org>
+       PR sanitizer/84340
+       * internal-fn.def (ASAN_CHECK, ASAN_MARK): Revert changes to fnspec.
+ 2018-02-13  Martin Jambor  <mjambor@suse.cz>
+       PR c++/83990
+       * ipa-param-manipulation.c (ipa_modify_call_arguments): Use location
+       of call statements, also set location of a load to a temporary.
+ 2018-02-13  Sebastian Perta  <sebastian.perta@renesas.com>
+       * config/rl78/rl78.c (add_vector_labels): New function.
+       * config/rl78/rl78.c (rl78_handle_vector_attribute): New function.
+       * config/rl78/rl78.c (rl78_start_function): Call add_vector_labels.
+       * config/rl78/rl78.c (rl78_handle_func_attribute): Removed the assert 
+       which checks that no arguments are passed.
+       * config/rl78/rl78.c (rl78_attribute_table): Add "vector" attribute.
+       * doc/extend.texi: Documentation for the new attribute.
+ 2018-02-13  Andreas Schwab  <schwab@suse.de>
+       * config/riscv/linux.h (CPP_SPEC): Define.
+ 2018-02-13  Jakub Jelinek  <jakub@redhat.com>
+       PR target/84335
+       * config/i386/i386.c (ix86_init_mmx_sse_builtins): Pass
+       OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2 instead of
+       OPTION_MASK_ISA_AES as first argument to def_builtin_const
+       for AES builtins.  Pass OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2
+       instead of OPTION_MASK_ISA_PCLMUL as first argument to
+       def_builtin_const for __builtin_ia32_pclmulqdq128 builtin.
+       * config/i386/wmmintrin.h: If __SSE2__ is not defined, enable it
+       temporarily for AES and PCLMUL builtins.
+       PR tree-optimization/84339
+       * gimple-fold.c (get_range_strlen): Set *FLEXP to true when handling
+       ARRAY_REF where first operand is array_at_struct_end_p COMPONENT_REF.
+       Formatting fixes.
  
-       * machmode.h (mode_iterator::start): Provide overload for opt_modes.
-       (mode_iterator::iterate_p): Likewise.
-       (mode_iterator::get_wider): Likewise.
-       * expr.c (init_expr_target): Use opt_scalar_float_mode.
+       PR middle-end/84309
+       * match.pd (pow(C,x) -> exp(log(C)*x)): Optimize instead into
+       exp2(log2(C)*x) if C is a power of 2 and c99 runtime is available.
+       * generic-match-head.c (canonicalize_math_after_vectorization_p): New
+       inline function.
+       * gimple-match-head.c (canonicalize_math_after_vectorization_p): New
+       inline function.
+       * omp-simd-clone.h: New file.
+       * omp-simd-clone.c: Include omp-simd-clone.h.
+       (expand_simd_clones): No longer static.
+       * tree-vect-patterns.c: Include fold-const-call.h, attribs.h,
+       cgraph.h and omp-simd-clone.h.
+       (vect_recog_pow_pattern): Optimize pow(C,x) to exp(log(C)*x).
+       (vect_recog_widen_shift_pattern): Formatting fix.
+       (vect_pattern_recog_1): Don't check optab for calls.
+       PR target/84336
+       * config/i386/sse.md (<avx512>_vpermi2var<mode>3_mask): Force
+       operands[2] into a REG before using gen_lowpart on it.
+ 2018-02-12  Jeff Law  <law@redhat.com>
+       PR target/83760
+       * config/sh/sh.c (find_barrier): Consider a sibling call
+       a barrier as well.
+       * cse.c (try_back_substitute_reg): Move any REG_ARGS_SIZE note when
+       successfully back substituting a reg.
+ 2018-02-12  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84037
+       * tree-vect-slp.c (vect_analyze_slp_cost): Add visited
+       parameter, move visited init to caller.
+       (vect_slp_analyze_operations): Separate cost from validity
+       check, initialize visited once for all instances.
+       (vect_schedule_slp): Analyze map to CSE vectorized nodes once
+       for all instances.
+       * tree-vect-stmts.c (vect_model_simple_cost): Make early
+       out an assert.
+       (vect_model_promotion_demotion_cost): Likewise.
+       (vectorizable_bswap): Guard cost modeling with !slp_node
+       instead of !PURE_SLP_STMT to avoid double-counting on hybrid
+       SLP stmts.
+       (vectorizable_call): Likewise.
+       (vectorizable_conversion): Likewise.
+       (vectorizable_assignment): Likewise.
+       (vectorizable_shift): Likewise.
+       (vectorizable_operation): Likewise.
+       (vectorizable_store): Likewise.
+       (vectorizable_load): Likewise.
+       (vectorizable_condition): Likewise.
+       (vectorizable_comparison): Likewise.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-12  Paolo Bonzini  <bonzini@gnu.org>
+       PR sanitizer/84307
+       * internal-fn.def (ASAN_CHECK): Fix fnspec to account for return value.
+       (ASAN_MARK): Fix fnspec to account for return value, change pointer
+       argument from 'R' to 'W' so that the pointed-to datum is clobbered.
+ 2018-02-08  Jan Hubicka  <hubicka@ucw.cz>
+       PR middle-end/83665
+       * params.def (inline-min-speedup): Increase from 8 to 15.
+       (max-inline-insns-auto): Decrease from 40 to 30.
+       * ipa-split.c (consider_split): Add some buffer for function to
+       be considered inlining candidate.
+       * invoke.texi (max-inline-insns-auto, inline-min-speedup): Update
+       default values.
+ 2018-02-12  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84037
+       * tree-vect-slp.c (vect_build_slp_tree_2): Try swapping the
+       matched stmts if we cannot swap the non-matched ones.
+ 2018-02-12  Olga Makhotina  <olga.makhotina@intel.com>
+       * config/i386/avx512fintrin.h (_mm_mask_scalef_round_sd,
+       _mm_maskz_scalef_round_sd, _mm_mask_scalef_round_ss,
+       _mm_maskz_scalef_round_ss): New intrinsics.
+       (__builtin_ia32_scalefsd_round, __builtin_ia32_scalefss_round): Fix.
+       * config/i386/i386-builtin.def (__builtin_ia32_scalefsd_round,
+       __builtin_ia32_scalefss_round): Remove.
+       (__builtin_ia32_scalefsd_mask_round,
+       __builtin_ia32_scalefss_mask_round): New intrinsics.
+       * config/i386/sse.md (vmscalef<mode><round_name>): Renamed to ...
+       (vmscalef<mode><mask_scalar_name><round_scalar_name>): ... this.
+       ((match_operand:VF_128 2 "<round_nimm_predicate>"
+       "<round_constraint>")): Changed to ...
+       ((match_operand:VF_128 2 "<round_scalar_nimm_predicate>"
+       "<round_scalar_constraint>")): ... this.
+       ("vscalef<ssescalarmodesuffix>\t{<round_op3>%2, %1, %0|
+       %0, %1, %2<round_op3>}"): Changed to ...
+       ("vscalef<ssescalarmodesuffix>\t{<round_scalar_mask_op3>%2, %1,
+       %0<mask_scalar_operand3>|%0<mask_scalar_operand3>, %1,
+       %2<round_scalar_mask_op3>}"): ... this.
+       * config/i386/subst.md (round_scalar_nimm_predicate): New.
+ 2018-02-12  Olga Makhotina  <olga.makhotina@intel.com>
+       * config/i386/avx512fintrin.h (_mm_mask_sqrt_round_sd)
+       (_mm_maskz_sqrt_round_sd, _mm_mask_sqrt_round_ss)
+       (_mm_maskz_sqrt_round_ss): New intrinsics.
+       (__builtin_ia32_sqrtsd_round, __builtin_ia32_sqrtss_round): Remove.
+       (__builtin_ia32_sqrtsd_mask_round)
+       (__builtin_ia32_sqrtss_mask_round): New builtins.
+       * config/i386/i386-builtin.def (__builtin_ia32_sqrtsd_round)
+       (__builtin_ia32_sqrtss_round): Remove.
+       (__builtin_ia32_sqrtsd_mask_round)
+       (__builtin_ia32_sqrtss_mask_round): New builtins.
+       * config/i386/sse.md (vmsqrt<mode>2<round_name>): Renamed to ...
+       (vmsqrt<mode>2<mask_scalar_name><round_scalar_name>): ... this.
+       ((match_operand:VF_128 1 "vector_operand"
+       "xBm,<round_constraint>")): Changed to ...
+       ((match_operand:VF_128 1 "vector_operand"
+       "xBm,<round_scalar_constraint>")): ... this.
+       (vsqrt<ssescalarmodesuffix>\t{<round_op3>%1, %2, %0|
+       %0, %2, %<iptr>1<round_op3>}): Changed to ...
+       (vsqrt<ssescalarmodesuffix>\t{<round_scalar_mask_op3>%1, %2,
+       %0<mask_scalar_operand3>|%0<mask_scalar_operand3>, %2,
+       %<iptr>1<round_scalar_mask_op3>}): ... this.
+       ((set_attr "prefix" "<round_prefix>")): Changed to ...
+       ((set_attr "prefix" "<round_scalar_prefix>")): ... this.
+ 2018-02-11  Steven Munroe  <munroesj@gcc.gnu.org>
+       PR target/84266
+       * config/rs6000/mmintrin.h (_mm_cmpeq_pi32 [_ARCH_PWR9]):
+       Cast vec_cmpeq result to correct type.
+       * config/rs6000/mmintrin.h (_mm_cmpgt_pi32 [_ARCH_PWR9]):
+       Cast vec_cmpgt result to correct type.
+ 2018-02-11  Alexandre Oliva  <aoliva@redhat.com>
+       * final.c (final_scan_insn_1): Renamed from...
+       (final_scan_insn): ... this.  New wrapper, to recover
+       seen from the outermost call in recursive ones.
+       * config/sparc/sparc.c (output_return): Drop seen from call.
+       (output_sibcall): Likewise.
+       * config/visium/visium.c (output_branch): Likewise.
  
-       * coretypes.h (opt_scalar_float_mode): New typedef.
-       * machmode.h (float_mode_for_size): New function.
-       * emit-rtl.c (double_mode): Delete.
-       (init_emit_once): Use float_mode_for_size.
-       * stor-layout.c (layout_type): Likewise.
-       * gdbhooks.py (build_pretty_printer): Handle opt_scalar_float_mode.
+ 2018-02-10  John David Anglin  <danglin@gcc.gnu.org>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/pa/pa.c (hppa_profile_hook): Mark SYMBOL_REF for _mcount as
+       function label.
  
-       * output.h (assemble_real): Take a scalar_float_mode.
-       * config/arm/arm.c (arm_assemble_integer): Update accordingly.
-       * config/arm/arm.md (consttable_4): Likewise.
-       (consttable_8): Likewise.
-       (consttable_16): Likewise.
-       * config/mips/mips.md (consttable_float): Likewise.
-       * config/s390/s390.c (s390_output_pool_entry): Likewise.
-       * varasm.c (assemble_real): Take a scalar_float_mode.
-       (output_constant_pool_2): Update accordingly.
-       (output_constant): Likewise.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-10  Alan Modra  <amodra@gmail.com>
  
-       * tree.h (SCALAR_FLOAT_TYPE_MODE): New macro.
-       * builtins.c (expand_builtin_signbit): Use it instead of TYPE_MODE.
-       * fold-const.c (fold_convert_const_real_from_fixed): Likewise.
-       (native_encode_real): Likewise.
-       (native_interpret_real): Likewise.
-       * hsa-brig.c (emit_immediate_scalar_to_buffer): Likewise.
-       * tree-vrp.c (simplify_float_conversion_using_ranges): Likewise.
+       PR target/84300
+       * config/rs6000/rs6000.md (split_stack_return): Remove (use ..).
+       Specify LR as an input.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-10  Jakub Jelinek  <jakub@redhat.com>
  
-       * optabs-libfuncs.c (gen_trunc_conv_libfunc): Use is_a
-       <scalar_float_mode>.  Simplify.
-       (gen_extend_conv_libfunc): Likewise.
+       PR sanitizer/83987
+       * omp-low.c (maybe_remove_omp_member_access_dummy_vars,
+       remove_member_access_dummy_vars): New functions.
+       (lower_omp_for, lower_omp_taskreg, lower_omp_target,
+       lower_omp_1, execute_lower_omp): Use them.
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR rtl-optimization/84308
+       * shrink-wrap.c (spread_components): Release todo vector.
  
-       * coretypes.h (scalar_float_mode): New type.
-       * machmode.h (mode_traits::from_int): Use machine_mode if
-       USE_ENUM_MODES is defined.
-       (is_a): New function.
-       (as_a): Likewise.
-       (dyn_cast): Likewise.
-       (scalar_float_mode): New class.
-       (scalar_float_mode::includes_p): New function.
-       (is_float_mode): Likewise.
-       * gdbhooks.py (MachineModePrinter): New class.
-       (build_pretty_printer): Use it for scalar_float_mode.
-       * real.h (FLOAT_MODE_FORMAT): Use as_a <scalar_float_mode>.
-       (format_helper::format_helper): Turn into a template.
-       * genmodes.c (get_mode_class): New function.
-       (emit_insn_modes_h): Give modes the class returned by get_mode_class,
-       or machine_mode if none.
-       * config/aarch64/aarch64.c (aarch64_simd_valid_immediate): Use
-       as_a <scalar_float_mode>.
-       * dwarf2out.c (mem_loc_descriptor): Likewise.
-       (insert_float): Likewise.
-       (add_const_value_attribute): Likewise.
-       * simplify-rtx.c (simplify_immed_subreg): Likewise.
-       * optabs.c (expand_absneg_bit): Take a scalar_float_mode.
-       (expand_unop): Update accordingly.
-       (expand_abs_nojump): Likewise.
-       (expand_copysign_absneg): Take a scalar_float_mode.
-       (expand_copysign_bit): Likewise.
-       (expand_copysign): Update accordingly.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-09  Vladimir Makarov  <vmakarov@redhat.com>
  
-       * coretypes.h (opt_mode): New class.
-       * machmode.h (opt_mode): Likewise.
-       (opt_mode::else_void): New function.
-       (opt_mode::require): Likewise.
-       (opt_mode::exists): Likewise.
-       (GET_MODE_WIDER_MODE): Turn into a function and return an opt_mode.
-       (GET_MODE_2XWIDER_MODE): Likewise.
-       (mode_iterator::get_wider): Update accordingly.
-       (mode_iterator::get_2xwider): Likewise.
-       (mode_iterator::get_known_wider): Likewise, turning into a template.
-       * combine.c (make_extraction): Update use of GET_MODE_WIDER_MODE,
-       forcing a wider mode to exist.
-       * config/cr16/cr16.h (LONG_REG_P): Likewise.
-       * rtlanal.c (init_num_sign_bit_copies_in_rep): Likewise.
-       * config/c6x/c6x.c (c6x_rtx_costs): Update use of
-       GET_MODE_2XWIDER_MODE, forcing a wider mode to exist.
-       * lower-subreg.c (init_lower_subreg): Likewise.
-       * optabs-libfuncs.c (init_sync_libfuncs_1): Likewise, but not
-       on the final iteration.
-       * config/i386/i386.c (ix86_expand_set_or_movmem): Check whether
-       a wider mode exists before asking for a move pattern.
-       (get_mode_wider_vector): Update use of GET_MODE_WIDER_MODE,
-       forcing a wider mode to exist.
-       (expand_vselect_vconcat): Update use of GET_MODE_2XWIDER_MODE,
-       returning false if no such mode exists.
-       * config/ia64/ia64.c (expand_vselect_vconcat): Likewise.
-       * config/mips/mips.c (mips_expand_vselect_vconcat): Likewise.
-       * expmed.c (init_expmed_one_mode): Update use of GET_MODE_WIDER_MODE.
-       Avoid checking for a MODE_INT if we already know the mode is not a
-       SCALAR_INT_MODE_P.
-       (extract_high_half): Update use of GET_MODE_WIDER_MODE,
-       forcing a wider mode to exist.
-       (expmed_mult_highpart_optab): Likewise.
-       (expmed_mult_highpart): Likewise.
-       * expr.c (expand_expr_real_2): Update use of GET_MODE_WIDER_MODE,
-       using else_void.
-       * lto-streamer-in.c (lto_input_mode_table): Likewise.
-       * optabs-query.c (find_widening_optab_handler_and_mode): Likewise.
-       * stor-layout.c (bit_field_mode_iterator::next_mode): Likewise.
-       * internal-fn.c (expand_mul_overflow): Update use of
-       GET_MODE_2XWIDER_MODE.
-       * omp-low.c (omp_clause_aligned_alignment): Likewise.
-       * tree-ssa-math-opts.c (convert_mult_to_widen): Update use of
-       GET_MODE_WIDER_MODE.
-       (convert_plusminus_to_widen): Likewise.
-       * tree-switch-conversion.c (array_value_type): Likewise.
-       * var-tracking.c (emit_note_insn_var_location): Likewise.
-       * tree-vrp.c (simplify_float_conversion_using_ranges): Likewise.
-       Return false inside rather than outside the loop if no wider mode
-       exists
-       * optabs.c (expand_binop): Update use of GET_MODE_WIDER_MODE
-       and GET_MODE_2XWIDER_MODE
-       (can_compare_p): Use else_void.
-       * gdbhooks.py (OptMachineModePrinter): New class.
-       (build_pretty_printer): Use it for opt_mode.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR rtl-optimization/57193
+       * ira-color.c (struct allocno_color_data): Add member
+       conflict_allocno_hard_prefs.
+       (update_conflict_allocno_hard_prefs): New.
+       (bucket_allocno_compare_func): Add a preference based on
+       conflict_allocno_hard_prefs.
+       (push_allocno_to_stack): Update conflict_allocno_hard_prefs.
+       (color_allocnos): Remove a dead code.  Initiate
+       conflict_allocno_hard_prefs.  Call update_costs_from_prefs.
  
-       * tree-switch-conversion.c (array_value_type): Only read TYPE_MODE
-       once.  Use get_narrowest_mode instead of GET_CLASS_NARROWEST_MODE.
+ 2018-02-09  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/84226
+       * config/rs6000/vsx.md (p9_xxbrq_v16qi): Change input operand
+       constraint from =wa to wa.  Avoid a subreg on the output operand,
+       instead use a pseudo and subreg it in a move.
+       (p9_xxbrd_<mode>): Changed to ...
+       (p9_xxbrd_v2di): ... this insn, without VSX_D iterator.
+       (p9_xxbrd_v2df): New expander.
+       (p9_xxbrw_<mode>): Changed to ...
+       (p9_xxbrw_v4si): ... this insn, without VSX_W iterator.
+       (p9_xxbrw_v4sf): New expander.
  
-       * machmode.h (mode_traits): New structure.
-       (get_narrowest_mode): New function.
-       (mode_iterator::start): Likewise.
-       (mode_iterator::iterate_p): Likewise.
-       (mode_iterator::get_wider): Likewise.
-       (mode_iterator::get_known_wider): Likewise.
-       (mode_iterator::get_2xwider): Likewise.
-       (FOR_EACH_MODE_IN_CLASS): New mode iterator.
-       (FOR_EACH_MODE): Likewise.
-       (FOR_EACH_MODE_FROM): Likewise.
-       (FOR_EACH_MODE_UNTIL): Likewise.
-       (FOR_EACH_WIDER_MODE): Likewise.
-       (FOR_EACH_2XWIDER_MODE): Likewise.
-       * builtins.c (expand_builtin_strlen): Use new mode iterators.
-       * combine.c (simplify_comparison): Likewise
-       * config/i386/i386.c (type_natural_mode): Likewise.
-       * cse.c (cse_insn): Likewise.
-       * dse.c (find_shift_sequence): Likewise.
-       * emit-rtl.c (init_derived_machine_modes): Likewise.
-       (init_emit_once): Likewise.
-       * explow.c (hard_function_value): Likewise.
-       * expmed.c (extract_fixed_bit_field_1): Likewise.
-       (extract_bit_field_1): Likewise.
-       (expand_divmod): Likewise.
-       (emit_store_flag_1): Likewise.
-       * expr.c (init_expr_target): Likewise.
-       (convert_move): Likewise.
-       (alignment_for_piecewise_move): Likewise.
-       (widest_int_mode_for_size): Likewise.
-       (emit_block_move_via_movmem): Likewise.
-       (copy_blkmode_to_reg): Likewise.
-       (set_storage_via_setmem): Likewise.
-       (compress_float_constant): Likewise.
-       * omp-low.c (omp_clause_aligned_alignment): Likewise.
-       * optabs-query.c (get_best_extraction_insn): Likewise.
-       * optabs.c (expand_binop): Likewise.
-       (expand_twoval_unop): Likewise.
-       (expand_twoval_binop): Likewise.
-       (widen_leading): Likewise.
-       (widen_bswap): Likewise.
-       (expand_parity): Likewise.
-       (expand_unop): Likewise.
-       (prepare_cmp_insn): Likewise.
-       (prepare_float_lib_cmp): Likewise.
-       (expand_float): Likewise.
-       (expand_fix): Likewise.
-       (expand_sfix_optab): Likewise.
-       * postreload.c (move2add_use_add2_insn): Likewise.
-       * reg-stack.c (reg_to_stack): Likewise.
-       * reginfo.c (choose_hard_reg_mode): Likewise.
-       * rtlanal.c (init_num_sign_bit_copies_in_rep): Likewise.
-       * stor-layout.c (mode_for_size): Likewise.
-       (smallest_mode_for_size): Likewise.
-       (mode_for_vector): Likewise.
-       (finish_bitfield_representative): Likewise.
-       * tree-ssa-math-opts.c (target_supports_divmod_p): Likewise.
-       * tree-vect-generic.c (type_for_widest_vector_mode): Likewise.
-       * tree-vect-stmts.c (vectorizable_conversion): Likewise.
-       * var-tracking.c (prepare_call_arguments): Likewise.
+ 2018-02-09  Sebastian Perta  <sebastian.perta@renesas.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * config/rx/rx.md (movsicc): Update expander to be matched by GCC.
  
-       * genconditions.c (write_header): Add a "#define USE_ENUM_MODES".
-       * genmodes.c (emit_insn_modes_h): Define FOOmode to E_FOOmode if
-       USE_ENUM_MODES is defined and to ((void) 0, E_FOOmode) otherwise.
-       * machmode.h (mode_size): Move earlier in file.
-       (mode_precision): Likewise.
-       (mode_inner): Likewise.
-       (mode_nunits): Likewise.
-       (mode_unit_size): Likewise.
-       (unit_unit_precision): Likewise.
-       (mode_wider): Likewise.
-       (mode_2xwider): Likewise.
-       (machine_mode): New class.
-       (mode_to_bytes): New function.
-       (mode_to_bits): Likewise.
-       (mode_to_precision): Likewise.
-       (mode_to_inner): Likewise.
-       (mode_to_unit_size): Likewise.
-       (mode_to_unit_precision): Likewise.
-       (mode_to_nunits): Likewise.
-       (GET_MODE_SIZE): Use mode_to_bytes.
-       (GET_MODE_BITSIZE): Use mode_to_bits.
-       (GET_MODE_PRECISION): Use mode_to_precision.
-       (GET_MODE_INNER): Use mode_to_inner.
-       (GET_MODE_UNIT_SIZE): Use mode_to_unit_size.
-       (GET_MODE_UNIT_PRECISION): Use mode_to_unit_precision.
-       (GET_MODE_NUNITS): Use mode_to_nunits.
-       * system.h (ALWAYS_INLINE): New macro.
-       * config/powerpcspe/powerpcspe-c.c
-       (altivec_resolve_overloaded_builtin): Use machine_mode instead of
-       int for arg1_mode and arg2_mode.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-09  Peter Bergner  <bergner@vnet.ibm.com>
  
-       * config/aarch64/aarch64-builtins.c (aarch64_simd_builtin_std_type):
-       Prefix mode names with E_ in case statements.
-       * config/aarch64/aarch64-elf.h (ASM_OUTPUT_ADDR_DIFF_ELT): Likewise.
-       * config/aarch64/aarch64.c (aarch64_split_simd_combine): Likewise.
-       (aarch64_split_simd_move): Likewise.
-       (aarch64_gen_storewb_pair): Likewise.
-       (aarch64_gen_loadwb_pair): Likewise.
-       (aarch64_gen_store_pair): Likewise.
-       (aarch64_gen_load_pair): Likewise.
-       (aarch64_get_condition_code_1): Likewise.
-       (aarch64_constant_pool_reload_icode): Likewise.
-       (get_rsqrte_type): Likewise.
-       (get_rsqrts_type): Likewise.
-       (get_recpe_type): Likewise.
-       (get_recps_type): Likewise.
-       (aarch64_gimplify_va_arg_expr): Likewise.
-       (aarch64_simd_container_mode): Likewise.
-       (aarch64_emit_load_exclusive): Likewise.
-       (aarch64_emit_store_exclusive): Likewise.
-       (aarch64_expand_compare_and_swap): Likewise.
-       (aarch64_gen_atomic_cas): Likewise.
-       (aarch64_emit_bic): Likewise.
-       (aarch64_emit_atomic_swap): Likewise.
-       (aarch64_emit_atomic_load_op): Likewise.
-       (aarch64_evpc_trn): Likewise.
-       (aarch64_evpc_uzp): Likewise.
-       (aarch64_evpc_zip): Likewise.
-       (aarch64_evpc_ext): Likewise.
-       (aarch64_evpc_rev): Likewise.
-       (aarch64_evpc_dup): Likewise.
-       (aarch64_gen_ccmp_first): Likewise.
-       (aarch64_gen_ccmp_next): Likewise.
-       * config/alpha/alpha.c (alpha_scalar_mode_supported_p): Likewise.
-       (alpha_emit_xfloating_libcall): Likewise.
-       (emit_insxl): Likewise.
-       (alpha_arg_type): Likewise.
-       * config/arc/arc.c (arc_vector_mode_supported_p): Likewise.
-       (arc_preferred_simd_mode): Likewise.
-       (arc_secondary_reload): Likewise.
-       (get_arc_condition_code): Likewise.
-       (arc_print_operand): Likewise.
-       (arc_legitimate_constant_p): Likewise.
-       * config/arc/arc.h (ASM_OUTPUT_ADDR_DIFF_ELT): Likewise.
-       * config/arc/arc.md (casesi_load): Likewise.
-       (casesi_compact_jump): Likewise.
-       * config/arc/predicates.md (proper_comparison_operator): Likewise.
-       (cc_use_register): Likewise.
-       * config/arm/aout.h (ASM_OUTPUT_ADDR_DIFF_ELT): Likewise.
-       * config/arm/arm-builtins.c (arm_simd_builtin_std_type): Likewise.
-       (arm_init_iwmmxt_builtins): Likewise.
-       * config/arm/arm.c (thumb1_size_rtx_costs): Likewise.
-       (neon_expand_vector_init): Likewise.
-       (arm_attr_length_move_neon): Likewise.
-       (maybe_get_arm_condition_code): Likewise.
-       (arm_emit_vector_const): Likewise.
-       (arm_preferred_simd_mode): Likewise.
-       (arm_output_iwmmxt_tinsr): Likewise.
-       (thumb1_output_casesi): Likewise.
-       (thumb2_output_casesi): Likewise.
-       (arm_emit_load_exclusive): Likewise.
-       (arm_emit_store_exclusive): Likewise.
-       (arm_expand_compare_and_swap): Likewise.
-       (arm_evpc_neon_vuzp): Likewise.
-       (arm_evpc_neon_vzip): Likewise.
-       (arm_evpc_neon_vrev): Likewise.
-       (arm_evpc_neon_vtrn): Likewise.
-       (arm_evpc_neon_vext): Likewise.
-       (arm_validize_comparison): Likewise.
-       * config/arm/neon.md (neon_vc<cmp_op><mode>): Likewise.
-       * config/avr/avr-c.c (avr_resolve_overloaded_builtin): Likewise.
-       * config/avr/avr.c (avr_rtx_costs_1): Likewise.
-       * config/c6x/c6x.c (c6x_vector_mode_supported_p): Likewise.
-       (c6x_preferred_simd_mode): Likewise.
-       * config/epiphany/epiphany.c (get_epiphany_condition_code): Likewise.
-       (epiphany_rtx_costs): Likewise.
-       * config/epiphany/predicates.md (proper_comparison_operator):
-       Likewise.
-       * config/frv/frv.c (condexec_memory_operand): Likewise.
-       (frv_emit_move): Likewise.
-       (output_move_single): Likewise.
-       (output_condmove_single): Likewise.
-       (frv_hard_regno_mode_ok): Likewise.
-       (frv_matching_accg_mode): Likewise.
-       * config/h8300/h8300.c (split_adds_subs): Likewise.
-       (h8300_rtx_costs): Likewise.
-       (h8300_print_operand): Likewise.
-       (compute_mov_length): Likewise.
-       (output_logical_op): Likewise.
-       (compute_logical_op_length): Likewise.
-       (compute_logical_op_cc): Likewise.
-       (h8300_shift_needs_scratch_p): Likewise.
-       (output_a_shift): Likewise.
-       (compute_a_shift_length): Likewise.
-       (compute_a_shift_cc): Likewise.
-       (expand_a_rotate): Likewise.
-       (output_a_rotate): Likewise.
-       * config/i386/i386.c (classify_argument): Likewise.
-       (function_arg_advance_32): Likewise.
-       (function_arg_32): Likewise.
-       (function_arg_64): Likewise.
-       (function_value_64): Likewise.
-       (ix86_gimplify_va_arg): Likewise.
-       (ix86_legitimate_constant_p): Likewise.
-       (put_condition_code): Likewise.
-       (split_double_mode): Likewise.
-       (ix86_avx256_split_vector_move_misalign): Likewise.
-       (ix86_expand_vector_logical_operator): Likewise.
-       (ix86_split_idivmod): Likewise.
-       (ix86_expand_adjust_ufix_to_sfix_si): Likewise.
-       (ix86_build_const_vector): Likewise.
-       (ix86_build_signbit_mask): Likewise.
-       (ix86_match_ccmode): Likewise.
-       (ix86_cc_modes_compatible): Likewise.
-       (ix86_expand_branch): Likewise.
-       (ix86_expand_sse_cmp): Likewise.
-       (ix86_expand_sse_movcc): Likewise.
-       (ix86_expand_int_sse_cmp): Likewise.
-       (ix86_expand_vec_perm_vpermi2): Likewise.
-       (ix86_expand_vec_perm): Likewise.
-       (ix86_expand_sse_unpack): Likewise.
-       (ix86_expand_int_addcc): Likewise.
-       (ix86_split_to_parts): Likewise.
-       (ix86_vectorize_builtin_gather): Likewise.
-       (ix86_vectorize_builtin_scatter): Likewise.
-       (avx_vpermilp_parallel): Likewise.
-       (inline_memory_move_cost): Likewise.
-       (ix86_tieable_integer_mode_p): Likewise.
-       (x86_maybe_negate_const_int): Likewise.
-       (ix86_expand_vector_init_duplicate): Likewise.
-       (ix86_expand_vector_init_one_nonzero): Likewise.
-       (ix86_expand_vector_init_one_var): Likewise.
-       (ix86_expand_vector_init_concat): Likewise.
-       (ix86_expand_vector_init_interleave): Likewise.
-       (ix86_expand_vector_init_general): Likewise.
-       (ix86_expand_vector_set): Likewise.
-       (ix86_expand_vector_extract): Likewise.
-       (emit_reduc_half): Likewise.
-       (ix86_emit_i387_round): Likewise.
-       (ix86_mangle_type): Likewise.
-       (ix86_expand_round_sse4): Likewise.
-       (expand_vec_perm_blend): Likewise.
-       (canonicalize_vector_int_perm): Likewise.
-       (ix86_expand_vec_one_operand_perm_avx512): Likewise.
-       (expand_vec_perm_1): Likewise.
-       (expand_vec_perm_interleave3): Likewise.
-       (expand_vec_perm_even_odd_pack): Likewise.
-       (expand_vec_perm_even_odd_1): Likewise.
-       (expand_vec_perm_broadcast_1): Likewise.
-       (ix86_vectorize_vec_perm_const_ok): Likewise.
-       (ix86_expand_vecop_qihi): Likewise.
-       (ix86_expand_mul_widen_hilo): Likewise.
-       (ix86_expand_sse2_abs): Likewise.
-       (ix86_expand_pextr): Likewise.
-       (ix86_expand_pinsr): Likewise.
-       (ix86_preferred_simd_mode): Likewise.
-       (ix86_simd_clone_compute_vecsize_and_simdlen): Likewise.
-       * config/i386/sse.md (*andnot<mode>3): Likewise.
-       (<mask_codefor><code><mode>3<mask_name>): Likewise.
-       (*<code><mode>3): Likewise.
-       * config/ia64/ia64.c (ia64_expand_vecint_compare): Likewise.
-       (ia64_expand_atomic_op): Likewise.
-       (ia64_arg_type): Likewise.
-       (ia64_mode_to_int): Likewise.
-       (ia64_scalar_mode_supported_p): Likewise.
-       (ia64_vector_mode_supported_p): Likewise.
-       (expand_vec_perm_broadcast): Likewise.
-       * config/iq2000/iq2000.c (iq2000_move_1word): Likewise.
-       (iq2000_function_arg_advance): Likewise.
-       (iq2000_function_arg): Likewise.
-       * config/m32c/m32c.c (m32c_preferred_reload_class): Likewise.
-       * config/m68k/m68k.c (output_dbcc_and_branch): Likewise.
-       (m68k_libcall_value): Likewise.
-       (m68k_function_value): Likewise.
-       (sched_attr_op_type): Likewise.
-       * config/mcore/mcore.c (mcore_output_move): Likewise.
-       * config/microblaze/microblaze.c (microblaze_function_arg_advance):
-       Likewise.
-       (microblaze_function_arg): Likewise.
-       * config/mips/mips.c (mips16_build_call_stub): Likewise.
-       (mips_print_operand): Likewise.
-       (mips_mode_ok_for_mov_fmt_p): Likewise.
-       (mips_vector_mode_supported_p): Likewise.
-       (mips_preferred_simd_mode): Likewise.
-       (mips_expand_vpc_loongson_even_odd): Likewise.
-       (mips_expand_vec_unpack): Likewise.
-       (mips_expand_vi_broadcast): Likewise.
-       (mips_expand_vector_init): Likewise.
-       (mips_expand_vec_reduc): Likewise.
-       (mips_expand_msa_cmp): Likewise.
-       * config/mips/mips.md (casesi_internal_mips16_<mode>): Likewise.
-       * config/mn10300/mn10300.c (mn10300_print_operand): Likewise.
-       (cc_flags_for_mode): Likewise.
-       * config/msp430/msp430.c (msp430_print_operand): Likewise.
-       * config/nds32/nds32-md-auxiliary.c (nds32_mem_format): Likewise.
-       (nds32_output_casesi_pc_relative): Likewise.
-       * config/nds32/nds32.h (ASM_OUTPUT_ADDR_DIFF_ELT): Likewise.
-       * config/nvptx/nvptx.c (nvptx_ptx_type_from_mode): Likewise.
-       (nvptx_gen_unpack): Likewise.
-       (nvptx_gen_pack): Likewise.
-       (nvptx_gen_shuffle): Likewise.
-       (nvptx_gen_wcast): Likewise.
-       (nvptx_preferred_simd_mode): Likewise.
-       * config/pa/pa.c (pa_secondary_reload): Likewise.
-       * config/pa/predicates.md (base14_operand): Likewise.
-       * config/powerpcspe/powerpcspe-c.c
-       (altivec_resolve_overloaded_builtin): Likewise.
-       * config/powerpcspe/powerpcspe.c (rs6000_setup_reg_addr_masks):
-       Likewise.
-       (rs6000_preferred_simd_mode): Likewise.
-       (output_vec_const_move): Likewise.
-       (rs6000_expand_vector_extract): Likewise.
-       (rs6000_split_vec_extract_var): Likewise.
-       (reg_offset_addressing_ok_p): Likewise.
-       (rs6000_legitimate_offset_address_p): Likewise.
-       (rs6000_legitimize_address): Likewise.
-       (rs6000_emit_set_const): Likewise.
-       (rs6000_const_vec): Likewise.
-       (rs6000_emit_move): Likewise.
-       (spe_build_register_parallel): Likewise.
-       (rs6000_darwin64_record_arg_recurse): Likewise.
-       (swap_selector_for_mode): Likewise.
-       (spe_init_builtins): Likewise.
-       (paired_init_builtins): Likewise.
-       (altivec_init_builtins): Likewise.
-       (do_load_for_compare): Likewise.
-       (rs6000_generate_compare): Likewise.
-       (rs6000_expand_float128_convert): Likewise.
-       (emit_load_locked): Likewise.
-       (emit_store_conditional): Likewise.
-       (rs6000_output_function_epilogue): Likewise.
-       (rs6000_handle_altivec_attribute): Likewise.
-       (rs6000_function_value): Likewise.
-       (emit_fusion_gpr_load): Likewise.
-       (emit_fusion_p9_load): Likewise.
-       (emit_fusion_p9_store): Likewise.
-       * config/powerpcspe/predicates.md (easy_fp_constant): Likewise.
-       (fusion_gpr_mem_load): Likewise.
-       (fusion_addis_mem_combo_load): Likewise.
-       (fusion_addis_mem_combo_store): Likewise.
-       * config/rs6000/predicates.md (easy_fp_constant): Likewise.
-       (fusion_gpr_mem_load): Likewise.
-       (fusion_addis_mem_combo_load): Likewise.
-       (fusion_addis_mem_combo_store): Likewise.
-       * config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin):
-       Likewise.
-       * config/rs6000/rs6000-string.c (do_load_for_compare): Likewise.
-       * config/rs6000/rs6000.c (rs6000_setup_reg_addr_masks): Likewise.
-       (rs6000_preferred_simd_mode): Likewise.
-       (output_vec_const_move): Likewise.
-       (rs6000_expand_vector_extract): Likewise.
-       (rs6000_split_vec_extract_var): Likewise.
-       (reg_offset_addressing_ok_p): Likewise.
-       (rs6000_legitimate_offset_address_p): Likewise.
-       (rs6000_legitimize_address): Likewise.
-       (rs6000_emit_set_const): Likewise.
-       (rs6000_const_vec): Likewise.
-       (rs6000_emit_move): Likewise.
-       (rs6000_darwin64_record_arg_recurse): Likewise.
-       (swap_selector_for_mode): Likewise.
-       (paired_init_builtins): Likewise.
-       (altivec_init_builtins): Likewise.
-       (rs6000_expand_float128_convert): Likewise.
-       (emit_load_locked): Likewise.
-       (emit_store_conditional): Likewise.
-       (rs6000_output_function_epilogue): Likewise.
-       (rs6000_handle_altivec_attribute): Likewise.
-       (rs6000_function_value): Likewise.
-       (emit_fusion_gpr_load): Likewise.
-       (emit_fusion_p9_load): Likewise.
-       (emit_fusion_p9_store): Likewise.
-       * config/rx/rx.c (rx_gen_move_template): Likewise.
-       (flags_from_mode): Likewise.
-       * config/s390/predicates.md (s390_alc_comparison): Likewise.
-       (s390_slb_comparison): Likewise.
-       * config/s390/s390.c (s390_handle_vectorbool_attribute): Likewise.
-       (s390_vector_mode_supported_p): Likewise.
-       (s390_cc_modes_compatible): Likewise.
-       (s390_match_ccmode_set): Likewise.
-       (s390_canonicalize_comparison): Likewise.
-       (s390_emit_compare_and_swap): Likewise.
-       (s390_branch_condition_mask): Likewise.
-       (s390_rtx_costs): Likewise.
-       (s390_secondary_reload): Likewise.
-       (__SECONDARY_RELOAD_CASE): Likewise.
-       (s390_expand_cs): Likewise.
-       (s390_preferred_simd_mode): Likewise.
-       * config/s390/vx-builtins.md (vec_packsu_u<mode>): Likewise.
-       * config/sh/sh.c (sh_print_operand): Likewise.
-       (dump_table): Likewise.
-       (sh_secondary_reload): Likewise.
-       * config/sh/sh.h (ASM_OUTPUT_ADDR_DIFF_ELT): Likewise.
-       * config/sh/sh.md (casesi_worker_1): Likewise.
-       (casesi_worker_2): Likewise.
-       * config/sparc/predicates.md (icc_comparison_operator): Likewise.
-       (fcc_comparison_operator): Likewise.
-       * config/sparc/sparc.c (sparc_expand_move): Likewise.
-       (emit_soft_tfmode_cvt): Likewise.
-       (sparc_preferred_simd_mode): Likewise.
-       (output_cbranch): Likewise.
-       (sparc_print_operand): Likewise.
-       (sparc_expand_vec_perm_bmask): Likewise.
-       (vector_init_bshuffle): Likewise.
-       * config/spu/spu.c (spu_scalar_mode_supported_p): Likewise.
-       (spu_vector_mode_supported_p): Likewise.
-       (spu_expand_insv): Likewise.
-       (spu_emit_branch_or_set): Likewise.
-       (spu_handle_vector_attribute): Likewise.
-       (spu_builtin_splats): Likewise.
-       (spu_builtin_extract): Likewise.
-       (spu_builtin_promote): Likewise.
-       (spu_expand_sign_extend): Likewise.
-       * config/tilegx/tilegx.c (tilegx_scalar_mode_supported_p): Likewise.
-       (tilegx_simd_int): Likewise.
-       * config/tilepro/tilepro.c (tilepro_scalar_mode_supported_p): Likewise.
-       (tilepro_simd_int): Likewise.
-       * config/v850/v850.c (const_double_split): Likewise.
-       (v850_print_operand): Likewise.
-       (ep_memory_offset): Likewise.
-       * config/vax/vax.c (vax_rtx_costs): Likewise.
-       (vax_output_int_move): Likewise.
-       (vax_output_int_add): Likewise.
-       (vax_output_int_subtract): Likewise.
-       * config/visium/predicates.md (visium_branch_operator): Likewise.
-       * config/visium/visium.c (rtx_ok_for_offset_p): Likewise.
-       (visium_print_operand_address): Likewise.
-       * config/visium/visium.h (ASM_OUTPUT_ADDR_DIFF_ELT): Likewise.
-       * config/xtensa/xtensa.c (xtensa_mem_offset): Likewise.
-       (xtensa_expand_conditional_branch): Likewise.
-       (xtensa_copy_incoming_a7): Likewise.
-       (xtensa_output_literal): Likewise.
-       * dfp.c (decimal_real_maxval): Likewise.
-       * targhooks.c (default_libgcc_floating_mode_supported_p): Likewise.
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR target/83926
+       * config/rs6000/vsx.md (vsx_mul_v2di): Handle generating a 64-bit
+       multiply in 32-bit mode.
+       (vsx_div_v2di): Handle generating a 64-bit signed divide in 32-bit mode.
+       (vsx_udiv_v2di): Handle generating a 64-bit unsigned divide in 32-bit
+       mode.
  
-       * genmodes.c (mode_size_inline): Add an E_ prefix to mode names.
-       (mode_nunits_inline): Likewise.
-       (mode_inner_inline): Likewise.
-       (mode_unit_size_inline): Likewise.
-       (mode_unit_precision_inline): Likewise.
-       (emit_insn_modes_h): Likewise.  Also emit a #define of the
-       unprefixed name.
-       (emit_mode_wider): Add an E_ prefix to mode names.
-       (emit_mode_complex): Likewise.
-       (emit_mode_inner): Likewise.
-       (emit_mode_adjustments): Likewise.
-       (emit_mode_int_n): Likewise.
-       * config/aarch64/aarch64-builtins.c (v8qi_UP, v4hi_UP, v4hf_UP)
-       (v2si_UP, v2sf_UP, v1df_UP, di_UP, df_UP, v16qi_UP, v8hi_UP, v8hf_UP)
-       (v4si_UP, v4sf_UP, v2di_UP, v2df_UP, ti_UP, oi_UP, ci_UP, xi_UP)
-       (si_UP, sf_UP, hi_UP, hf_UP, qi_UP): Likewise.
-       (CRC32_BUILTIN, ENTRY): Likewise.
-       * config/aarch64/aarch64.c (aarch64_push_regs): Likewise.
-       (aarch64_pop_regs): Likewise.
-       (aarch64_process_components): Likewise.
-       * config/alpha/alpha.c (alpha_emit_conditional_move): Likewise.
-       * config/arm/arm-builtins.c (v8qi_UP, v4hi_UP, v4hf_UP, v2si_UP)
-       (v2sf_UP, di_UP, v16qi_UP, v8hi_UP, v8hf_UP, v4si_UP, v4sf_UP)
-       (v2di_UP, ti_UP, ei_UP, oi_UP, hf_UP, si_UP, void_UP): Likewise.
-       * config/arm/arm.c (arm_init_libfuncs): Likewise.
-       * config/i386/i386-builtin-types.awk (ix86_builtin_type_vect_mode):
-       Likewise.
-       * config/i386/i386-builtin.def (pcmpestr): Likewise.
-       (pcmpistr): Likewise.
-       * config/microblaze/microblaze.c (double_memory_operand): Likewise.
-       * config/mmix/mmix.c (mmix_output_condition): Likewise.
-       * config/powerpcspe/powerpcspe.c (rs6000_init_hard_regno_mode_ok):
-       Likewise.
-       * config/rl78/rl78.c (mduc_regs): Likewise.
-       * config/rs6000/rs6000.c (rs6000_init_hard_regno_mode_ok): Likewise.
-       (htm_expand_builtin): Likewise.
-       * config/sh/sh.h (REGISTER_NATURAL_MODE): Likewise.
-       * config/sparc/sparc.c (emit_save_or_restore_regs): Likewise.
-       * config/xtensa/xtensa.c (print_operand): Likewise.
-       * expmed.h (NUM_MODE_PARTIAL_INT): Likewise.
-       (NUM_MODE_VECTOR_INT): Likewise.
-       * genoutput.c (null_operand): Likewise.
-       (output_operand_data): Likewise.
-       * genrecog.c (print_parameter_value): Likewise.
-       * lra.c (debug_operand_data): Likewise.
+ 2018-02-09  Sebastian Perta  <sebastian.perta@renesas.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
+       * config/rx/constraints.md (CALL_OP_SYMBOL_REF): Added new constraint
+       to allow or block "symbol_ref" depending on the value of TARGET_JSR.
+       * config/rx/rx.md (call_internal): Use CALL_OP_SYMBOL_REF.
+       * config/rx/rx.md (call_value_internal): Use CALL_OP_SYMBOL_REF.
  
-       * dwarf2out.c (scompare_loc_descriptor_wide)
-       (scompare_loc_descriptor_narrow): New functions, split out from...
-       (scompare_loc_descriptor): ...here.
-       * expmed.c (emit_store_flag_int): New function, split out from...
-       (emit_store_flag): ...here.
+ 2018-02-09  Pierre-Marie de Rodat  <derodat@adacore.com>
  
- 2017-08-30  Richard Biener  <rguenther@suse.de>
+       PR lto/84213
+       * dwarf2out.c (is_trivial_indirect_ref): New function.
+       (dwarf2out_late_global_decl): Do not generate a location
+       attribute for variables that have a non-trivial DECL_VALUE_EXPR
+       and that are not defined in the current unit.
  
-       * dwarf2out.c (dwarf2out_finish): Remove setting AT_pubnames.
-       (dwarf2out_early_finish): Move setting of AT_pubnames from
-       early debug output to early finish.
+ 2018-02-09  Eric Botcazou  <ebotcazou@adacore.com>
  
- 2017-08-30  Jozef Lawrynowicz  <jozef.l@somniumtech.com>
+       * optabs.c (prepare_cmp_insn): Try harder to emit a direct comparison
+       instead of a libcall for UNORDERED.
  
-       * gcc/config/msp430/msp430.h: Pass -mcode/data-region to the linker
-       and -mdata-region to the assembler.
+ 2018-02-09  Tamar Christina  <tamar.christina@arm.com>
  
- 2017-08-30  Richard Biener  <rguenther@suse.de>
+       PR target/82641
+       * config/arm/arm-c.c (arm_cpu_builtins): Un-define __ARM_FEATURE_LDREX,
+       __ARM_ARCH_PROFILE, __ARM_ARCH_ISA_THUMB, __ARM_FP and __ARM_NEON_FP.
  
-       * dwarf2out.c (add_dwarf_attr): Check we don't add duplicate
-       attributes.
-       (gen_subprogram_die): Add DW_AT_object_pointer only early.
-       (dwarf2out_early_global_decl): Only generate a DIE for the
-       abstract origin if it doesn't already exist or is a declaration DIE.
-       (resolve_addr): Do not add the linkage name twice when
-       generating a stub DIE for the DW_TAG_GNU_call_site target.
+ 2018-02-09  Andreas Krebbel  <krebbel@linux.vnet.ibm.com>
  
- 2017-08-30  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR target/84295
+       * config/s390/s390.c (s390_set_current_function): Invoke
+       s390_indirect_branch_settings also if fndecl didn't change.
  
-       * config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin):
-       Use machine_mode rather than int for arg1_mode.
+ 2018-02-09  Alexandre Oliva  <aoliva@redhat.com>
  
- 2017-08-29  Michael Meissner  <meissner@linux.vnet.ibm.com>
+       * config/rs6000/rs6000.md (blockage): Set length to zero.
  
-       PR target/82015
-       * config/rs6000/rs6000.c (rs6000_expand_binop_builtin): Ensure
-       that the second argument of the built-in functions to unpack
-       128-bit scalar types to 64-bit values is 0 or 1.  Change to use a
-       switch statement instead a lot of if statements.
-       * config/rs6000/rs6000.md (unpack<mode>, FMOVE128_VSX iterator):
-       Allow 64-bit values to be in Altivec registers as well as
-       traditional floating point registers.
-       (pack<mode>, FMOVE128_VSX iterator): Likewise.
+ 2018-02-09  Eric Botcazou  <ebotcazou@adacore.com>
  
- 2017-08-29  Alexander Monakov  <amonakov@ispras.ru>
+       * expr.c (optimize_bitfield_assignment_op): Remove obsolete assertion.
  
-       * ira-costs.c (record_address_regs): Handle both operands of PLUS for
-       MAX_REGS_PER_ADDRESS == 1.
+ 2018-02-09  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-08-29  Uros Bizjak  <ubizjak@gmail.com>
+       PR sanitizer/84285
+       * gcc.c (STATIC_LIBASAN_LIBS, STATIC_LIBTSAN_LIBS,
+       STATIC_LIBLSAN_LIBS, STATIC_LIBUBSAN_LIBS): Handle -static like
+       -static-lib*san.
  
-       * config/i386/i386.opt (flag_fentry): Do not init to -1.
-       * config/i386/i386.c (ix86_option_override_internal): Simplify
-       setting of opts->x_flag_entry.
+       PR debug/84252
+       * var-tracking.c (vt_add_function_parameter): Punt for non-onepart
+       PARALLEL incoming that failed vt_get_decl_and_offset check.
  
- 2017-08-29  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+       PR middle-end/84237
+       * output.h (bss_initializer_p): Add NAMED argument, defaulted to false.
+       * varasm.c (bss_initializer_p): Add NAMED argument, if true, ignore
+       TREE_READONLY bit.
+       (get_variable_section): For decls in named .bss* sections pass true as
+       second argument to bss_initializer_p.
+ 2018-02-09  Marek Polacek  <polacek@redhat.com>
            Jakub Jelinek  <jakub@redhat.com>
-           Richard Biener  <rguenther@suse.de>
  
-       PR tree-optimization/81503
-       * gimple-ssa-strength-reduction.c (replace_mult_candidate): Ensure
-       folded constant fits in the target type; reorder tests for clarity.
- 2017-08-29  Martin Liska  <mliska@suse.cz>
-       * passes.def: Include pass_lower_switch.
-       * stmt.c (dump_case_nodes): Remove and move to
-       tree-switch-conversion.
-       (case_values_threshold): Likewise.
-       (expand_switch_as_decision_tree_p): Likewise.
-       (emit_case_decision_tree): Likewise.
-       (expand_case): Likewise.
-       (balance_case_nodes): Likewise.
-       (node_has_low_bound): Likewise.
-       (node_has_high_bound): Likewise.
-       (node_is_bounded): Likewise.
-       (emit_case_nodes): Likewise.
-       (struct simple_case_node): New struct.
-       (add_case_node): Remove.
-       (emit_case_dispatch_table): Use vector instead of case_list.
-       (reset_out_edges_aux): Remove.
-       (compute_cases_per_edge): Likewise.
-       (expand_case): Build list of simple_case_node.
-       (expand_sjlj_dispatch_table): Use it.
-       * tree-switch-conversion.c (struct case_node): Moved from
-       stmt.c and adjusted.
-       (emit_case_nodes): Likewise.
-       (node_has_low_bound): Likewise.
-       (node_has_high_bound): Likewise.
-       (node_is_bounded): Likewise.
-       (case_values_threshold): Likewise.
-       (reset_out_edges_aux): Likewise.
-       (compute_cases_per_edge): Likewise.
-       (add_case_node): Likewise.
-       (dump_case_nodes): Likewise.
-       (balance_case_nodes): Likewise.
-       (expand_switch_as_decision_tree_p): Likewise.
-       (emit_jump): Likewise.
-       (emit_case_decision_tree): Likewise.
-       (try_switch_expansion): Likewise.
-       (do_jump_if_equal): Likewise.
-       (emit_cmp_and_jump_insns): Likewise.
-       (fix_phi_operands_for_edge): New function.
-       (record_phi_operand_mapping): Likewise.
-       (class pass_lower_switch): New pass.
-       (pass_lower_switch::execute): New function.
-       (make_pass_lower_switch): Likewise.
-       (conditional_probability):
-       * timevar.def: Add TV_TREE_SWITCH_LOWERING.
-       * tree-pass.h: Add make_pass_lower_switch.
- 2017-08-29  Jozef Lawrynowicz  <jozef.l@somniumtech.com>
-       PR target/80993
-       * gcc/config/msp430/msp430.c (msp430_attr): Mark interrupt
-       handlers as used.
- 2017-08-29  Richard Biener  <rguenther@suse.de>
-       * dwarf2out.c (add_dwarf_attr): When checking is enabled verify
-       we do not add a DW_AT_inline attribute twice.
-       (gen_subprogram_die): Remove code setting DW_AT_inline on
-       DECL_ABSTRACT_P nodes.
- 2017-08-29  Richard Sandiford  <richard.sandiford@linaro.org>
-       * gimplify.c (gimplify_call_expr): Copy the nothrow flag to
-       calls to internal functions.
-       (gimplify_modify_expr): Likewise.
-       * tree-call-cdce.c (use_internal_fn): Likewise.
-       * tree-ssa-math-opts.c (pass_cse_reciprocals::execute): Likewise.
-       (convert_to_divmod): Set the nothrow flag.
-       * tree-if-conv.c (predicate_mem_writes):  Likewise.
-       * tree-vect-stmts.c (vectorizable_mask_load_store): Likewise.
-       (vectorizable_call): Likewise.
-       (vectorizable_store): Likewise.
-       (vectorizable_load): Likewise.
-       * tree-vect-patterns.c (vect_recog_pow_pattern): Likewise.
-       (vect_recog_mask_conversion_pattern): Likewise.
+       PR c++/83659
+       * fold-const.c (fold_indirect_ref_1): Use VECTOR_TYPE_P macro.
+       Formatting fixes.  Verify first that tree_fits_poly_int64_p (op01).
+       Sync some changes from cxx_fold_indirect_ref.
  
- 2017-08-29  Martin Liska  <mliska@suse.cz>
-       PR other/39851
-       * gcc.c (driver_handle_option): Add new argument.
-       * opts-common.c (handle_option): Pass
-       target_option_override_hook.
-       * opts-global.c (lang_handle_option): Add new option.
-       (set_default_handlers):  Add new argument.
-       (decode_options): Likewise.
-       * opts.c (target_handle_option): Likewise.
-       (common_handle_option): Call target_option_override_hook.
-       * opts.h (struct cl_option_handler_func): Add hook for
-       target option override.
-       (struct cl_option_handlers): Likewise.
-       (set_default_handlers): Add new argument.
-       (decode_options): Likewise.
-       (common_handle_option): Likewise.
-       (target_handle_option): Likewise.
-       * toplev.c (toplev::main): Pass targetm.target_option.override
-       hook.
- 2017-08-29  Richard Biener  <rguenther@suse.de>
-       Dominik Infuehr <dominik.infuehr@theobroma-systems.com>
-       * tree-vect-slp.c (vect_bb_slp_scalar_cost): Properly confine
-       life to the active subtree.
- 2017-08-28  Jeff Law  <law@redhat.com>
-       * tree-ssa-dom.c (edge_info::record_simple_equiv): Call
-       derive_equivalences.
-       (derive_equivalences_from_bit_ior, record_temporary_equivalences):
-       Code moved into....
-       (edge_info::derive_equivalences): New private member function
-       * tree-ssa-dom.c (class edge_info): Changed from a struct
-       to a class.  Add ctor/dtor, methods and data members.
-       (edge_info::edge_info): Renamed from allocate_edge_info.
-       Initialize additional members.
-       (edge_info::~edge_info): New.
-       (free_dom_edge_info): Delete the edge info.
-       (record_edge_info): Use new class & associated member functions.
-       Tighten forms for testing for edge equivalences.
-       (record_temporary_equivalences): Iterate over the simple
-       equivalences rather than assuming there's only one per edge.
-       (cprop_into_successor_phis): Iterate over the simple
-       equivalences rather than assuming there's only one per edge.
-       (optimize_stmt): Use operand_equal_p rather than pointer
-       equality for mini-DSE code.
- 2017-08-28  Nathan Sidwell  <nathan@acm.org>
-       * gcc.c (execute): Fold SIGPIPE handling into switch
-       statement.  Adjust internal error message.
- 2017-08-28  Richard Biener  <rguenther@suse.de>
-       PR debug/81993
-       * dwarf2out.c (gen_remaining_tmpl_value_param_die_attributes):
-       Do nothing for removed DIEs.
- 2017-08-28  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/81977
-       * tree-ssa-sccvn.c (vn_reference_lookup_3): Fix look through
-       memcpy.
- 2017-08-28  Alexander Monakov  <amonakov@ispras.ru>
-       PR target/80640
-       * doc/md.texi (mem_thread_fence): Remove mention of mode.  Rewrite.
-       * optabs.c (expand_mem_thread_fence): Emit a compiler barrier when
-       using targetm.gen_mem_thread_fence.
- 2017-08-27  Uros Bizjak  <ubizjak@gmail.com>
-       PR target/81995
-       * config/i386/i386.md (*<btsc><mode>): Change operand 2
-       predicate to register_operand.  Reorder operands.
-       (*btr<mode>): Ditto.
-       (*<btsc><mode>_mask): Change operand 3 predicate to register_operand.
-       (*btr<mode>_mask): Ditto.
- 2017-08-25  Steven Munroe  <munroesj@gcc.gnu.org>
-       * config.gcc (powerpc*-*-*): Add xmmintrin.h and mm_malloc.h.
-       * config/rs6000/xmmintrin.h: New file.
-       * config/rs6000/x86intrin.h [__ALTIVEC__]: Include xmmintrin.h.
- 2017-08-25  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
-       PR target/81504
-       * config/rs6000/rs6000-p8swap.c (find_alignment_op): Add reference
-       parameter and_insn and return it.
-       (recombine_lvx_pattern): Insert a copy to ensure availability of
-       the base register of the copied masking operation at the point of
-       the instruction replacement.
-       (recombine_stvx_pattern): Likewise.
- 2017-08-25  Michael Meissner  <meissner@linux.vnet.ibm.com>
-       * config/rs6000/rs6000.opt (-mpower9-dform-scalar): Delete
-       undocumented switches.
-       (-mpower9-dform-vector): Likewise.
-       (-mpower9-dform): Likewise.
-       * config/rs6000/rs6000-c.c (rs6000_target_modify_macros): Update
-       comments to delete references to -mpower9-dform* switches.
-       * config/rs6000/predicates.md (vsx_quad_dform_memory_operand):
-       Delete reference to -mpower9-dform* switches, test for
-       -mpower9-vector instead.
-       * config/rs6000/rs6000-cpus.def (ISA_3_0_MASKS_SERVER): Likewise.
-       (OTHER_P9_VECTOR_MASKS): Likewise.
-       (POWERPC_MASKS): Likewise.
-       * config/rs6000/rs6000.c (rs6000_setup_reg_addr_masks): Change
-       tests against -mpower9-dform* to -mpower9-vector.  Delete code
-       that checked for -mpower9-dform* consistency with other options.
-       Add test for -mpower9-misc to enable other power9 switches.
-       (rs6000_init_hard_regno_mode_ok): Likewise.
-       (rs6000_option_override_internal): Likewise.
-       (rs6000_emit_prologue): Likewise.
-       (rs6000_emit_epilogue): Likewise.
-       (rs6000_opt_masks): Delete -mpower9-dform-{scalar,vector}.
-       (rs6000_disable_incompatiable_switches): Delete -mpower9-dform.
-       (emit_fusion_p9_load): Change tests for -mpower9-dform-scalar
-       -mpower9-vector.
-       (emit_fusion_p9_store): Likewise.
-       * config/rs6000/rs6000.h (TARGET_P9_DFORM_SCALAR): Delete
-       resetting these macros if the assembler does not support ISA 3.0
-       instructions.
-       (TARGET_P9_DFORM_VECTOR): Likewise.
-       * config/rs6000/rs6000.md (peepholes to optimize altivec memory):
-       Change to use -mpower9-vector instead of -mpower9-dform-scalar.
- 2017-08-25  Alan Modra  <amodra@gmail.com>
-       PR rtl-optimization/81747
-       * cse.c (cse_extended_basic_block): Don't attempt to record
-       equivalences for degenerate conditional branches that branch
-       to their fall-through.
- 2017-08-24  Martin Sebor  <msebor@redhat.com>
-       PR middle-end/81908
-       * gimple-fold.c (size_must_be_zero_p): New function.
-       (gimple_fold_builtin_memory_op): Call it.
- 2017-08-24  Steven Munroe  <munroesj@gcc.gnu.org>
-       * config/rs6000/mm_malloc.h: New file.
- 2017-08-24  Bin Cheng  <bin.cheng@arm.com>
-       PR tree-optimization/81913
-       * tree-ssa-loop-niter.c (number_of_iterations_cond): Skip niter
-       analysis when either IVs in condition can wrap.
- 2017-08-24  Uros Bizjak  <ubizjak@gmail.com>
-       * dwarf2out.c (MAX_ARTIFICIAL_LABEL_BYTES): Increase to 40.
-       * dwarf2cfi.c (MAX_ARTIFICIAL_LABEL_BYTES): Remove.
- 2017-08-24  Richard Biener  <rguenther@suse.de>
-       PR target/81921
-       * targhooks.c (default_target_can_inline_p): Properly
-       use target_option_default_node when no DECL_FUNCTION_SPECIFIC_TARGET
-       is present and always compare.
-       * config/i386/i386.c (ix86_valid_target_attribute_tree): Do not
-       infer -mfpmath=sse from TARGET_SSE_P.
-       (ix86_can_inline_p): Properly use target_option_default_node when
-       no DECL_FUNCTION_SPECIFIC_TARGET is present and always compare.
- 2017-08-24  Richard Biener  <rguenther@suse.de>
-       PR debug/81936
-       * dwarf2out.c (output_die): Handle flag_generate_offload like
-       flag_generate_lto.
-       (output_comp_unit): Likewise.
-       (gen_array_type_die): Likewise.
-       (dwarf2out_early_finish): Likewise.
-       (note_variable_value_in_expr): Likewise.
-       (dwarf2out_finish): Likewise.  Adjust assert.
-       * cgraphunit.c (symbol_table::compile): Move setting of
-       flag_generate_offload earlier ...
-       (symbol_table::finalize_compilation_unit): ... here, before
-       early debug finalization.
+ 2018-02-09  Alexandre Oliva  <aoliva@redhat.com>
  
- 2017-08-24  Richard Biener  <rguenther@suse.de>
+       * cfgexpand.c (expand_gimple_basic_block): Handle inline entry
+       markers.
+       * dwarf2out.c (dwarf2_debug_hooks): Enable inline_entry hook.
+       (BLOCK_INLINE_ENTRY_LABEL): New.
+       (dwarf2out_var_location): Disregard inline entry markers.
+       (inline_entry_data): New struct.
+       (inline_entry_data_hasher): New hashtable type.
+       (inline_entry_data_hasher::hash): New.
+       (inline_entry_data_hasher::equal): New.
+       (inline_entry_data_table): New variable.
+       (add_high_low_attributes): Add DW_AT_entry_pc and
+       DW_AT_GNU_entry_view attributes if a pending entry is found
+       in inline_entry_data_table.  Add old entry_pc attribute only
+       if debug nonbinding markers are disabled.
+       (gen_inlined_subroutine_die): Set BLOCK_DIE if nonbinding
+       markers are enabled.
+       (block_within_block_p, dwarf2out_inline_entry): New.
+       (dwarf2out_finish): Check that no entries remained in
+       inline_entry_data_table.
+       * final.c (reemit_insn_block_notes): Handle inline entry notes.
+       (final_scan_insn, notice_source_line): Likewise.
+       (rest_of_clean_state): Skip inline entry markers.
+       * gimple-pretty-print.c (dump_gimple_debug): Handle inline entry
+       markers.
+       * gimple.c (gimple_build_debug_inline_entry): New.
+       * gimple.h (enum gimple_debug_subcode): Add
+       GIMPLE_DEBUG_INLINE_ENTRY.
+       (gimple_build_debug_inline_entry): Declare.
+       (gimple_debug_inline_entry_p): New.
+       (gimple_debug_nonbind_marker_p): Adjust.
+       * insn-notes.def (INLINE_ENTRY): New.
+       * print-rtl.c (rtx_writer::print_rtx_operand_code_0): Handle
+       inline entry marker notes.
+       (print_insn): Likewise.
+       * rtl.h (NOTE_MARKER_P): Add INLINE_ENTRY support.
+       (INSN_DEBUG_MARKER_KIND): Likewise.
+       (GEN_RTX_DEBUG_MARKER_INLINE_ENTRY_PAT): New.
+       * tree-inline.c (expand_call_inline): Build and insert
+       debug_inline_entry stmt.
+       * tree-ssa-live.c (remove_unused_scope_block_p): Preserve
+       inline entry blocks early, if nonbind markers are enabled.
+       (dump_scope_block): Dump fragment info.
+       * var-tracking.c (reemit_marker_as_note): Handle inline entry note.
+       * doc/gimple.texi (gimple_debug_inline_entry_p): New.
+       (gimple_build_debug_inline_entry): New.
+       * doc/invoke.texi (gstatement-frontiers, gno-statement-frontiers):
+       Enable/disable inline entry points too.
+       * doc/rtl.texi (NOTE_INSN_INLINE_ENTRY): New.
+       (DEBUG_INSN): Describe inline entry markers.
+       * common.opt (gvariable-location-views): New.
+       (gvariable-location-views=incompat5): New.
+       * config.in: Rebuilt.
+       * configure: Rebuilt.
+       * configure.ac: Test assembler for view support.
+       * dwarf2asm.c (dw2_asm_output_symname_uleb128): New.
+       * dwarf2asm.h (dw2_asm_output_symname_uleb128): Declare.
+       * dwarf2out.c (var_loc_view): New typedef.
+       (struct dw_loc_list_struct): Add vl_symbol, vbegin, vend.
+       (dwarf2out_locviews_in_attribute): New.
+       (dwarf2out_locviews_in_loclist): New.
+       (dw_val_equal_p): Compare val_view_list of dw_val_class_view_lists.
+       (enum dw_line_info_opcode): Add LI_adv_address.
+       (struct dw_line_info_table): Add view.
+       (RESET_NEXT_VIEW, RESETTING_VIEW_P): New macros.
+       (DWARF2_ASM_VIEW_DEBUG_INFO): Define default.
+       (zero_view_p): New variable.
+       (ZERO_VIEW_P): New macro.
+       (output_asm_line_debug_info): New.
+       (struct var_loc_node): Add view.
+       (add_AT_view_list, AT_loc_list): New.
+       (add_var_loc_to_decl): Add view param.  Test it against last.
+       (new_loc_list): Add view params.  Record them.
+       (AT_loc_list_ptr): Handle loc and view lists.
+       (view_list_to_loc_list_val_node): New.
+       (print_dw_val): Handle dw_val_class_view_list.
+       (size_of_die): Likewise.
+       (value_format): Likewise.
+       (loc_list_has_views): New.
+       (gen_llsym): Set vl_symbol too.
+       (maybe_gen_llsym, skip_loc_list_entry): New.
+       (dwarf2out_maybe_output_loclist_view_pair): New.
+       (output_loc_list): Output view list or entries too.
+       (output_view_list_offset): New.
+       (output_die): Handle dw_val_class_view_list.
+       (output_dwarf_version): New.
+       (output_compilation_unit_header): Use it.
+       (output_skeleton_debug_sections): Likewise.
+       (output_rnglists, output_line_info): Likewise.
+       (output_pubnames, output_aranges): Update version comments.
+       (output_one_line_info_table): Output view numbers in asm comments.
+       (dw_loc_list): Determine current endview, pass it to new_loc_list.
+       Call maybe_gen_llsym.
+       (loc_list_from_tree_1): Adjust.
+       (add_AT_location_description): Create view list attribute if
+       needed, check it's absent otherwise.
+       (convert_cfa_to_fb_loc_list): Adjust.
+       (maybe_emit_file): Call output_asm_line_debug_info for test.
+       (dwarf2out_var_location): Reset views as needed.  Precompute
+       add_var_loc_to_decl args.  Call get_attr_min_length only if we have the
+       attribute.  Set view.
+       (new_line_info_table): Reset next view.
+       (set_cur_line_info_table): Call output_asm_line_debug_info for test.
+       (dwarf2out_source_line): Likewise.  Output view resets and labels to
+       the assembler, or select appropriate line info opcodes.
+       (prune_unused_types_walk_attribs): Handle dw_val_class_view_list.
+       (optimize_string_length): Catch it.  Adjust.
+       (resolve_addr): Copy vl_symbol along with ll_symbol.  Handle
+       dw_val_class_view_list, and remove it if no longer needed.
+       (hash_loc_list): Hash view numbers.
+       (loc_list_hasher::equal): Compare them.
+       (optimize_location_lists): Check whether a view list symbol is
+       needed, and whether the locview attribute is present, and
+       whether they match.  Remove the locview attribute if no longer
+       needed.
+       (index_location_lists): Call skip_loc_list_entry for test.
+       (dwarf2out_finish): Call output_asm_line_debug_info for test.
+       Use output_dwarf_version.
+       * dwarf2out.h (enum dw_val_class): Add dw_val_class_view_list.
+       (struct dw_val_node): Add val_view_list.
+       * final.c (SEEN_NEXT_VIEW): New.
+       (set_next_view_needed): New.
+       (clear_next_view_needed): New.
+       (maybe_output_next_view): New.
+       (final_start_function): Rename to...
+       (final_start_function_1): ... this.  Take pointer to FIRST,
+       add SEEN parameter.  Emit param bindings in the initial view.
+       (final_start_function): Reintroduce SEEN-less interface.
+       (final): Rename to...
+       (final_1): ... this.  Take SEEN parameter.  Output final pending
+       next view at the end.
+       (final): Reintroduce seen-less interface.
+       (final_scan_insn): Output pending next view before switching
+       sections or ending a block.  Mark the next view as needed when
+       outputting variable locations.  Notify debug backend of section
+       changes, and of location view changes.
+       (rest_of_handle_final): Adjust.
+       * toplev.c (process_options): Autodetect value for debug variable
+       location views option.  Warn on incompat5 without -gdwarf-5.
+       * doc/invoke.texi (gvariable-location-views): New.
+       (gvariable-location-views=incompat5): New.
+       (gno-variable-location-views): New.
+ 2018-02-08  David Malcolm  <dmalcolm@redhat.com>
+       PR tree-optimization/84136
+       * tree-cfg.c (find_taken_edge_computed_goto): Remove assertion
+       that the result of find_edge is non-NULL.
+ 2018-02-08  Sergey Shalnov  <sergey.shalnov@intel.com>
+       PR target/83008
+       * config/i386/x86-tune-costs.h (skylake_cost): Fix cost of
+       storing integer register in SImode.  Fix cost of 256 and 512
+       byte aligned SSE register store.
+ 2018-02-08  Sergey Shalnov  <sergey.shalnov@intel.com>
+       * config/i386/i386.c (ix86_multiplication_cost): Fix
+       multiplication cost for TARGET_AVX512DQ.
+ 2018-02-08  Marek Polacek  <polacek@redhat.com>
+       PR tree-optimization/84238
+       * tree-ssa-strlen.c (maybe_diag_stxncpy_trunc): Verify the result of
+       get_range_strlen.
+ 2018-02-08  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR tree-optimization/84265
+       * tree-vect-stmts.c (vectorizable_store): Don't treat
+       VMAT_CONTIGUOUS accesses as grouped.
+       (vectorizable_load): Likewise.
  
-       * config/i386/i386.c: Include symbol-summary.h, ipa-prop.h
-       and ipa-fnsummary.h.
-       (ix86_can_inline_p): When ix86_fpmath flags do not match
-       check whether the callee uses FP math at all.
+ 2018-02-08  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR tree-optimization/81635
+       * wide-int.h (wi::round_down_for_mask, wi::round_up_for_mask): Declare.
+       * wide-int.cc (wi::round_down_for_mask, wi::round_up_for_mask)
+       (test_round_for_mask): New functions.
+       (wide_int_cc_tests): Call test_round_for_mask.
+       * tree-vrp.h (intersect_range_with_nonzero_bits): Declare.
+       * tree-vrp.c (intersect_range_with_nonzero_bits): New function.
+       * tree-data-ref.c (split_constant_offset_1): Use it to refine the
+       range returned by get_range_info.
+ 2018-02-08  Jan Hubicka  <hubicka@ucw.cz>
+       PR ipa/81360
+       * cgraph.h (symtab_node::output_to_lto_symbol_table_p): Declare
+       * symtab.c: Include builtins.h
+       (symtab_node::output_to_lto_symbol_table_p): Move here
+       from lto-streamer-out.c:output_symbol_p.
+       * lto-streamer-out.c (write_symbol): Turn early exit to assert.
+       (output_symbol_p): Move all logic to symtab.c
+       (produce_symtab): Update.
+ 2018-02-08  Andreas Krebbel  <krebbel@linux.vnet.ibm.com>
+       * config/s390/s390-opts.h (enum indirect_branch): Define.
+       * config/s390/s390-protos.h (s390_return_addr_from_memory)
+       (s390_indirect_branch_via_thunk)
+       (s390_indirect_branch_via_inline_thunk): Add function prototypes.
+       (enum s390_indirect_branch_type): Define.
+       * config/s390/s390.c (struct s390_frame_layout, struct
+       machine_function): Remove.
+       (indirect_branch_prez10thunk_mask, indirect_branch_z10thunk_mask)
+       (indirect_branch_table_label_no, indirect_branch_table_name):
+       Define variables.
+       (INDIRECT_BRANCH_NUM_OPTIONS): Define macro.
+       (enum s390_indirect_branch_option): Define.
+       (s390_return_addr_from_memory): New function.
+       (s390_handle_string_attribute): New function.
+       (s390_attribute_table): Add new attribute handler.
+       (s390_execute_label): Handle UNSPEC_EXECUTE_JUMP patterns.
+       (s390_indirect_branch_via_thunk): New function.
+       (s390_indirect_branch_via_inline_thunk): New function.
+       (s390_function_ok_for_sibcall): When jumping via thunk disallow
+       sibling call optimization for non z10 compiles.
+       (s390_emit_call): Force indirect branch target to be a single
+       register.  Add r1 clobber for non-z10 compiles.
+       (s390_emit_epilogue): Emit return jump via return_use expander.
+       (s390_reorg): Handle JUMP_INSNs as execute targets.
+       (s390_option_override_internal): Perform validity checks for the
+       new command line options.
+       (s390_indirect_branch_attrvalue): New function.
+       (s390_indirect_branch_settings): New function.
+       (s390_set_current_function): Invoke s390_indirect_branch_settings.
+       (s390_output_indirect_thunk_function):  New function.
+       (s390_code_end): Implement target hook.
+       (s390_case_values_threshold): Implement target hook.
+       (TARGET_ASM_CODE_END, TARGET_CASE_VALUES_THRESHOLD): Define target
+       macros.
+       * config/s390/s390.h (struct s390_frame_layout)
+       (struct machine_function): Move here from s390.c.
+       (TARGET_INDIRECT_BRANCH_NOBP_RET)
+       (TARGET_INDIRECT_BRANCH_NOBP_JUMP)
+       (TARGET_INDIRECT_BRANCH_NOBP_JUMP_THUNK)
+       (TARGET_INDIRECT_BRANCH_NOBP_JUMP_INLINE_THUNK)
+       (TARGET_INDIRECT_BRANCH_NOBP_CALL)
+       (TARGET_DEFAULT_INDIRECT_BRANCH_TABLE)
+       (TARGET_INDIRECT_BRANCH_THUNK_NAME_EXRL)
+       (TARGET_INDIRECT_BRANCH_THUNK_NAME_EX)
+       (TARGET_INDIRECT_BRANCH_TABLE): Define macros.
+       * config/s390/s390.md (UNSPEC_EXECUTE_JUMP)
+       (INDIRECT_BRANCH_THUNK_REGNUM): Define constants.
+       (mnemonic attribute): Add values which aren't recognized
+       automatically.
+       ("*cjump_long", "*icjump_long", "*basr", "*basr_r"): Disable
+       pattern for branch conversion.  Fix mnemonic attribute.
+       ("*c<code>", "*sibcall_br", "*sibcall_value_br", "*return"): Emit
+       indirect branch via thunk if requested.
+       ("indirect_jump", "<code>"): Expand patterns for branch conversion.
+       ("*indirect_jump"): Disable for branch conversion using out of
+       line thunks.
+       ("indirect_jump_via_thunk<mode>_z10")
+       ("indirect_jump_via_thunk<mode>")
+       ("indirect_jump_via_inlinethunk<mode>_z10")
+       ("indirect_jump_via_inlinethunk<mode>", "*casesi_jump")
+       ("casesi_jump_via_thunk<mode>_z10", "casesi_jump_via_thunk<mode>")
+       ("casesi_jump_via_inlinethunk<mode>_z10")
+       ("casesi_jump_via_inlinethunk<mode>", "*basr_via_thunk<mode>_z10")
+       ("*basr_via_thunk<mode>", "*basr_r_via_thunk_z10")
+       ("*basr_r_via_thunk", "return<mode>_prez10"): New pattern.
+       ("*indirect2_jump"): Disable for branch conversion.
+       ("casesi_jump"): Turn into expander and expand patterns for branch
+       conversion.
+       ("return_use"): New expander.
+       ("*return"): Emit return via thunk and rename it to ...
+       ("*return<mode>"): ... this one.
+       * config/s390/s390.opt: Add new options and an enum for the
+       option values.
  
- 2017-08-24  Aldy Hernandez  <aldyh@redhat.com>
+ 2018-02-08  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       PR middle-end/81931
-       * tree-ssanames.c (get_nonzero_bits): Use element_precision
-       instead of TYPE_PRECISION.
+       * lra-constraints.c (match_reload): Unconditionally use
+       gen_lowpart_SUBREG, rather than selecting between that
+       and equivalent gen_rtx_SUBREG code.
  
- 2017-08-24  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-08  Richard Biener  <rguenther@suse.de>
  
-       * combine.c (make_extraction): Use subreg_offset_from_lsb.
- 2017-08-23  Daniel Santos  <daniel.santos@pobox.com>
-       * config/i386/i386.h (ix86_frame::stack_realign_allocate_offset):
-       Remove field.
-       (ix86_frame::stack_realign_allocate): New field.
-       (struct machine_frame_state): Modify comments.
-       (machine_frame_state::sp_realigned_fp_end): New field.
-       * config/i386/i386.c (ix86_compute_frame_layout): Rework stack frame
-       layout calculation.
-       (sp_valid_at): Add assertion to assure no attempt to access invalid
-       offset of a realigned stack.
-       (fp_valid_at): Likewise.
-       (choose_baseaddr): Modify comments.
-       (ix86_emit_outlined_ms2sysv_save): Adjust to changes in
-       ix86_expand_prologue.
-       (ix86_expand_prologue): Modify stack realignment and allocation.
-       (ix86_expand_epilogue): Modify comments.
-       * doc/sourcebuild.texi: Add documentation for target selectors avx2,
-       avx2_runtime, avx512f, and avx512f_runtime.
- 2017-08-23  Uros Bizjak  <ubizjak@gmail.com>
-       * config/i386/i386.opt: Remove unneeded Init(0) initializations.
-       (mstackrealign): Do not init to -1.
-       * config/i386/i386.c (ix86_option_override_internal):
-       Check opts_set, not opts when setting default value of
-       opts->x_ix86_force_align_arg_pointer.
+       PR tree-optimization/84233
+       * tree-ssa-phiprop.c (propagate_with_phi): Use separate
+       changed flag instead of boguously re-using phi_inserted.
  
- 2017-08-23  Richard Biener  <rguenther@suse.de>
+ 2018-02-08  Martin Jambor  <mjambor@suse.cz>
  
-       * function.c (fndecl_name): Use verbosity 1 (no arguments) for
-       lang_hooks.decl_printable_name.
-       * print-rtl-function.c (print_rtx_function): Likewise.
-       * tree-pretty-print.c (dump_function_header): Likewise.
+       * hsa-gen.c (get_symbol_for_decl): Set program allocation for
+       static local variables.
  
- 2017-08-23  Richard Biener  <rguenther@suse.de>
+ 2018-02-08  Richard Biener  <rguenther@suse.de>
  
-       PR lto/81940
-       * dwarf2out.c (dwarf2out_abstract_function): Handle LTO with
-       -g0 at compile-time.
+       PR tree-optimization/84278
+       * tree-vect-stmts.c (vectorizable_store): When looking for
+       smaller vector types to perform grouped strided loads/stores
+       make sure the mode is supported by the target.
+       (vectorizable_load): Likewise.
  
- 2017-08-23  Tamar Christina  <tamar.christina@arm.com>
+ 2018-02-08  Wilco Dijkstra  <wdijkstr@arm.com>
  
-       PR middle-end/19706
-       * doc/sourcebuild.texi (Other hardware attributes):
-       Document xorsign.
+       * config/aarch64/aarch64.c (aarch64_components_for_bb):
+       Increase LDP/STP opportunities by adding adjacent callee-saves.
  
- 2017-08-23  Tamar Christina  <tamar.christina@arm.com>
+ 2018-02-08  Wilco Dijkstra  <wdijkstr@arm.com>
  
-       PR middle-end/19706
-       * tree-ssa-math-opts.c (convert_expand_mult_copysign):
-       Fix single-use check.
+       PR rtl-optimization/84068
+       PR rtl-optimization/83459
+       * haifa-sched.c (rank_for_schedule): Fix SCHED_PRESSURE_MODEL sorting.
  
- 2017-08-23  Thomas Preud'homme  <thomas.preudhomme@arm.com>
+ 2018-02-08  Aldy Hernandez  <aldyh@redhat.com>
  
-       * gcc.c (execute): Only test for SIGKILL and SIGQUIT if available.
+       PR tree-optimization/84224
+       * gimple-ssa-warn-alloca.c (pass_walloca::execute): Remove assert.
+       * calls.c (gimple_alloca_call_p): Only return TRUE when we have
+       non-zero arguments.
  
- 2017-08-22  Daniel Santos  <daniel.santos@pobox.com>
+ 2018-02-07  Iain Sandoe  <iain@codesourcery.com>
  
-       * doc/install.texi: Modify to add more details on running selected
-       tests.
+       PR target/84113
+       * config/rs6000/altivec.md (*restore_world): Remove LR use.
+       * config/rs6000/predicates.md (restore_world_operation): Adjust op
+       count, remove one USE.
  
- 2017-08-22  Daniel Santos  <daniel.santos@pobox.com>
+ 2018-02-07  Michael Meissner  <meissner@linux.vnet.ibm.com>
  
-       * config/i386/i386.c (ix86_option_override_internal): Error when -mx32
-       is combined with -mabi=ms.
-       (ix86_function_type_abi): Limit errors for mixing -mx32 with attribute
-       ms_abi.
+       * doc/install.texi (Configuration): Document the
+       --with-long-double-format={ibm,ieee} PowerPC configuration
+       options.
  
- 2017-08-22  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+       PR target/84154
+       * config/rs6000/rs6000.md (fix_trunc<SFDF:mode><QHI:mode>2):
+       Convert from define_expand to be define_insn_and_split.  Rework
+       float/double/_Float128 conversions to QI/HI/SImode to work with
+       both ISA 2.07 (power8) or ISA 3.0 (power9).  Fix regression where
+       conversions to QI/HImode types did a store and then a load to
+       truncate the value.  For conversions to VSX registers, don't split
+       the insn, instead emit the code directly.  Use the code iterator
+       any_fix to combine signed and unsigned conversions.
+       (fix<uns>_trunc<SFDF:mode>si2_p8): Likewise.
+       (fixuns_trunc<SFDF:mode><QHI:mode>2): Likewise.
+       (fix_trunc<IEEE128:mode><QHI:mode>2): Likewise.
+       (fix<uns>_trunc<SFDF:mode><QHI:mode>2): Likewise.
+       (fix_<mode>di2_hw): Likewise.
+       (fixuns_<mode>di2_hw): Likewise.
+       (fix_<mode>si2_hw): Likewise.
+       (fixuns_<mode>si2_hw): Likewise.
+       (fix<uns>_<IEEE128:mode><SDI:mode>2_hw): Likewise.
+       (fix<uns>_trunc<IEEE128:mode><QHI:mode>2): Likewise.
+       (fctiw<u>z_<mode>_smallint): Rename fctiw<u>z_<mode>_smallint to
+       fix<uns>_trunc<SFDF:mode>si2_p8.
+       (fix_trunc<SFDF:mode><QHI:mode>2_internal): Delete, no longer
+       used.
+       (fixuns_trunc<SFDF:mode><QHI:mode>2_internal): Likewise.
+       (fix<uns>_<mode>_mem): Likewise.
+       (fctiw<u>z_<mode>_mem): Likewise.
+       (fix<uns>_<mode>_mem): Likewise.
+       (fix<uns>_trunc<SFDF:mode><QHSI:mode>2_mem): On ISA 3.0, prevent
+       the register allocator from doing a direct move to the GPRs to do
+       a store, and instead use the ISA 3.0 store byte/half-word from
+       vector register instruction.  For IEEE 128-bit floating point,
+       also optimize stores of 32-bit ints.
+       (fix<uns>_trunc<IEEE128:mode><QHSI:mode>2_mem): Likewise.
+ 2018-02-07  Alan Hayward  <alan.hayward@arm.com>
+       * genextract.c (push_pathstr_operand): New function to support
+       [a-zA-Z].
+       (walk_rtx): Call push_pathstr_operand.
+       (print_path): Support [a-zA-Z].
+ 2018-02-07  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/84037
+       * tree-vectorizer.h (struct _loop_vec_info): Add ivexpr_map member.
+       (cse_and_gimplify_to_preheader): Declare.
+       (vect_get_place_in_interleaving_chain): Likewise.
+       * tree-vect-loop.c (_loop_vec_info::_loop_vec_info): Initialize
+       ivexpr_map.
+       (_loop_vec_info::~_loop_vec_info): Delete it.
+       (cse_and_gimplify_to_preheader): New function.
+       * tree-vect-slp.c (vect_get_place_in_interleaving_chain): Export.
+       * tree-vect-stmts.c (vectorizable_store): CSE base and steps.
+       (vectorizable_load): Likewise.  For grouped stores always base
+       the IV on the first element.
+       * tree-vect-loop-manip.c (vect_loop_versioning): Unshare versioning
+       condition before gimplifying.
  
-       PR tree-optimization/81488
-       * gimple-ssa-strength-reduction (struct slsr_cand_d): Add visited
-       and cached_basis fields.
-       (MAX_SPREAD): New constant.
-       (alloc_cand_and_find_basis): Initialize new fields.
-       (clear_visited): New function.
-       (create_phi_basis_1): Rename from create_phi_basis, set visited
-       and cached_basis fields.
-       (create_phi_basis): New wrapper function.
-       (phi_add_costs_1): Rename from phi_add_costs, add spread
-       parameter, set visited field, short-circuit when limits reached.
-       (phi_add_costs): New wrapper function.
-       (record_phi_increments_1): Rename from record_phi_increments, set
-       visited field.
-       (record_phi_increments): New wrapper function.
-       (phi_incr_cost_1): Rename from phi_incr_cost, set visited field.
-       (phi_incr_cost): New wrapper function.
-       (all_phi_incrs_profitable_1): Rename from
-       all_phi_incrs_profitable, set visited field.
-       (all_phi_incrs_profitable): New wrapper function.
+ 2018-02-07  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-08-22  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       * tree-eh.c (operation_could_trap_helper_p): Ignore honor_trapv for
+       *DIV_EXPR and *MOD_EXPR.
  
-       * rtl.h (paradoxical_subreg_p): Define inline, and add a version
-       that takes the outer and inner modes.
-       * doc/rtl.texi: Use paradoxical_subreg_p instead of a GET_MODE_SIZE
-       comparison as the canonical test for a paradoxical subreg.
-       * combine.c (simplify_set): Use paradoxical_subreg_p.
-       (make_extraction): Likewise.
-       (force_to_mode): Likewise.
-       (rtx_equal_for_field_assignment_p): Likewise.
-       (gen_lowpart_for_combine): Likewise.
-       (simplify_comparison): Likewise.
-       * cse.c (equiv_constant): Likewise.
-       * expmed.c (store_bit_field_1): Likewise.
-       * final.c (alter_subreg): Likewise.
-       * fwprop.c (propagate_rtx): Likewise.
-       (forward_propagate_subreg): Likewise.
-       * ira-conflicts.c (ira_build_conflicts): Likewise.
-       * lower-subreg.c (simplify_gen_subreg_concatn): Likewise.
-       * lra-constraints.c (curr_insn_transform): Likewise.
-       (split_reg): Likewise.
-       * lra-eliminations.c (move_plus_up): Likewise.
-       (lra_eliminate_regs_1): Likewise.
-       * recog.c (general_operand): Likewise.
-       * ree.c (combine_reaching_defs): Likewise.
-       * reload.c (push_reload): Likewise.
-       (find_reloads): Likewise.
-       * reload1.c (elimination_effects): Likewise.
-       (compute_reload_subreg_offset): Likewise.
-       (choose_reload_regs): Likewise.
-       * rtlanal.c (subreg_lsb_1): Likewise.
-       * simplify-rtx.c (simplify_unary_operation_1): Likewise.
-       (simplify_subreg): Likewise.
-       * var-tracking.c (track_loc_p): Likewise.
-       * emit-rtl.c (byte_lowpart_offset): Likewise.
-       (paradoxical_subreg_p): Delete out-of-line definition.
- 2017-08-22  Jeff Law  <law@redhat.com>
-       PR tree-optimization/81741
-       PR tree-optimization/71947
-       * tree-ssa-dom.c: Include tree-inline.h.
-       (record_temporary_equivalences): Only record SSA_NAME = SSA_NAME
-       equivalences if one is more expensive to compute than the other.
-       * tree-ssa-scopedtables.h (class const_or_copies): Make
-       record_const_or_copy_raw method private.
-       (class avail_exprs_stack): New method simplify_binary_operation.
-       * tree-ssa-scopedtables.c (avail_exprs_stack::lookup_avail_expr): Call
-       avail_exprs_stack::simplify_binary_operation as needed.
-       (avail_exprs_stack::simplify_binary_operation): New function.
- 2017-08-22  Sebastian Huber  <sebastian.huber@embedded-brains.de>
-       * config.gcc (powerpc-*-rtems*): Add rs6000/linux64.opt.
-       * config/rs6000/rtems.h (ASM_PREFERRED_EH_DATA_FORMAT): New define.
-       (DOT_SYMBOLS): Likewise.
-       (MINIMAL_TOC_SECTION_ASM_OP): Likewise.
-       (RELOCATABLE_NEEDS_FIXUP): Likewise.
-       (RS6000_ABI_NAME): Likewise.
-       (TARGET_CMODEL): Likewise.
-       (TOC_SECTION_ASM_OP): Likewise.
-       (SET_CMODEL): New macro.
-       (SUBSUBTARGET_OVERRIDE_OPTIONS): Evaluate cmodel options.
- 2017-08-22  Richard Biener  <rguenther@suse.de>
-       * tree-inline.c (remap_type_1): Change asserts on TYPE_SIZE[_UNIT]
-       to allow for free-lang-data replacements similar to verify_type_variant.
- 2017-08-22  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-07  H.J. Lu  <hongjiu.lu@intel.com>
  
-       * config/aarch64/aarch64.md (casesi): Use DImode rather than
-       VOIDmode for the LABEL_REF.
+       PR target/84248
+       * config/i386/i386.c (ix86_option_override_internal): Mask out
+       the CF_SET bit when checking -fcf-protection.
  
- 2017-08-22  Richard Biener  <rguenther@suse.de>
+ 2018-02-07  Tom de Vries  <tom@codesourcery.com>
  
-       * tree-cfg.c (gimple_split_edge): Avoid reallocating target PHI nodes.
+       PR libgomp/84217
+       * omp-expand.c (expand_oacc_collapse_init): Ensure diff_type is large
+       enough.
  
- 2017-08-22  Richard Biener  <rguenther@suse.de>
+ 2018-02-07  Richard Biener  <rguenther@suse.de>
  
-       * common.opt (feliminate-dwarf2-dups): Ignore.
-       * doc/invoke.texi (feliminate-dwarf2-dups): Remove documentation.
-       * dwarf2out.c (push_new_compile_unit, pop_compile_unit,
-       same_die_p_wrap, compute_section_prefix,
-       is_symbol_die, assign_symbol_names, break_out_includes): Remove.
-       (comdat_symbol_id, comdat_symbol_number): Likewise.
-       (cu_hash_table_entry, cu_hash_table_entry_hasher, cu_hash_type):
-       Likewise.
-       (check_duplicate_cu, record_comdat_symbol_number): Likewise.
-       (output_die): Mark unreachable path unreachable.
-       (dwarf2out_start_source_file): Do not create DW_TAG_GNU_BINCL.
-       (dwarf2out_end_source_file): Do not create DW_TAG_GNU_EINCL.
-       (dwarf2out_init): Remove code handling flag_eliminate_dwarf2_dups.
-       (dwarf2out_early_finish): Likewise.
+       PR tree-optimization/84204
+       * tree-chrec.c (chrec_fold_plus_1): Remove size limiting in
+       this place.
  
- 2017-08-22  Aldy Hernandez  <aldyh@redhat.com>
+       PR tree-optimization/84205
+       * graphite-isl-ast-to-gimple.c (binary_op_to_tree): Also
+       special-case isl_ast_op_zdiv_r.
  
-       * wide-int.h (hwi_with_prec::hwi_with_prec): Sign extend.
+       PR tree-optimization/84223
+       * graphite-scop-detection.c (gather_bbs::before_dom_children):
+       Only add conditions from within the region.
+       (gather_bbs::after_dom_children): Adjust.
  
- 2017-08-22  Georg-Johann Lay  <avr@gjlay.de>
-       PR target/81910
-       * config/avr/avr.c (avr_handle_addr_attribute): Early return if
-       not VAR_P. Filter attribute warnings with OPT_Wattributes.
-       (avr_attribute_table) <io, io_low, address>: Initialize
-       .decl_required with true.
- 2017-08-21  Michael Meissner  <meissner@linux.vnet.ibm.com>
-       * config/rs6000/rs6000-cpus.def (-mvsx-scalar-float): Delete
-       undocumented debugging options.
-       (-mvsx-scalar-double): Likewise.
-       (-mallow-df-permute): Likewise.
-       (-mvectorize-builtins): Likewise.
-       * config/rs6000/rs6000.c (rs6000_init_hard_regno_mode_ok): Likewise.
-       (rs6000_builtin_vectorized_function): Likewise.
-       (rs6000_builtin_md_vectorized_function): Likewise.
-       (rs6000_opt_vars): Likewise.
- 2017-08-21  Uros Bizjak  <ubizjak@gmail.com>
-       PR target/46091
-       * config/i386/i386.md (*btsq_imm): Rename from *btsq.
-       (*btrq_imm): Rename from *btrq.
-       (*btcq_imm): Rename from *btcq.
-       (btsc): New code attribute.
-       (*<btsc><mode>): New insn pattern.
-       (*btr<mode>): Ditto.
-       (*<btsc><mode>_mask): New insn_and_split pattern.
-       (*btr<mode>_mask): Ditto.
- 2017-08-21  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-07  Georg-Johann Lay  <avr@gjlay.de>
  
-       * function.c (pad_below): Simplify padding calculation.
+       PR target/84209
+       * config/avr/avr.h (GENERAL_REGNO_P, GENERAL_REG_P): New macros.
+       * config/avr/avr.md: Only post-reload split REG-REG moves if
+       either register is GENERAL_REG_P.
  
- 2017-08-21  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-07  Jakub Jelinek  <jakub@redhat.com>
  
-       * target.def (function_prologue): Remove frame size argument.
-       (function_epilogue): Likewise.
-       * doc/tm.texi: Regenerate.
-       * final.c (final_start_function): Update call to function_prologue.
-       (final_end_function): Update call to function_epilogue.
-       (default_function_pro_epilogue): Remove frame size argument.
-       * output.h (default_function_pro_epilogue): Likewise.
-       * config/arm/arm.c (arm_output_function_epilogue): Likewise.
-       (arm_output_function_prologue): Likewise.
-       * config/frv/frv.c (frv_function_prologue): Likewise.
-       (frv_function_epilogue): Likewise.
-       * config/i386/i386.c (ix86_output_function_epilogue): Likewise.
-       * config/ia64/ia64.c (ia64_output_function_prologue): Likewise.
-       (ia64_output_function_epilogue): Likewise.
-       * config/m32r/m32r.c (m32r_output_function_prologue): Likewise.
-       (m32r_output_function_epilogue): Likewise.
-       * config/microblaze/microblaze.c (microblaze_function_prologue)
-       (microblaze_function_epilogue): Likewise.
-       * config/mips/mips.c (mips_output_function_prologue): Likewise.
-       (mips_output_function_epilogue): Likewise.
-       * config/mmix/mmix.c (mmix_target_asm_function_prologue): Likewise.
-       (mmix_target_asm_function_epilogue): Likewise.
-       * config/msp430/msp430.c (msp430_start_function): Likewise.
-       * config/nds32/nds32.c (nds32_asm_function_prologue): Likewise.
-       (nds32_asm_function_epilogue): Likewise.
-       * config/nios2/nios2.c (nios2_asm_function_prologue): Likewise.
-       * config/pa/pa.c (pa_output_function_prologue): Likewise.
-       (pa_output_function_epilogue): Likewise.
-       * config/powerpcspe/powerpcspe.c (rs6000_output_function_prologue)
-       (rs6000_output_function_epilogue): Likewise.
-       * config/rl78/rl78.c (rl78_start_function): Likewise.
-       * config/rs6000/rs6000.c (rs6000_output_function_prologue): Likewise.
-       (rs6000_output_function_epilogue): Likewise.
-       * config/rx/rx.c (rx_output_function_prologue): Likewise.
-       * config/sh/sh.c (sh_output_function_epilogue): Likewise.
-       * config/sparc/sparc.c (sparc_asm_function_prologue): Likewise.
-       (sparc_asm_function_epilogue): Likewise.
- 2017-08-21  Richard Sandiford  <richard.sandiford@linaro.org>
-       * tree.h (type_has_mode_precision_p): New function.
-       * convert.c (convert_to_integer_1): Use it.
-       * expr.c (expand_expr_real_2): Likewise.
-       (expand_expr_real_1): Likewise.
-       * fold-const.c (fold_single_bit_test_into_sign_test): Likewise.
-       * match.pd: Likewise.
-       * tree-ssa-forwprop.c (simplify_rotate): Likewise.
-       * tree-ssa-math-opts.c (convert_mult_to_fma): Likewise.
-       * tree-tailcall.c (process_assignment): Likewise.
-       * tree-vect-loop.c (vectorizable_reduction): Likewise.
-       * tree-vect-patterns.c (vect_recog_vector_vector_shift_pattern)
-       (vect_recog_mult_pattern, vect_recog_divmod_pattern): Likewise.
-       * tree-vect-stmts.c (vectorizable_conversion): Likewise.
-       (vectorizable_assignment): Likewise.
-       (vectorizable_shift): Likewise.
-       (vectorizable_operation): Likewise.
-       * tree-vrp.c (register_edge_assert_for_2): Likewise.
- 2017-08-21  Wilco Dijkstra  <wdijkstr@arm.com>
-       * match.pd: Add pow (C, x) simplification.
- 2017-08-21  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/81900
-       * tree-ssa-pre.c (compute_antic_aux): Properly compute changed
-       for blocks with abnormal predecessors.
-       (compute_antic): Do not set visited flag prematurely.
- 2017-08-21  Georg-Johann Lay  <avr@gjlay.de>
-       PR target/79883
-       * config/avr/avr.c (avr_set_current_function): Typo in diagnostic.
- 2017-08-21  Richard Sandiford  <richard.sandiford@linaro.org>
-       * stor-layout.h (vector_type_mode): Move to...
-       * tree.h (vector_type_mode): ...here.
-       * stor-layout.c (vector_type_mode): Move to...
-       * tree.c (vector_type_mode): ...here.  Include rtl.h and regs.h.
- 2017-08-21  Richard Biener  <rguenther@suse.de>
-       * debug.h (struct gcc_debug_hooks): Add die_ref_for_decl and
-       register_external_die hooks.
-       (debug_false_tree_charstarstar_uhwistar): Declare.
-       (debug_nothing_tree_charstar_uhwi): Likewise.
-       * debug.c (do_nothing_debug_hooks): Adjust.
-       (debug_false_tree_charstarstar_uhwistar): New do nothing.
-       (debug_nothing_tree_charstar_uhwi): Likewise.
-       * dbxout.c (dbx_debug_hooks): Adjust.
-       (xcoff_debug_hooks): Likewise.
-       * sdbout.c (sdb_debug_hooks): Likewise.
-       * vmsdbgout.c (vmsdbg_debug_hooks): Likewise.
-       * dwarf2out.c (macinfo_label_base): New global.
-       (dwarf2out_register_external_die): New function for the
-       register_external_die hook.
-       (dwarf2out_die_ref_for_decl): Likewise for die_ref_for_decl.
-       (dwarf2_debug_hooks): Use them.
-       (dwarf2_lineno_debug_hooks): Adjust.
-       (struct die_struct): Add with_offset flag.
-       (DEBUG_LTO_DWO_INFO_SECTION, DEBUG_LTO_INFO_SECTION,
-       DEBUG_LTO_DWO_ABBREV_SECTION, DEBUG_LTO_ABBREV_SECTION,
-       DEBUG_LTO_DWO_MACINFO_SECTION, DEBUG_LTO_MACINFO_SECTION,
-       DEBUG_LTO_DWO_MACRO_SECTION, DEBUG_LTO_MACRO_SECTION,
-       DEBUG_LTO_LINE_SECTION, DEBUG_LTO_DWO_STR_OFFSETS_SECTION,
-       DEBUG_LTO_STR_DWO_SECTION, DEBUG_STR_LTO_SECTION): New macros
-       defining section names for the early LTO debug variants.
-       (reset_indirect_string): New helper.
-       (add_AT_external_die_ref): Helper for dwarf2out_register_external_die.
-       (print_dw_val): Add support for offsetted symbol references.
-       (get_ultimate_context): Split out from is_cxx.
-       (is_cxx): Use get_ultimate_context.
-       (is_fortran): Add decl overload.
-       (compute_comp_unit_symbol): Split out worker from
-       compute_section_prefix.
-       (compute_section_prefix): Call compute_comp_unit_symbol and
-       set comdat_type_p here.
-       (output_die): Skip DIE symbol output for the LTO added one.
-       Handle DIE symbol references with offset.
-       (output_comp_unit): Guard section name mangling properly.
-       For LTO debug sections emit a symbol at the section beginning
-       which we use to refer to its DIEs.
-       (add_abstract_origin_attribute): For DIEs registered via
-       dwarf2out_register_external_die directly refer to the early
-       DIE rather than indirectly through the shadow one we created.
-       Remove obsolete call to dwarf2out_abstract_function for
-       non-function/block origins.
-       (gen_array_type_die): When generating early LTO debug do
-       not emit DW_AT_string_length.
-       (gen_formal_parameter_die): Do not re-create DIEs for PARM_DECLs
-       late when in LTO.  As suggested place a gcc_unreachable for
-       the DECL_ABSTRACT_P case.
-       (gen_subprogram_die): Avoid another specification DIE
-       for early built declarations/definitions for the late LTO case.
-       (gen_variable_die): Add type references for late duplicated VLA dies
-       when in late LTO.
-       (gen_inlined_subroutine_die): Do not call dwarf2out_abstract_function,
-       we have the abstract instance already.
-       (process_scope_var): Adjust decl DIE contexts in LTO which
-       first puts them in limbo.
-       (gen_decl_die): Do not generate type DIEs late apart from
-       types for VLAs or for decls we do not yet have a DIE.  Do not
-       call dwarf2out_abstract_function late.
-       (dwarf2out_early_global_decl): Make sure to create DIEs
-       for abstract instances of a decl first.
-       (dwarf2out_late_global_decl): Adjust comment.
-       (output_macinfo_op): With multiple macro sections use
-       macinfo_label_base to distinguish labels.
-       (output_macinfo): Likewise.  Update macinfo_label_base.
-       Pass in the line info label.
-       (note_variable_value_in_expr): When generating LTO resolve
-       all variable values here by generating DIEs as needed.
-       (init_sections_and_labels): Add early LTO debug flag parameter
-       and generate different sections and names if set.  Add generation
-       counter for the labels so we can have multiple of them.
-       (reset_dies): Helper to allow DIEs to be output multiple times.
-       (dwarf2out_finish): When outputting DIEs to the fat part of an
-       LTO object first reset DIEs.
-       (dwarf2out_early_finish): Output early DIEs when generating LTO.
-       (modified_type_die): Check for decl_ultimate_origin being self
-       before recursing.
-       (gen_type_die_with_usage): Likewise.
-       (gen_typedef_die): Allow decl_ultimate_origin being self.
-       (set_decl_abstract_flags): Remove.
-       (set_block_abstract_flags): Likewise.
-       (dwarf2out_abstract_function): Treat the early generated DIEs
-       as the abstract copy and only add DW_AT_inline and
-       DW_AT_artificial here and call set_decl_origin_self.
-       If the DIE has an abstract origin don't do anything.
-       * tree.c (free_lang_data): Build a dummy TRANSLATION_UNIT_DECL
-       if we have none yet (Go fails to build one, PR78628).
-       (variably_modified_type_p): Prevent endless recursion for Ada
-       cyclic pointer types.
-       * lto-streamer-in.c: Include debug.h.
-       (dref_queue): New global.
-       (lto_read_tree_1): Stream in DIE references.
-       (lto_input_tree): Register DIE references.
-       (input_function): Stream DECL_DEBUG_ARGS.
-       * lto-streamer-out.c: Include debug.h.
-       (lto_write_tree_1): Output DIE references.
-       (DFS::DFS_write_tree_body): Follow DECL_ABSTRACT_ORIGIN.
-       Force a TRANSLATION_UNIT_DECL DECL_CONTEXT for file-scope decls.
-       (output_function): Stream DECL_DEBUG_ARGS.
-       * tree-streamer-in.c (lto_input_ts_decl_common_tree_pointers):
-       Stream DECL_ABSTRACT_ORIGIN.
-       * tree-streamer-out.c (write_ts_decl_common_tree_pointers): Likewise.
-       (write_ts_decl_minimal_tree_pointers): Force a TRANSLATION_UNIT_DECL
-       DECL_CONTEXT for file-scope decls.
-       * lto-streamer.h (struct dref_entry): Declare.
-       (dref_queue): Likewise.
-       * cfgexpand.c (pass_expand::execute): Do not call the
-       outlining_inline_function hook here.
-       * lto-wrapper.c (debug_obj): New global.
-       (tool_cleanup): Unlink it if required.
-       (debug_objcopy): New function.
-       (run_gcc): Handle early debug sections in the IL files by
-       extracting them to separate files, partially linking them and
-       feeding the result back as result to the linker.
-       * config/darwin.h (DEBUG_LTO_INFO_SECTION, DEBUG_LTO_ABBREV_SECTION,
-       DEBUG_LTO_MACINFO_SECTION, DEBUG_LTO_LINE_SECTION,
-       DEBUG_STR_LTO_SECTION, DEBUG_LTO_MACRO_SECTION): Put early debug
-       sections into a separate segment.
-       * config/darwin.c (darwin_asm_named_section): Handle __GNU_DWARF_LTO
-       segments.
-       (darwin_asm_dwarf_section): Likewise.
-       (darwin_asm_output_dwarf_offset): Likewise.
-       * config/i386/i386.c (make_resolver_func): Set DECL_IGNORED_P.
- 2017-08-21  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+       PR tree-optimization/84235
+       * tree-ssa-scopedtables.c
+       (avail_exprs_stack::simplify_binary_operation): For MINUS_EXPR, punt
+       if the subtraction is performed in floating point type where NaNs are
+       honored.  For *DIV_EXPR, punt for ALL_FRACT_MODE_Ps where we can't
+       build 1.  Formatting fix.
  
-       * read-md.h (md_reader::record_potential_iterator_use): Replace
-       pointer argument with an rtx and an index.
-       * read-rtl.c (iterator_group::apply_iterator): Likewise.
-       (apply_mode_iterator): Likewise.
-       (apply_code_iterator): Likewise.
-       (apply_int_iterator): Likewise.
-       (apply_subst_iterator): Likewise.
-       (record_iterator_use): Likewise.
-       (record_attribute_use): Likewise.
-       (md_reader::record_potential_iterator_use): Likewise.  Update calls
-       to record_iterator_use and apply_iterator.
-       (iterator_use): Replace ptr with x and index.
-       (attribute_use): Likewise.
-       (apply_attribute_uses): Update calls to apply_iterator.
-       (apply_iterators): Likewise.  Update initialization of iterator_use.
-       (rtx_reader::read_rtx_code): Update calls to record_iterator_use
-       and record_potential_iterator_use.
-       (rtx_reader::read_rtx_operand): Likewise.
- 2017-08-21  Richard Sandiford  <richard.sandiford@linaro.org>
-           Alan Hayward  <alan.hayward@arm.com>
-           David Sherwood  <david.sherwood@arm.com>
+ 2018-02-06  Jakub Jelinek  <jakub@redhat.com>
  
-       * varasm.c (const_rtx_hash_1): Don't hash in the mode of a
-       CONST_WIDE_INT.
- 2017-08-21  Richard Biener  <rguenther@suse.de>
-       PR middle-end/81884
-       * tree-ssa-alias.c (stmt_kills_ref_p): Handle array accesses
-       at struct end conservatively when comparing common bases.
- 2017-08-21  Richard Biener  <rguenther@suse.de>
-       * tree-ssa-loop-im.c (struct lim_aux_data): Add ref index member.
-       (mem_ref_in_stmt): Remove.
-       (determine_max_movement): Use ref index to get at the reference.
-       (invariantness_dom_walker::before_dom_children): Deal with
-       lim data already initialized.
-       (gather_mem_refs_stmt): Initialize lim data and record ref index.
- 2017-08-19  Uros Bizjak  <ubizjak@gmail.com>
-       * config/i386/i386.h (OPTION_MASK_ISA_ROUND): Remove.
-       (TARGET_ISA_ROUND): Ditto.
-       (TARGET_ROUND): Ditto.
-       * config/i386/i386.c: Substitute TARGET_ROUND with TARGET_SSE4_1.
-       * config/i386/i386.md: Ditto.
-       * config/i386/sse.md: Ditto.
-       * config/i386/i386-builtin.def: Substitute OPTION_MASK_ISA_ROUND
-       with OPTION_MASK_ISA_SSE4_1.
- 2017-08-19  Uros Bizjak  <ubizjak@gmail.com>
-       PR target/81894
-       * doc/extend.texi (x86 Built-in Functions): Correct the name of
-       __builtin_ia32_lzcnt_u16.
- 2017-08-18  Peter Bergner  <bergner@vnet.ibm.com>
-       PR target/80210
-       * config/rs6000/rs6000.c (rs6000_activate_target_options): New function.
-       (rs6000_set_current_function): Rewrite function to use it.
- 2017-08-18  H.J. Lu  <hongjiu.lu@intel.com>
-       PR c/53037
-       * print-tree.c (print_node): Support DECL_WARN_IF_NOT_ALIGN
-       and TYPE_WARN_IF_NOT_ALIGN.
-       * stor-layout.c (do_type_align): Merge DECL_WARN_IF_NOT_ALIGN.
-       (handle_warn_if_not_align): New.
-       (place_union_field): Call handle_warn_if_not_align.
-       (place_field): Call handle_warn_if_not_align.
-       Copy TYPE_WARN_IF_NOT_ALIGN.
-       (finish_builtin_struct): Copy TYPE_WARN_IF_NOT_ALIGN.
-       (layout_type): Likewise.
-       * tree-core.h (tree_type_common): Add warn_if_not_align.  Set
-       spare to 18.
-       (tree_decl_common): Add warn_if_not_align.
-       * tree.c (build_range_type_1): Copy TYPE_WARN_IF_NOT_ALIGN.
-       * tree.h (TYPE_WARN_IF_NOT_ALIGN): New.
-       (SET_TYPE_WARN_IF_NOT_ALIGN): Likewise.
-       (DECL_WARN_IF_NOT_ALIGN): Likewise.
-       (SET_DECL_WARN_IF_NOT_ALIGN): Likewise.
-       * doc/extend.texi: Document warn_if_not_aligned attribute.
-       * doc/invoke.texi: Document -Wif-not-aligned and -Wpacked-not-aligned.
- 2017-08-17  Martin Liska  <mliska@suse.cz>
-       PR bootstrap/81864
-       * tree-loop-distribution.c (ddrs_table): Change type to pointer type.
-       (get_data_dependence): Use it as pointer type.
-       (distribute_loop): Likewise.
- 2017-08-17  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
-       * config/rs6000/altivec.md (UNSPEC_VMRGOW_DIRECT): New constant.
-       (p8_vmrgew_v4sf_direct): Generalize to p8_vmrgew_<mode>_direct.
-       (p8_vmrgow_<mode>_direct): New define_insn.
-       * config/rs6000/rs6000.c (altivec_expand_vec_perm_const): Properly
-       handle endianness for vmrgew and vmrgow permute patterns.
- 2017-08-17  Peter Bergner  <bergner@vnet.ibm.com>
-       * config/rs6000/altivec.md (VParity): Remove TARGET_VSX_TIMODE.
-       * config/rs6000/rs6000-cpus.def: Remove comment.
-       (ISA_2_7_MASKS_SERVER): Delete OPTION_MASK_VSX_TIMODE;
-       (POWERPC_MASKS): Likewise.
-       * config/rs6000/rs6000.c (rs6000_hard_regno_mode_ok): Remove unneeded
-       use of TARGET_VSX_TIMODE.
-       (rs6000_setup_reg_addr_masks): Change TARGET_VSX_TIMODE to TARGET_VSX.
-       (rs6000_init_hard_regno_mode_ok): Remove unneeded uses of
-       TARGET_VSX_TIMODE.  Change use of TARGET_VSX_TIMODE to TARGET_VSX.
-       (rs6000_option_override_internal): Remove dead code.
-       (rs6000_legitimize_address): Change TARGET_VSX_TIMODE to TARGET_VSX.
-       (rs6000_legitimize_reload_address): Likewise.
-       (rs6000_legitimate_address_p): Likewise.
-       (rs6000_opt_masks): Delete "vsx-timode".
-       (rs6000_disable_incompatible_switches): Remove mention of -mvsx-timode
-       from function comment.
-       * config/rs6000/rs6000.h (MASK_VSX_TIMODE): Delete.
-       * config/rs6000/rs6000.md (FMOVE128_GPR): Remove TARGET_VSX_TIMODE.
-       (V16QI, V8HI, V4SI, V4SF, V2DI, V2DF, V1TI): Remove useless empty
-       condition.
-       * config/rs6000/rs6000.opt (mvsx-timode): Replace with stub.
-       * config/rs6000/vector.md (VEC_IP): Remove TARGET_VSX_TIMODE.
-       * config/rs6000/vsx.md (VSX_LE_128): Likewise.
-       (VSX_TI): Likewise.
-       (VSX_M): Likewise.
-       (define_peephole2): Likewise.
- 2017-08-17  Martin Sebor  <msebor@redhat.com>
-       PR c/81859
-       * pretty-print.c (pp_format): Use strnlen in %.*s to avoid reading
-       past the end of an array.
-       (test_pp_format): Add test cases.
- 2017-08-17  Richard Sandiford  <richard.sandiford@linaro.org>
-       * internal-fn.def (CLRSB, CLZ, CTZ, FFS, PARITY, POPCOUNT): Add
-       missing ECF_NOTHROW flags.
- 2017-08-17  Peter Bergner  <bergner@vnet.ibm.com>
-       PR target/72804
-       * config/rs6000/vsx.md (*vsx_le_permute_<mode>): Add support for
-       operands residing in integer registers.
-       (*vsx_le_perm_load_<mode>): Likewise.
-       (*vsx_le_perm_store_<mode>): Likewise.
-       (define_peephole2): Add peepholes to optimize the above.
- 2017-08-17  Marek Polacek  <polacek@redhat.com>
-       PR middle-end/81814
-       * fold-const.c (operand_equal_for_comparison_p): Remove code that used
-       to mimic what shorten_compare did.  Change the return type to bool.
-       (fold_cond_expr_with_comparison): Update call to
-       operand_equal_for_comparison_p.
-       (fold_ternary_loc): Likewise.
- 2017-08-17  Jackson Woodruff  <jackson.woodruff@arm.com>
-       * aarch64-simd.md (mov<mode>): No longer force zero immediate into
-       register.
-       (*aarch64_simd_mov<mode>): Add new case for stp using zero immediate.
- 2017-08-17  Richard Biener  <rguenther@suse.de>
-       * tree-ssa-structalias.c (solve_graph): When propagating
-       to successors update the graphs succ edges and avoid duplicate work.
- 2017-08-17  Maxim Ostapenko  <m.ostapenko@samsung.com>
-       PR target/81861
-       * config/i386/i386.c (ix86_option_override_internal): Save target
-       specific options after ix86_stack_protector_guard_reg was changed.
- 2017-08-17  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/81827
-       * tree-ssa-structalias.c (struct variable_info): Add is_reg_var flag.
-       (new_var_info): Initialize it conservatively.
-       (get_call_vi): Mark register vars.
-       (new_scalar_tmp_constraint_exp): Likewise.
-       (handle_rhs_call): Likewise.
-       (handle_const_call): Likewise.
-       (create_function_info_for): Likewise.
-       (solve_constraints): Sort varinfos to separate register from
-       non-register vars to pack points-to solution bitmaps during
-       iteration.
- 2017-08-17  Marek Polacek  <polacek@redhat.com>
-       * gimplify.c (gimplify_adjust_omp_clauses): Compare with 0 instead of 1.
- 2017-08-17  Richard Biener  <rguenther@suse.de>
-       * tree-vrp.c (vrp_int_const_binop): Do not set *overflow_p
-       to true when overflow is undefined and we saturated the result.
- 2017-08-17  Alan Modra  <amodra@gmail.com>
-       PR target/80938
-       * config/rs6000/rs6000.c (rs6000_savres_strategy): Revert 2017-08-09.
-       Don't use store multiple if only one reg needs saving.
-       (interesting_frame_related_regno): New function.
-       (rs6000_frame_related): Don't emit frame info for regs that
-       don't need saving.
-       (rs6000_emit_epilogue): Likewise.
- 2017-08-16  Nathan Sidwell  <nathan@acm.org>
-       * tree-core.h (tree_type_non_common): Rename binfo to lang_1.
-       * tree.h (TYPE_BINFO): Use type_non_common.maxval.
-       (TYPE_LANG_SLOT_1): Use type_non_common.lang_1, for any type.
-       * tree.c (free_lang_data_in_type): Use else-if chain.  Always
-       clear TYPE_LANG_1.  Remove obsolete member-function stripping.
-       (find_decls_types_r): Comment about TYPE_MAX_VALUES_RAW.
-       (verify_type): Adjust for TYPE_BINFO move.
-       * lto-streamer-out.c (DFS::DFS_write_tree_body): No need to
-       process TYPE_BINFO directly.
-       (hash_tree): Likewise.
-       * tree-streamer-in.c (lto_input_ts_type_non_common_tree_pointers):
-       Likewise.
-       * tree-streamer-out.c (write_ts_type_non_common_tree_pointers):
-       Likewise.
+       PR target/84146
+       * config/i386/i386.c (rest_of_insert_endbranch): Only skip
+       NOTE_INSN_CALL_ARG_LOCATION after a call, not anything else,
+       and skip it regardless of bb boundaries.  Use CALL_P macro,
+       don't test INSN_P (insn) together with CALL_P or JUMP_P check
+       unnecessarily, formatting fix.
  
- 2017-08-16  David Malcolm  <dmalcolm@redhat.com>
+ 2018-02-06  Michael Collison  <michael.collison@arm.com>
  
-       * diagnostic-show-locus.c (colorizer::m_caret): Remove unused field.
+       * config/arm/thumb2.md:
+       (*thumb2_mov_negscc): Split only if TARGET_THUMB2 && !arm_restrict_it.
+       (*thumb_mov_notscc): Ditto.
  
- 2017-08-16  Uros Bizjak  <ubizjak@gmail.com>
+ 2018-02-06  Michael Meissner  <meissner@linux.vnet.ibm.com>
  
-       PR target/46091
-       * config/i386/i386.md (*anddi_1_btr): Change predicates of
-       operand 0 and operand 1 to nonimmediate_operand. Add "m" constraint.
-       Add ix86_binary_operator_ok to insn constraint.
-       (*iordi_1_bts): Ditto.
-       (*xordi_1_btc): Ditto.
-       (*btsq): Change predicate of operand 0 to nonimmediate_operand.
-       Update corresponding peephole2 pattern.
-       (*btrq): Ditto.
-       (*btcq): Ditto.
+       PR target/84154
+       * config/rs6000/rs6000.md (su code attribute): Use "u" for
+       unsigned_fix, not "s".
  
- 2017-08-16  Bin Cheng  <bin.cheng@arm.com>
+ 2018-02-06  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
  
-       PR tree-optimization/81832
-       * tree-ssa-loop-ch.c (should_duplicate_loop_header_p): Don't
-       copy loop header which has IFN_LOOP_DIST_ALIAS call.
+       * configure.ac (gcc_fn_eh_frame_ro): New function.
+       (gcc_cv_as_cfi_directive): Check both 32 and 64-bit assembler for
+       correct .eh_frame permissions.
+       * configure: Regenerate.
  
- 2017-08-16  Marek Polacek  <polacek@redhat.com>
+ 2018-02-06  Andrew Jenner  <andrew@codesourcery.com>
  
-       PR middle-end/81695
-       * fold-const.c (fold_indirect_ref_1): Restore original behavior
-       regarding size_zero_node.
+       * doc/invoke.texi: Add section for the PowerPC SPE backend. Remove
+       irrelevant options.
  
- 2017-08-16  Martin Liska  <mliska@suse.cz>
+ 2018-02-06  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
  
-       PR target/81753
-       * config.gcc: Respect previously set extra_objs in case
-       of darwin target.
+       * config/rs6000/rs6000.c (rs6000_option_override_internal):
+       Display warning message for -mno-speculate-indirect-jumps.
  
- 2017-08-16  Richard Sandiford  <richard.sandiford@linaro.org>
+ 2018-02-06  Andrew Jenner  <andrew@codesourcery.com>
  
-       PR tree-optimization/81835
-       * tree-vect-loop.c (vect_is_simple_reduction): Simply checks for
-       the phi SSA_NAME.  Check that the condition in a COND_EXPR does
-       not depend on the phi.
+       * config/powerpcspe/powerpcspe.opt: (msimple-fpu, mfpu) Add
+       Undocumented.
+       * config/powerpcspe/sysv4.opt (mbit-align): Likewise.
  
- 2017-08-16  Alan Modra  <amodra@gmail.com>
+ 2018-02-06  Aldy Hernandez  <aldyh@redhat.com>
  
-       * config/rs6000/rs6000.c (rs6000_init_hard_regno_mode_ok): Delete
-       dead code.
+       PR tree-optimization/84225
+       * tree-eh.c (find_trapping_overflow): Only call
+       operation_no_trapping_overflow when ANY_INTEGRAL_TYPE_P.
  
- 2017-08-16  Alan Modra  <amodra@gmail.com>
+ 2018-02-06  Igor Tsimbalist  <igor.v.tsimbalist@intel.com>
  
-       * config/rs6000/rs6000.c (rs6000_reg_live_or_pic_offset_p): Merge..
-       (save_reg_p): ..into this.  Update all callers.
-       (first_reg_to_save): Simplify.
+       PR target/84145
+       * config/i386/i386.c: Reimplement the check of possible options
+       -mibt/-mshstk combination. Change error messages.
+       * doc/invoke.texi: Fix a typo: remove extra '='.
  
- 2017-08-16  Alan Modra  <amodra@gmail.com>
+ 2018-02-06  Marek Polacek  <polacek@redhat.com>
  
-       * config/rs6000/rs6000.c (rs6000_savres_strategy): Don't restore
-       fixed regs.
+       PR tree-optimization/84228
+       * tree-ssa-strlen.c (maybe_diag_stxncpy_trunc): Skip debug statements.
  
- 2017-08-15  Joseph Myers  <joseph@codesourcery.com>
+ 2018-02-06  Tamar Christina  <tamar.christina@arm.com>
  
-       PR target/78460
-       PR target/67712
-       * config/sh/sh-mem.cc (sh_expand_cmpnstr): Only unroll for
-       constant count if that count is less than 32.
+       PR target/82641
+       * config/arm/arm.c (arm_print_asm_arch_directives): Record already
+       emitted arch directives.
+       * config/arm/arm-c.c (arm_cpu_builtins): Undefine __ARM_ARCH and
+       __ARM_FEATURE_COPROC before changing architectures.
  
- 2017-08-15  Nathan Sidwell  <nathan@acm.org>
+ 2018-02-06  Richard Biener  <rguenther@suse.de>
  
-       * gcc.c (execute): Emit friendlier message if inferior is killed
-       by an external cause.
+       * config/i386/i386.c (print_reg): Fix typo.
+       (ix86_loop_unroll_adjust): Do not unroll beyond the original nunroll.
  
- 2017-08-15  Richard Biener  <rguenther@suse.de>
+ 2018-02-06  Eric Botcazou  <ebotcazou@adacore.com>
  
-       PR tree-optimization/81790
-       * tree-ssa-sccvn.c (vn_lookup_simplify_result): Handle both
-       CONSTRUCTORs from simplifying and VN.
+       * configure: Regenerate.
  
- 2017-08-14  Martin Sebor  <msebor@redhat.com>
+ 2018-02-05  Martin Sebor  <msebor@redhat.com>
  
-       * builtin-attrs.def: Add comments.
+       PR tree-optimization/83369
+       * tree-ssa-ccp.c (pass_post_ipa_warn::execute): Use %G to print
+       inlining context.
  
- 2017-08-14  Martin Sebor  <msebor@redhat.com>
+ 2018-02-05  Martin Liska  <mliska@suse.cz>
  
-       PR c/81117
-       * doc/extend.texi (attribute nonstring): Document new attribute.
+       * doc/invoke.texi: Cherry-pick upstream r323995.
  
- 2017-08-14  Martin Sebor  <msebor@redhat.com>
+ 2018-02-05  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       PR c/81117
-       * tree-diagnostic.c (default_tree_printer): Handle %G.
-       * gimple-pretty-print.h (percent_G_format): Declare new function.
-       * gimple-pretty-print.c (percent_G_format): Define.
-       * tree-pretty-print.c (percent_K_format): Add argument.
+       * ira.c (ira_init_register_move_cost): Adjust comment.
  
- 2017-08-14  Martin Sebor  <msebor@redhat.com>
+ 2018-02-05  Martin Liska  <mliska@suse.cz>
  
-       PR translation/79998
-       * gimple-ssa-sprintf.c (pass_sprintf_length::handle_gimple_call):
-       Remove a stray space.
+       PR gcov-profile/84137
+       * doc/gcov.texi: Fix typo in documentation.
  
- 2017-08-14  Uros Bizjak  <ubizjak@gmail.com>
+ 2018-02-05  Martin Liska  <mliska@suse.cz>
  
-       PR target/46091
-       * config/i386/i386.md (*anddi_1_btr): New insn_and_split pattern.
-       (*iordi_1_bts): Ditto.
-       (*xordi_1_btc): Ditto.
+       PR gcov-profile/83879
+       * doc/gcov.texi: Document necessity of --dynamic-list-data when
+       using dlopen functionality.
  
- 2017-08-14  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+ 2018-02-05  Olga Makhotina  <olga.makhotina@intel.com>
  
-       PR target/79845
-       * config/rs6000/linux64.h (INVALID_64BIT): Use quoted strings.
-       * config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin):
-       Likewise.
-       * config/rs6000/rs6000.c (rs6000_init_hard_regno_mode_ok): Use
-       quoted strings, and make more translator-friendly.
-       (darwin_rs6000_override_options): Likewise.
-       (rs6000_option_override_internal): Likewise.
-       (rs6000_return_in_memory): Fix overlong line.
-       (init_cumulative_args): Use quoted strings, and make more
-       translator-friendly.
-       (rs6000_pass_by_reference): Fix overlong line.
-       (def_builtin): Use quoted strings.
-       (altivec_expand_predicate_builtin): Use quoted strings, and make
-       more translator-friendly.
-       (htm_expand_builtin): Use quoted strings.
-       (cpu_expand_builtin): Use quoted strings, and make more
-       translator-friendly.
-       (altivec_expand_builtin): Likewise.
-       (paired_expand_predicate_builtin): Likewise.
-       (rs6000_invalid_builtin): Likewise.
-       (builtin_function_type): Use quoted strings.
-       (rs6000_expand_split_stack_prologue): Use quoted strings, and make
-       more translator-friendly.
-       (rs6000_trampoline_init): Likewise.
-       (rs6000_handle_altivec_attribute): Likewise.
-       (rs6000_inner_target_options): Use quoted strings.
-       (rs6000_disable_incompatible_switches): Likewise.
-       * config/rs6000/sysv4.h (SUBTARGET_OVERRIDE_OPTIONS): Use quoted
-       strings, and make more translator-friendly.
-       (SUBSUBTARGET_OVERRIDE_OPTIONS): Use quoted strings.
- 2017-08-14  Bin Cheng  <bin.cheng@arm.com>
-       PR tree-optimization/81799
-       * tree-loop-distribution.c (version_loop_by_alias_check): Force
-       cond_expr to simple gimple operand.
- 2017-08-14  Wilco Dijkstra  <wdijkstr@arm.com>
-       PR middle-end/46932
-       * doc/sourcebuild.texi (autoincdec): Add autoincdec description.
- 2017-08-14  Georg-Johann Lay  <avr@gjlay.de>
-       PR target/81754
-       PR target/81268
-       * config/avr/avr.opt (mgas-isr-prologues): New Var avr_gasisr_prologues.
-       * config/avr/avr.md (gasisr, *gasisr): Use it instead of
-       TARGET_GASISR_PROLOGUES.
-       * config/avr/avr.c (avr_option_override): Same.
-       (avr_pass_pre_proep::execute): Same.
- 2017-08-13  H.J. Lu  <hongjiu.lu@intel.com>
-       PR target/81820
-       * config/i386/i386.c (ix86_finalize_stack_frame_flags): Replace
-       frame pointer with stack pointer - UNITS_PER_WORD in debug insns.
- 2017-08-13  Uros Bizjak  <ubizjak@gmail.com>
-       * config/i386/i386.md (*load_tp_<mode>): Redefine as
-       define_insn_and_split.  Split to a memory load from 0 in
-       DEFAULT_TLS_SEG_REG address space.  Merge with *load_tp_x32
-       using PTR mode iterator.
-       (*load_tp_x32_zext"): Redefine as define_insn_and_split.
-       Split to a memory load from 0 in DEFAULT_TLS_SEG_REG address space.
-       (*add_tp_<mode>): Redefine as define_insn_and_split.
-       Split to an add with a memory load from 0 in DEFAULT_TLS_SEG_REG
-       address space.  Merge with *add_tp_x32 using PTR mode iterator.
-       (*add_tp_x32_zext"): Redefine as define_insn_and_split.
-       Split to an add with a  memory load from 0 in
-       DEFAULT_TLS_SEG_REG address space.
- 2017-08-12  Andrew Pinski  <apinski@cavium.com>
-       * config/aarch64/aarch64-option-extensions.def (rdma):
-       Fix feature string to what Linux prints out in /proc/cpuinfo.
- 2017-08-12  Pierre-Marie de Rodat  <derodat@adacore.com>
-       PR ada/79542
-       * dwarf2out.c (modified_type_die): For C typedef types that have
-       an ultimate origin, process the ultimate origin instead of the
-       input type.
-       (gen_typedef_die): Assert that input DECLs have no ultimate
-       origin.
-       (gen_type_die_with_usage): For typedef variants that have an
-       ultimate origin, just call gen_decl_die on the original DECL.
-       (process_scope_var): Avoid creating DIEs for local typedefs and
-       concrete static variables.
+       * config/i386/avx512dqintrin.h (_mm_mask_range_sd, _mm_maskz_range_sd,
+       _mm_mask_range_round_sd, _mm_maskz_range_round_sd, _mm_mask_range_ss,
+       _mm_maskz_range_ss, _mm_mask_range_round_ss,
+       _mm_maskz_range_round_ss): New intrinsics.
+       (__builtin_ia32_rangesd128_round)
+       (__builtin_ia32_rangess128_round): Remove.
+       (__builtin_ia32_rangesd128_mask_round,
+       __builtin_ia32_rangess128_mask_round): New builtins.
+       * config/i386/i386-builtin.def (__builtin_ia32_rangesd128_round,
+       __builtin_ia32_rangess128_round): Remove.
+       (__builtin_ia32_rangesd128_mask_round,
+       __builtin_ia32_rangess128_mask_round): New builtins.
+       * config/i386/sse.md (ranges<mode><round_saeonly_name>): Renamed to ...
+       (ranges<mode><mask_scalar_name><round_saeonly_scalar_name>): ... this.
+       ((match_operand:VF_128 2 "<round_saeonly_nimm_predicate>"
+       "<round_saeonly_constraint>")): Changed to ...
+       ((match_operand:VF_128 2 "<round_saeonly_scalar_nimm_predicate>"
+       "<round_saeonly_scalar_constraint>")): ... this.
+       ("vrange<ssescalarmodesuffix>\t{%3, <round_saeonly_op4>%2, %1, %0|
+       %0, %1, %2<round_saeonly_op4>, %3}"): Changed to ...
+       ("vrange<ssescalarmodesuffix>\t{%3, <round_saeonly_scalar_mask_op4>%2,
+       %1, %0<mask_scalar_operand4>|%0<mask_scalar_operand4>, %1,
+       %2<round_saeonly_scalar_mask_op4>, %3}"): ... this.
  
- 2017-08-12  Alan Modra  <amodra@gmail.com>
+ 2018-02-02  Andrew Jenner  <andrew@codesourcery.com>
  
-       PR target/81170
-       PR target/81295
-       * config/rs6000/sysv4.h (STARTFILE_LINUX_SPEC): Upgrade to
-       match gnu-user.h startfile.
-       (ENDFILE_LINUX_SPEC): Similarly.
+       * config/powerpcspe/powerpcspe.opt: Add Undocumented to irrelevant
+       options.
+       * config/powerpcspe/powerpcspe-tables.opt (rs6000_cpu_opt_value):
+       Remove all values except native, 8540 and 8548.
+ 2018-02-02  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/i386/i386.c (ix86_output_function_return): Pass
+       INVALID_REGNUM, instead of -1, as invalid register number to
+       indirect_thunk_name and output_indirect_thunk.
+ 2018-02-02  Julia Koval  <julia.koval@intel.com>
+       * config.gcc: Add -march=icelake.
+       * config/i386/driver-i386.c (host_detect_local_cpu): Detect icelake.
+       * config/i386/i386-c.c (ix86_target_macros_internal): Handle icelake.
+       * config/i386/i386.c (processor_costs): Add m_ICELAKE.
+       (PTA_ICELAKE, PTA_AVX512VNNI, PTA_GFNI, PTA_VAES, PTA_AVX512VBMI2,
+       PTA_VPCLMULQDQ, PTA_RDPID, PTA_AVX512BITALG): New.
+       (processor_target_table): Add icelake.
+       (ix86_option_override_internal): Handle new PTAs.
+       (get_builtin_code_for_version): Handle icelake.
+       (M_INTEL_COREI7_ICELAKE): New.
+       (fold_builtin_cpu): Handle icelake.
+       * config/i386/i386.h (TARGET_ICELAKE, PROCESSOR_ICELAKE): New.
+       * doc/invoke.texi: Add -march=icelake.
+ 2018-02-02  Julia Koval  <julia.koval@intel.com>
+       * config/i386/i386.c (ix86_option_override_internal): Change flags type
+       to wide_int_bitmask.
+       * wide-int-bitmask.h: New.
+ 2018-02-02  Igor Tsimbalist  <igor.v.tsimbalist@intel.com>
+       PR target/84066
+       * config/i386/i386.md: Replace Pmode with word_mode in
+       builtin_setjmp_setup and builtin_longjmp to support x32.
+ 2018-02-01  Peter Bergner  <bergner@vnet.ibm.com>
+       PR target/56010
+       PR target/83743
+       * config/rs6000/driver-rs6000.c: #include "diagnostic.h".
+       #include "opts.h".
+       (rs6000_supported_cpu_names): New static variable.
+       (linux_cpu_translation_table): Likewise.
+       (elf_platform) <cpu>: Define new static variable and use it.
+       Translate kernel AT_PLATFORM name to canonical name if needed.
+       Error if platform name is unknown.
+ 2018-02-01  Aldy Hernandez  <aldyh@redhat.com>
+       PR target/84089
+       * config/pa/predicates.md (base14_operand): Handle E_VOIDmode.
+ 2018-02-01  Jeff Law  <law@redhat.com>
+       PR target/84128
+       * config/i386/i386.c (release_scratch_register_on_entry): Add new
+       OFFSET and RELEASE_VIA_POP arguments.  Use SP+OFFSET to restore
+       the scratch if RELEASE_VIA_POP is false.
+       (ix86_adjust_stack_and_probe_stack_clash): Un-constify SIZE.
+       If we have to save a temporary register, decrement SIZE appropriately.
+       Pass new arguments to release_scratch_register_on_entry.
+       (ix86_adjust_stack_and_probe): Likewise.
+       (ix86_emit_probe_stack_range): Pass new arguments to
+       release_scratch_register_on_entry.
+ 2018-02-01  Uros Bizjak  <ubizjak@gmail.com>
+       PR rtl-optimization/84157
+       * combine.c (change_zero_ext): Use REG_P predicate in
+       front of HARD_REGISTER_P predicate.
+ 2018-02-01  Georg-Johann Lay  <avr@gjlay.de>
+       * config/avr/avr.c (avr_option_override): Move disabling of
+       -fdelete-null-pointer-checks to...
+       * common/config/avr/avr-common.c (avr_option_optimization_table):
+       ...here.
+ 2018-02-01  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR tree-optimization/81635
+       * tree-data-ref.c (split_constant_offset_1): For types that
+       wrap on overflow, try to use range info to prove that wrapping
+       cannot occur.
+ 2018-02-01  Renlin Li  <renlin.li@arm.com>
+       PR target/83370
+       * config/aarch64/aarch64.c (aarch64_class_max_nregs): Handle
+       TAILCALL_ADDR_REGS.
+       (aarch64_register_move_cost): Likewise.
+       * config/aarch64/aarch64.h (reg_class): Rename CALLER_SAVE_REGS to
+       TAILCALL_ADDR_REGS.
+       (REG_CLASS_NAMES): Likewise.
+       (REG_CLASS_CONTENTS): Rename CALLER_SAVE_REGS to
+       TAILCALL_ADDR_REGS. Remove IP registers.
+       * config/aarch64/aarch64.md (Ucs): Update register constraint.
+ 2018-02-01  Richard Biener  <rguenther@suse.de>
+       * domwalk.h (dom_walker::dom_walker): Add additional constructor
+       for specifying RPO order and allow NULL for that.
+       * domwalk.c (dom_walker::dom_walker): Likewise.
+       (dom_walker::walk): Handle NULL RPO order.
+       * tree-into-ssa.c (rewrite_dom_walker): Do not walk dom children
+       in RPO order.
+       (rewrite_update_dom_walker): Likewise.
+       (mark_def_dom_walker): Likewise.
+ 2018-02-01  Richard Sandiford  <richard.sandiford@linaro.org>
+       * config/aarch64/aarch64-protos.h (aarch64_split_sve_subreg_move)
+       (aarch64_maybe_expand_sve_subreg_move): Declare.
+       * config/aarch64/aarch64.md (UNSPEC_REV_SUBREG): New unspec.
+       * config/aarch64/predicates.md (aarch64_any_register_operand): New
+       predicate.
+       * config/aarch64/aarch64-sve.md (mov<mode>): Optimize subreg moves
+       that are semantically a reverse operation.
+       (*aarch64_sve_mov<mode>_subreg_be): New pattern.
+       * config/aarch64/aarch64.c (aarch64_maybe_expand_sve_subreg_move):
+       (aarch64_replace_reg_mode, aarch64_split_sve_subreg_move): New
+       functions.
+       (aarch64_can_change_mode_class): For big-endian, forbid changes
+       between two SVE modes if they have different element sizes.
  
- 2017-08-11  Thomas Schwinge  <thomas@codesourcery.com>
+ 2018-02-01  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       PR lto/81430
-       * config/nvptx/nvptx.c (nvptx_override_options_after_change):
-       Remove function.
-       (TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE): Remove definition.
+       * config/aarch64/aarch64.c (aarch64_expand_sve_const_vector): Prefer
+       the TImode handling for big-endian targets.
  
- 2017-08-11  Tamar Christina  <tamar.christina@arm.com>
-       * config/aarch64/aarch64.md (mov<mode>): Change.
-       (*movhf_aarch64, *movsf_aarch64, *movdf_aarch64):
-       aarch64_reg_or_fp_float into aarch64_reg_or_fp_zero.
-       * config/aarch64/predicates.md (aarch64_reg_or_fp_float): Removed.
+ 2018-02-01  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-08-11  Eric Botcazou  <ebotcazou@adacore.com>
+       * config/aarch64/aarch64-sve.md (sve_ld1rq): Replace with...
+       (*sve_ld1rq<Vesize>): ... this new pattern.  Handle all element sizes,
+       not just bytes.
+       * config/aarch64/aarch64.c (aarch64_expand_sve_widened_duplicate):
+       Remove BSWAP handling for big-endian targets and use the form of
+       LD1RQ appropriate for the mode.
  
-       * tree-sra.c (build_access_from_expr_1): Use more precise diagnostics
-       for storage order barriers.
+ 2018-02-01  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-08-11  Martin Liska  <mliska@suse.cz>
+       * config/aarch64/aarch64.c (aarch64_simd_valid_immediate): Handle
+       all CONST_VECTOR_DUPLICATE_P vectors, not just those with a single
+       duplicated element.
  
-       PR tree-opt/79987
-       * tree-chkp.c (chkp_get_bounds_for_decl_addr): Do not instrument
-       variables of void type.
+ 2018-02-01  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-08-11  Martin Liska  <mliska@suse.cz>
+       PR target/83845
+       * config/aarch64/aarch64.c (aarch64_secondary_reload): Tighten
+       check for operands that need to go through aarch64_sve_reload_be.
  
-       * asan.c (asan_protect_global): Replace ASM_OUTPUT_DEF with
-       TARGET_SUPPORTS_ALIASES.
-       * cgraph.c (cgraph_node::create_same_body_alias): Likewise.
-       * ipa-visibility.c (can_replace_by_local_alias): Likewise.
-       (optimize_weakref): Likewise.
-       * symtab.c (symtab_node::noninterposable_alias): Likewise.
-       * varpool.c (varpool_node::create_extra_name_alias): Likewise.
-       * defaults.h: Introduce TARGET_SUPPORTS_ALIASES.
+ 2018-02-01  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-08-11  Martin Liska  <mliska@suse.cz>
+       PR tree-optimization/81661
+       PR tree-optimization/84117
+       * tree-eh.h (rewrite_to_non_trapping_overflow): Declare.
+       * tree-eh.c: Include gimplify.h.
+       (find_trapping_overflow, replace_trapping_overflow,
+       rewrite_to_non_trapping_overflow): New functions.
+       * tree-vect-loop.c: Include tree-eh.h.
+       (vect_get_loop_niters): Use rewrite_to_non_trapping_overflow.
+       * tree-data-ref.c: Include tree-eh.h.
+       (get_segment_min_max): Use rewrite_to_non_trapping_overflow.
  
-       PR ipa/81213
-       * config/i386/i386.c (make_resolver_func): Do complete
-       refactoring of the function.
+ 2018-01-31  Uros Bizjak  <ubizjak@gmail.com>
  
- 2017-08-10  Uros Bizjak  <ubizjak@gmail.com>
+       PR rtl-optimization/84123
+       * combine.c (change_zero_ext): Check if hard register satisfies
+       can_change_dest_mode before calling gen_lowpart_SUBREG.
  
-       PR target/81708
-       * config/i386/i386.opt (mstack-protector-guard-symbol=): New option
-       * config/i386/i386.c (ix86_stack_protect_guard): Use
-       ix86_stack_protect_guard_symbol_str to generate varible declaration.
-       * doc/invoke.texi (x86 Options): Document
-       -mstack-protector-guard-symbol= option.
+ 2018-01-31  Vladimir Makarov  <vmakarov@redhat.com>
  
- 2017-08-10  Uros Bizjak  <ubizjak@gmail.com>
+       PR target/82444
+       * ira.c (ira_init_register_move_cost): Remove assert.
  
-       * config/i386/i386-protos.h (ix86_split_stack_guard): New prototype.
-       * config/i386/i386.c (ix86_split_stack_guard): New function.
-       (ix86_expand_split_stack_prologue): Call ix86_split_stack_guard.
-       (ix86_legitimate_address_p) <case UNSPEC_STACK_CHECK>: Remove.
-       (i386_asm_output_addr_const_extra) <case UNSPEC_STACK_CHECK>: Ditto.
-       (output_pic_addr_const): Remove UNSPEC_STACK_CHECK handling.
-       * config/i386/i386.md (unspec): Remove UNSPEC_STACK_CHECK.
-       (split_stack_space_check): Call ix86_split_stack_guard.
+ 2018-01-31  Eric Botcazou  <ebotcazou@adacore.com>
  
- 2017-08-10  Martin Sebor  <msebor@redhat.com>
+       PR rtl-optimization/84071
+       * doc/tm.texi.in (WORD_REGISTER_OPERATIONS): Add explicit case.
+       * doc/tm.texi: Regenerate.
  
-       * print-tree.c (print_node): Print location using the established
-       format %s:%i%i.
-       Replace spaces with colons.
-       (debug_raw, debug): Ditto.
+ 2018-01-31  Richard Biener  <rguenther@suse.de>
  
- 2017-08-10  Martin Sebor  <msebor@redhat.com>
+       PR tree-optimization/84132
+       * tree-data-ref.c (analyze_miv_subscript): Properly
+       check whether evolution_function_is_affine_multivariate_p
+       before calling gcd_of_steps_may_divide_p.
  
-       PR c++/81586
-       * pretty-print.c (pp_format): Correct the handling of %s precision.
+ 2018-01-31  Julia Koval  <julia.koval@intel.com>
  
- 2017-08-10  H.J. Lu  <hongjiu.lu@intel.com>
+       PR target/83618
+       * config/i386/i386.c (ix86_expand_builtin): Handle IX86_BUILTIN_RDPID.
+       * config/i386/i386.md (rdpid_rex64) New.
+       (rdpid): Make 32bit only.
  
-       PR target/81736
-       * config/i386/i386.c (ix86_finalize_stack_realign_flags): Renamed
-       to ...
-       (ix86_finalize_stack_frame_flags): This.  Also clear
-       frame_pointer_needed if -fno-omit-frame-pointer is used without
-       stack access.
-       (ix86_expand_prologue): Replace ix86_finalize_stack_realign_flags
-       with ix86_finalize_stack_frame_flags.
-       (ix86_expand_epilogue): Likewise.
-       (ix86_expand_split_stack_prologue): Likewise.
-       * doc/invoke.texi: Add a note for -fno-omit-frame-pointer.
- 2017-08-10  Martin Liska  <mliska@suse.cz>
-       PR c++/81355
-       * c-attribs.c (handle_target_attribute):
-       Report warning for an empty string argument of target attribute.
- 2017-08-09  Jakub Jelinek  <jakub@redhat.com>
-       PR c/81687
-       * omp-low.c (omp_copy_decl): Don't remap FORCED_LABEL or DECL_NONLOCAL
-       LABEL_DECLs.
-       * tree-cfg.c (move_stmt_op): Don't adjust DECL_CONTEXT of FORCED_LABEL
-       or DECL_NONLOCAL labels.
-       (move_stmt_r) <case GIMPLE_LABEL>: Adjust DECL_CONTEXT of FORCED_LABEL
-       or DECL_NONLOCAL labels here.
- 2017-08-09  Will Schmidt  <will_schmidt@vnet.ibm.com>
-       * config/rs6000/rs6000.c (rs6000_option_override_internal): Add blurb
-       to indicate when early gimple folding has been disabled.
-       (rs6000_gimple_fold_builtin): Add debug content.
-       (rs6000_invalid_builtin): Fix whitespace.
-       (rs6000_expand_builtin): Fix whitespace.
-       * config/rs6000/rs6000.opt: Add option for -mfold-gimple.
- 2017-08-09  Segher Boessenkool  <segher@kernel.crashing.org>
-       PR target/80938
-       * config/rs6000/rs6000.c (rs6000_savres_strategy): Don't use
-       SAVE_MULTIPLE if not all the registers that saves, should be saved.
- 2017-08-09  Jim Wilson  <jim.wilson@linaro.org>
-       * config/aarch64/aarch64-cores.def (falkor): Use falkor pipeline.
-       (qdf24xx): Likewise.
-       * config/aarch64/aarch64.md: Include falkor.md.
-       * config/aarch64/falkor.md: New.
- 2017-08-09  Marek Polacek  <polacek@redhat.com>
-       PR c/81233
-       * diagnostic-core.h (emit_diagnostic_valist): Add declaration.
-       * diagnostic.c (emit_diagnostic): Add a comment.
-       (emit_diagnostic_valist): New function.
- 2017-08-09  Marek Polacek  <polacek@redhat.com>
-       PR c/81417
-       * input.c (make_location): New overload.
-       * input.h (make_location): Declare.
- 2017-08-08  Alan Modra  <amodra@gmail.com>
-           H.J. Lu  <hongjiu.lu@intel.com>
-       PR driver/81523
-       * gcc.c (NO_PIE_SPEC): Delete.
-       (PIE_SPEC): Define as !no-pie/pie.  Move static|shared|r
-       exclusion..
-       (LINK_PIE_SPEC): ..to here.
-       (LINK_COMMAND_SPEC): Support -no-pie.
-       * config/gnu-user.h (GNU_USER_TARGET_STARTFILE_SPEC): Correct
-       chain of crtbegin*.o selection, update for PIE_SPEC changes and
-       format.
-       (GNU_USER_TARGET_ENDFILE_SPEC): Similarly.
-       * config/sol2.h (STARTFILE_CRTBEGIN_SPEC): Similarly.
-       (ENDFILE_CRTEND_SPEC): Similarly.
- 2017-08-08  Uros Bizjak  <ubizjak@gmail.com>
-       PR target/81708
-       * config/i386/i386.opt (mstack-protector-guard-reg=): New option
-       (mstack-protector-guard-offset=): Ditto.
-       * config/i386/i386.c (ix86_option_override): Handle
-       -mstack-protector-guard-reg= and -mstack-protector-guard-offset=
-       options.
-       (ix86_stack_protect_guard): Use ix86_stack_protect_guard_reg and
-       ix86_stack_protect_guard_offset variables.
-       (TARGET_STACK_PROTECT_GUARD): Always define.
-       * doc/invoke.texi (x86 Options): Document -mstack-protector-guard-reg=
-       and -mstack-protector-guard-offset= options.
- 2017-08-08  Bin Cheng  <bin.cheng@arm.com>
-       * tree-ssa-loop-ivopts.c (relate_compare_use_with_all_cands): Handle
-       boundary case for the last candidate.
- 2017-08-08  Bin Cheng  <bin.cheng@arm.com>
-       * doc/invoke.texi: Document -ftree-loop-distribution for O3.
-       * opts.c (default_options_table): Add OPT_ftree_loop_distribution.
- 2017-08-08  Tamar Christina  <tamar.christina@arm.com>
-       PR middle-end/19706
-       * config/aarch64/aarch64.md (xorsign<mode>3): New optabs.
-       * config/aarch64/aarch64-builtins.c
-       (aarch64_builtin_vectorized_function): Added CASE_CFN_XORSIGN.
-       * config/aarch64/aarch64-simd-builtins.def: Added xorsign BINOP.
-       * config/aarch64/aarch64-simd.md: Added xorsign<mode>3
- 2017-08-08  Tamar Christina  <tamar.christina@arm.com>
-           Andrew Pinski <pinskia@gmail.com>
-       PR middle-end/19706
-       * internal-fn.def (XORSIGN): New.
-       * optabs.def (xorsign_optab): New.
-       * tree-ssa-math-opts.c (is_copysign_call_with_1): New.
-       (convert_expand_mult_copysign): New.
-       (pass_optimize_widening_mul::execute): Call
-       convert_expand_mult_copysign.
- 2017-08-08  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
-       PR tree-optimization/81354
-       * gimple-ssa-strength-reduction.c (create_add_on_incoming_edge):
-       Insert on edges rather than explicitly creating landing pads.
-       (analyze_candidates_and_replace): Commit edge inserts.
- 2017-08-08  Richard Biener  <rguenther@suse.de>
-       PR middle-end/81719
-       * tree-ssa-loop-niter.c: Include tree-dfa.h.
-       (expand_simple_operations): Also look through ADDR_EXPRs with
-       MEM_REF bases treating them as POINTER_PLUS_EXPR.
- 2017-08-08  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/81723
-       * tree-vect-slp.c (struct bst_traits): New hash traits.
-       (bst_fail): New global.
-       (vect_build_slp_tree_2): New worker, split out from ...
-       (vect_build_slp_tree): ... this now wrapping it with using
-       bst_fail set to cache SLP tree build fails.  Properly handle
-       max_tree_size.
-       (vect_analyze_slp_instance): Allocate and free bst_fail.
- 2017-08-08  Martin Liska  <mliska@suse.cz>
-       PR tree-opt/81696
-       * ipa-icf-gimple.c (func_checker::compare_cst_or_decl): Consider
-       LABEL_DECLs that can be from a different function.
- 2017-08-08  Bin Cheng  <bin.cheng@arm.com>
-       PR tree-optimization/81744
-       * tree-predcom.c (prepare_finalizers_chain): Deep copy expr of
-       loop's number of iterations.
- 2017-08-08  Martin Liska  <mliska@suse.cz>
-       * asan.c: Include header files.
-       * attribs.c (build_decl_attribute_variant): New function moved
-       from tree.[ch].
-       (build_type_attribute_qual_variant): Likewise.
-       (cmp_attrib_identifiers): Likewise.
-       (simple_cst_list_equal): Likewise.
-       (omp_declare_simd_clauses_equal): Likewise.
-       (attribute_value_equal): Likewise.
-       (comp_type_attributes): Likewise.
-       (build_type_attribute_variant): Likewise.
-       (lookup_ident_attribute): Likewise.
-       (remove_attribute): Likewise.
-       (merge_attributes): Likewise.
-       (merge_type_attributes): Likewise.
-       (merge_decl_attributes): Likewise.
-       (merge_dllimport_decl_attributes): Likewise.
-       (handle_dll_attribute): Likewise.
-       (attribute_list_equal): Likewise.
-       (attribute_list_contained): Likewise.
-       * attribs.h (lookup_attribute): New function moved from tree.[ch].
-       (lookup_attribute_by_prefix): Likewise.
-       * bb-reorder.c: Include header files.
-       * builtins.c: Likewise.
-       * calls.c: Likewise.
-       * cfgexpand.c: Likewise.
-       * cgraph.c: Likewise.
-       * cgraphunit.c: Likewise.
-       * convert.c: Likewise.
-       * dwarf2out.c: Likewise.
-       * final.c: Likewise.
-       * fold-const.c: Likewise.
-       * function.c: Likewise.
-       * gimple-expr.c: Likewise.
-       * gimple-fold.c: Likewise.
-       * gimple-pretty-print.c: Likewise.
-       * gimple.c: Likewise.
-       * gimplify.c: Likewise.
-       * hsa-common.c: Likewise.
-       * hsa-gen.c: Likewise.
-       * internal-fn.c: Likewise.
-       * ipa-chkp.c: Likewise.
-       * ipa-cp.c: Likewise.
-       * ipa-devirt.c: Likewise.
-       * ipa-fnsummary.c: Likewise.
-       * ipa-inline.c: Likewise.
-       * ipa-visibility.c: Likewise.
-       * ipa.c: Likewise.
-       * lto-cgraph.c: Likewise.
-       * omp-expand.c: Likewise.
-       * omp-general.c: Likewise.
-       * omp-low.c: Likewise.
-       * omp-offload.c: Likewise.
-       * omp-simd-clone.c: Likewise.
-       * opts-global.c: Likewise.
-       * passes.c: Likewise.
-       * predict.c: Likewise.
-       * sancov.c: Likewise.
-       * sanopt.c: Likewise.
-       * symtab.c: Likewise.
-       * toplev.c: Likewise.
-       * trans-mem.c: Likewise.
-       * tree-chkp.c: Likewise.
-       * tree-eh.c: Likewise.
-       * tree-into-ssa.c: Likewise.
-       * tree-object-size.c: Likewise.
-       * tree-parloops.c: Likewise.
-       * tree-profile.c: Likewise.
-       * tree-ssa-ccp.c: Likewise.
-       * tree-ssa-live.c: Likewise.
-       * tree-ssa-loop.c: Likewise.
-       * tree-ssa-sccvn.c: Likewise.
-       * tree-ssa-structalias.c: Likewise.
-       * tree-ssa.c: Likewise.
-       * tree-streamer-in.c: Likewise.
-       * tree-vectorizer.c: Likewise.
-       * tree-vrp.c: Likewise.
-       * tsan.c: Likewise.
-       * ubsan.c: Likewise.
-       * varasm.c: Likewise.
-       * varpool.c: Likewise.
-       * tree.c: Remove functions moved to attribs.[ch].
-       * tree.h: Likewise.
-       * config/aarch64/aarch64.c: Add attrs.h header file.
-       * config/alpha/alpha.c: Likewise.
-       * config/arc/arc.c: Likewise.
-       * config/arm/arm.c: Likewise.
-       * config/avr/avr.c: Likewise.
-       * config/bfin/bfin.c: Likewise.
-       * config/c6x/c6x.c: Likewise.
-       * config/cr16/cr16.c: Likewise.
-       * config/cris/cris.c: Likewise.
-       * config/darwin.c: Likewise.
-       * config/epiphany/epiphany.c: Likewise.
-       * config/fr30/fr30.c: Likewise.
-       * config/frv/frv.c: Likewise.
-       * config/ft32/ft32.c: Likewise.
-       * config/h8300/h8300.c: Likewise.
-       * config/i386/winnt.c: Likewise.
-       * config/ia64/ia64.c: Likewise.
-       * config/iq2000/iq2000.c: Likewise.
-       * config/lm32/lm32.c: Likewise.
-       * config/m32c/m32c.c: Likewise.
-       * config/m32r/m32r.c: Likewise.
-       * config/m68k/m68k.c: Likewise.
-       * config/mcore/mcore.c: Likewise.
-       * config/microblaze/microblaze.c: Likewise.
-       * config/mips/mips.c: Likewise.
-       * config/mmix/mmix.c: Likewise.
-       * config/mn10300/mn10300.c: Likewise.
-       * config/moxie/moxie.c: Likewise.
-       * config/msp430/msp430.c: Likewise.
-       * config/nds32/nds32-isr.c: Likewise.
-       * config/nds32/nds32.c: Likewise.
-       * config/nios2/nios2.c: Likewise.
-       * config/nvptx/nvptx.c: Likewise.
-       * config/pa/pa.c: Likewise.
-       * config/pdp11/pdp11.c: Likewise.
-       * config/powerpcspe/powerpcspe.c: Likewise.
-       * config/riscv/riscv.c: Likewise.
-       * config/rl78/rl78.c: Likewise.
-       * config/rx/rx.c: Likewise.
-       * config/s390/s390.c: Likewise.
-       * config/sh/sh.c: Likewise.
-       * config/sol2.c: Likewise.
-       * config/sparc/sparc.c: Likewise.
-       * config/spu/spu.c: Likewise.
-       * config/stormy16/stormy16.c: Likewise.
-       * config/tilegx/tilegx.c: Likewise.
-       * config/tilepro/tilepro.c: Likewise.
-       * config/v850/v850.c: Likewise.
-       * config/vax/vax.c: Likewise.
-       * config/visium/visium.c: Likewise.
-       * config/xtensa/xtensa.c: Likewise.
- 2017-08-07  Michael Meissner  <meissner@linux.vnet.ibm.com>
-       PR target/81593
-       * config/rs6000/vsx.md (vsx_concat_<mode>, VSX_D): Cleanup
-       constraints since the -mupper-regs-* switches have been
-       eliminated.
-       (vsx_concat_<mode>_1): New combiner insns to recognize inserting
-       into a vector from a double word element that was extracted from
-       another vector, and eliminate extra XXPERMDI instructions.
-       (vsx_concat_<mode>_2): Likewise.
-       (vsx_concat_<mode>_3): Likewise.
-       (vsx_set_<mode>, VSX_D): Rewrite vector set in terms of vector
-       concat to allow optimizing inserts from previous extracts.
- 2017-08-07  Uros Bizjak  <ubizjak@gmail.com>
-       * config/i386/i386.c (ix86_stack_protect_guard): Generate
-       memory reference to a SSP offset in TLS address space.
-       (ix86_print_operand) <case '@'>: Remove.
-       (ix86_print_operand_punct_valid_p): Remove '@' code.
-       * config/i386/i386.md (unspec): Remove UNSPEC_SP_TLS_SET and
-       UNSPEC_SP_TLS_TEST.
-       (stack_tls_protect_set_<mode>): Remove.
-       (stack_protect_set): Do not call gen_stack_tls_protect_set_<mode>.
-       (stack_tls_protect_test_<mode>): Remove.
-       (stack_protect_test): Do not call gen_stack_tls_protect_test_<mode>.
- 2017-08-07  Olivier Hainque  <hainque@adacore.com>
-       PR target/81755
-       * config/vxworksae.h (VXWORKS_HAVE_TLS): Define.
- 2017-08-07  Douglas Rupp  <rupp@adacore.com>
-       * Makefile.in (install-mkheaders): Fix typo, where the multi_dir
-       variable was referenced as multidir in command.
- 2017-08-07  Jakub Jelinek  <jakub@redhat.com>
-       PR c/69389
-       * gimplify.c (goa_stabilize_expr): Handle BIT_INSERT_EXPR and
-       BIT_FIELD_REF.
- 2017-08-07  Martin Liska  <mliska@suse.cz>
-       * config/m32c/m32c.c: Add include of stringpool.h and attribs.h.
-       * config/rl78/rl78.c: Add include of attribs.h.
-       * config/sh/sh.c: Likewise.
-       * config/v850/v850.c: Likewise.
- 2017-08-07  Tom de Vries  <tom@codesourcery.com>
-       PR middle-end/78266
-       * omp-expand.c (expand_oacc_for): Ensure diff_type is large enough.
- 2017-08-07  Martin Liska  <mliska@suse.cz>
-       * config/mips/mips.c: Include attribs.h.
- 2017-08-07  Thomas Koenig  <tkoenig@gcc.gnu.org>
-       PR fortran/68829
-       * doc/invoke.texi: Document change in behavior for -Ofast for
-       Fortran.
- 2017-08-07  Wilco Dijkstra  <wdijkstr@arm.com>
-       * config/aarch64/aarch64.c (aarch64_pushwb_single_reg):
-       Use gen_frame_mem.
-       (aarch64_pop_regs): Likewise.
-       (aarch64_gen_load_pair): Likewise.
-       (aarch64_save_callee_saves): Likewise.
-       (aarch64_restore_callee_saves): Likewise.
- 2017-08-07  H.J. Lu  <hongjiu.lu@intel.com>
-       * config/i386/i386.c: Revert the last change.
- 2017-08-07  H.J. Lu  <hongjiu.lu@intel.com>
-       PR target/81736
-       * config/i386/i386.c (ix86_finalize_stack_realign_flags): Renamed
-       to ...
-       (ix86_finalize_stack_frame_flags): This.  Also clear
-       frame_pointer_needed if -fno-omit-frame-pointer is used without
-       stack access.
-       (ix86_expand_prologue): Replace ix86_finalize_stack_realign_flags
-       with ix86_finalize_stack_frame_flags.
-       (ix86_expand_epilogue): Likewise.
-       (ix86_expand_split_stack_prologue): Likewise.
+ 2018-01-29  Aldy Hernandez  <aldyh@redhat.com>
  
- 2017-08-07  H.J. Lu  <hongjiu.lu@intel.com>
+       PR lto/84105
+       * tree-pretty-print.c (dump_generic_node): Handle a TYPE_NAME with
+       an IDENTIFIER_NODE for FUNCTION_TYPE's.
  
-       PR target/81743
-       * config/i386/i386.c (get_builtin_code_for_version): Set priority
-       to P_AES for Westmere.
+ 2018-01-31  Eric Botcazou  <ebotcazou@adacore.com>
  
- 2017-08-07  Jonathan Yong  <10walls@gmail.com>
+       Revert
+       2018-01-12  Eric Botcazou  <ebotcazou@adacore.com>
  
-       * config/i386/mingw.opt (fset-stack-executable): Removed.
-       * config/i386/cygming.opt (fset-stack-executable): Moved
-       from mingw.opt.
-       * config/i386/cygwin.h: Define CHECK_EXECUTE_STACK_ENABLED.
+       * config/sparc/sparc.md (vxworks_load_got): Set the GOT register.
  
- 2017-08-07  Segher Boessenkool  <segher@kernel.crashing.org>
+ 2018-01-31  Eric Botcazou  <ebotcazou@adacore.com>
  
-       * print-rtl.c (print_exp): Print NOT as "~" instead of as "!".
+       PR rtl-optimization/84071
+       * combine.c (record_dead_and_set_regs_1): Record the source unmodified
+       for a paradoxical SUBREG on a WORD_REGISTER_OPERATIONS target.
  
- 2017-08-07  Marek Polacek  <polacek@redhat.com>
+ 2018-01-31  Claudiu Zissulescu  <claziss@synopsys.com>
  
-       PR middle-end/81737
-       * fold-const.c (fold_indirect_ref_1): Check type_domain.
+       * config/arc/arc.c (arc_handle_aux_attribute): New function.
+       (arc_attribute_table): Add 'aux' attribute.
+       (arc_in_small_data_p): Consider aux like variables.
+       (arc_is_aux_reg_p): New function.
+       (arc_asm_output_aligned_decl_local): Ignore 'aux' like variables.
+       (arc_get_aux_arg): New function.
+       (prepare_move_operands): Handle aux-register access.
+       (arc_handle_aux_attribute): New function.
+       * doc/extend.texi (ARC Variable attributes): Add subsection.
  
- 2017-08-07  Martin Liska  <mliska@suse.cz>
+ 2018-01-31  Claudiu Zissulescu  <claziss@synopsys.com>
  
-       * attribs.h (canonicalize_attr_name): New function.
-       (cmp_attribs): Move from c-format.c and adjusted.
-       (is_attribute_p): Moved from tree.h.
-       * tree-inline.c: Add new includes.
-       * tree.c (cmp_attrib_identifiers): Use cmp_attribs.
-       (private_is_attribute_p): Remove.
-       (private_lookup_attribute): Likewise.
-       (private_lookup_attribute_by_prefix): Simplify.
-       (remove_attribute): Use is_attribute_p.
-       * tree.h: Remove removed declarations.
+       * config/arc/arc-protos.h (arc_is_uncached_mem_p): Function proto.
+       * config/arc/arc.c (arc_handle_uncached_attribute): New function.
+       (arc_attribute_table): Add 'uncached' attribute.
+       (arc_print_operand): Print '.di' flag for uncached memory
+       accesses.
+       (arc_in_small_data_p): Do not consider for small data the uncached
+       types.
+       (arc_is_uncached_mem_p): New function.
+       * config/arc/predicates.md (compact_store_memory_operand): Check
+       for uncached memory accesses.
+       (nonvol_nonimm_operand): Likewise.
+       * gcc/doc/extend.texi (ARC Type Attribute): New subsection.
  
- 2017-08-07  Jakub Jelinek  <jakub@redhat.com>
+ 2018-01-31  Jakub Jelinek  <jakub@redhat.com>
  
-       PR middle-end/81698
-       * stmt.c (emit_case_dispatch_table): Add DEFAULT_EDGE argument,
-       instead of computing it in the function.  Formatting fix.
-       (expand_case): Don't rely on default_edge being the first edge,
-       clear it if removing it, pass default_edge to
-       emit_case_dispatch_table.
-       (expand_sjlj_dispatch_table): Pass NULL as DEFAULT_EDGE, formatting
-       fix.
+       PR c/84100
+       * common.opt (falign-functions=, falign-jumps=, falign-labels=,
+       falign-loops=): Add Optimization flag.
  
- 2017-08-06  Uros Bizjak  <ubizjak@gmail.com>
+ 2018-01-30  Jeff Law  <law@redhat.com>
  
-       * config/alpha/alpha.c (alpha_reorg): If trap is the last active
-       insn in the function, emit NOP after the insn.
+       PR target/84064
+       * i386.c (ix86_adjust_stack_and_probe_stack_clash): New argument
+       INT_REGISTERS_SAVED.  Check it prior to calling
+       get_scratch_register_on_entry.
+       (ix86_adjust_stack_and_probe): Similarly.
+       (ix86_emit_probe_stack_range): Similarly.
+       (ix86_expand_prologue): Corresponding changes.
  
- 2017-08-06  Tom de Vries  <tom@codesourcery.com>
+ 2018-01-30  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
  
-       * omp-expand.c (expand_oacc_for): Add missing edge probability for tile
-       and element loops.
+       PR target/40411
+       * config/sol2.h (STARTFILE_ARCH_SPEC): Use -std=c*,
+       -std=iso9899:199409 instead of -pedantic to select values-Xc.o.
  
- 2017-08-06  Tom de Vries  <tom@codesourcery.com>
+ 2018-01-30  Vladimir Makarov  <vmakarov@redhat.com>
  
-       * omp-expand.c (expand_oacc_for): Add missing edge probability for chunk
-       loop.
+       PR target/84112
+       * lra-constraints.c (curr_insn_transform): Process AND in the
+       address.
  
- 2017-08-04  Yury Gribov  <tetra2005@gmail.com>
+ 2018-01-30  Jakub Jelinek  <jakub@redhat.com>
  
-       PR tree-optimization/57371
-       * match.pd: New pattern.
+       PR rtl-optimization/83986
+       * sched-deps.c (sched_analyze_insn): For frame related insns, add anti
+       dependence against last_pending_memory_flush in addition to
+       pending_jump_insns.
  
- 2017-08-04  Marek Polacek  <polacek@redhat.com>
+ 2018-01-30  Alexandre Oliva  <aoliva@redhat.com>
  
-       PR middle-end/81695
-       * fold-const.c (fold_indirect_ref_1): For ((int *)&a + 4 -> a[1],
-       perform the computation in offset_int.
+       PR tree-optimization/81611
+       * tree-ssa-dom.c (simple_iv_increment_p): Skip intervening
+       copies.
  
- 2017-08-04  Richard Sandiford  <richard.sandiford@linaro.org>
+ 2018-01-30  Aaron Sawdey  <acsawdey@linux.vnet.ibm.com>
  
-       PR tree-optimization/81136
-       * tree-vectorizer.h: Include tree-hash-traits.h.
-       (vec_base_alignments): New typedef.
-       (vec_info): Add a base_alignments field.
-       (vect_record_base_alignments): Declare.
-       * tree-data-ref.h (data_reference): Add an is_conditional_in_stmt
-       field.
-       (DR_IS_CONDITIONAL_IN_STMT): New macro.
-       (create_data_ref): Add an is_conditional_in_stmt argument.
-       * tree-data-ref.c (create_data_ref): Likewise.  Use it to initialize
-       the is_conditional_in_stmt field.
-       (data_ref_loc): Add an is_conditional_in_stmt field.
-       (get_references_in_stmt): Set the is_conditional_in_stmt field.
-       (find_data_references_in_stmt): Update call to create_data_ref.
-       (graphite_find_data_references_in_stmt): Likewise.
-       * tree-ssa-loop-prefetch.c (determine_loop_nest_reuse): Likewise.
-       * tree-vect-data-refs.c (vect_analyze_data_refs): Likewise.
-       (vect_record_base_alignment): New function.
-       (vect_record_base_alignments): Likewise.
-       (vect_compute_data_ref_alignment): Adjust base_addr and aligned_to
-       for nested statements even if we fail to compute a misalignment.
-       Use pooled base alignments for unconditional references.
-       (vect_find_same_alignment_drs): Compare base addresses instead
-       of base objects.
-       (vect_analyze_data_refs_alignment): Call vect_record_base_alignments.
-       * tree-vect-slp.c (vect_slp_analyze_bb_1): Likewise.
- 2017-08-04  Richard Sandiford  <richard.sandiford@linaro.org>
-       * tree-vectorizer.h (vec_info): Add a constructor and destructor.
-       Add an explicit name for the enum.  Use auto_vec for slp_instances
-       and grouped_stores.
-       (_loop_vec_info): Add a constructor and destructor.  Use auto_vec
-       for all vectors.
-       (_bb_vec_info): Add a constructor and destructor.
-       (vinfo_for_stmt): Return NULL for uids of -1 as well.
-       (destroy_loop_vec_info): Delete.
-       (vect_destroy_datarefs): Likewise.
-       * tree-vectorizer.c (vect_destroy_datarefs): Delete.
-       (vec_info::vec_info): New function.
-       (vec_info::~vec_info): Likewise.
-       (vectorize_loops): Use delete instead of destroy_loop_vec_info.
-       * tree-parloops.c (gather_scalar_reductions): Use delete instead of
-       destroy_loop_vec_info.
-       * tree-vect-loop.c (new_loop_vec_info): Replace with...
-       (_loop_vec_info::_loop_vec_info): ...this.
-       (destroy_loop_vec_info): Replace with...
-       (_loop_vec_info::~_loop_vec_info): ...this.  Unconditionally delete
-       the stmt_vec_infos.  Leave handling of vec_info information to its
-       destructor.  Remove explicit vector releases.
-       (vect_analyze_loop_form): Use new instead of new_loop_vec_info.
-       (vect_analyze_loop): Use delete instead of destroy_loop_vec_info.
-       * tree-vect-slp.c (new_bb_vec_info): Replace with...
-       (_bb_vec_info::_bb_vec_info): ...this.  Don't reserve space in
-       BB_VINFO_GROUPED_STORES or BB_VINFO_SLP_INSTANCES.
-       (destroy_bb_vec_info): Replace with...
-       (_bb_vec_info::~_bb_vec_info): ...this.  Leave handling of vec_info
-       information to its destructor.
-       (vect_slp_analyze_bb_1): Use new and delete instead of
-       new_bb_vec_info and destroy_bb_vec_info.
-       (vect_slp_bb): Replace 2 calls to destroy_bb_vec_info with a
-       single delete.
- 2017-08-04  Richard Sandiford  <richard.sandiford@linaro.org>
-       * tree-data-ref.h (subscript): Add access_fn field.
-       (data_dependence_relation): Add could_be_independent_p.
-       (SUB_ACCESS_FN, DDR_COULD_BE_INDEPENDENT_P): New macros.
-       (same_access_functions): Move to tree-data-ref.c.
-       * tree-data-ref.c (ref_contains_union_access_p): New function.
-       (access_fn_component_p): Likewise.
-       (access_fn_components_comparable_p): Likewise.
-       (dr_analyze_indices): Add a reference to access_fn_component_p.
-       (dump_data_dependence_relation): Use SUB_ACCESS_FN instead of
-       DR_ACCESS_FN.
-       (constant_access_functions): Likewise.
-       (add_other_self_distances): Likewise.
-       (same_access_functions): Likewise.  (Moved from tree-data-ref.h.)
-       (initialize_data_dependence_relation): Use XCNEW and remove
-       explicit zeroing of DDR_REVERSED_P.  Look for a subsequence
-       of access functions that have the same type.  Allow the
-       subsequence to end with different bases in some circumstances.
-       Record the chosen access functions in SUB_ACCESS_FN.
-       (build_classic_dist_vector_1): Replace ddr_a and ddr_b with
-       a_index and b_index.  Use SUB_ACCESS_FN instead of DR_ACCESS_FN.
-       (subscript_dependence_tester_1): Likewise dra and drb.
-       (build_classic_dist_vector): Update calls accordingly.
-       (subscript_dependence_tester): Likewise.
-       * tree-ssa-loop-prefetch.c (determine_loop_nest_reuse): Check
-       DDR_COULD_BE_INDEPENDENT_P.
-       * tree-vectorizer.h (LOOP_REQUIRES_VERSIONING_FOR_ALIAS): Test
-       comp_alias_ddrs instead of may_alias_ddrs.
-       * tree-vect-data-refs.c (vect_analyze_possibly_independent_ddr):
-       New function.
-       (vect_analyze_data_ref_dependence): Use it if
-       DDR_COULD_BE_INDEPENDENT_P, but fall back to using the recorded
-       distance vectors if that fails.
-       (dependence_distance_ge_vf): New function.
-       (vect_prune_runtime_alias_test_list): Use it.  Don't clear
-       LOOP_VINFO_MAY_ALIAS_DDRS.
+       PR target/83758
+       * config/rs6000/rs6000.c (rs6000_internal_arg_pointer): Only return
+       a reg rtx.
  
- 2017-08-04  Richard Biener  <rguenther@suse.de>
+ 2018-01-30  Richard Biener  <rguenther@suse.de>
+           Jakub Jelinek  <jakub@redhat.com>
  
-       PR middle-end/81705
-       * fold-const.c (fold_binary_loc): Properly restrict
-       minus_var0 && minus_var1 case when associating undefined overflow
-       entities.
+       PR tree-optimization/84111
+       * tree-ssa-loop-ivcanon.c (tree_unroll_loops_completely_1): Skip
+       inner loops added during recursion, as they don't have up-to-date
+       SSA form.
  
- 2017-08-04  Tom de Vries  <tom@codesourcery.com>
+ 2018-01-30  Jan Hubicka  <hubicka@ucw.cz>
  
-       * omp-simd-clone.c (simd_clone_adjust): Add missing edge probability.
+       PR ipa/81360
+       * ipa-inline.c (can_inline_edge_p): Break out late tests to...
+       (can_inline_edge_by_limits_p): ... here.
+       (can_early_inline_edge_p, check_callers,
+       update_caller_keys, update_callee_keys, recursive_inlining,
+       add_new_edges_to_heap, speculation_useful_p,
+       inline_small_functions,
+       inline_small_functions, flatten_function,
+       inline_to_all_callers_1): Update.
  
- 2017-08-03  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+ 2018-01-30  Jan Hubicka  <hubicka@ucw.cz>
  
-       * config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin):
-       Don't start diagnostic messages with a capital letter.
-       * config/rs6000/rs6000.c (rs6000_option_override_internal):
-       Likewise.
-       (rs6000_invalid_builtin): Likewise.
-       (rs6000_trampoline_init): Likewise.
+       * profile-count.c (profile_count::combine_with_ipa_count): Handle
+       zeros correctly.
  
- 2017-08-03  Jakub Jelinek  <jakub@redhat.com>
+ 2018-01-30  Richard Biener  <rguenther@suse.de>
  
-       PR target/81621
-       * bb-reorder.c (pass_partition_blocks::execute): Return TODO_df_finish
-       after setting changeable df flags.
+       PR tree-optimization/83008
+       * tree-vect-slp.c (vect_analyze_slp_cost_1): Properly cost
+       invariant and constant vector uses in stmts when they need
+       more than one stmt.
  
- 2017-08-03  Richard Biener  <rguenther@suse.de>
+ 2018-01-30  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
  
-       * tree-ssa-reassoc.c (should_break_up_subtract): Also break
-       up if the use is in USE - X.
+       PR bootstrap/84017
+       * configure.ac (gcc_cv_as_shf_merge): Disable on Solaris 10/x86.
+       * configure: Regenerate.
  
- 2017-08-03  Alexander Monakov  <amonakov@ispras.ru>
+ 2018-01-30  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       * toplev.c (dumpfile.h): New include.
-       (internal_error_reentered): New static function.  Use it...
-       (internal_error_function): ...here to handle reentered internal_error.
+       * config/aarch64/aarch64-sve.md (*vec_extract<mode><Vel>_0): New
+       pattern.
+       (*vec_extract<mode><Vel>_v128): Require a nonzero lane number.
+       Use gen_rtx_REG rather than gen_lowpart.
  
- 2017-08-03  Richard Biener  <rguenther@suse.de>
+ 2018-01-30  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       PR middle-end/81148
-       * fold-const.c (split_tree): Add minus_var and minus_con
-       arguments, remove unused loc arg.  Never generate NEGATE_EXPRs
-       here but always use minus_*.
-       (associate_trees): Assert we never associate with MINUS_EXPR
-       and NULL first operand.  Do not recurse for PLUS_EXPR operands
-       when associating as MINUS_EXPR either.
-       (fold_binary_loc): Track minus_var and minus_con.
+       * lra-constraints.c (match_reload): Use subreg_lowpart_offset
+       rather than 0 when creating partial subregs.
  
- 2017-08-03  Tom de Vries  <tom@codesourcery.com>
+ 2018-01-30  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       PR lto/81430
-       * tree-streamer-in.c (lto_input_ts_function_decl_tree_pointers): If
-       ACCEL_COMPILER, apply finish_options on
-       DECL_FUNCTION_SPECIFIC_OPTIMIZATION.
+       * vec-perm-indices.c (vec_perm_indices::series_p): Give examples
+       of usage.
  
- 2017-08-03  Tom de Vries  <tom@codesourcery.com>
+ 2018-01-29  Michael Meissner  <meissner@linux.vnet.ibm.com>
  
-       PR target/81662
-       * config/nvptx/nvptx.c (nvptx_option_override): Emit sorry if
-       function_entry_patch_area_size > 0.
+       PR target/81550
+       * config/rs6000/rs6000.c (rs6000_setup_reg_addr_masks): If DFmode
+       and SFmode can go in Altivec registers (-mcpu=power7 for DFmode,
+       -mcpu=power8 for SFmode) don't set the PRE_INCDEC or PRE_MODIFY
+       flags.  This restores the settings used before the 2017-07-24.
+       Turning off pre increment/decrement/modify allows IVOPTS to
+       optimize DF/SF loops where the index is an int.
  
- 2017-08-03  Jakub Jelinek  <jakub@redhat.com>
+ 2018-01-29  Richard Biener  <rguenther@suse.de>
+           Kelvin Nilsen  <kelvin@gcc.gnu.org>
  
-       PR driver/81650
-       * calls.c (alloc_max_size): Use HOST_WIDE_INT_UC (10??)
-       instead of 10??LU, perform unit multiplication in wide_int,
-       don't change alloc_object_size_limit if the limit is larger
-       than SSIZE_MAX.
+       PR bootstrap/80867
+       * tree-vect-stmts.c (vectorizable_call): Don't call
+       targetm.vectorize_builtin_md_vectorized_function if callee is
+       NULL.
  
-       PR tree-optimization/81655
-       PR tree-optimization/81588
-       * tree-ssa-reassoc.c (optimize_range_tests_var_bound): Handle also
-       the case when ranges[i].low and high are 1 for unsigned type with
-       precision 1.
+ 2018-01-22  Carl Love  <cel@us.ibm.com>
  
-       PR middle-end/81052
-       * omp-low.c (diagnose_sb_0): Handle flag_openmp_simd like flag_openmp.
-       (pass_diagnose_omp_blocks::gate): Enable also for flag_openmp_simd.
-       * doc/extend.texi: Fix typo in second arg in
+       __builtin_bcdadd_{lt|eq|gt|ov} and __builtin_bcdsub_{lt|eq|gt|ov}.
  
- 2017-08-03  Prathamesh Kulkarni  <prathamesh.kulkarni@linaro.org>
+ 2018-01-29  Richard Biener  <rguenther@suse.de>
  
-       * tree-vrp.h: Add include guard.
+       PR tree-optimization/84086
+       * tree-ssanames.c: Include cfgloop.h and tree-scalar-evolution.h.
+       (flush_ssaname_freelist): When SSA names were released reset
+       the SCEV hash table.
  
- 2017-08-02  Uros Bizjak  <ubizjak@gmail.com>
+ 2018-01-29  Richard Biener  <rguenther@suse.de>
  
-       PR target/81644
-       * config/i386/i386.md (unspecv): Add UNSPECV_UD2.
-       (ud2): New insn pattern.
-       * config/i386/i386.c (ix86_expand_epilogue):
-       For naked functions, generate ud2 instead of trap insn.
+       PR tree-optimization/84057
+       * tree-ssa-loop-ivcanon.c (unloop_loops): Deal with already
+       removed paths when removing edges.
  
- 2017-08-02  Marek Polacek  <polacek@redhat.com>
+ 2018-01-27  H.J. Lu  <hongjiu.lu@intel.com>
  
-       PR other/81667
-       * alloc-pool.h (base_pool_allocator): Initialize m_elt_size.
+       * doc/invoke.texi: Replace -mfunction-return==@var{choice} with
+       -mfunction-return=@var{choice}.
  
- 2017-08-02  Tom de Vries  <tom@codesourcery.com>
-           Cesar Philippidis  <cesar@codesourcery.com>
+ 2018-01-27  Bernd Edlinger  <bernd.edlinger@hotmail.de>
  
-       * config/nvptx/nvptx.c (nvptx_lockless_update, nvptx_lockfull_update):
-       Add missing edge probabilities.
+       PR diagnostic/84034
+       * diagnostic-show-locus.c (get_line_width_without_trailing_whitespace):
+       Handle CR like TAB.
+       (layout::print_source_line): Likewise.
+       (test_get_line_width_without_trailing_whitespace): Add test cases.
  
- 2017-08-02  Tamar Christina  <tamar.christina@arm.com>
+ 2018-01-27  Jakub Jelinek  <jakub@redhat.com>
  
-       * config/aarch64/aarch64.c (aarch64_reinterpret_float_as_int):
-       Correct endianness.
+       PR middle-end/84040
+       * sched-deps.c (sched_macro_fuse_insns): Return immediately for
+       debug insns.
  
- 2017-08-02  Jakub Jelinek  <jakub@redhat.com>
+ 2018-01-26  Jim Wilson  <jimw@sifive.com>
  
-       PR middle-end/79499
-       * function.c (thread_prologue_and_epilogue_insns): Determine blocks
-       for find_many_sub_basic_blocks bitmap by looking up BLOCK_FOR_INSN
-       of first NONDEBUG_INSN_P in each of the split_prologue_seq and
-       prologue_seq sequences - if any.
+       * config/riscv/riscv.h (MAX_FIXED_MODE_SIZE): New.
  
- 2017-08-02  Richard Biener  <rguenther@suse.de>
+       * config/riscv/elf.h (LIB_SPEC): Don't include -lgloss when nosys.specs
+       specified.
  
-       * tree-vect-stmts.c (vectorizable_store): Perform vector extracts
-       via vectors if supported, integer extracts via punning if supported
-       or otherwise vector extracts.
+ 2018-01-26  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
  
- 2017-08-02  Richard Biener  <rguenther@suse.de>
+       * config/aarch64/aarch64.md: Add peepholes for CMP + SUB -> SUBS
+       and CMP + SUB-immediate -> SUBS.
  
-       * tree-ssa-pre.c (bitmap_insert_into_set_1): Remove and inline
-       into ...
-       (bitmap_insert_into_set): ... this.
+ 2018-01-26  Martin Sebor  <msebor@redhat.com>
  
- 2017-08-02  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/83896
+       * tree-ssa-strlen.c (get_string_len): Rename...
+       (get_string_cst_length): ...to this.  Return HOST_WIDE_INT.
+       Avoid assuming length is constant.
+       (handle_char_store): Use HOST_WIDE_INT for string length.
  
-       PR tree-optimization/81633
-       Revert
-       2015-08-17  Alan Hayward  <alan.hayward@arm.com>
+ 2018-01-26  Uros Bizjak  <ubizjak@gmail.com>
  
-       PR tree-optimization/71752
-       * tree-vect-slp.c (vect_get_slp_defs): Handle null operands.
+       PR target/81763
+       * config/i386/i386.md (*andndi3_doubleword): Add earlyclobber
+       to (=&r,r,rm) alternative. Add (=r,0,rm) and (=r,r,0) alternatives.
  
- 2017-08-01  Daniel Santos  <daniel.santos@pobox.com>
+ 2018-01-26  Richard Biener  <rguenther@suse.de>
  
-       * config/i386/i386.h (ix86_frame::outlined_save_offset): Remove field.
-       (machine_function::call_ms2sysv_pad_out): Remove field.
-       * config/i386/i386.c (xlogue_layout::get_stack_space_used): Modify.
-       (ix86_compute_frame_layout): Likewise.
+       PR rtl-optimization/84003
+       * dse.c (record_store): Only record redundant stores when
+       the earlier store aliases at least all accesses the later one does.
  
- 2017-08-01  H.J. Lu  <hongjiu.lu@intel.com>
+ 2018-01-26  Jakub Jelinek  <jakub@redhat.com>
  
-       PR target/81654
-       * config/i386/i386.c (ix86_set_func_type): Disallow naked
-       attribute with interrupt attribute.
+       PR rtl-optimization/83985
+       * dce.c (deletable_insn_p): Return false for separate shrink wrapping
+       REG_CFA_RESTORE insns.
+       (delete_unmarked_insns): Don't ignore separate shrink wrapping
+       REG_CFA_RESTORE insns here.
  
- 2017-08-01  Andrew Pinski  <apinski@cavium.com>
+       PR c/83989
+       * gimple-ssa-warn-restrict.c (builtin_memref::builtin_memref): Don't
+       use SSA_NAME_VAR as base for SSA_NAMEs with non-NULL SSA_NAME_VAR.
  
-       * tree-ssa-scopedtables.c (hashable_expr_equal_p): Check
-       BIT_INSERT_EXPR's operand 1
-       to see if the types precision matches.
+ 2018-01-26  Claudiu Zissulescu  <claziss@synopsys.com>
  
- 2017-08-01  Martin Liska  <mliska@suse.cz>
+       * config/arc/arc-arch.h (arc_tune_attr): Add ARC_TUNE_CORE_3.
+       * config/arc/arc.c (arc_sched_issue_rate): Use ARC_TUNE_... .
+       (arc_init): Likewise.
+       (arc_override_options): Likewise.
+       (arc_file_start): Choose Tag_ARC_CPU_variation based on arc_tune
+       value.
+       (hwloop_fail): Use TARGET_DBNZ when we want to check for dbnz insn
+       support.
+       * config/arc/arc.h (TARGET_DBNZ): Define.
+       * config/arc/arc.md (attr tune): Add core_3, use ARC_TUNE_... to
+       properly set the tune attribute.
+       (dbnz): Use TARGET_DBNZ guard.
+       * config/arc/arc.opt (mtune): Add core3 option.
+ 2018-01-26  Claudiu Zissulescu  <claziss@synopsys.com>
+       * config/arc/arc.c (arc_delegitimize_address_0): Refactored to
+       recognize new pic like addresses.
+       (arc_delegitimize_address): Clean up.
+ 2018-01-26  Claudiu Zissulescu  <claziss@synopsys.com>
+       * config/arc/arc-arches.def: Option mrf16 valid for all
+       architectures.
+       * config/arc/arc-c.def (__ARC_RF16__): New predefined macro.
+       * config/arc/arc-cpus.def (em_mini): New cpu with rf16 on.
+       * config/arc/arc-options.def (FL_RF16): Add mrf16 option.
+       * config/arc/arc-tables.opt: Regenerate.
+       * config/arc/arc.c (arc_conditional_register_usage): Handle
+       reduced register file case.
+       (arc_file_start): Set must have build attributes.
+       * config/arc/arc.h (MAX_ARC_PARM_REGS): Conditional define using
+       mrf16 option value.
+       * config/arc/arc.opt (mrf16): Add new option.
+       * config/arc/elf.h (ATTRIBUTE_PCS): Define.
+       * config/arc/genmultilib.awk: Handle new mrf16 option.
+       * config/arc/linux.h (ATTRIBUTE_PCS): Define.
+       * config/arc/t-multilib: Regenerate.
+       * doc/invoke.texi (ARC Options): Document mrf16 option.
+ 2018-01-26  Claudiu Zissulescu  <claziss@synopsys.com>
+       * config/arc/arc-protos.h: Add arc_is_secure_call_p proto.
+       * config/arc/arc.c (arc_handle_secure_attribute): New function.
+       (arc_attribute_table): Add 'secure_call' attribute.
+       (arc_print_operand): Print secure call operand.
+       (arc_function_ok_for_sibcall): Don't optimize tail calls when
+       secure.
+       (arc_is_secure_call_p): New function.  * config/arc/arc.md
+       (call_i): Add support for sjli instruction.
+       (call_value_i): Likewise.
+       * config/arc/constraints.md (Csc): New constraint.
+ 2018-01-26  Claudiu Zissulescu  <claziss@synopsys.com>
+           John Eric Martin  <John.Martin@emmicro-us.com>
+       * config/arc/arc-protos.h: Add arc_is_jli_call_p proto.
+       * config/arc/arc.c (_arc_jli_section): New struct.
+       (arc_jli_section): New type.
+       (arc_jli_sections): New static variable.
+       (arc_handle_jli_attribute): New function.
+       (arc_attribute_table): Add jli_always and jli_fixed attribute.
+       (arc_file_end): New function.
+       (TARGET_ASM_FILE_END): Define.
+       (arc_print_operand): Reuse 'S' letter for JLI output instruction.
+       (arc_add_jli_section): New function.
+       (jli_call_scan): Likewise.
+       (arc_reorg): Call jli_call_scan.
+       (arc_output_addsi): Remove 'S' from printing asm operand.
+       (arc_is_jli_call_p): New function.
+       * config/arc/arc.md (movqi_insn): Remove 'S' from printing asm
+       operand.
+       (movhi_insn): Likewise.
+       (movsi_insn): Likewise.
+       (movsi_set_cc_insn): Likewise.
+       (loadqi_update): Likewise.
+       (load_zeroextendqisi_update): Likewise.
+       (load_signextendqisi_update): Likewise.
+       (loadhi_update): Likewise.
+       (load_zeroextendhisi_update): Likewise.
+       (load_signextendhisi_update): Likewise.
+       (loadsi_update): Likewise.
+       (loadsf_update): Likewise.
+       (movsicc_insn): Likewise.
+       (bset_insn): Likewise.
+       (bxor_insn): Likewise.
+       (bclr_insn): Likewise.
+       (bmsk_insn): Likewise.
+       (bicsi3_insn): Likewise.
+       (cmpsi_cc_c_insn): Likewise.
+       (movsi_ne): Likewise.
+       (movsi_cond_exec): Likewise.
+       (clrsbsi2): Likewise.
+       (norm_f): Likewise.
+       (normw): Likewise.
+       (swap): Likewise.
+       (divaw): Likewise.
+       (flag): Likewise.
+       (sr): Likewise.
+       (kflag): Likewise.
+       (ffs): Likewise.
+       (ffs_f): Likewise.
+       (fls): Likewise.
+       (call_i): Remove 'S' asm letter, add jli instruction.
+       (call_value_i): Likewise.
+       * config/arc/arc.opt (mjli-always): New option.
+       * config/arc/constraints.md (Cji): New constraint.
+       * config/arc/fpx.md (addsf3_fpx): Remove 'S' from printing asm
+       operand.
+       (subsf3_fpx): Likewise.
+       (mulsf3_fpx): Likewise.
+       * config/arc/simdext.md (vendrec_insn): Remove 'S' from printing
+       asm operand.
+       * doc/extend.texi (ARC): Document 'jli-always' and 'jli-fixed'
+       function attributes.
+       * doc/invoke.texi (ARC): Document mjli-always option.
+ 2018-01-26  Sebastian Perta  <sebastian.perta@renesas.com>
+       * config/rl78/rl78.c (rl78_addsi3_internal): If operand 2 is const 
+       avoid addition with 0 and use incw and decw where possible.
+ 2018-01-26  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/81082
+       * fold-const.c (fold_plusminus_mult_expr): Do not perform the
+       association if it requires casting to unsigned.
+       * match.pd ((A * C) +- (B * C) -> (A+-B)): New patterns derived
+       from fold_plusminus_mult_expr to catch important cases late when
+       range info is available.
+ 2018-01-26  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
+       * config/i386/sol2.h (USE_HIDDEN_LINKONCE): Remove.
+       * configure.ac (hidden_linkonce): New test.
+       * configure: Regenerate.
+       * config.in: Regenerate.
  
-       PR middle-end/70140
-       * builtins.c (expand_builtin_memcpy_args): Remove.
-       (expand_builtin_memcpy): Call newly added function
-       expand_builtin_memory_copy_args.
-       (expand_builtin_memcpy_with_bounds): Likewise.
-       (expand_builtin_mempcpy): Remove last argument.
-       (expand_builtin_mempcpy_with_bounds): Likewise.
-       (expand_builtin_memory_copy_args): New function created from
-       expand_builtin_mempcpy_args with small modifications.
-       (expand_builtin_mempcpy_args): Remove.
-       (expand_builtin_stpcpy): Remove unused argument.
-       (expand_builtin): Likewise.
-       (expand_builtin_with_bounds): Likewise.
+ 2018-01-26  Julia Koval  <julia.koval@intel.com>
  
- 2017-08-01  Martin Liska  <mliska@suse.cz>
+       * config/i386/avx512bitalgintrin.h (_mm512_bitshuffle_epi64_mask,
+       _mm512_mask_bitshuffle_epi64_mask, _mm256_bitshuffle_epi64_mask,
+       _mm256_mask_bitshuffle_epi64_mask, _mm_bitshuffle_epi64_mask,
+       _mm_mask_bitshuffle_epi64_mask): Fix type.
+       * config/i386/i386-builtin-types.def (UHI_FTYPE_V2DI_V2DI_UHI,
+       USI_FTYPE_V4DI_V4DI_USI): Remove.
+       * config/i386/i386-builtin.def (__builtin_ia32_vpshufbitqmb512_mask,
+       __builtin_ia32_vpshufbitqmb256_mask,
+       __builtin_ia32_vpshufbitqmb128_mask): Fix types.
+       * config/i386/i386.c (ix86_expand_args_builtin): Remove old types.
+       * config/i386/sse.md (VI1_AVX512VLBW): Change types.
  
-       Revert r250771
-       Make mempcpy more optimal (PR middle-end/70140).
+ 2018-01-26  Alan Modra  <amodra@gmail.com>
  
- 2017-08-01  Jakub Jelinek  <jakub@redhat.com>
+       PR target/84033
+       * config/rs6000/rs6000-p8swap.c (rtx_is_swappable_p): Exclude
+       UNSPEC_VBPERMQ.  Sort other unspecs.
  
-       PR target/81622
-       * config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin): For
-       __builtin_vec_cmpne verify both arguments are compatible vectors
-       before looking at TYPE_MODE on the element type.  For __builtin_vec_ld
-       verify arg1_type is a pointer or array type.  For __builtin_vec_st,
-       move computation of aligned to after checking the argument types.
-       Formatting fixes.
+ 2018-01-25  David Edelsohn  <dje.gcc@gmail.com>
  
-       PR target/80846
-       * config/rs6000/vsx.md (vextract_fp_from_shorth,
-       vextract_fp_from_shortl): Add element mode after mode in gen_vec_init*
-       calls.
- 2017-08-01  Jerome Lambourg  <lambourg@adacore.com>
-           Doug Rupp  <rupp@adacore.com>
-           Olivier Hainque  <hainque@adacore.com>
-       * config.gcc (arm-wrs-vxworks*): Rework to handle arm-wrs-vxworks7 as
-       well as arm-wrs-vxworks. Update target_cpu_name from arm6 (arch v3) to
-       arm8 (arch v4).
-       * config/arm/vxworks.h (MAYBE_TARGET_BPABI_CPP_BUILTINS): New, helper
-       for TARGET_OS_CPP_BUILTIN.
-       (TARGET_OS_CPP_BUILTIN): Invoke MAYBE_TARGET_BPABI_CPP_BUILTINS(),
-       refine CPU definitions for arm_arch5 and add those for arm_arch6 and
-       arm_arch7.
-       (MAYBE_ASM_ABI_SPEC): New, helper for SUBTARGET_EXTRA_ASM_SPEC,
-       passing required abi options to the assembler for EABI configurations.
-       (EXTRA_CC1_SPEC): New macro, to help prevent the implicit production
-       of .text.hot and .text.unlikely sections for kernel modules when
-       using ARM style exceptions.
-       (CC1_SPEC): Remove obsolete attempt at mimicking Diab toolchain
-       options. Add EXTRA_CC1_SPEC.
-       (VXWORKS_ENDIAN_SPEC): Adjust comment and remove handling of Diab
-       toolchain options.
-       (DWARF2_UNWIND_INFO): Redefine to handle the pre/post VxWorks 7
-       transition.
-       (ARM_TARGET2_DWARF_FORMAT): Define.
-       * config/arm/t-vxworks: Adjust multilib control to removal of the
-       Diab command line options.
- 2017-08-01  Martin Liska  <mliska@suse.cz>
-       PR gcov-profile/81561
-       * gcov.c (unblock): Make unblocking safe as we need to preserve
-       index correspondence of blocks and block_lists.
- 2017-08-01  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/81181
-       * tree-ssa-pre.c (compute_antic_aux): Defer clean() to ...
-       (compute_antic): ... end of iteration here.
- 2017-08-01  James Greenhalgh  <james.greenhalgh@arm.com>
-       * common.opt (ftree-vectorize): No longer set flag_tree_vectorize.
-       (ftree-loop-vectorize): Set as EnabledBy ftree-vectorize.
-       (ftree-slp-vectorize): Likewise.
-       * omp-expand (expand_omp_simd): Remove flag_tree_vectorize, as it
-       can no longer be set independent of flag_tree_loop_vectorize.
-       * omp-general.c (emp_max_vf): Likewise.
-       * opts.c (enable_fdo_optimizations): Remove references to
-       flag_tree_vectorize, these are now implicit.
-       (common_handle_option): Remove handling for OPT_ftree_vectorize,
-       and leave it for the options machinery.
- 2017-08-01  Martin Liska  <mliska@suse.cz>
-       PR middle-end/70140
-       * builtins.c (expand_builtin_memcpy_args): Remove.
-       (expand_builtin_memcpy): Call newly added function
-       expand_builtin_memory_copy_args.
-       (expand_builtin_memcpy_with_bounds): Likewise.
-       (expand_builtin_mempcpy): Remove last argument.
-       (expand_builtin_mempcpy_with_bounds): Likewise.
-       (expand_builtin_memory_copy_args): New function created from
-       expand_builtin_mempcpy_args with small modifications.
-       (expand_builtin_mempcpy_args): Remove.
-       (expand_builtin_stpcpy): Remove unused argument.
-       (expand_builtin): Likewise.
-       (expand_builtin_with_bounds): Likewise.
- 2017-08-01  Uros Bizjak  <ubizjak@gmail.com>
-       PR target/81641
-       * config/i386/i386.c (ix86_print_operand_address_as): For -masm=intel
-       print "ds:" only for immediates in generic address space.
- 2017-08-01  Uros Bizjak  <ubizjak@gmail.com>
-       PR target/81639
-       * config/i386/i386.c (ix86_funciton_naked): New prototype.
-       (ix86_function_ok_for_sibcall): Return false for naked functions.
- 2017-08-01  Richard Biener  <rguenther@suse.de>
-       * tree-ssa-pre.c (print_pre_expr): Handle NULL expr.
-       (compute_antic): Seed worklist with exit block predecessors.
-       * cfganal.c (dfs_find_deadend): For a cycle return the source
-       of the edge closing it.
- 2017-08-01  Tamar Christina  <tamar.christina@arm.com>
-       * config/aarch64/aarch64.c
-       (aarch64_can_const_movi_rtx_p): Move 0 check.
- 2017-08-01  Bin Cheng  <bin.cheng@arm.com>
-       * tree.h (POINTER_TYPE_OVERFLOW_UNDEFINED): Delete.
-       * fold-const.c (fold_comparison, fold_binary_loc): Delete use of
-       above macro.
-       * match.pd: Ditto in address comparison pattern.
- 2017-08-01  Bin Cheng  <bin.cheng@arm.com>
-       PR tree-optimization/81627
-       * tree-predcom.c (prepare_finalizers): Always rewrite into loop
-       closed ssa form for store-store chain.
- 2017-08-01  Bin Cheng  <bin.cheng@arm.com>
-       PR tree-optimization/81620
-       * tree-predcom.c (add_ref_to_chain): Don't set has_max_use_after
-       for store-store chain.
- 2017-08-01  Jakub Jelinek  <jakub@redhat.com>
-       PR tree-optimization/81588
-       * tree-ssa-reassoc.c (optimize_range_tests_var_bound): If
-       ranges[i].in_p, invert comparison code ccode.  For >/>=,
-       swap rhs1 and rhs2 and comparison code unconditionally,
-       for </<= don't do that.  Don't swap rhs1/rhs2 again if
-       ranges[i].in_p, instead invert comparison code ccode if
-       opcode or oe->rank is BIT_IOR_EXPR.
-       PR target/80846
-       * optabs.def (vec_extract_optab, vec_init_optab): Change from
-       a direct optab to conversion optab.
-       * optabs.c (expand_vector_broadcast): Use convert_optab_handler
-       with GET_MODE_INNER as last argument instead of optab_handler.
-       * expmed.c (extract_bit_field_1): Likewise.  Use vector from
-       vector extraction if possible and optab is available.
-       * expr.c (store_constructor): Use convert_optab_handler instead
-       of optab_handler.  Use vector initialization from smaller
-       vectors if possible and optab is available.
-       * tree-vect-stmts.c (vectorizable_load): Likewise.
-       * doc/md.texi (vec_extract, vec_init): Document that the optabs
-       now have two modes.
-       * config/i386/i386.c (ix86_expand_vector_init): Handle expansion
-       of vec_init from half-sized vectors with the same element mode.
-       * config/i386/sse.md (ssehalfvecmode): Add V4TI case.
-       (ssehalfvecmodelower, ssescalarmodelower): New mode attributes.
-       (reduc_plus_scal_v8df, reduc_plus_scal_v4df, reduc_plus_scal_v2df,
-       reduc_plus_scal_v16sf, reduc_plus_scal_v8sf, reduc_plus_scal_v4sf,
-       reduc_<code>_scal_<mode>, reduc_umin_scal_v8hi): Add element mode
-       after mode in gen_vec_extract* calls.
-       (vec_extract<mode>): Renamed to ...
-       (vec_extract<mode><ssescalarmodelower>): ... this.
-       (vec_extract<mode><ssehalfvecmodelower>): New expander.
-       (rotl<mode>3, rotr<mode>3, <shift_insn><mode>3, ashrv2di3): Add
-       element mode after mode in gen_vec_init* calls.
-       (VEC_INIT_HALF_MODE): New mode iterator.
-       (vec_init<mode>): Renamed to ...
-       (vec_init<mode><ssescalarmodelower>): ... this.
-       (vec_init<mode><ssehalfvecmodelower>): New expander.
-       * config/i386/mmx.md (vec_extractv2sf): Renamed to ...
-       (vec_extractv2sfsf): ... this.
-       (vec_initv2sf): Renamed to ...
-       (vec_initv2sfsf): ... this.
-       (vec_extractv2si): Renamed to ...
-       (vec_extractv2sisi): ... this.
-       (vec_initv2si): Renamed to ...
-       (vec_initv2sisi): ... this.
-       (vec_extractv4hi): Renamed to ...
-       (vec_extractv4hihi): ... this.
-       (vec_initv4hi): Renamed to ...
-       (vec_initv4hihi): ... this.
-       (vec_extractv8qi): Renamed to ...
-       (vec_extractv8qiqi): ... this.
-       (vec_initv8qi): Renamed to ...
-       (vec_initv8qiqi): ... this.
-       * config/rs6000/vector.md (VEC_base_l): New mode attribute.
-       (vec_init<mode>): Renamed to ...
-       (vec_init<mode><VEC_base_l>): ... this.
-       (vec_extract<mode>): Renamed to ...
-       (vec_extract<mode><VEC_base_l>): ... this.
-       * config/rs6000/paired.md (vec_initv2sf): Renamed to ...
-       (vec_initv2sfsf): ... this.
-       * config/rs6000/altivec.md (splitter, altivec_copysign_v4sf3,
-       vec_unpacku_hi_v16qi, vec_unpacku_hi_v8hi, vec_unpacku_lo_v16qi,
-       vec_unpacku_lo_v8hi, mulv16qi3, altivec_vreve<mode>2): Add
-       element mode after mode in gen_vec_init* calls.
-       * config/aarch64/aarch64-simd.md (vec_init<mode>): Renamed to ...
-       (vec_init<mode><Vel>): ... this.
-       (vec_extract<mode>): Renamed to ...
-       (vec_extract<mode><Vel>): ... this.
-       * config/aarch64/iterators.md (Vel): New mode attribute.
-       * config/s390/s390.c (s390_expand_vec_strlen, s390_expand_vec_movstr):
-       Add element mode after mode in gen_vec_extract* calls.
-       * config/s390/vector.md (non_vec_l): New mode attribute.
-       (vec_extract<mode>): Renamed to ...
-       (vec_extract<mode><non_vec_l>): ... this.
-       (vec_init<mode>): Renamed to ...
-       (vec_init<mode><non_vec_l>): ... this.
-       * config/s390/s390-builtins.def (s390_vlgvb, s390_vlgvh, s390_vlgvf,
-       s390_vlgvf_flt, s390_vlgvg, s390_vlgvg_dbl): Add element mode after
-       vec_extract mode.
-       * config/arm/iterators.md (V_elem_l): New mode attribute.
-       * config/arm/neon.md (vec_extract<mode>): Renamed to ...
-       (vec_extract<mode><V_elem_l>): ... this.
-       (vec_extractv2di): Renamed to ...
-       (vec_extractv2didi): ... this.
-       (vec_init<mode>): Renamed to ...
-       (vec_init<mode><V_elem_l>): ... this.
-       (reduc_plus_scal_<mode>, reduc_plus_scal_v2di, reduc_smin_scal_<mode>,
-       reduc_smax_scal_<mode>, reduc_umin_scal_<mode>,
-       reduc_umax_scal_<mode>, neon_vget_lane<mode>, neon_vget_laneu<mode>):
-       Add element mode after gen_vec_extract* calls.
-       * config/mips/mips-msa.md (vec_init<mode>): Renamed to ...
-       (vec_init<mode><unitmode>): ... this.
-       (vec_extract<mode>): Renamed to ...
-       (vec_extract<mode><unitmode>): ... this.
-       * config/mips/loongson.md (vec_init<mode>): Renamed to ...
-       (vec_init<mode><unitmode>): ... this.
-       * config/mips/mips-ps-3d.md (vec_initv2sf): Renamed to ...
-       (vec_initv2sfsf): ... this.
-       (vec_extractv2sf): Renamed to ...
-       (vec_extractv2sfsf): ... this.
-       (reduc_plus_scal_v2sf, reduc_smin_scal_v2sf, reduc_smax_scal_v2sf):
-       Add element mode after gen_vec_extract* calls.
-       * config/mips/mips.md (unitmode): New mode iterator.
-       * config/spu/spu.c (spu_expand_prologue, spu_allocate_stack,
-       spu_builtin_extract): Add element mode after gen_vec_extract* calls.
-       * config/spu/spu.md (inner_l): New mode attribute.
-       (vec_init<mode>): Renamed to ...
-       (vec_init<mode><inner_l>): ... this.
-       (vec_extract<mode>): Renamed to ...
-       (vec_extract<mode><inner_l>): ... this.
-       * config/sparc/sparc.md (veltmode): New mode iterator.
-       (vec_init<VMALL:mode>): Renamed to ...
-       (vec_init<VMALL:mode><VMALL:veltmode>): ... this.
-       * config/ia64/vect.md (vec_initv2si): Renamed to ...
-       (vec_initv2sisi): ... this.
-       (vec_initv2sf): Renamed to ...
-       (vec_initv2sfsf): ... this.
-       (vec_extractv2sf): Renamed to ...
-       (vec_extractv2sfsf): ... this.
-       * config/powerpcspe/vector.md (VEC_base_l): New mode attribute.
-       (vec_init<mode>): Renamed to ...
-       (vec_init<mode><VEC_base_l>): ... this.
-       (vec_extract<mode>): Renamed to ...
-       (vec_extract<mode><VEC_base_l>): ... this.
-       * config/powerpcspe/paired.md (vec_initv2sf): Renamed to ...
-       (vec_initv2sfsf): ... this.
-       * config/powerpcspe/altivec.md (splitter, altivec_copysign_v4sf3,
-       vec_unpacku_hi_v16qi, vec_unpacku_hi_v8hi, vec_unpacku_lo_v16qi,
-       vec_unpacku_lo_v8hi, mulv16qi3): Add element mode after mode in
-       gen_vec_init* calls.
- 2017-08-01  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/81297
-       * tree-vrp.c (get_single_symbol): Remove assert, instead drop
-       TREE_OVERFLOW from INTEGER_CSTs.
- 2017-07-31  Segher Boessenkool  <segher@kernel.crashing.org>
-       * config/rs6000/rs6000.c (enum rs6000_reg_type): Delete trailing comma.
- 2017-07-31  Carl Love  <cel@us.ibm.com>
-       * config/rs6000/rs6000-c: Add support for built-in functions
-       vector signed char vec_xl_be (signed long long, signed char *);
-       vector unsigned char vec_xl_be (signed long long, unsigned char *);
-       vector signed int vec_xl_be (signed long long, signed int *);
-       vector unsigned int vec_xl_be (signed long long, unsigned int *);
-       vector signed long long vec_xl_be (signed long long, signed long long *);
-       vector unsigned long long vec_xl_be (signed long long, unsigned long long *);
-       vector signed short vec_xl_be (signed long long, signed short *);
-       vector unsigned short vec_xl_be (signed long long, unsigned short *);
-       vector double vec_xl_be (signed long long, double *);
-       vector float vec_xl_be (signed long long, float *);
-       * config/rs6000/altivec.h (vec_xl_be): Add #define.
-       * config/rs6000/rs6000-builtin.def (XL_BE_V16QI, XL_BE_V8HI, XL_BE_V4SI)
-       XL_BE_V2DI, XL_BE_V4SF, XL_BE_V2DF, XL_BE): Add definitions
-       for the builtins.
-       * config/rs6000/rs6000.c (altivec_expand_xl_be_builtin): Add function.
-       (altivec_expand_builtin): Add switch statement to call
-       altivec_expand_xl_be for each builtin.
-       (altivec_init_builtins): Add def_builtin for _builtin_vsx_le_be_v8hi,
-       __builtin_vsx_le_be_v4si, __builtin_vsx_le_be_v2di,
-       __builtin_vsx_le_be_v4sf, __builtin_vsx_le_be_v2df,
-       __builtin_vsx_le_be_v16qi.
-       * doc/extend.texi: Update the built-in documentation file for the
-       new built-in functions.
- 2017-07-31  Uros Bizjak  <ubizjak@gmail.com>
-       PR target/25967
-       * config/i386/i386.c (ix86_allocate_stack_slots_for_args):
-       New function.
-       (TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS): Define.
- 2017-07-31  Andreas Krebbel  <krebbel@linux.vnet.ibm.com>
-       * config.gcc: Add z14.
-       * config/s390/driver-native.c (s390_host_detect_local_cpu): Add
-       CPU model numbers for z13s and z14.
-       * config/s390/s390-c.c (s390_resolve_overloaded_builtin): Replace
-       arch12 with z14.
-       * config/s390/s390-opts.h (enum processor_type): Rename
-       PROCESSOR_ARCH12 to PROCESSOR_3906_Z14.
-       * config/s390/s390.c (processor_table): Add field for CPU name to
-       be passed to Binutils.
-       (s390_asm_output_machine_for_arch): Use the new field in
-       processor_table for Binutils.
-       (s390_expand_builtin): Replace arch12 with z14.
-       (s390_issue_rate): Rename PROCESSOR_ARCH12 to PROCESSOR_3906_Z14.
-       (s390_get_sched_attrmask): Likewise.
-       (s390_get_unit_mask): Likewise.
-       * config/s390/s390.opt: Add z14 to processor_type enum.
- 2017-07-31  Martin Jambor  <mjambor@suse.cz>
-       PR hsa/81477
-       * ipa-fnsummary.c (ipa_fn_summary_generate): Set versionable
-       regardless of optimization level.
- 2017-07-31  Jan Hubicka <hubicka@ucw.cz>
-           Martin Liska  <mliska@suse.cz>
-       * predict.def: Remove old comment and adjust probability.
-       * gimplify.c (should_warn_for_implicit_fallthrough): Ignore
-       PREDICT statements.
- 2017-07-31  Uros Bizjak  <ubizjak@gmail.com>
-       PR target/25967
-       * config/i386/i386.c (ix86_function_naked): New function.
-       (ix86_can_use_return_insn_p): Return false for naked functions.
-       (ix86_expand_prologue): Skip prologue for naked functions.
-       (ix86_expand_epilogue): Skip epilogue for naked functions
-       and emit trap instruction.
-       (ix86_warn_func_return): New function.
-       (ix86_attribute_table): Add "naked" attribute specification.
-       (TARGET_WARN_FUNC_RETURN): Define.
-       * doc/extend.texi (x86 Function Attributes) <naked>: Document it.
- 2017-07-31  Martin Liska  <mliska@suse.cz>
-       * gimple-pretty-print.c (dump_gimple_label): Never dump BB info.
-       (dump_gimple_bb_header): Always dump BB info.
-       (pp_cfg_jump): Do not append info about BB when dumping a jump.
- 2017-07-31  Martin Liska  <mliska@suse.cz>
-       PR sanitize/81530
-       * convert.c (convert_to_integer_1): Guard condition with flag_sanitize_p
-       also with current_function_decl non-null equality.
- 2017-07-31  Jakub Jelinek  <jakub@redhat.com>
-       PR sanitizer/81604
-       * ubsan.c (ubsan_type_descriptor): For UBSAN_PRINT_ARRAY don't
-       change type to the element type, instead add eltype variable and
-       use it where we are interested in the element type.
-       PR tree-optimization/81603
-       * ipa-polymorphic-call.c
-       (ipa_polymorphic_call_context::ipa_polymorphic_call_context): Perform
-       offset arithmetic in offset_int, bail out if the resulting bit offset
-       doesn't fit into shwi.
- 2017-07-31  Martin Liska  <mliska@suse.cz>
-       * gimplify.c (mostly_copy_tree_r): Remove Java specific hunk.
-       (gimplify_save_expr): Fix comment.
- 2017-07-30  H.J. Lu  <hongjiu.lu@intel.com>
-       PR target/79793
-       * config/i386/i386.c (ix86_function_arg): Update arguments for
-       exception handler.
-       (ix86_compute_frame_layout): Set the initial stack offset to
-       INCOMING_FRAME_SP_OFFSET.  Update red-zone offset with
-       INCOMING_FRAME_SP_OFFSET.
-       (ix86_expand_epilogue): Don't pop the 'ERROR_CODE' off the
-       stack before exception handler returns.
-       * config/i386/i386.h (INCOMING_FRAME_SP_OFFSET): Add the
-       the 'ERROR_CODE' for exception handler.
+       * doc/invoke.texi (PowerPC Options): Document 'native' cpu type.
  
- 2017-07-30  Uros Bizjak  <ubizjak@gmail.com>
-       * config/i386/i386.h (ASM_PRINTF_EXTENSIONS): New macro.
-       (ASM_OUTPUT_REG_PUSH): Rewrite with new operand modifiers.
-       (ASM_OUTPUT_REG_POP): Ditto.
-       * config/i386/i386.c (ix86_asm_output_function_label): Use fputs
-       instead of asm_fprintf to output pure string.
+ 2018-01-25  Jan Hubicka  <hubicka@ucw.cz>
  
- 2017-07-29  Jakub Jelinek  <jakub@redhat.com>
+       PR middle-end/83055
+       * predict.c (drop_profile): Do not push/pop cfun; update also
+       node->count.
+       (handle_missing_profiles): Fix logic looking for zero profiles.
  
-       * debug.h (struct gcc_debug_hooks): Add IMPLICIT argument
-       to imported_module_or_decl hook.
-       (debug_nothing_tree_tree_tree_bool): Remove.
-       (debug_nothing_tree_tree_tree_bool_bool): New declaration.
-       * debug.c (do_nothing_debug_hooks): Use
-       debug_nothing_tree_tree_tree_bool_bool instead of
-       debug_nothing_tree_tree_tree_bool.
-       * vmsdbgout.c (vmsdbg_debug_hooks): Likewise.
-       * dbxout.c (dbx_debug_hooks, xcoff_debug_hooks): Likewise.
-       * sdbout.c (sdb_debug_hooks): Likewise.
-       * dwarf2out.c (dwarf2_lineno_debug_hooks): Likewise.
-       (gen_namespace_die): Add DW_AT_export_symbols attribute if
-       langhook wants it.
-       (dwarf2out_imported_module_or_decl): Add IMPLICIT argument,
-       if true, -gdwarf-5 and decl will have DW_AT_export_symbols
-       attribute, don't add anything.
+ 2018-01-25  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       PR middle-end/83977
+       * ipa-fnsummary.c (compute_fn_summary): Clear can_change_signature
+       on functions with #pragma omp declare simd or functions with simd
+       attribute.
+       * omp-simd-clone.c (expand_simd_clones): Revert 2018-01-24 change.
+       * config/i386/i386.c (ix86_simd_clone_compute_vecsize_and_simdlen):
+       Remove trailing \n from warning_at calls.
  
-       * fold-const.c (fold_build1_stat_loc): Remove _stat from name.
-       (fold_build2_stat_loc): Likewise.
-       (fold_build3_stat_loc): Likewise.
-       * fold-const.h (fold_build1, fold_build2, fold_build3): Adjust.
-       (fold_build1_loc): Remove macro.
-       (fold_build2_loc): Likewise.
-       (fold_build3_loc): Likewise.
+ 2018-01-25  Tom de Vries  <tom@codesourcery.com>
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       PR target/84028
+       * config/nvptx/nvptx.c (nvptx_single): Add exit insn after noreturn call
+       for neutered workers.
  
-       * gimple.c (gimple_build_debug_bind_stat): Remove _stat from name.
-       (gimple_build_debug_bind_source_stat): Likewise.
-       * gimple.h (gimple_build_debug_bind): Remove macro.
-       (gimple_build_debug_bind_source): Likewise.
+ 2018-01-24  Joseph Myers  <joseph@codesourcery.com>
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       PR target/68467
+       * config/m68k/m68k.c (m68k_promote_function_mode): New function.
+       (TARGET_PROMOTE_FUNCTION_MODE): New macro.
  
-       * bitmap.c (bitmap_alloc): Adjust.
-       (bitmap_gc_alloc): Likewise.
-       * bitmap.h (bitmap_initialize_stat): Remove _stat from name.
+ 2018-01-24  Jeff Law  <law@redhat.com>
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       PR target/83994
+       * i386.c (get_probe_interval): Move to earlier point.
+       (ix86_compute_frame_layout): If -fstack-clash-protection and
+       the frame is larger than the probe interval, then use pushes
+       to save registers rather than reg->mem moves.
+       (ix86_expand_prologue): Remove conditional for int_registers_saved
+       assertion.
  
-       * bitmap.c (bitmap_obstack_alloc_stat): Rename to bitmap_alloc.
-       (bitmap_gc_alloc_stat): Rename to bitmap_gc_alloc.
-       * bitmap.h (bitmap_obstack_alloc_stat): Adjust prototype.
-       (bitmap_gc_alloc_stat): Likewise.
-       (BITMAP_ALLOC, BITMAP_GGC_ALLOC): Adjust.
+ 2018-01-24  Vladimir Makarov  <vmakarov@redhat.com>
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       PR target/84014
+       * ira-build.c (setup_min_max_allocno_live_range_point): Set up
+       min/max for never referenced object.
  
-       * rtl.c (shallow_copy_rtx_stat): Remove _stat from name.
-       * rtl.h (shallow_copy_rtx): Remove macro.
+ 2018-01-24  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       PR middle-end/83977
+       * tree.c (free_lang_data_in_decl): Don't clear DECL_ABSTRACT_ORIGIN
+       here.
+       * omp-low.c (create_omp_child_function): Remove "omp declare simd"
+       attributes from DECL_ATTRIBUTES (decl) without affecting
+       DECL_ATTRIBUTES (current_function_decl).
+       * omp-simd-clone.c (expand_simd_clones): Ignore DECL_ARTIFICIAL
+       functions with non-NULL DECL_ABSTRACT_ORIGIN.
+ 2018-01-24  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR tree-optimization/83979
+       * fold-const.c (fold_comparison): Use constant_boolean_node
+       instead of boolean_{true,false}_node.
+ 2018-01-24  Jan Hubicka  <hubicka@ucw.cz>
+       * ipa-profile.c (ipa_propagate_frequency_1): Fix logic skipping calls
+       with zero counts.
+ 2018-01-24  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+       * config/rs6000/rs6000.md (*call_indirect_nonlocal_sysv<mode>):
+       Simplify the clause that sets the length attribute.
+       (*call_value_indirect_nonlocal_sysv<mode>): Likewise.
+       (*sibcall_nonlocal_sysv<mode>): Clean up code block; simplify the
+       clause that sets the length attribute.
+       (*sibcall_value_nonlocal_sysv<mode>): Likewise.
+ 2018-01-24  Tom de Vries  <tom@codesourcery.com>
+       PR target/83589
+       * config/nvptx/nvptx.c (WORKAROUND_PTXJIT_BUG_2): Define to 1.
+       (nvptx_pc_set, nvptx_condjump_label): New function. Copy from jump.c.
+       Add strict parameter.
+       (prevent_branch_around_nothing): Insert dummy insn between branch to
+       label and label with no ptx insn in between.
+       * config/nvptx/nvptx.md (define_insn "fake_nop"): New insn.
+ 2018-01-24  Tom de Vries  <tom@codesourcery.com>
+       PR target/81352
+       * config/nvptx/nvptx.c (nvptx_single): Add exit insn after noreturn call
+       for neutered threads in warp.
+       * config/nvptx/nvptx.md (define_insn "exit"): New insn.
+ 2018-01-24  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/83176
+       * tree-chrec.c (chrec_fold_plus_1): Handle (signed T){(T) .. }
+       operands.
+ 2018-01-24  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/82819
+       * graphite-isl-ast-to-gimple.c (binary_op_to_tree): Avoid
+       code generating pluses that are no-ops in the target precision.
+ 2018-01-24  Richard Biener  <rguenther@suse.de>
+       PR middle-end/84000
+       * tree-cfg.c (replace_loop_annotate): Handle annot_expr_parallel_kind.
+ 2018-01-23  Jan Hubicka  <hubicka@ucw.cz>
+       * cfgcleanup.c (try_crossjump_to_edge): Use combine_with_count
+       to merge probabilities.
+       * predict.c (probably_never_executed): Also mark as cold functions
+       with global 0 profile and guessed local profile.
+       * profile-count.c (profile_probability::combine_with_count): New
+       member function.
+       * profile-count.h (profile_probability::operator*,
+       profile_probability::operator*=, profile_probability::operator/,
+       profile_probability::operator/=): Reduce precision to adjusted
+       and set value to guessed on contradictory divisions.
+       (profile_probability::combine_with_freq): Remove.
+       (profile_probability::combine_with_count): Declare.
+       (profile_count::force_nonzero): Set to adjusted.
+       (profile_count::probability_in): Set quality to adjusted.
+       * tree-ssa-tail-merge.c (replace_block_by): Use
+       combine_with_count.
+ 2018-01-23  Andrew Waterman  <andrew@sifive.com>
+           Jim Wilson  <jimw@sifive.com>
+       * config/riscv/riscv.c (riscv_stack_boundary): New.
+       (riscv_option_override): Set riscv_stack_boundary.  Handle
+       riscv_preferred_stack_boundary_arg.
+       * config/riscv/riscv.h (MIN_STACK_BOUNDARY, ABI_STACK_BOUNDARY): New.
+       (BIGGEST_ALIGNMENT): Set to STACK_BOUNDARY.
+       (STACK_BOUNDARY): Set to riscv_stack_boundary.
+       (RISCV_STACK_ALIGN): Use STACK_BOUNDARY.
+       * config/riscv/riscv.opt (mpreferred-stack-boundary): New.
+       * doc/invoke.texi (RISC-V Options): Add -mpreferred-stack-boundary.
+ 2018-01-23  H.J. Lu  <hongjiu.lu@intel.com>
+       PR target/83905
+       * config/i386/i386.c (ix86_expand_prologue): Use const reference
+       of struct ix86_frame.
+       (ix86_expand_epilogue): Likewise.  Add a local variable for
+       the reg_save_offset field in struct ix86_frame.
+ 2018-01-23  Bin Cheng  <bin.cheng@arm.com>
+       PR tree-optimization/82604
+       * tree-loop-distribution.c (enum partition_kind): New enum item
+       PKIND_PARTIAL_MEMSET.
+       (partition_builtin_p): Support above new enum item.
+       (generate_code_for_partition): Ditto.
+       (compute_access_range): Differentiate cases that equality can be
+       proven at all loops, the innermost loops or no loops.
+       (classify_builtin_st, classify_builtin_ldst): Adjust call to above
+       function.  Set PKIND_PARTIAL_MEMSET for partition appropriately.
+       (finalize_partitions, distribute_loop): Don't fuse partition of
+       PKIND_PARTIAL_MEMSET kind when distributing 3-level loop nest.
+       (prepare_perfect_loop_nest): Distribute 3-level loop nest only if
+       parloop is enabled.
+ 2018-01-23  Martin Liska  <mliska@suse.cz>
+       * predict.def (PRED_INDIR_CALL): Set probability to PROB_EVEN in
+       order to ignore the predictor.
+       (PRED_POLYMORPHIC_CALL): Likewise.
+       (PRED_RECURSIVE_CALL): Likewise.
+ 2018-01-23  Martin Liska  <mliska@suse.cz>
+       * tree-profile.c (tree_profiling): Print function header to
+       aware reader which function we are working on.
+       * value-prof.c (gimple_find_values_to_profile): Do not print
+       not interesting value histograms.
+ 2018-01-23  Martin Liska  <mliska@suse.cz>
+       * profile-count.h (enum profile_quality): Add
+       profile_uninitialized as the first value. Do not number values
+       as they are zero based.
+       (profile_count::verify): Update sanity check.
+       (profile_probability::verify): Likewise.
+ 2018-01-23  Nathan Sidwell  <nathan@acm.org>
+       * doc/invoke.texi (ffor-scope): Deprecate.
+ 2018-01-23  David Malcolm  <dmalcolm@redhat.com>
+       PR tree-optimization/83510
+       * domwalk.c (set_all_edges_as_executable): New function.
+       (dom_walker::dom_walker): Convert bool param
+       "skip_unreachable_blocks" to enum reachability.  Move setup of
+       edge flags to set_all_edges_as_executable and only do it when
+       reachability is REACHABLE_BLOCKS.
+       * domwalk.h (enum dom_walker::reachability): New enum.
+       (dom_walker::dom_walker): Convert bool param
+       "skip_unreachable_blocks" to enum reachability.
+       (set_all_edges_as_executable): New decl.
+       * graphite-scop-detection.c  (gather_bbs::gather_bbs): Convert
+       from false for "skip_unreachable_blocks" to ALL_BLOCKS for
+       "reachability".
+       * tree-ssa-dom.c (dom_opt_dom_walker::dom_opt_dom_walker): Likewise,
+       but converting true to REACHABLE_BLOCKS.
+       * tree-ssa-sccvn.c (sccvn_dom_walker::sccvn_dom_walker): Likewise.
+       * tree-vrp.c
+       (check_array_bounds_dom_walker::check_array_bounds_dom_walker):
+       Likewise, but converting it to REACHABLE_BLOCKS_PRESERVING_FLAGS.
+       (vrp_dom_walker::vrp_dom_walker): Likewise, but converting it to
+       REACHABLE_BLOCKS.
+       (vrp_prop::vrp_finalize): Call set_all_edges_as_executable
+       if check_all_array_refs will be called.
+ 2018-01-23  David Malcolm  <dmalcolm@redhat.com>
+       * tree.c (selftest::test_location_wrappers): Add more test
+       coverage.
+ 2018-01-23  David Malcolm  <dmalcolm@redhat.com>
+       * sbitmap.c (selftest::test_set_range): Fix memory leaks.
+       (selftest::test_bit_in_range): Likewise.
+ 2018-01-23  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR testsuite/83888
+       * doc/sourcebuild.texi (vect_float): Say that the selector
+       only describes the situation when -funsafe-math-optimizations is on.
+       (vect_float_strict): Document.
+ 2018-01-23  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR tree-optimization/83965
+       * tree-vect-patterns.c (vect_reassociating_reduction_p): New function.
+       (vect_recog_dot_prod_pattern, vect_recog_sad_pattern): Use it
+       instead of checking only for a reduction.
+       (vect_recog_widen_sum_pattern): Likewise.
+ 2018-01-23  Jan Hubicka  <hubicka@ucw.cz>
+       * predict.c (probably_never_executed): Only use precise profile info.
+       (compute_function_frequency): Skip after inlining hack since we now
+       have quality checking.
+ 2018-01-23  Jan Hubicka  <hubicka@ucw.cz>
+       * profile-count.h (profile_probability::very_unlikely,
+       profile_probability::unlikely, profile_probability::even): Set
+       precision to guessed.
+ 2018-01-23  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/83963
+       * graphite-scop-detection.c (scop_detection::harmful_loop_in_region):
+       Properly terminate dominator walk when crossing the exit edge not
+       when visiting its source block.
  
-       * emit-rtl.c (gen_raw_REG): Adjust.
-       * gengenrtl.c (gendef): Likewise.
-       * rtl.c (rtx_alloc_stat): Remove _stat from name.
-       * rtl.h (rtx_alloc): Remove macro.
+ 2018-01-23  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
-       * tree.c (build_tree_list_vec_stat): Remove _stat from name.
-       (build_tree_list_stat): Likewise.
-       * tree.h (build_tree_list): Remove macro.
-       (build_tree_list_vec): Likewise.
+       PR c++/83918
+       * tree.c (maybe_wrap_with_location): Use NON_LVALUE_EXPR rather than
+       VIEW_CONVERT_EXPR to wrap CONST_DECLs.
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+ 2018-01-22  Jakub Jelinek  <jakub@redhat.com>
  
-       * tree.c (make_vector_stat): Remove _stat from name.
-       (build_vector_stat): Likewise.
-       * tree.h (make_vector_stat): Remove macro.
-       (build_vector_stat): Likewise.
+       PR tree-optimization/83957
+       * omp-expand.c (expand_omp_for_generic): Ignore virtual PHIs.  Remove
+       semicolon after for body surrounded by braces.
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       PR tree-optimization/83081
+       * profile-count.h (profile_probability::split): New method.
+       * dojump.c (do_jump_1) <case TRUTH_ANDIF_EXPR, case TRUTH_ORIF_EXPR>:
+       Use profile_probability::split.
+       (do_compare_rtx_and_jump): Fix adjustment of probabilities
+       when splitting a single conditional jump into 2.
  
-       * tree.h (build_var_debug_value): Remove prototype.
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
-       * tree.c (tree_cons_stat): Remove _stat from name.
-       * tree.h (tree_cons): Remove macro.
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+ 2018-01-22  David Malcolm  <dmalcolm@redhat.com>
  
-       * tree.c (build_vl_exp_stat): Remove _stat from name.
-       * tree.h (build_vl_exp): Remove macro.
+       PR tree-optimization/69452
+       * tree-ssa-loop-im.c (class move_computations_dom_walker): Remove
+       decl.
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+ 2018-01-22  Sebastian Perta  <sebastian.perta@renesas.com>
  
-       * tree.c (build_decl_stat): Remove _stat from name.
-       * tree.h (build_decl): Remove macro.
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
-       * gimple.c (gimple_build_with_ops_stat): Adjust.
-       (gimple_alloc_stat): Remove _stat from name.
-       * gimple.h (gimple_alloc): Remove macro.
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
-       * tree.c (make_tree_vec_stat): Remove _stat from name.
-       (grow_tree_vec_stat): Likewise.
-       * tree.h (make_tree_vec_stat): Adjust prototype.
-       (grow_tree_vec_stat): Likewise.
-       (make_tree_vec): Remove macro.
-       (grow_tree_vec): Likewise.
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       * config/rl78/rl78-expand.md (bswaphi2): New define_expand.
+       * config/rl78/rl78-virt.md (*bswaphi2_virt): New define_insn.
+       * config/rl78/rl78-real.md (*bswaphi2_real): New define_insn.
  
-       * fold-const.c (fold_build1_stat_loc): Adjust.
-       (fold_build2_stat_loc): Likewise.
-       (fold_build3_stat_loc): Likewise.
-       * tree.c (build0_stat): Remove _stat from name.
-       (build1_stat): Likewise.
-       (build2_stat): Likewise.
-       (build3_stat): Likewise.
-       (build4_stat): Likewise.
-       (build5_stat): Likewise.
-       * tree.h (build1_loc): Remove macro, and rename _stat function
-       to this.
-       (build2_loc): Likewise.
-       (build3_loc): Likewise.
-       (build4_loc): Likewise.
-       (build5_loc): Likewise.
+ 2018-01-22  Sebastian Perta  <sebastian.perta@renesas.com>
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       * config/rl78/rl78-protos.h (rl78_split_movdi): New function declaration.
+       * config/rl78/rl78.md (movdi): New define_expand.
+       * config/rl78/rl78.c (rl78_split_movdi): New function.
  
-       * tree.c (make_int_cst_stat): Remove _stat from name.
-       * tree.h (make_int_cst_stat): Adjust prototype.
-       (make_int_cst): Remove macro.
+ 2018-01-22  Michael Meissner  <meissner@linux.vnet.ibm.com>
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       PR target/83862
+       * config/rs6000/rs6000-protos.h (rs6000_split_signbit): Delete,
+       no longer used.
+       * config/rs6000/rs6000.c (rs6000_split_signbit): Likewise.
+       * config/rs6000/rs6000.md (signbit<mode>2): Change code for IEEE
+       128-bit to produce an UNSPEC move to get the double word with the
+       signbit and then a shift directly to do signbit.
+       (signbit<mode>2_dm): Replace old IEEE 128-bit signbit
+       implementation with a new version that just does either a direct
+       move or a regular move.  Move memory interface to separate insns.
+       Move insns so they are next to the expander.
+       (signbit<mode>2_dm_mem_be): New combiner insns to combine load
+       with signbit move.  Split big and little endian case.
+       (signbit<mode>2_dm_mem_le): Likewise.
+       (signbit<mode>2_dm_<su>ext): Delete, no longer used.
+       (signbit<mode>2_dm2): Likewise.
+ 2018-01-22  Sebastian Perta  <sebastian.perta@renesas.com>
+       * config/rl78/rl78.md (anddi3): New define_expand.
+ 2018-01-22  Sebastian Perta  <sebastian.perta@renesas.com>
+       * config/rl78/rl78.md (umindi3): New define_expand.
+ 2018-01-22  Sebastian Perta  <sebastian.perta@renesas.com>
+       * config/rl78/rl78.md (smindi3): New define_expand.
+ 2018-01-22  Sebastian Perta  <sebastian.perta@renesas.com>
+       * config/rl78/rl78.md (smaxdi3): New define_expand.
+ 2018-01-22  Carl Love  <cel@us.ibm.com>
+       * config/rs6000/rs6000-builtin.def (ST_ELEMREV_V1TI, LD_ELEMREV_V1TI,
+       LVX_V1TI): Add macro expansion.
+       * config/rs6000/rs6000-c.c (altivec_builtin_types): Add argument
+       definitions for VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_VEC_ST,
+       VSX_BUILTIN_VEC_XL, LD_ELEMREV_V1TI builtins.
+       * config/rs6000/rs6000-p8swap.c (insn_is_swappable_p):
+       Change check to determine if the instruction is a byte reversing
+       entry.  Fix typo in comment.
+       * config/rs6000/rs6000.c (altivec_expand_builtin): Add case entry
+       for VSX_BUILTIN_ST_ELEMREV_V1TI and VSX_BUILTIN_LD_ELEMREV_V1TI.
+       Add def_builtin calls for new builtins.
+       * config/rs6000/vsx.md (vsx_st_elemrev_v1ti, vsx_ld_elemrev_v1ti):
+       Add define_insn expansion.
+ 2018-01-22  Sebastian Perta  <sebastian.perta@renesas.com>
+       * config/rl78/rl78.md (umaxdi3): New define_expand.
+ 2018-01-22  Sebastian Perta  <sebastian.perta@renesas.com>
+       * config/rl78/rl78.c (rl78_note_reg_set): Fixed dead reg check
+       for non-QImode registers.
+ 2018-01-22  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/83963
+       * graphite-scop-detection.c (scop_detection::get_sese): Delay
+       including the loop exit block.
+       (scop_detection::merge_sese): Likewise.
+       (scop_detection::add_scop): Do it here instead.
+ 2018-01-22  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
+       * doc/sourcebuild.texi (arm_softfloat): Document.
+ 2018-01-21  John David Anglin  <danglin@gcc.gnu.org>
+       PR gcc/77734
+       * config/pa/pa.c (pa_function_ok_for_sibcall): Use
+       targetm.binds_local_p instead of TREE_PUBLIC to check local binding.
+       Move TARGET_PORTABLE_RUNTIME check after TARGET_64BIT check.
+ 2018-01-21  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+           David Edelsohn  <dje.gcc@gmail.com>
+       PR target/83946
+       * config/rs6000/rs6000.md (*call_indirect_nonlocal_sysv<mode>):
+       Change "crset eq" to "crset 2".
+       (*call_value_indirect_nonlocal_sysv<mode>): Likewise.
+       (*call_indirect_aix<mode>_nospec): Likewise.
+       (*call_value_indirect_aix<mode>_nospec): Likewise.
+       (*call_indirect_elfv2<mode>_nospec): Likewise.
+       (*call_value_indirect_elfv2<mode>_nospec): Likewise.
+       (*sibcall_nonlocal_sysv<mode>): Change "crset eq" to "crset 2";
+       change assembly output from . to $.
+       (*sibcall_value_nonlocal_sysv<mode>): Likewise.
+       (indirect_jump<mode>_nospec): Change assembly output from . to $.
+       (*tablejump<mode>_internal1_nospec): Likewise.
+ 2018-01-21  Oleg Endo  <olegendo@gcc.gnu.org>
+       PR target/80870
+       * config/sh/sh_optimize_sett_clrt.cc:
+       Use INCLUDE_ALGORITHM and INCLUDE_VECTOR instead of direct includes.
+ 2018-01-20  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR tree-optimization/83940
+       * tree-vect-stmts.c (vect_truncate_gather_scatter_offset): Set
+       offset_dt to vect_constant_def rather than vect_unknown_def_type.
+       (vect_check_load_store_mask): Add a mask_dt_out parameter and
+       use it to pass back the definition type.
+       (vect_check_store_rhs): Likewise rhs_dt_out.
+       (vect_build_gather_load_calls): Add a mask_dt argument and use
+       it instead of a call to vect_is_simple_use.
+       (vectorizable_store): Update calls to vect_check_load_store_mask
+       and vect_check_store_rhs.  Use the dt returned by the latter instead
+       of scatter_src_dt.  Use the cached mask_dt and gs_info.offset_dt
+       instead of calls to vect_is_simple_use.  Pass the scalar rather
+       than the vector operand to vect_is_simple_use when handling
+       second and subsequent copies of an rhs value.
+       (vectorizable_load): Update calls to vect_check_load_store_mask
+       and vect_build_gather_load_calls.  Use the cached mask_dt and
+       gs_info.offset_dt instead of calls to vect_is_simple_use.
+ 2018-01-20  Jakub Jelinek  <jakub@redhat.com>
+       PR middle-end/83945
+       * tree-emutls.c: Include gimplify.h.
+       (lower_emutls_2): New function.
+       (lower_emutls_1): If ADDR_EXPR is a gimple invariant and walk_tree
+       with lower_emutls_2 callback finds some TLS decl in it, unshare_expr
+       it before further processing.
+       PR target/83930
+       * simplify-rtx.c (simplify_binary_operation_1) <case UMOD>: Use
+       UINTVAL (trueop1) instead of INTVAL (op1).
+ 2018-01-19  Jakub Jelinek  <jakub@redhat.com>
+       PR debug/81570
+       PR debug/83728
+       * dwarf2cfi.c (DEFAULT_INCOMING_FRAME_SP_OFFSET): Define to
+       INCOMING_FRAME_SP_OFFSET if not defined.
+       (scan_trace): Add ENTRY argument.  If true and
+       DEFAULT_INCOMING_FRAME_SP_OFFSET != INCOMING_FRAME_SP_OFFSET,
+       emit a note to adjust the CFA offset.
+       (create_cfi_notes): Adjust scan_trace callers.
+       (create_cie_data): Use DEFAULT_INCOMING_FRAME_SP_OFFSET rather than
+       INCOMING_FRAME_SP_OFFSET in the CIE.
+       * config/i386/i386.h (DEFAULT_INCOMING_FRAME_SP_OFFSET): Define.
+       * config/stormy16/stormy16.h (DEFAULT_INCOMING_FRAME_SP_OFFSET):
+       Likewise.
+       * doc/tm.texi.in (DEFAULT_INCOMING_FRAME_SP_OFFSET): Document.
+       * doc/tm.texi: Regenerated.
  
-       * tree.c (make_tre_binfo_stat): Remove _stat from name.
-       * tree.h (make_tree_binfo_stat): Adjust prototype.
-       (make_tree_binfo): Remove.
+ 2018-01-19  Andreas Krebbel  <krebbel@linux.vnet.ibm.com>
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       PR rtl-optimization/83147
+       * lra-constraints.c (remove_inheritance_pseudos): Use
+       lra_substitute_pseudo_within_insn.
  
-       * tree.c (copy_node_stat): Rename to copy_node.
-       (build_distinct_type_copy): Adjust.
-       * tree.h (copy_node_stat): Adjust prototype.
-       (copy_node): Remove macro.
+ 2018-01-19  Tom de Vries  <tom@codesourcery.com>
+           Cesar Philippidis  <cesar@codesourcery.com>
  
- 2017-07-28  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+       PR target/83920
+       * config/nvptx/nvptx.c (nvptx_single): Fix jit workaround.
  
-       * tree.c (make_node_stat): rename to make_node.
-       (build_tree_list_stat): Adjust.
-       (build0_stat): Likewise.
-       (build2_stat): Likewise.
-       (build3_stat): Likewise.
-       (build4_stat): Likewise.
-       (build5_stat): Likewise.
-       (build_decl_stat): Likewise.
-       * tree.h (make_node_stat): Adjust prototype.
-       (make_node): remove macro.
+ 2018-01-19  Cesar Philippidis  <cesar@codesourcery.com>
  
- 2017-07-28  Peter Bergner  <bergner@vnet.ibm.com>
+       PR target/83790
+       * config/nvptx/nvptx.c (output_init_frag): Don't use generic address
+       spaces for function labels.
  
-       * config/rs6000/ppc-auxv.h (PPC_FEATURE2_DARN): New define.
-       (PPC_FEATURE2_SCV): Likewise.
-       * config/rs6000/rs6000.c (cpu_supports_info): Use them.
- 2017-07-28  Tamar Christina  <tamar.christina@arm.com>
+ 2018-01-19  Martin Liska  <mliska@suse.cz>
  
-       * config/aarch64/aarch64.c
-       (aarch64_internal_mov_immediate): Add new special pattern.
-       * config/aarch64/aarch64.md (*movdi_aarch64):
-       Add reg/32bit const mov case.
- 2017-07-28  Tamar Christina  <tamar.christina@arm.com>
-           Richard Sandiford <richard.sandiford@linaro.org>
+       * predict.def (PRED_LOOP_EXIT): Change from 85 to 89.
+       (PRED_LOOP_EXIT_WITH_RECURSION): Change from 72 to 78.
+       (PRED_LOOP_EXTRA_EXIT): Change from 83 to 67.
+       (PRED_OPCODE_POSITIVE): Change from 64 to 59.
+       (PRED_TREE_OPCODE_POSITIVE): Change from 64 to 59.
+       (PRED_CONST_RETURN): Change from 69 to 65.
+       (PRED_NULL_RETURN): Change from 91 to 71.
+       (PRED_LOOP_IV_COMPARE_GUESS): Change from 98 to 64.
+       (PRED_LOOP_GUARD): Change from 66 to 73.
  
-       * config/aarch64/aarch64.md (mov<mode>): Generalize.
-       (*movhf_aarch64, *movsf_aarch64, *movdf_aarch64):
-       Add integer and movi cases.
-       (movi-split-hf-df-sf split, fp16): New.
-       (enabled): Added TARGET_FP_F16INST.
-       * config/aarch64/iterators.md (GPF_HF): New.
-       * config/aarch64/predicates.md (aarch64_reg_or_fp_float): New.
- 2017-07-28  Tamar Christina  <tamar.christina@arm.com>
-       * config/aarch64/aarch64.c
-       (aarch64_simd_container_mode): Add prototype.
-       (aarch64_expand_mov_immediate): Add HI support.
-       (aarch64_reinterpret_float_as_int, aarch64_float_const_rtx_p: New.
-       (aarch64_can_const_movi_rtx_p): New.
-       (aarch64_preferred_reload_class):
-       Remove restrictions of using FP registers for certain SIMD operations.
-       (aarch64_rtx_costs): Added new cost for CONST_DOUBLE moves.
-       (aarch64_valid_floating_const): Add integer move validation.
-       (aarch64_simd_imm_scalar_p): Remove.
-       (aarch64_output_scalar_simd_mov_immediate): Generalize function.
-       (aarch64_legitimate_constant_p): Expand list of supported cases.
-       * config/aarch64/aarch64-protos.h
-       (aarch64_float_const_rtx_p, aarch64_can_const_movi_rtx_p): New.
-       (aarch64_reinterpret_float_as_int): New.
-       (aarch64_simd_imm_scalar_p): Remove.
-       * config/aarch64/constraints.md (Uvi): New.
-       (Dd): Split into Ds and new Dd.
-       * config/aarch64/aarch64.md (*movsi_aarch64):
-       Add SIMD mov case.
-       (*movdi_aarch64): Add SIMD mov case.
- 2017-07-28  Bin Cheng  <bin.cheng@arm.com>
-       * tree-predcom.c: (struct chain): Handle store-store chain in which
-       stores for elimination only store loop invariant values.
-       (execute_pred_commoning_chain): Ditto.
-       (prepare_initializers_chain_store_elim): Ditto.
-       (prepare_finalizers): Ditto.
-       (is_inv_store_elimination_chain): New function.
-       (initialize_root_vars_store_elim_1): New function.
- 2017-07-28  Bin Cheng  <bin.cheng@arm.com>
-       * tree-predcom.c: Revise general description of the pass.
-       (enum chain_type): New enum type for store elimination.
-       (struct chain): New field supporting store elimination.
-       (struct component): Ditto.
-       (dump_chain): Dump store-stores chain.
-       (release_chain): Release resources.
-       (split_data_refs_to_components): Compute and create component
-       contains only stores for elimination.
-       (get_chain_last_ref_at): New function.
-       (make_invariant_chain): Initialization.
-       (make_rooted_chain): Specify chain type in parameter and record it.
-       (add_looparound_copies): Skip for store-stores chain.
-       (determine_roots_comp): Compute type of chain and pass it to
-       make_rooted_chain.
-       (initialize_root_vars_store_elim_2): New function.
-       (finalize_eliminated_stores): New function.
-       (remove_stmt): Handle store for elimination.
-       (execute_pred_commoning_chain): Execute predictive commoning on
-       store-store chains.
-       (determine_unroll_factor): Skip unroll for store-stores chain.
-       (prepare_initializers_chain_store_elim): New function.
-       (prepare_initializers_chain): Hanlde store-store chain.
-       (prepare_finalizers_chain, prepare_finalizers): New function.
-       (tree_predictive_commoning_loop): Return integer value indicating
-       if loop is unrolled or lcssa form is corrupted.
-       (tree_predictive_commoning): Rewrite for lcssa form if necessary.
- 2017-07-28  Bin Cheng  <bin.cheng@arm.com>
-       * tree-predcom.c (initialize_root): Delete.
-       (execute_pred_commoning_chain): Initialize root vars and replace
-       reference of non-combined chain directly, rather than call above
-       function.
+ 2018-01-19  Martin Liska  <mliska@suse.cz>
  
- 2017-07-28  Bin Cheng  <bin.cheng@arm.com>
+       * predict.c (predict_insn_def): Add new assert.
+       (struct branch_predictor): Change type to signed integer.
+       (test_prediction_value_range): Amend test to cover
+       PROB_UNINITIALIZED.
+       * predict.def (PRED_LOOP_ITERATIONS): Use the new constant.
+       (PRED_LOOP_ITERATIONS_GUESSED): Likewise.
+       (PRED_LOOP_ITERATIONS_MAX): Likewise.
+       (PRED_LOOP_IV_COMPARE): Likewise.
+       * predict.h (PROB_UNINITIALIZED): Define new constant.
  
-       * tree-predcom.c (ref_at_iteration): Add parameter NITERS.  Compute
-       memory reference to DR at (NITERS + ITERS)-th iteration of loop.
+ 2018-01-19  Martin Liska  <mliska@suse.cz>
  
- 2017-07-28  Bin Cheng  <bin.cheng@arm.com>
+       * predict.c (dump_prediction): Add new format for
+       analyze_brprob.py script which is enabled with -details
+       suboption.
+       * profile-count.h (precise_p): New function.
  
-       * tree-predcom.c (struct chain): New field init_seq.
-       (release_chain): Release init_seq.
-       (prepare_initializers_chain): Record intialization stmts in above
-       field.
-       (insert_init_seqs): New function.
-       (tree_predictive_commoning_loop): Call insert_init_seqs.
- 2017-07-28  Bin Cheng  <bin.cheng@arm.com>
-       * tree-predcom.c (determine_roots_comp): Skip trivial components.
- 2017-07-28  Richard Biener  <rguenther@suse.de>
-       * match.pd: Remove superfluous :c.
-       * genmatch.c (simplify::id): Add member.
-       (lower_commutative, lower_opt_convert, lower_cond, lower_for):
-       Copy id.
-       (current_id): New global.
-       (dt_node::parent): Move from ...
-       (dt_operand::parent): ... here.  Add for_id member.
-       (is_a_helper <dt_operand *>::test): DT_TRUE is also a dt_operand.
-       (decision_tree::find_node): Relax order requirement when
-       merging DT_TRUE nodes to ones inbetween the current simplify
-       and the one we try to merge with.  Add diagnostic whenever
-       we need to enforce pattern order by not merging.
-       (decision_tree::insert): Set current_id.
-       (decision_tree::print_node): Dump parent node and for_id.
-       (parser::last_id): Add member.
-       (parser::push_simplify): Assign unique id.
-       (parser::parser): Initialize last_id.
- 2017-07-28  Martin Liska  <mliska@suse.cz>
-       PR sanitizer/81340
-       * sanopt.c (sanitize_rewrite_addressable_params): Set VALUE_EXPR after
-       gimple_build_debug_bind.
- 2017-07-28  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/81502
-       * match.pd: Add pattern combining BIT_INSERT_EXPR with
-       BIT_FIELD_REF.
-       * tree-cfg.c (verify_expr): Verify types of BIT_FIELD_REF
-       size/pos operands.
-       (verify_gimple_assign_ternary): Likewise for BIT_INSERT_EXPR pos.
-       * gimple-fold.c (maybe_canonicalize_mem_ref_addr): Use bitsizetype
-       for BIT_FIELD_REF args.
-       * fold-const.c (make_bit_field_ref): Likewise.
-       * tree-vect-stmts.c (vectorizable_simd_clone_call): Likewise.
+ 2018-01-19  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-28  Jakub Jelinek  <jakub@redhat.com>
-       PR sanitizer/80998
-       * sanopt.c (pass_sanopt::execute): Handle IFN_UBSAN_PTR.
-       * tree-ssa-alias.c (call_may_clobber_ref_p_1): Likewise.
-       * flag-types.h (enum sanitize_code): Add SANITIZER_POINTER_OVERFLOW.
-       Or it into SANITIZER_UNDEFINED.
-       * ubsan.c: Include gimple-fold.h and varasm.h.
-       (ubsan_expand_ptr_ifn): New function.
-       (instrument_pointer_overflow): New function.
-       (maybe_instrument_pointer_overflow): New function.
-       (instrument_object_size): Formatting fix.
-       (pass_ubsan::execute): Call instrument_pointer_overflow
-       and maybe_instrument_pointer_overflow.
-       * internal-fn.c (expand_UBSAN_PTR): New function.
-       * ubsan.h (ubsan_expand_ptr_ifn): Declare.
-       * sanitizer.def (__ubsan_handle_pointer_overflow,
-       __ubsan_handle_pointer_overflow_abort): New builtins.
-       * tree-ssa-tail-merge.c (merge_stmts_p): Handle IFN_UBSAN_PTR.
-       * internal-fn.def (UBSAN_PTR): New internal function.
-       * opts.c (sanitizer_opts): Add pointer-overflow.
-       * lto-streamer-in.c (input_function): Handle IFN_UBSAN_PTR.
-       * fold-const.c (build_range_check): Compute pointer range check in
-       integral type if pointer arithmetics would be needed.  Formatting
-       fixes.
- 2017-07-28  Martin Liska  <mliska@suse.cz>
-       PR sanitizer/81460
-       * sanopt.c (sanitize_rewrite_addressable_params): Do not rewrite
-       parameters that are of a variable-length.
- 2017-07-28  Sebastian Huber  <sebastian.huber@embedded-brains.de>
-       * config.gcc (powerpc-*-rtems*): Remove rs6000/eabi.h.  Add
-       rs6000/biarch64.h.
-       * config/rs6000/rtems.h (ASM_DECLARE_FUNCTION_SIZE): New macro.
-       (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P): Likewise.
-       (CRT_CALL_STATIC_FUNCTION): Likewise.
-       (ASM_DEFAULT_SPEC): New define.
-       (ASM_SPEC32): Likewise.
-       (ASM_SPEC64): Likewise.
-       (ASM_SPEC_COMMON): Likewise.
-       (ASM_SPEC): Likewise.
-       (INVALID_64BIT): Likewise.
-       (LINK_OS_DEFAULT_SPEC): Likewise.
-       (LINK_OS_SPEC32): Likewise.
-       (LINK_OS_SPEC64): Likewise.
-       (POWERPC_LINUX): Likewise.
-       (PTRDIFF_TYPE): Likewise.
-       (RESTORE_FP_PREFIX): Likewise.
-       (RESTORE_FP_SUFFIX): Likewise.
-       (SAVE_FP_PREFIX): Likewise.
-       (SAVE_FP_SUFFIX): Likewise.
-       (SIZE_TYPE): Likewise.
-       (SUBSUBTARGET_OVERRIDE_OPTIONS): Likewise.
-       (TARGET_64BIT): Likewise.
-       (TARGET_64BIT): Likewise.
-       (TARGET_AIX): Likewise.
-       (WCHAR_TYPE_SIZE): Likewise.
-       (WCHAR_TYPE): Undefine.
-       (TARGET_OS_CPP_BUILTINS): Add 64-bit PowerPC defines.
-       (CPP_OS_DEFAULT_SPEC): Use previous CPP_OS_RTEMS_SPEC.
-       (CPP_OS_RTEMS_SPEC): Delete.
-       (SUBSUBTARGET_EXTRA_SPECS): Remove cpp_os_rtems.  Add
-       asm_spec_common, asm_spec32, asm_spec64, link_os_spec32, and
-       link_os_spec64.
-       * config/rs6000/t-rtems: Add mcpu=e6500/m64 multilibs.
- 2017-07-28  Jakub Jelinek  <jakub@redhat.com>
-       PR tree-optimization/81578
-       * tree-parloops.c (build_new_reduction): Bail out if
-       reduction_code isn't one of the standard OpenMP reductions.
-       Move the details printing after that decision.
- 2017-07-27  Peter Bergner  <bergner@vnet.ibm.com>
-       * config/rs6000/predicates.md (volatile_mem_operand): Remove code
-       related to reload_in_progress.
-       (splat_input_operand): Likewise.
-       * config/rs6000/rs6000-protos.h (rs6000_secondary_memory_needed_rtx):
-       Delete prototype.
-       * config/rs6000/rs6000.c (machine_function): Remove sdmode_stack_slot
-       field.
-       (TARGET_EXPAND_TO_RTL_HOOK): Delete.
-       (TARGET_INSTANTIATE_DECLS): Likewise.
-       (legitimate_indexed_address_p): Delete reload_in_progress code.
-       (rs6000_debug_legitimate_address_p): Likewise.
-       (rs6000_eliminate_indexed_memrefs): Likewise.
-       (rs6000_emit_le_vsx_store): Likewise.
-       (rs6000_emit_move_si_sf_subreg): Likewise.
-       (rs6000_emit_move): Likewise.
-       (register_to_reg_type): Likewise.
-       (rs6000_pre_atomic_barrier): Likewise.
-       (rs6000_machopic_legitimize_pic_address): Likewise.
-       (rs6000_allocate_stack_temp): Likewise.
-       (rs6000_address_for_fpconvert): Likewise.
-       (rs6000_address_for_altivec): Likewise.
-       (rs6000_secondary_memory_needed_rtx): Delete function.
-       (rs6000_check_sdmode): Likewise.
-       (rs6000_alloc_sdmode_stack_slot): Likewise.
-       (rs6000_instantiate_decls): Likewise.
-       * config/rs6000/rs6000.h (SECONDARY_MEMORY_NEEDED_RTX): Delete.
-       * config/rs6000/rs6000.md (splitter for *movsi_got_internal):
-       Delete reload_in_progress.
-       (*vec_reload_and_plus_<mptrsize>): Likewise.
-       * config/rs6000/vsx.md (vsx_mul_v2di): Likewise.
-       (vsx_div_v2di): Likewise.
-       (vsx_udiv_v2di): Likewise.
- 2017-07-27  Peter Bergner  <bergner@vnet.ibm.com>
-       * config/rs6000/rs6000.opt (mlra): Replace with stub.
-       * config/rs6000/rs6000-cpus.def (POWERPC_MASKS): Delete OPTION_MASK_LRA.
-       * config/rs6000/rs6000.c (TARGET_LRA_P): Delete.
-       (rs6000_debug_reg_global): Delete print of LRA status.
-       (rs6000_option_override_internal): Delete dead LRA related code.
-       (rs6000_lra_p): Delete function.
-       * doc/invoke.texi (RS/6000 and PowerPC Options): Delete -mlra.
- 2017-07-27  Sebastian Huber  <sebastian.huber@embedded-brains.de>
-       * config.gcc (riscv*-*-elf*): Add (riscv*-*-rtems*).
-       * config/riscv/rtems.h: New file.
- 2017-07-27  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
-           Sudakshina Das  <sudi.das@arm.com>
-       * config/aarch64/aarch64.md
-       (define_split for and<mode>3nr_compare): Move
-       non aarch64_logical_operand to a register.
-       (define_split for and_<SHIFT:optab><mode>3nr_compare0): Move non
-       register immediate operand to a register.
-       * config/aarch64/predicates.md (aarch64_mov_imm_operand): New.
- 2017-07-27  Peter Bergner  <bergner@vnet.ibm.com>
-       PR middle-end/81564
-       * tree-cfg.c (group_case_labels_stmt): Handle already deleted blocks.
- 2017-07-27  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/81573
-       PR tree-optimization/81494
-       * tree-vect-loop.c (vect_create_epilog_for_reduction): Handle
-       multi defuse cycle case.
- 2017-07-27  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/81571
-       * tree-vect-slp.c (vect_build_slp_tree): Properly verify reduction
-       PHIs.
- 2017-07-27  Eric Botcazou  <ebotcazou@adacore.com>
-       * config/sparc/sparc.c (sparc_option_override): Set MASK_FSMULD flag
-       earlier and only if MASK_FPU is set.  Adjust formatting.
- 2017-07-27  Martin Liska  <mliska@suse.cz>
-       * opt-functions.awk: Add validation of value of Init.
-       * optc-gen.awk: Pass new argument.
- 2017-07-27  Martin Liska  <mliska@suse.cz>
-       * auto-profile.c (autofdo_source_profile::update_inlined_ind_target):
-       Fix wrong condition.
- 2017-07-27  Martin Liska  <mliska@suse.cz>
-       * auto-profile.c (afdo_annotate_cfg): Assign zero counts to
-       BBs and edges seen by autoFDO.
- 2017-07-27  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/81502
-       * tree-ssa.c (non_rewritable_lvalue_p): Handle BIT_INSERT_EXPR
-       with incompatible but same sized type.
-       (execute_update_addresses_taken): Likewise.
- 2017-07-27  James Greenhalgh  <james.greenhalgh@arm.com>
-       * tree-ssa-loop-ch.c (pass_ch::process_loop_p): Guard on
-       flag_tree_loop_vectorize rather than flag_tree_vectorize.
- 2017-07-27  Andreas Krebbel  <krebbel@linux.vnet.ibm.com>
-       PR target/81534
-       * config/s390/s390.md ("*atomic_compare_and_swap<mode>_1")
-       ("*atomic_compare_and_swapdi_2", "*atomic_compare_and_swapsi_3"):
-       Change s_operand to memory_operand.
- 2017-07-27  Richard Sandiford  <richard.sandiford@linaro.org>
-       * config/rs6000/rs6000-protos.h (rs6000_emit_le_vsx_permute): Declare.
-       * config/rs6000/rs6000.c (rs6000_gen_le_vsx_permute): Replace with...
-       (rs6000_emit_le_vsx_permute): ...this.  Take the destination as input.
-       Emit instructions rather than returning an expression.  Handle TFmode
-       and KFmode by casting to TImode.
-       (rs6000_emit_le_vsx_load): Update to use rs6000_emit_le_vsx_permute.
-       (rs6000_emit_le_vsx_store): Likewise.
-       * config/rs6000/vsx.md (VSX_TI): New iterator.
-       (*vsx_le_permute_<mode>): Use it instead of VSX_LE_128.
-       (*vsx_le_undo_permute_<mode>): Likewise.
-       (*vsx_le_perm_load_<mode>): Use rs6000_emit_le_vsx_permute to
-       emit the split sequence.
-       (*vsx_le_perm_store_<mode>): Likewise.
+       PR tree-optimization/83922
+       * tree-vect-loop.c (vect_verify_full_masking): Return false if
+       there are no statements that need masking.
+       (vect_active_double_reduction_p): New function.
+       (vect_analyze_loop_operations): Use it when handling phis that
+       are not in the loop header.
  
- 2017-07-27  Jakub Jelinek  <jakub@redhat.com>
+ 2018-01-19  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       PR tree-optimization/81555
-       PR tree-optimization/81556
-       * tree-ssa-reassoc.c (rewrite_expr_tree): Add NEXT_CHANGED argument,
-       if true, force CHANGED for the recursive invocation.
-       (reassociate_bb): Remember original length of ops array, pass
-       len != orig_len as NEXT_CHANGED in rewrite_expr_tree call.
+       PR tree-optimization/83914
+       * tree-vect-loop.c (vectorizable_induction): Don't convert
+       init_expr or apply the peeling adjustment for inductions
+       that are nested within the vectorized loop.
  
-       * attribs.c (decl_attributes): Imply noinline, noclone and no_icf
-       attributes for noipa attribute.  For naked attribute use
-       lookup_attribute first before lookup_attribute_spec.
-       * final.c (rest_of_handle_final): Disable IPA RA for functions with
-       noipa attribute.
-       * ipa-visibility.c (non_local_p): Fix comment typos.  Return true
-       for functions with noipa attribute.
-       (cgraph_externally_visible_p): Return true for functions with noipa
-       attribute.
-       * cgraph.c (cgraph_node::get_availability): Return AVAIL_INTERPOSABLE
-       for functions with noipa attribute.
-       * doc/extend.texi: Document noipa function attribute.
-       * tree-ssa-structalias.c (refered_from_nonlocal_fn): Set *nonlocal_p
-       also for functions with noipa attribute.
-       (ipa_pta_execute): Set nonlocal_p also for nodes with noipa attribute.
- 2017-07-26  Andrew Pinski  <apinski@cavium.com>
-       * config/aarch64/aarch64.c (thunderx_vector_cost): Decrease cost of
-       vec_unalign_load_cost and vec_unalign_store_cost.
- 2017-07-26  Michael Meissner  <meissner@linux.vnet.ibm.com>
-       * config/rs6000/rs6000-cpus.def (ISA_2_7_MASKS_SERVER): Delete
-       -mvsx-small-integer option.
-       (ISA_3_0_MASKS_IEEE): Likewise.
-       (OTHER_VSX_VECTOR_MASKS): Likewise.
-       (POWERPC_MASKS): Likewise.
-       * config/rs6000/rs6000.opt (-mvsx-small-integer): Likewise.
-       * config/rs6000/rs6000.c (rs6000_hard_regno_mode_ok): Simplify
-       code, only testing for DImode being allowed in non-VSX floating
-       point registers.
-       (rs6000_init_hard_regno_mode_ok): Change TARGET_VSX_SMALL_INTEGER
-       to TARGET_P8_VECTOR test.  Remove redundant VSX test inside of
-       another VSX test.
-       (rs6000_option_override_internal): Delete -mvsx-small-integer.
-       (rs6000_expand_vector_set): Change TARGET_VSX_SMALL_INTEGER to
-       TARGET_P8_VECTOR test.
-       (rs6000_secondary_reload_simple_move): Likewise.
-       (rs6000_preferred_reload_class): Delete TARGET_VSX_SMALL_INTEGER,
-       since TARGET_P9_VECTOR was already tested.
-       (rs6000_opt_masks): Remove -mvsx-small-integer.
-       * config/rs6000/vsx.md (vsx_extract_<mode>): Delete
-       TARGET_VSX_SMALL_INTEGER, since a test for TARGET_P9_VECTOR was
-       used.
-       (vsx_extract_<mode>_p9): Delete TARGET_VSX_SMALL_INTEGER, since a
-       test for TARGET_VEXTRACTUB was used, and that uses
-       TARGET_P9_VECTOR.
-       (p9 extract splitter): Likewise.
-       (vsx_extract_<mode>_di_p9): Likewise.
-       (vsx_extract_<mode>_store_p9): Likewise.
-       (vsx_extract_si): Delete TARGET_VSX_SMALL_INTEGER, since a test
-       for TARGET_P9_VECTOR was used.  Delete code that is now dead with
-       the elimination of TARGET_VSX_SMALL_INTEGER.
-       (vsx_extract_<mode>_p8): Likewise.
-       (vsx_ext_<VSX_EXTRACT_I:VS_scalar>_fl_<FL_CONV:mode>): Likewise.
-       (vsx_ext_<VSX_EXTRACT_I:VS_scalar>_ufl_<FL_CONV:mode>): Likewise.
-       (vsx_set_<mode>_p9): Likewise.
-       (vsx_set_v4sf_p9): Likewise.
-       (vsx_set_v4sf_p9_zero): Likewise.
-       (vsx_insert_extract_v4sf_p9): Likewise.
-       (vsx_insert_extract_v4sf_p9_2): Likewise.
-       * config/rs6000/rs6000.md (sign extend splitter): Change
-       TARGET_VSX_SMALL_INTEGER to TARGET_P8_VECTOR test.
-       (floatsi<mode>2_lfiwax_mem): Likewise.
-       (floatunssi<mode>2_lfiwzx_mem): Likewise.
-       (float<QHI:mode><FP_ISA3:mode>2): Delete TARGET_VSX_SMALL_INTEGER,
-       since a test for TARGET_P9_VECTOR was used.
-       (float<QHI:mode><FP_ISA3:mode>2_internal): Likewise.
-       (floatuns<QHI:mode><FP_ISA3:mode>2): Likewise.
-       (floatuns<QHI:mode><FP_ISA3:mode>2_internal): Likewise.
-       (fix_trunc<mode>si2): Change TARGET_VSX_SMALL_INTEGER to
-       TARGET_P8_VECTOR test.
-       (fix_trunc<mode>si2_stfiwx): Likewise.
-       (fix_trunc<mode>si2_internal): Likewise.
-       (fix_trunc<SFDF:mode><QHI:mode>2): Delete
-       TARGET_VSX_SMALL_INTEGER, since a test for TARGET_P9_VECTOR was
-       used.
-       (fix_trunc<SFDF:mode><QHI:mode>2_internal): Likewise.
-       (fixuns_trunc<mode>si2): Change TARGET_VSX_SMALL_INTEGER to
-       TARGET_P8_VECTOR test.
-       (fixuns_trunc<mode>si2_stfiwx): Likewise.
-       (fixuns_trunc<SFDF:mode><QHI:mode>2): Delete
-       TARGET_VSX_SMALL_INTEGER, since a test for TARGET_P9_VECTOR was
-       used.
-       (fixuns_trunc<SFDF:mode><QHI:mode>2_internal): Likewise.
-       (fctiw<u>z_<mode>_smallint): Delete TARGET_VSX_SMALL_INTEGER,
-       since a test for TARGET_P9_VECTOR was used.
-       (splitter for loading small constants): Likewise.
+ 2018-01-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
  
- 2017-07-26  Andrew Pinski  <apinski@cavium.com>
+       * config/arm/thumb2.md (*thumb2_negsi2_short): Use RSB mnemonic
+       instead of NEG.
  
-       * config/aarch64/aarch64.c (thunderx_vector_cost): Fix
-       vec_fp_stmt_cost.
+ 2018-01-18  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-07-26  H.J. Lu  <hongjiu.lu@intel.com>
+       PR sanitizer/81715
+       PR testsuite/83882
+       * function.h (gimplify_parameters): Add gimple_seq * argument.
+       * function.c: Include gimple.h and options.h.
+       (gimplify_parameters): Add cleanup argument, add CLOBBER stmts
+       for the added local temporaries if needed.
+       * gimplify.c (gimplify_body): Adjust gimplify_parameters caller,
+       if there are any parameter cleanups, wrap whole body into a
+       try/finally with the cleanups.
  
-       PR target/81563
-       * config/i386/i386.c (sp_valid_at): Properly check CFA offset.
-       (fp_valid_at): Likewise.
+ 2018-01-18  Wilco Dijkstra  <wdijkstr@arm.com>
  
- 2017-07-26  James Greenhalgh  <james.greenhalgh@arm.com>
+       PR target/82964
+       * config/aarch64/aarch64.c (aarch64_legitimate_constant_p):
+       Use GET_MODE_CLASS for scalar floating point.
+ 2018-01-18  Jan Hubicka  <hubicka@ucw.cz>
+       PR ipa/82256
+       patch by PaX Team
+       * cgraphclones.c (cgraph_node::create_version_clone_with_body):
+       Fix call of call_cgraph_insertion_hooks.
+ 2018-01-18  Martin Sebor  <msebor@redhat.com>
+       * doc/invoke.texi (-Wclass-memaccess): Tweak text.
+ 2018-01-18  Jan Hubicka  <hubicka@ucw.cz>
+       PR ipa/83619
+       * cgraph.c (cgraph_edge::redirect_call_stmt_to_callee): Update edge
+       frequencies.
+ 2018-01-18  Boris Kolpackov  <boris@codesynthesis.com>
+       PR other/70268
+       * common.opt: (-ffile-prefix-map): New option.
+       * opts.c (common_handle_option): Defer it.
+       * opts-global.c (handle_common_deferred_options): Handle it.
+       * debug.h (remap_debug_filename, add_debug_prefix_map): Move to...
+       * file-prefix-map.h: New file.
+       (remap_debug_filename, add_debug_prefix_map): ...here.
+       (add_macro_prefix_map, add_file_prefix_map, remap_macro_filename): New.
+       * final.c (debug_prefix_map, add_debug_prefix_map
+       remap_debug_filename): Move to...
+       * file-prefix-map.c: New file.
+       (file_prefix_map, add_prefix_map, remap_filename) ...here and rename,
+       generalize, get rid of alloca(), use strrchr() instead of strchr().
+       (add_macro_prefix_map, add_debug_prefix_map, add_file_prefix_map):
+       Implement in terms of add_prefix_map().
+       (remap_macro_filename, remap_debug_filename): Implement in term of
+       remap_filename().
+       * Makefile.in (OBJS, PLUGIN_HEADERS): Add new files.
+       * builtins.c (fold_builtin_FILE): Call remap_macro_filename().
+       * dbxout.c: Include file-prefix-map.h.
+       * varasm.c: Likewise.
+       * vmsdbgout.c: Likewise.
+       * xcoffout.c: Likewise.
+       * dwarf2out.c: Likewise plus omit new options from DW_AT_producer.
+       * doc/cppopts.texi (-fmacro-prefix-map): Document.
+       * doc/invoke.texi (-ffile-prefix-map): Document.
+       (-fdebug-prefix-map): Update description.
+ 2018-01-18  Martin Liska  <mliska@suse.cz>
+       * config/i386/i386.c (indirect_thunk_name): Document that also
+       lfence is emitted.
+       (output_indirect_thunk): Document why both instructions
+       (pause and lfence) are generated.
+ 2018-01-18  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/83887
+       * graphite-scop-detection.c
+       (scop_detection::get_nearest_dom_with_single_entry): Remove.
+       (scop_detection::get_nearest_pdom_with_single_exit): Likewise.
+       (scop_detection::merge_sese): Re-implement with a flood-fill
+       algorithm that properly finds a SESE region if it exists.
+ 2018-01-18  Jakub Jelinek  <jakub@redhat.com>
  
-       * config/aarch64/aarch64.c (cortexa57_addrcost_table): Remove.
-       (qdf24xx_addrcost_table): Likewise.
-       (cortexa57_tunings): Update to use generic_branch_cost.
-       (cortexa72_tunings): Likewise.
-       (cortexa73_tunings): Likewise.
-       (qdf24xx_tunings): Likewise.
+       PR c/61240
+       * match.pd ((P + A) - P, P - (P + A), (P + A) - (P + B)): For
+       pointer_diff optimizations use view_convert instead of convert.
+ 2018-01-17  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+       * config/rs6000/rs6000.md (*call_indirect_nonlocal_sysv<mode>):
+       Generate different code for -mno-speculate-indirect-jumps.
+       (*call_value_indirect_nonlocal_sysv<mode>): Likewise.
+       (*call_indirect_aix<mode>): Disable for
+       -mno-speculate-indirect-jumps.
+       (*call_indirect_aix<mode>_nospec): New define_insn.
+       (*call_value_indirect_aix<mode>): Disable for
+       -mno-speculate-indirect-jumps.
+       (*call_value_indirect_aix<mode>_nospec): New define_insn.
+       (*sibcall_nonlocal_sysv<mode>): Generate different code for
+       -mno-speculate-indirect-jumps.
+       (*sibcall_value_nonlocal_sysv<mode>): Likewise.
+ 2018-01-17  Michael Meissner  <meissner@linux.vnet.ibm.com>
+       * config/rs6000/rs6000.c (rs6000_emit_move): If we load or store a
+       long double type, set the flags for noting the default long double
+       type, even if we don't pass or return a long double type.
+ 2018-01-17  Jan Hubicka  <hubicka@ucw.cz>
+       PR ipa/83051
+       * ipa-inline.c (flatten_function): Do not overwrite final inlining
+       failure.
+ 2018-01-17  Will Schmidt  <will_schmidt@vnet.ibm.com>
+       * config/rs6000/rs6000.c (rs6000_gimple_builtin): Add gimple folding
+       support for merge[hl].
+       (fold_mergehl_helper): New helper function.
+       (tree-vector-builder.h): New #include for tree_vector_builder usage.
+       * config/rs6000/altivec.md (altivec_vmrghw_direct): Add xxmrghw insn.
+       (altivec_vmrglw_direct): Add xxmrglw insn.
+ 2018-01-17  Andrew Waterman  <andrew@sifive.com>
+       * config/riscv/riscv.c (riscv_conditional_register_usage): If
+       UNITS_PER_FP_ARG is 0, set call_used_regs to 1 for all FP regs.
+ 2018-01-17  David Malcolm  <dmalcolm@redhat.com>
+       PR lto/83121
+       * ipa-devirt.c (add_type_duplicate): When comparing memory layout,
+       call the lto_location_cache before reading the
+       DECL_SOURCE_LOCATION of the types.
+ 2018-01-17  Wilco Dijkstra  <wdijkstr@arm.com>
+           Richard Sandiford  <richard.sandiford@linaro.org>
+       * config/aarch64/aarch64.md (movti_aarch64): Use Uti constraint.
+       * config/aarch64/aarch64.c (aarch64_mov128_immediate): New function.
+       (aarch64_legitimate_constant_p): Just support CONST_DOUBLE
+       SF/DF/TF mode to avoid creating illegal CONST_WIDE_INT immediates.
+       * config/aarch64/aarch64-protos.h (aarch64_mov128_immediate):
+       Add declaration.
+       * config/aarch64/constraints.md (aarch64_movti_operand):
+       Limit immediates.
+       * config/aarch64/predicates.md (Uti): Add new constraint.
+ 2018-01-17  Carl Love  <cel@us.ibm.com>
+       * config/rs6000/vsx.md (define_expand xl_len_r,
+       define_expand stxvl, define_expand *stxvl): Add match_dup argument.
+       (define_insn): Add, match_dup 1 argument to define_insn stxvll and
+       lxvll.
+       (define_expand, define_insn): Move the shift left from the
+       define_insn to the define_expand for lxvl and stxvl instructions.
+       * config/rs6000/rs6000-builtin.def (BU_P9V_64BIT_VSX_2): Change LXVL
+       and XL_LEN_R definitions to PURE.
+ 2018-01-17  Uros Bizjak  <ubizjak@gmail.com>
+       * config/i386/i386.c (indirect_thunk_name): Declare regno
+       as unsigned int.  Compare regno with INVALID_REGNUM.
+       (output_indirect_thunk): Ditto.
+       (output_indirect_thunk_function): Ditto.
+       (ix86_code_end): Declare regno as unsigned int.  Use INVALID_REGNUM
+       in the call to output_indirect_thunk_function.
+ 2018-01-17  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR middle-end/83884
+       * expr.c (expand_expr_real_1): Use the size of GET_MODE (op0)
+       rather than the size of inner_type to determine the stack slot size
+       when handling VIEW_CONVERT_EXPRs on strict-alignment targets.
+ 2018-01-16  Sebastian Peryt  <sebastian.peryt@intel.com>
+       PR target/83546
+       * config/i386/i386.c (ix86_option_override_internal): Add PTA_RDRND
+       to PTA_SILVERMONT.
+ 2018-01-16  Michael Meissner  <meissner@linux.vnet.ibm.com>
+       * config.gcc (powerpc*-linux*-*): Add support for 64-bit little
+       endian Linux systems to optionally enable multilibs for selecting
+       the long double type if the user configured an explicit type.
+       * config/rs6000/rs6000.h (TARGET_IEEEQUAD_MULTILIB): Indicate we
+       have no long double multilibs if not defined.
+       * config/rs6000/rs6000.c (rs6000_option_override_internal): Do not
+       warn if the user used -mabi={ieee,ibm}longdouble and we built
+       multilibs for long double.
+       * config/rs6000/linux64.h (MULTILIB_DEFAULTS_IEEE): Define as the
+       appropriate multilib option.
+       (MULTILIB_DEFAULTS): Add MULTILIB_DEFAULTS_IEEE to the default
+       multilib options.
+       * config/rs6000/t-ldouble-linux64le-ibm: New configuration files
+       for building long double multilibs.
+       * config/rs6000/t-ldouble-linux64le-ieee: Likewise.
+ 2018-01-16  John David Anglin  <danglin@gcc.gnu.org>
+       * config.gcc (hppa*-*-linux*): Change callee copies ABI to caller
+       copies.
+       * config/pa.h (MALLOC_ABI_ALIGNMENT): Set 32-bit alignment default to
+       64 bits.
+       * config/pa/pa32-linux.h (MALLOC_ABI_ALIGNMENT): Set alignment to
+       128 bits.
+       * config/pa/som.h (ASM_DECLARE_FUNCTION_NAME): Cleanup type and mode
+       variables.
  
- 2017-07-26  James Greenhalgh  <james.greenhalgh@arm.com>
+       * config/pa/pa.c (pa_function_arg_size): Apply CEIL to GET_MODE_SIZE
+       return value.
+ 2018-01-16  Eric Botcazou  <ebotcazou@adacore.com>
+       * gimple-ssa-warn-restrict.c (builtin_memref::builtin_memref): For an
+       ADDR_EXPR, do not count the offset of a COMPONENT_REF twice.
+ 2018-01-16  Kelvin Nilsen  <kelvin@gcc.gnu.org>
+       * config/rs6000/rs6000-p8swap.c (rs6000_gen_stvx): Generate
+       different rtl trees depending on TARGET_64BIT.
+       (rs6000_gen_lvx): Likewise.
+ 2018-01-16  Eric Botcazou  <ebotcazou@adacore.com>
+       * config/visium/visium.md (nop): Tweak comment.
+       (hazard_nop): Likewise.
+ 2018-01-16  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+       * config/rs6000/rs6000.c (rs6000_opt_vars): Add entry for
+       -mspeculate-indirect-jumps.
+       * config/rs6000/rs6000.md (*call_indirect_elfv2<mode>): Disable
+       for -mno-speculate-indirect-jumps.
+       (*call_indirect_elfv2<mode>_nospec): New define_insn.
+       (*call_value_indirect_elfv2<mode>): Disable for
+       -mno-speculate-indirect-jumps.
+       (*call_value_indirect_elfv2<mode>_nospec): New define_insn.
+       (indirect_jump): Emit different RTL for
+       -mno-speculate-indirect-jumps.
+       (*indirect_jump<mode>): Disable for
+       -mno-speculate-indirect-jumps.
+       (*indirect_jump<mode>_nospec): New define_insn.
+       (tablejump): Emit different RTL for
+       -mno-speculate-indirect-jumps.
+       (tablejumpsi): Disable for -mno-speculate-indirect-jumps.
+       (tablejumpsi_nospec): New define_expand.
+       (tablejumpdi): Disable for -mno-speculate-indirect-jumps.
+       (tablejumpdi_nospec): New define_expand.
+       (*tablejump<mode>_internal1): Disable for
+       -mno-speculate-indirect-jumps.
+       (*tablejump<mode>_internal1_nospec): New define_insn.
+       * config/rs6000/rs6000.opt (mspeculate-indirect-jumps): New
+       option.
  
-       * config/aarch64/aarch64.c (cortexa57_branch_cost): Remove.
-       (thunderx2t99_branch_cost): Likewise.
-       (cortexa35_tunings): Update to use generic_branch_cost.
-       (cortexa53_tunings): Likewise.
-       (cortexa57_tunings): Likewise.
-       (cortexa72_tunings): Likewise.
-       (cortexa73_tunings): Likewise.
-       (thunderx2t99_tunings): Likewise.
+ 2018-01-16  Artyom Skrobov  <tyomitch@gmail.com>
  
- 2017-07-26  Sebastian Huber  <sebastian.huber@embedded-brains.de>
+       * caller-save.c (insert_save): Drop unnecessary parameter.  All
+       callers updated.
  
-       * config/sparc/sparc.c (dump_target_flag_bits): Dump MASK_FSMULD.
-       (sparc_option_override): Honour MASK_FSMULD.
-       * config/sparc/sparc.h (MASK_FEATURES): Add MASK_FSMULD.
-       * config/sparc/sparc.md (muldf3_extend): Use TARGET_FSMULD.
-       * config/sparc/sparc.opt (mfsmuld): New option.
-       * doc/invoke.texi (mfsmuld): Document option.
+ 2018-01-16  Jakub Jelinek  <jakub@redhat.com>
+           Richard Biener  <rguenth@suse.de>
  
- 2017-08-24  Aldy Hernandez  <aldyh@redhat.com>
+       PR libgomp/83590
+       * gimplify.c (gimplify_one_sizepos): For is_gimple_constant (expr)
+       return early, inline manually is_gimple_sizepos.  Make sure if we
+       call gimplify_expr we don't end up with a gimple constant.
+       * tree.c (variably_modified_type_p): Don't return true for
+       is_gimple_constant (_t).  Inline manually is_gimple_sizepos.
+       * gimplify.h (is_gimple_sizepos): Remove.
  
-       PR middle-end/81931
-       * tree-ssanames.c (get_nonzero_bits): Use element_precision
-       instead of TYPE_PRECISION.
+ 2018-01-16  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-08-22  Aldy Hernandez  <aldyh@redhat.com>
+       PR tree-optimization/83857
+       * tree-vect-loop.c (vect_analyze_loop_operations): Don't call
+       vectorizable_live_operation for pure SLP statements.
+       (vectorizable_live_operation): Handle PHIs.
  
-       * wide-int.h (hwi_with_prec::hwi_with_prec): Sign extend.
+ 2018-01-16  Richard Biener  <rguenther@suse.de>
  
- 2017-07-26  Marek Polacek  <polacek@redhat.com>
+       PR tree-optimization/83867
+       * tree-vect-stmts.c (vect_transform_stmt): Precompute
+       nested_in_vect_loop_p since the scalar stmt may get invalidated.
  
-       PR middle-end/70992
-       * tree.c (build2_stat): Don't set TREE_CONSTANT on divisions by zero.
+ 2018-01-16  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-07-26  Richard Biener  <rguenther@suse.de>
+       PR c/83844
+       * stor-layout.c (handle_warn_if_not_align): Use byte_position and
+       multiple_of_p instead of unchecked tree_to_uhwi and UHWI check.
+       If off is not INTEGER_CST, issue a may not be aligned warning
+       rather than isn't aligned.  Use isn%'t rather than isn't.
+       * fold-const.c (multiple_of_p) <case BIT_AND_EXPR>: Don't fall through
+       into MULT_EXPR.
+       <case MULT_EXPR>: Improve the case when bottom and one of the
+       MULT_EXPR operands are INTEGER_CSTs and bottom is multiple of that
+       operand, in that case check if the other operand is multiple of
+       bottom divided by the INTEGER_CST operand.
  
-       * gimple-match-head.c (do_valueize): Return OP if valueize
-       returns NULL_TREE.
-       (get_def): New helper to get at the def stmt of a SSA name
-       if valueize allows.
-       * genmatch.c (dt_node::gen_kids_1): Use get_def instead of
-       do_valueize to get at the def stmt.
-       (dt_operand::gen_gimple_expr): Simplify do_valueize calls.
+ 2018-01-16  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-26  Wilco Dijkstra  <wdijkstr@arm.com>
+       PR target/83858
+       * config/pa/pa.h (FUNCTION_ARG_SIZE): Delete.
+       * config/pa/pa-protos.h (pa_function_arg_size): Declare.
+       * config/pa/som.h (ASM_DECLARE_FUNCTION_NAME): Use
+       pa_function_arg_size instead of FUNCTION_ARG_SIZE.
+       * config/pa/pa.c (pa_function_arg_advance): Likewise.
+       (pa_function_arg, pa_arg_partial_bytes): Likewise.
+       (pa_function_arg_size): New function.
  
-       PR middle-end/46932
-       * auto-inc-dec.c (parse_add_or_inc): Block autoinc on sfp.
+ 2018-01-16  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-26  Martin Liska  <mliska@suse.cz>
+       * fold-const.c (fold_ternary_loc): Construct the vec_perm_indices
+       in a separate statement.
  
-       PR sanitizer/81186
-       * function.c (expand_function_start): Make expansion of
-       nonlocal_goto_save_area after parm_birth_insn.
+ 2018-01-16  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-26  Sebastian Huber  <sebastian.huber@embedded-brains.de>
+       PR tree-optimization/83847
+       * tree-vect-data-refs.c (vect_analyze_data_ref_accesses): Don't
+       group gathers and scatters.
  
-       * config/sparc/sparc.c (sparc_option_override): Remove MASK_FPU
-       from all CPU target flags enable members.
+ 2018-01-16  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-07-26  Richard Biener  <rguenther@suse.de>
+       PR rtl-optimization/83620
+       * params.def (max-sched-ready-insns): Bump minimum value to 1.
  
-       * genmatch.c (dt_simplify::gen): Make iterator vars const.
-       (decision_tree::gen): Make 'type' const.
-       (write_predicate): Likewise.
+       PR rtl-optimization/83213
+       * recog.c (peep2_attempt): Copy over CROSSING_JUMP_P from peepinsn
+       to last if both are JUMP_INSNs.
  
- 2017-07-24  Michael Meissner  <meissner@linux.vnet.ibm.com>
+       PR tree-optimization/83843
+       * gimple-ssa-store-merging.c
+       (imm_store_chain_info::output_merged_store): Handle bit_not_p on
+       store_immediate_info for bswap/nop orig_stores.
+ 2018-01-15  Andrew Waterman  <andrew@sifive.com>
+       * config/riscv/riscv.c (riscv_rtx_costs) <MULT>: Increase cost if
+       !TARGET_MUL.
+       <UDIV>: Increase cost if !TARGET_DIV.
+ 2018-01-15  Segher Boessenkool  <segher@kernel.crashing.org>
+       * config/rs6000/rs6000.md (define_attr "type"): Remove delayed_cr.
+       (define_attr "cr_logical_3op"): New.
+       (cceq_ior_compare): Adjust.
+       (cceq_ior_compare_complement): Adjust.
+       (*cceq_rev_compare): Adjust.
+       * config/rs6000/rs6000.c (rs6000_adjust_cost): Adjust.
+       (is_cracked_insn): Adjust.
+       (insn_must_be_first_in_group): Adjust.
+       * config/rs6000/40x.md: Adjust.
+       * config/rs6000/440.md: Adjust.
+       * config/rs6000/476.md: Adjust.
+       * config/rs6000/601.md: Adjust.
+       * config/rs6000/603.md: Adjust.
+       * config/rs6000/6xx.md: Adjust.
+       * config/rs6000/7450.md: Adjust.
+       * config/rs6000/7xx.md: Adjust.
+       * config/rs6000/8540.md: Adjust.
+       * config/rs6000/cell.md: Adjust.
+       * config/rs6000/e300c2c3.md: Adjust.
+       * config/rs6000/e500mc.md: Adjust.
+       * config/rs6000/e500mc64.md: Adjust.
+       * config/rs6000/e5500.md: Adjust.
+       * config/rs6000/e6500.md: Adjust.
+       * config/rs6000/mpc.md: Adjust.
+       * config/rs6000/power4.md: Adjust.
+       * config/rs6000/power5.md: Adjust.
+       * config/rs6000/power6.md: Adjust.
+       * config/rs6000/power7.md: Adjust.
+       * config/rs6000/power8.md: Adjust.
+       * config/rs6000/power9.md: Adjust.
+       * config/rs6000/rs64.md: Adjust.
+       * config/rs6000/titan.md: Adjust.
+ 2018-01-15  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/i386/predicates.md (indirect_branch_operand): Rewrite
+       ix86_indirect_branch_register logic.
+ 2018-01-15  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/i386/constraints.md (Bs): Update
+       ix86_indirect_branch_register check.  Don't check
+       ix86_indirect_branch_register with GOT_memory_operand.
+       (Bw): Likewise.
+       * config/i386/predicates.md (GOT_memory_operand): Don't check
+       ix86_indirect_branch_register here.
+       (GOT32_symbol_operand): Likewise.
+ 2018-01-15  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/i386/predicates.md (constant_call_address_operand):
+       Rewrite ix86_indirect_branch_register logic.
+       (sibcall_insn_operand): Likewise.
+ 2018-01-15  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/i386/constraints.md (Bs): Replace
+       ix86_indirect_branch_thunk_register with
+       ix86_indirect_branch_register.
+       (Bw): Likewise.
+       * config/i386/i386.md (indirect_jump): Likewise.
+       (tablejump): Likewise.
+       (*sibcall_memory): Likewise.
+       (*sibcall_value_memory): Likewise.
+       Peepholes of indirect call and jump via memory: Likewise.
+       * config/i386/i386.opt: Likewise.
+       * config/i386/predicates.md (indirect_branch_operand): Likewise.
+       (GOT_memory_operand): Likewise.
+       (call_insn_operand): Likewise.
+       (sibcall_insn_operand): Likewise.
+       (GOT32_symbol_operand): Likewise.
+ 2018-01-15  Jakub Jelinek  <jakub@redhat.com>
+       PR middle-end/83837
+       * omp-expand.c (expand_omp_atomic_pipeline): Use loaded_val
+       type rather than type addr's type points to.
+       (expand_omp_atomic_mutex): Likewise.
+       (expand_omp_atomic): Likewise.
+ 2018-01-15  H.J. Lu  <hongjiu.lu@intel.com>
+       PR target/83839
+       * config/i386/i386.c (output_indirect_thunk_function): Use
+       ASM_OUTPUT_LABEL, instead of ASM_OUTPUT_DEF, for TARGET_MACHO
+       for  __x86_return_thunk.
+ 2018-01-15  Richard Biener  <rguenther@suse.de>
+       PR middle-end/83850
+       * expmed.c (extract_bit_field_1): Fix typo.
+ 2018-01-15  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
+       PR target/83687
+       * config/arm/iterators.md (VF): New mode iterator.
+       * config/arm/neon.md (neon_vabd<mode>_2): Use the above.
+       Remove integer-related logic from pattern.
+       (neon_vabd<mode>_3): Likewise.
+ 2018-01-15  Jakub Jelinek  <jakub@redhat.com>
+       PR middle-end/82694
+       * common.opt (fstrict-overflow): No longer an alias.
+       (fwrapv-pointer): New option.
+       * tree.h (TYPE_OVERFLOW_WRAPS, TYPE_OVERFLOW_UNDEFINED): Define
+       also for pointer types based on flag_wrapv_pointer.
+       * opts.c (common_handle_option) <case OPT_fstrict_overflow>: Set
+       opts->x_flag_wrap[pv] to !value, clear opts->x_flag_trapv if
+       opts->x_flag_wrapv got set.
+       * fold-const.c (fold_comparison, fold_binary_loc): Revert 2017-08-01
+       changes, just use TYPE_OVERFLOW_UNDEFINED on pointer type instead of
+       POINTER_TYPE_OVERFLOW_UNDEFINED.
+       * match.pd: Likewise in address comparison pattern.
+       * doc/invoke.texi: Document -fwrapv and -fstrict-overflow.
+ 2018-01-15  Richard Biener  <rguenther@suse.de>
+       PR lto/83804
+       * tree.c (free_lang_data_in_type): Always unlink TYPE_DECLs
+       from TYPE_FIELDS.  Free TYPE_BINFO if not used by devirtualization.
+       Reset type names to their identifier if their TYPE_DECL doesn't
+       have linkage (and thus is used for ODR and devirt).
+       (save_debug_info_for_decl): Remove.
+       (save_debug_info_for_type): Likewise.
+       (add_tree_to_fld_list): Adjust.
+       * tree-pretty-print.c (dump_generic_node): Make dumping of
+       type names more robust.
+ 2018-01-15  Richard Biener  <rguenther@suse.de>
+       * BASE-VER: Bump to 8.0.1.
+ 2018-01-14  Martin Sebor  <msebor@redhat.com>
+       PR other/83508
+       * builtins.c (check_access): Avoid warning when the no-warning bit
+       is set.
+ 2018-01-14  Cory Fields  <cory-nospam-@coryfields.com>
+       * tree-ssa-loop-im.c (sort_bbs_in_loop_postorder_cmp): Stabilize sort.
+       * ira-color (allocno_hard_regs_compare): Likewise.
+ 2018-01-14  Nathan Rossi  <nathan@nathanrossi.com>
+       PR target/83013
+       * config/microblaze/microblaze.c (microblaze_asm_output_ident):
+       Use .pushsection/.popsection.
+ 2018-01-14  Martin Sebor  <msebor@redhat.com>
+       PR c++/81327
+       * doc/invoke.texi (-Wclass-memaccess): Document suppression by casting.
+ 2018-01-14  Jakub Jelinek  <jakub@redhat.com>
+       * config.gcc (i[34567]86-*-*): Remove one duplicate gfniintrin.h
+       entry from extra_headers.
+       (x86_64-*-*): Remove two duplicate gfniintrin.h entries from
+       extra_headers, make the list bitwise identical to the i?86-*-* one.
+ 2018-01-14  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/i386/i386.c (ix86_set_indirect_branch_type): Disallow
+       -mcmodel=large with -mindirect-branch=thunk,
+       -mindirect-branch=thunk-extern, -mfunction-return=thunk and
+       -mfunction-return=thunk-extern.
+       * doc/invoke.texi: Document -mcmodel=large is incompatible with
+       -mindirect-branch=thunk, -mindirect-branch=thunk-extern,
+       -mfunction-return=thunk and -mfunction-return=thunk-extern.
+ 2018-01-14  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/i386/i386.c (print_reg): Print the name of the full
+       integer register without '%'.
+       (ix86_print_operand): Handle 'V'.
+       * doc/extend.texi: Document 'V' modifier.
+ 2018-01-14  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/i386/constraints.md (Bs): Disallow memory operand for
+       -mindirect-branch-register.
+       (Bw): Likewise.
+       * config/i386/predicates.md (indirect_branch_operand): Likewise.
+       (GOT_memory_operand): Likewise.
+       (call_insn_operand): Likewise.
+       (sibcall_insn_operand): Likewise.
+       (GOT32_symbol_operand): Likewise.
+       * config/i386/i386.md (indirect_jump): Call convert_memory_address
+       for -mindirect-branch-register.
+       (tablejump): Likewise.
+       (*sibcall_memory): Likewise.
+       (*sibcall_value_memory): Likewise.
+       Disallow peepholes of indirect call and jump via memory for
+       -mindirect-branch-register.
+       (*call_pop): Replace m with Bw.
+       (*call_value_pop): Likewise.
+       (*sibcall_pop_memory): Replace m with Bs.
+       * config/i386/i386.opt (mindirect-branch-register): New option.
+       * doc/invoke.texi: Document -mindirect-branch-register option.
+ 2018-01-14  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/i386/i386-protos.h (ix86_output_function_return): New.
+       * config/i386/i386.c (ix86_set_indirect_branch_type): Also
+       set function_return_type.
+       (indirect_thunk_name): Add ret_p to indicate thunk for function
+       return.
+       (output_indirect_thunk_function): Pass false to
+       indirect_thunk_name.
+       (ix86_output_indirect_branch_via_reg): Likewise.
+       (ix86_output_indirect_branch_via_push): Likewise.
+       (output_indirect_thunk_function): Create alias for function
+       return thunk if regno < 0.
+       (ix86_output_function_return): New function.
+       (ix86_handle_fndecl_attribute): Handle function_return.
+       (ix86_attribute_table): Add function_return.
+       * config/i386/i386.h (machine_function): Add
+       function_return_type.
+       * config/i386/i386.md (simple_return_internal): Use
+       ix86_output_function_return.
+       (simple_return_internal_long): Likewise.
+       * config/i386/i386.opt (mfunction-return=): New option.
+       (indirect_branch): Mention -mfunction-return=.
+       * doc/extend.texi: Document function_return function attribute.
+       * doc/invoke.texi: Document -mfunction-return= option.
+ 2018-01-14  H.J. Lu  <hongjiu.lu@intel.com>
+       * config/i386/i386-opts.h (indirect_branch): New.
+       * config/i386/i386-protos.h (ix86_output_indirect_jmp): Likewise.
+       * config/i386/i386.c (ix86_using_red_zone): Disallow red-zone
+       with local indirect jump when converting indirect call and jump.
+       (ix86_set_indirect_branch_type): New.
+       (ix86_set_current_function): Call ix86_set_indirect_branch_type.
+       (indirectlabelno): New.
+       (indirect_thunk_needed): Likewise.
+       (indirect_thunk_bnd_needed): Likewise.
+       (indirect_thunks_used): Likewise.
+       (indirect_thunks_bnd_used): Likewise.
+       (INDIRECT_LABEL): Likewise.
+       (indirect_thunk_name): Likewise.
+       (output_indirect_thunk): Likewise.
+       (output_indirect_thunk_function): Likewise.
+       (ix86_output_indirect_branch_via_reg): Likewise.
+       (ix86_output_indirect_branch_via_push): Likewise.
+       (ix86_output_indirect_branch): Likewise.
+       (ix86_output_indirect_jmp): Likewise.
+       (ix86_code_end): Call output_indirect_thunk_function if needed.
+       (ix86_output_call_insn): Call ix86_output_indirect_branch if
+       needed.
+       (ix86_handle_fndecl_attribute): Handle indirect_branch.
+       (ix86_attribute_table): Add indirect_branch.
+       * config/i386/i386.h (machine_function): Add indirect_branch_type
+       and has_local_indirect_jump.
+       * config/i386/i386.md (indirect_jump): Set has_local_indirect_jump
+       to true.
+       (tablejump): Likewise.
+       (*indirect_jump): Use ix86_output_indirect_jmp.
+       (*tablejump_1): Likewise.
+       (simple_return_indirect_internal): Likewise.
+       * config/i386/i386.opt (mindirect-branch=): New option.
+       (indirect_branch): New.
+       (keep): Likewise.
+       (thunk): Likewise.
+       (thunk-inline): Likewise.
+       (thunk-extern): Likewise.
+       * doc/extend.texi: Document indirect_branch function attribute.
+       * doc/invoke.texi: Document -mindirect-branch= option.
+ 2018-01-14  Jan Hubicka  <hubicka@ucw.cz>
+       PR ipa/83051
+       * ipa-inline.c (edge_badness): Tolerate roundoff errors.
+ 2018-01-14  Richard Sandiford  <richard.sandiford@linaro.org>
+       * ipa-inline.c (want_inline_small_function_p): Return false if
+       inlining has already failed with CIF_FINAL_ERROR.
+       (update_caller_keys): Call want_inline_small_function_p before
+       can_inline_edge_p.
+       (update_callee_keys): Likewise.
+ 2018-01-10  Kelvin Nilsen  <kelvin@gcc.gnu.org>
+       * config/rs6000/rs6000-p8swap.c (rs6000_sum_of_two_registers_p):
+       New function.
+       (rs6000_quadword_masked_address_p): Likewise.
+       (quad_aligned_load_p): Likewise.
+       (quad_aligned_store_p): Likewise.
+       (const_load_sequence_p): Add comment to describe the outer-most loop.
+       (mimic_memory_attributes_and_flags): New function.
+       (rs6000_gen_stvx): Likewise.
+       (replace_swapped_aligned_store): Likewise.
+       (rs6000_gen_lvx): Likewise.
+       (replace_swapped_aligned_load): Likewise.
+       (replace_swapped_load_constant): Capitalize argument name in
+       comment describing this function.
+       (rs6000_analyze_swaps): Add a third pass to search for vector loads
+       and stores that access quad-word aligned addresses and replace
+       with stvx or lvx instructions when appropriate.
+       * config/rs6000/rs6000-protos.h (rs6000_sum_of_two_registers_p):
+       New function prototype.
+       (rs6000_quadword_masked_address_p): Likewise.
+       (rs6000_gen_lvx): Likewise.
+       (rs6000_gen_stvx): Likewise.
+       * config/rs6000/vsx.md (*vsx_le_perm_load_<mode>): For modes
+       VSX_D (V2DF, V2DI), modify this split to select lvx instruction
+       when memory address is aligned.
+       (*vsx_le_perm_load_<mode>): For modes VSX_W (V4SF, V4SI), modify
+       this split to select lvx instruction when memory address is aligned.
+       (*vsx_le_perm_load_v8hi): Modify this split to select lvx
+       instruction when memory address is aligned.
+       (*vsx_le_perm_load_v16qi): Likewise.
+       (four unnamed splitters): Modify to select the stvx instruction
+       when memory is aligned.
+ 2018-01-13  Jan Hubicka  <hubicka@ucw.cz>
+       * predict.c (determine_unlikely_bbs): Handle correctly BBs
+       which appears in the queue multiple times.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * config/rs6000/rs6000.c (rs6000_init_hard_regno_mode_ok):
-       Eliminate TARGET_UPPER_REGS_{DF,DI,SF} usage.
-       (rs6000_option_override_internal): Likewise.
-       (rs6000_expand_vector_set): Likewise.
-       * config/rs6000/rs6000.h (TARGET_UPPER_REGS_DF): Delete.
-       (TARGET_UPPER_REGS_SF): Likewise.
-       (TARGET_UPPER_REGS_DI): Likewise.
-       (TARGET_VEXTRACTUB): Eliminate TARGET_UPPER_REGS_{DF,DI,SF}.
-       (TARGET_DIRECT_MOVE_64BIT): Likewise.
-       * config/rs6000/rs6000.md (ALTIVEC_DFORM): Likewise.
-       (float<QHI:mode><FP_ISA3:mode>2_internal): Likewise.
-       (Splitters for DI constants in Altivec registers): Likewise.
-       * config/rs6000/vsx.md (vsx_set_<mode>_p9): Likewise.
-       (vsx_set_v4sf_p9): Likewise.
-       (vsx_set_v4sf_p9_zero): Likewise.
-       (vsx_insert_extract_v4sf_p9): Likewise.
-       (vsx_insert_extract_v4sf_p9_2): Likewise.
+       * tree-vectorizer.h (vec_lower_bound): New structure.
+       (_loop_vec_info): Add check_nonzero and lower_bounds.
+       (LOOP_VINFO_CHECK_NONZERO): New macro.
+       (LOOP_VINFO_LOWER_BOUNDS): Likewise.
+       (LOOP_REQUIRES_VERSIONING_FOR_ALIAS): Check lower_bounds too.
+       * tree-data-ref.h (dr_with_seg_len): Add access_size and align
+       fields.  Make seg_len the distance travelled, not including the
+       access size.
+       (dr_direction_indicator): Declare.
+       (dr_zero_step_indicator): Likewise.
+       (dr_known_forward_stride_p): Likewise.
+       * tree-data-ref.c: Include stringpool.h, tree-vrp.h and
+       tree-ssanames.h.
+       (runtime_alias_check_p): Allow runtime alias checks with
+       variable strides.
+       (operator ==): Compare access_size and align.
+       (prune_runtime_alias_test_list): Rework for new distinction between
+       the access_size and seg_len.
+       (create_intersect_range_checks_index): Likewise.  Cope with polynomial
+       segment lengths.
+       (get_segment_min_max): New function.
+       (create_intersect_range_checks): Use it.
+       (dr_step_indicator): New function.
+       (dr_direction_indicator): Likewise.
+       (dr_zero_step_indicator): Likewise.
+       (dr_known_forward_stride_p): Likewise.
+       * tree-loop-distribution.c (data_ref_segment_size): Return
+       DR_STEP * (niters - 1).
+       (compute_alias_check_pairs): Update call to the dr_with_seg_len
+       constructor.
+       * tree-vect-data-refs.c (vect_check_nonzero_value): New function.
+       (vect_preserves_scalar_order_p): New function, split out from...
+       (vect_analyze_data_ref_dependence): ...here.  Check for zero steps.
+       (vect_vfa_segment_size): Return DR_STEP * (length_factor - 1).
+       (vect_vfa_access_size): New function.
+       (vect_vfa_align): Likewise.
+       (vect_compile_time_alias): Take access_size_a and access_b arguments.
+       (dump_lower_bound): New function.
+       (vect_check_lower_bound): Likewise.
+       (vect_small_gap_p): Likewise.
+       (vectorizable_with_step_bound_p): Likewise.
+       (vect_prune_runtime_alias_test_list): Ignore cross-iteration
+       dependencies if the vectorization factor is 1.  Convert the checks
+       for nonzero steps into checks on the bounds of DR_STEP.  Try using
+       a bounds check for variable steps if the minimum required step is
+       relatively small.  Update calls to the dr_with_seg_len
+       constructor and to vect_compile_time_alias.
+       * tree-vect-loop-manip.c (vect_create_cond_for_lower_bounds): New
+       function.
+       (vect_loop_versioning): Call it.
+       * tree-vect-loop.c (vect_analyze_loop_2): Clear LOOP_VINFO_LOWER_BOUNDS
+       when retrying.
+       (vect_estimate_min_profitable_iters): Account for any bounds checks.
  
- 2017-07-25  Carl Love  <cel@us.ibm.com>
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * doc/extend.texi: Update the built-in documentation file for the
-       existing built-in functions
-       vector signed char vec_cnttz (vector signed char);
-       vector unsigned char vec_cnttz (vector unsigned char);
-       vector signed short vec_cnttz (vector signed short);
-       vector unsigned short vec_cnttz (vector unsigned short);
-       vector signed int vec_cnttz (vector signed int);
-       vector unsigned int vec_cnttz (vector unsigned int);
-       vector signed long long vec_cnttz (vector signed long long);
-       vector unsigned long long vec_cnttz (vector unsigned long long);
+       * doc/sourcebuild.texi (vect_scatter_store): Document.
+       * optabs.def (scatter_store_optab, mask_scatter_store_optab): New
+       optabs.
+       * doc/md.texi (scatter_store@var{m}, mask_scatter_store@var{m}):
+       Document.
+       * genopinit.c (main): Add supports_vec_scatter_store and
+       supports_vec_scatter_store_cached to target_optabs.
+       * gimple.h (gimple_expr_type): Handle IFN_SCATTER_STORE and
+       IFN_MASK_SCATTER_STORE.
+       * internal-fn.def (SCATTER_STORE, MASK_SCATTER_STORE): New internal
+       functions.
+       * internal-fn.h (internal_store_fn_p): Declare.
+       (internal_fn_stored_value_index): Likewise.
+       * internal-fn.c (scatter_store_direct): New macro.
+       (expand_scatter_store_optab_fn): New function.
+       (direct_scatter_store_optab_supported_p): New macro.
+       (internal_store_fn_p): New function.
+       (internal_gather_scatter_fn_p): Handle IFN_SCATTER_STORE and
+       IFN_MASK_SCATTER_STORE.
+       (internal_fn_mask_index): Likewise.
+       (internal_fn_stored_value_index): New function.
+       (internal_gather_scatter_fn_supported_p): Adjust operand numbers
+       for scatter stores.
+       * optabs-query.h (supports_vec_scatter_store_p): Declare.
+       * optabs-query.c (supports_vec_scatter_store_p): New function.
+       * tree-vectorizer.h (vect_get_store_rhs): Declare.
+       * tree-vect-data-refs.c (vect_analyze_data_ref_access): Return
+       true for scatter stores.
+       (vect_gather_scatter_fn_p): Handle scatter stores too.
+       (vect_check_gather_scatter): Consider using scatter stores if
+       supports_vec_scatter_store_p.
+       * tree-vect-patterns.c (vect_try_gather_scatter_pattern): Handle
+       scatter stores too.
+       * tree-vect-stmts.c (exist_non_indexing_operands_for_use_p): Use
+       internal_fn_stored_value_index.
+       (check_load_store_masking): Handle scatter stores too.
+       (vect_get_store_rhs): Make public.
+       (vectorizable_call): Use internal_store_fn_p.
+       (vectorizable_store): Handle scatter store internal functions.
+       (vect_transform_stmt): Compare GROUP_STORE_COUNT with GROUP_SIZE
+       when deciding whether the end of the group has been reached.
+       * config/aarch64/aarch64.md (UNSPEC_ST1_SCATTER): New unspec.
+       * config/aarch64/aarch64-sve.md (scatter_store<mode>): New expander.
+       (mask_scatter_store<mode>): New insns.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  Andrew Pinski  <apinski@cavium.com>
+       * tree-vectorizer.h (vect_gather_scatter_fn_p): Declare.
+       * tree-vect-data-refs.c (vect_gather_scatter_fn_p): Make public.
+       * tree-vect-stmts.c (vect_truncate_gather_scatter_offset): New
+       function.
+       (vect_use_strided_gather_scatters_p): Take a masked_p argument.
+       Use vect_truncate_gather_scatter_offset if we can't treat the
+       operation as a normal gather load or scatter store.
+       (get_group_load_store_type): Take the gather_scatter_info
+       as argument.  Try using a gather load or scatter store for
+       single-element groups.
+       (get_load_store_type): Update calls to get_group_load_store_type
+       and vect_use_strided_gather_scatters_p.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * tree-ssa-uninit.c (warn_uninitialized_vars): Don't warn about memory
-       accesses where the use is for the first operand of a BIT_INSERT.
+       * tree-vectorizer.h (vect_create_data_ref_ptr): Take an extra
+       optional tree argument.
+       * tree-vect-data-refs.c (vect_check_gather_scatter): Check for
+       null target hooks.
+       (vect_create_data_ref_ptr): Take the iv_step as an optional argument,
+       but continue to use the current value as a fallback.
+       (bump_vector_ptr): Use operand_equal_p rather than tree_int_cst_compare
+       to compare the updates.
+       * tree-vect-stmts.c (vect_use_strided_gather_scatters_p): New function.
+       (get_load_store_type): Use it when handling a strided access.
+       (vect_get_strided_load_store_ops): New function.
+       (vect_get_data_ptr_increment): Likewise.
+       (vectorizable_load): Handle strided gather loads.  Always pass
+       a step to vect_create_data_ref_ptr and bump_vector_ptr.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  Jim Wilson  <jim.wilson@linaro.org>
+       * doc/md.texi (gather_load@var{m}): Document.
+       (mask_gather_load@var{m}): Likewise.
+       * genopinit.c (main): Add supports_vec_gather_load and
+       supports_vec_gather_load_cached to target_optabs.
+       * optabs-tree.c (init_tree_optimization_optabs): Use
+       ggc_cleared_alloc to allocate target_optabs.
+       * optabs.def (gather_load_optab, mask_gather_load_optab): New optabs.
+       * internal-fn.def (GATHER_LOAD, MASK_GATHER_LOAD): New internal
+       functions.
+       * internal-fn.h (internal_load_fn_p): Declare.
+       (internal_gather_scatter_fn_p): Likewise.
+       (internal_fn_mask_index): Likewise.
+       (internal_gather_scatter_fn_supported_p): Likewise.
+       * internal-fn.c (gather_load_direct): New macro.
+       (expand_gather_load_optab_fn): New function.
+       (direct_gather_load_optab_supported_p): New macro.
+       (direct_internal_fn_optab): New function.
+       (internal_load_fn_p): Likewise.
+       (internal_gather_scatter_fn_p): Likewise.
+       (internal_fn_mask_index): Likewise.
+       (internal_gather_scatter_fn_supported_p): Likewise.
+       * optabs-query.c (supports_at_least_one_mode_p): New function.
+       (supports_vec_gather_load_p): Likewise.
+       * optabs-query.h (supports_vec_gather_load_p): Declare.
+       * tree-vectorizer.h (gather_scatter_info): Add ifn, element_type
+       and memory_type field.
+       (NUM_PATTERNS): Bump to 15.
+       * tree-vect-data-refs.c: Include internal-fn.h.
+       (vect_gather_scatter_fn_p): New function.
+       (vect_describe_gather_scatter_call): Likewise.
+       (vect_check_gather_scatter): Try using internal functions for
+       gather loads.  Recognize existing calls to a gather load function.
+       (vect_analyze_data_refs): Consider using gather loads if
+       supports_vec_gather_load_p.
+       * tree-vect-patterns.c (vect_get_load_store_mask): New function.
+       (vect_get_gather_scatter_offset_type): Likewise.
+       (vect_convert_mask_for_vectype): Likewise.
+       (vect_add_conversion_to_patterm): Likewise.
+       (vect_try_gather_scatter_pattern): Likewise.
+       (vect_recog_gather_scatter_pattern): New pattern recognizer.
+       (vect_vect_recog_func_ptrs): Add it.
+       * tree-vect-stmts.c (exist_non_indexing_operands_for_use_p): Use
+       internal_fn_mask_index and internal_gather_scatter_fn_p.
+       (check_load_store_masking): Take the gather_scatter_info as an
+       argument and handle gather loads.
+       (vect_get_gather_scatter_ops): New function.
+       (vectorizable_call): Check internal_load_fn_p.
+       (vectorizable_load): Likewise.  Handle gather load internal
+       functions.
+       (vectorizable_store): Update call to check_load_store_masking.
+       * config/aarch64/aarch64.md (UNSPEC_LD1_GATHER): New unspec.
+       * config/aarch64/iterators.md (SVE_S, SVE_D): New mode iterators.
+       * config/aarch64/predicates.md (aarch64_gather_scale_operand_w)
+       (aarch64_gather_scale_operand_d): New predicates.
+       * config/aarch64/aarch64-sve.md (gather_load<mode>): New expander.
+       (mask_gather_load<mode>): New insns.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       PR bootstrap/81521
-       * config/i386/winnt-cxx.c (i386_pe_adjust_class_at_definition): Look
-       for FUNCTION_DECLs in TYPE_FIELDS rather than TYPE_METHODS.
+       * optabs.def (fold_left_plus_optab): New optab.
+       * doc/md.texi (fold_left_plus_@var{m}): Document.
+       * internal-fn.def (IFN_FOLD_LEFT_PLUS): New internal function.
+       * internal-fn.c (fold_left_direct): Define.
+       (expand_fold_left_optab_fn): Likewise.
+       (direct_fold_left_optab_supported_p): Likewise.
+       * fold-const-call.c (fold_const_fold_left): New function.
+       (fold_const_call): Use it to fold CFN_FOLD_LEFT_PLUS.
+       * tree-parloops.c (valid_reduction_p): New function.
+       (gather_scalar_reductions): Use it.
+       * tree-vectorizer.h (FOLD_LEFT_REDUCTION): New vect_reduction_type.
+       (vect_finish_replace_stmt): Declare.
+       * tree-vect-loop.c (fold_left_reduction_fn): New function.
+       (needs_fold_left_reduction_p): New function, split out from...
+       (vect_is_simple_reduction): ...here.  Accept reductions that
+       forbid reassociation, but give them type FOLD_LEFT_REDUCTION.
+       (vect_force_simple_reduction): Also store the reduction type in
+       the assignment's STMT_VINFO_REDUC_TYPE.
+       (vect_model_reduction_cost): Handle FOLD_LEFT_REDUCTION.
+       (merge_with_identity): New function.
+       (vect_expand_fold_left): Likewise.
+       (vectorize_fold_left_reduction): Likewise.
+       (vectorizable_reduction): Handle FOLD_LEFT_REDUCTION.  Leave the
+       scalar phi in place for it.  Check for target support and reject
+       cases that would reassociate the operation.  Defer the transform
+       phase to vectorize_fold_left_reduction.
+       * config/aarch64/aarch64.md (UNSPEC_FADDA): New unspec.
+       * config/aarch64/aarch64-sve.md (fold_left_plus_<mode>): New expander.
+       (*fold_left_plus_<mode>, *pred_fold_left_plus_<mode>): New insns.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+       * tree-if-conv.c (predicate_mem_writes): Remove redundant
+       call to ifc_temp_var.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  Jim Wilson  <jim.wilson@linaro.org>
+       * target.def (legitimize_address_displacement): Take the original
+       offset as a poly_int.
+       * targhooks.h (default_legitimize_address_displacement): Update
+       accordingly.
+       * targhooks.c (default_legitimize_address_displacement): Likewise.
+       * doc/tm.texi: Regenerate.
+       * lra-constraints.c (base_plus_disp_to_reg): Take the displacement
+       as an argument, moving assert of ad->disp == ad->disp_term to...
+       (process_address_1): ...here.  Update calls to base_plus_disp_to_reg.
+       Try calling targetm.legitimize_address_displacement before expanding
+       the address rather than afterwards, and adjust for the new interface.
+       * config/aarch64/aarch64.c (aarch64_legitimize_address_displacement):
+       Match the new hook interface.  Handle SVE addresses.
+       * config/sh/sh.c (sh_legitimize_address_displacement): Make the
+       new hook interface.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+       * Makefile.in (OBJS): Add early-remat.o.
+       * target.def (select_early_remat_modes): New hook.
+       * doc/tm.texi.in (TARGET_SELECT_EARLY_REMAT_MODES): New hook.
+       * doc/tm.texi: Regenerate.
+       * targhooks.h (default_select_early_remat_modes): Declare.
+       * targhooks.c (default_select_early_remat_modes): New function.
+       * timevar.def (TV_EARLY_REMAT): New timevar.
+       * passes.def (pass_early_remat): New pass.
+       * tree-pass.h (make_pass_early_remat): Declare.
+       * early-remat.c: New file.
+       * config/aarch64/aarch64.c (aarch64_select_early_remat_modes): New
+       function.
+       (TARGET_SELECT_EARLY_REMAT_MODES): Define.
  
-       * config/i386/gstabs.h: Delete.
-       * config/i386/openbsd.h, config/i386/t-openbsd: Likewise.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  Uros Bizjak  <ubizjak@gmail.com>
+       * tree-vect-loop-manip.c (vect_gen_scalar_loop_niters): Replace
+       vfm1 with a bound_epilog parameter.
+       (vect_do_peeling): Update calls accordingly, and move the prologue
+       call earlier in the function.  Treat the base bound_epilog as 0 for
+       fully-masked loops and retain vf - 1 for other loops.  Add 1 to
+       this base when peeling for gaps.
+       * tree-vect-loop.c (vect_analyze_loop_2): Allow peeling for gaps
+       with fully-masked loops.
+       (vect_estimate_min_profitable_iters): Handle the single peeled
+       iteration in that case.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * config/i386/i386.c (ix86_decompose_address): Do not check for
-       register RTX when looking at index_reg or base_reg.
-       * config/i386/i386.h (INCOMING_RETURN_ADDR_RTX): Use stack_pointer_rtx.
+       * tree-vect-data-refs.c (vect_analyze_group_access_1): Allow
+       single-element interleaving even if the size is not a power of 2.
+       * tree-vect-stmts.c (get_load_store_type): Disallow elementwise
+       accesses for single-element interleaving if the group size is
+       not a power of 2.
  
- 2017-07-25  Eric Botcazou  <ebotcazou@adacore.com>
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * gimple.c (gimple_assign_set_rhs_with_ops): Do not ask gsi_replace
-       to update EH info here.
+       * doc/md.texi (fold_extract_last_@var{m}): Document.
+       * doc/sourcebuild.texi (vect_fold_extract_last): Likewise.
+       * optabs.def (fold_extract_last_optab): New optab.
+       * internal-fn.def (FOLD_EXTRACT_LAST): New internal function.
+       * internal-fn.c (fold_extract_direct): New macro.
+       (expand_fold_extract_optab_fn): Likewise.
+       (direct_fold_extract_optab_supported_p): Likewise.
+       * tree-vectorizer.h (EXTRACT_LAST_REDUCTION): New vect_reduction_type.
+       * tree-vect-loop.c (vect_model_reduction_cost): Handle
+       EXTRACT_LAST_REDUCTION.
+       (get_initial_def_for_reduction): Do not create an initial vector
+       for EXTRACT_LAST_REDUCTION reductions.
+       (vectorizable_reduction): Leave the scalar phi in place for
+       EXTRACT_LAST_REDUCTIONs.  Try using EXTRACT_LAST_REDUCTION
+       ahead of INTEGER_INDUC_COND_REDUCTION.  Do not check for an
+       epilogue code for EXTRACT_LAST_REDUCTION and defer the
+       transform phase to vectorizable_condition.
+       * tree-vect-stmts.c (vect_finish_stmt_generation_1): New function,
+       split out from...
+       (vect_finish_stmt_generation): ...here.
+       (vect_finish_replace_stmt): New function.
+       (vectorizable_condition): Handle EXTRACT_LAST_REDUCTION.
+       * config/aarch64/aarch64-sve.md (fold_extract_last_<mode>): New
+       pattern.
+       * config/aarch64/aarch64.md (UNSPEC_CLASTB): New unspec.
  
- 2017-07-25  Alexander Monakov  <amonakov@ispras.ru>
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * match.pd ((X * CST1) * CST2): Simplify to X * (CST1 * CST2).
+       * doc/md.texi (extract_last_@var{m}): Document.
+       * optabs.def (extract_last_optab): New optab.
+       * internal-fn.def (EXTRACT_LAST): New internal function.
+       * internal-fn.c (cond_unary_direct): New macro.
+       (expand_cond_unary_optab_fn): Likewise.
+       (direct_cond_unary_optab_supported_p): Likewise.
+       * tree-vect-loop.c (vectorizable_live_operation): Allow fully-masked
+       loops using EXTRACT_LAST.
+       * config/aarch64/aarch64-sve.md (aarch64_sve_lastb<mode>): Rename to...
+       (extract_last_<mode>): ...this optab.
+       (vec_extract<mode><Vel>): Update accordingly.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  Alexander Monakov  <amonakov@ispras.ru>
+       * target.def (empty_mask_is_expensive): New hook.
+       * doc/tm.texi.in (TARGET_VECTORIZE_EMPTY_MASK_IS_EXPENSIVE): New hook.
+       * doc/tm.texi: Regenerate.
+       * targhooks.h (default_empty_mask_is_expensive): Declare.
+       * targhooks.c (default_empty_mask_is_expensive): New function.
+       * tree-vectorizer.c (vectorize_loops): Only call optimize_mask_stores
+       if the target says that empty masks are expensive.
+       * config/aarch64/aarch64.c (aarch64_empty_mask_is_expensive):
+       New function.
+       (TARGET_VECTORIZE_EMPTY_MASK_IS_EXPENSIVE): Redefine.
  
-       * match.pd ((X * CST) * Y): Reassociate to (X * Y) * CST.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  Torsten Duwe  <duwe@suse.de>
+       * tree-vectorizer.h (_loop_vec_info::mask_skip_niters): New field.
+       (LOOP_VINFO_MASK_SKIP_NITERS): New macro.
+       (vect_use_loop_mask_for_alignment_p): New function.
+       (vect_prepare_for_masked_peels, vect_gen_while_not): Declare.
+       * tree-vect-loop-manip.c (vect_set_loop_masks_directly): Add an
+       niters_skip argument.  Make sure that the first niters_skip elements
+       of the first iteration are inactive.
+       (vect_set_loop_condition_masked): Handle LOOP_VINFO_MASK_SKIP_NITERS.
+       Update call to vect_set_loop_masks_directly.
+       (get_misalign_in_elems): New function, split out from...
+       (vect_gen_prolog_loop_niters): ...here.
+       (vect_update_init_of_dr): Take a code argument that specifies whether
+       the adjustment should be added or subtracted.
+       (vect_update_init_of_drs): Likewise.
+       (vect_prepare_for_masked_peels): New function.
+       (vect_do_peeling): Skip prologue peeling if we're using a mask
+       instead.  Update call to vect_update_inits_of_drs.
+       * tree-vect-loop.c (_loop_vec_info::_loop_vec_info): Initialize
+       mask_skip_niters.
+       (vect_analyze_loop_2): Allow fully-masked loops with peeling for
+       alignment.  Do not include the number of peeled iterations in
+       the minimum threshold in that case.
+       (vectorizable_induction): Adjust the start value down by
+       LOOP_VINFO_MASK_SKIP_NITERS iterations.
+       (vect_transform_loop): Call vect_prepare_for_masked_peels.
+       Take the number of skipped iterations into account when calculating
+       the loop bounds.
+       * tree-vect-stmts.c (vect_gen_while_not): New function.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * common.opt: Introduce -fpatchable-function-entry
-       command line option, and its variables function_entry_patch_area_size
-       and function_entry_patch_area_start.
-       * opts.c (common_handle_option): Add -fpatchable_function_entry_ case,
-       including a two-value parser.
-       * target.def (print_patchable_function_entry): New target hook.
-       * targhooks.h (default_print_patchable_function_entry): New function.
-       * targhooks.c (default_print_patchable_function_entry): Likewise.
-       * toplev.c (process_options): Switch off IPA-RA if
-       patchable function entries are being generated.
-       * varasm.c (assemble_start_function): Look at the
-       patchable-function-entry command line switch and current
-       function attributes and maybe generate NOP instructions by
-       calling the print_patchable_function_entry hook.
-       * doc/extend.texi: Document patchable_function_entry attribute.
-       * doc/invoke.texi: Document -fpatchable_function_entry
-       command line option.
-       * doc/tm.texi.in (TARGET_ASM_PRINT_PATCHABLE_FUNCTION_ENTRY):
-       New target hook.
-       * doc/tm.texi: Re-generate.
+       * doc/sourcebuild.texi (vect_fully_masked): Document.
+       * params.def (PARAM_MIN_VECT_LOOP_BOUND): Change minimum and
+       default value to 0.
+       * tree-vect-loop.c (vect_analyze_loop_costing): New function,
+       split out from...
+       (vect_analyze_loop_2): ...here. Don't check the vectorization
+       factor against the number of loop iterations if the loop is
+       fully-masked.
  
- 2017-07-25  Jakub Jelinek  <jakub@redhat.com>
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       PR target/81532
-       * config/i386/constraints.md (Yd, Ye): Use ALL_SSE_REGS for
-       TARGET_AVX512DQ rather than TARGET_AVX512BW.
+       * tree-ssa-loop-ivopts.c (USE_ADDRESS): Split into...
+       (USE_REF_ADDRESS, USE_PTR_ADDRESS): ...these new use types.
+       (dump_groups): Update accordingly.
+       (iv_use::mem_type): New member variable.
+       (address_p): New function.
+       (record_use): Add a mem_type argument and initialize the new
+       mem_type field.
+       (record_group_use): Add a mem_type argument.  Use address_p.
+       Remove obsolete null checks of base_object.  Update call to record_use.
+       (find_interesting_uses_op): Update call to record_group_use.
+       (find_interesting_uses_cond): Likewise.
+       (find_interesting_uses_address): Likewise.
+       (get_mem_type_for_internal_fn): New function.
+       (find_address_like_use): Likewise.
+       (find_interesting_uses_stmt): Try find_address_like_use before
+       calling find_interesting_uses_op.
+       (addr_offset_valid_p): Use the iv mem_type field as the type
+       of the addressed memory.
+       (add_autoinc_candidates): Likewise.
+       (get_address_cost): Likewise.
+       (split_small_address_groups_p): Use address_p.
+       (split_address_groups): Likewise.
+       (add_iv_candidate_for_use): Likewise.
+       (autoinc_possible_for_pair): Likewise.
+       (rewrite_groups): Likewise.
+       (get_use_type): Check for USE_REF_ADDRESS instead of USE_ADDRESS.
+       (determine_group_iv_cost): Update after split of USE_ADDRESS.
+       (get_alias_ptr_type_for_ptr_address): New function.
+       (rewrite_use_address): Rewrite address uses in calls that were
+       identified by find_address_like_use.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  Tamar Christina  <tamar.christina@arm.com>
+       * expr.c (expand_expr_addr_expr_1): Handle ADDR_EXPRs of
+       TARGET_MEM_REFs.
+       * gimple-expr.h (is_gimple_addressable): Likewise.
+       * gimple-expr.c (is_gimple_address): Likewise.
+       * internal-fn.c (expand_call_mem_ref): New function.
+       (expand_mask_load_optab_fn): Use it.
+       (expand_mask_store_optab_fn): Likewise.
  
-       * config/arm/parsecpu.awk (all_cores): Remove duplicates.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  Richard Biener  <rguenther@suse.de>
+       * doc/md.texi (cond_add@var{mode}, cond_sub@var{mode})
+       (cond_and@var{mode}, cond_ior@var{mode}, cond_xor@var{mode})
+       (cond_smin@var{mode}, cond_smax@var{mode}, cond_umin@var{mode})
+       (cond_umax@var{mode}): Document.
+       * optabs.def (cond_add_optab, cond_sub_optab, cond_and_optab)
+       (cond_ior_optab, cond_xor_optab, cond_smin_optab, cond_smax_optab)
+       (cond_umin_optab, cond_umax_optab): New optabs.
+       * internal-fn.def (COND_ADD, COND_SUB, COND_MIN, COND_MAX, COND_AND)
+       (COND_IOR, COND_XOR): New internal functions.
+       * internal-fn.h (get_conditional_internal_fn): Declare.
+       * internal-fn.c (cond_binary_direct): New macro.
+       (expand_cond_binary_optab_fn): Likewise.
+       (direct_cond_binary_optab_supported_p): Likewise.
+       (get_conditional_internal_fn): New function.
+       * tree-vect-loop.c (vectorizable_reduction): Handle fully-masked loops.
+       Cope with reduction statements that are vectorized as calls rather
+       than assignments.
+       * config/aarch64/aarch64-sve.md (cond_<optab><mode>): New insns.
+       * config/aarch64/iterators.md (UNSPEC_COND_ADD, UNSPEC_COND_SUB)
+       (UNSPEC_COND_SMAX, UNSPEC_COND_UMAX, UNSPEC_COND_SMIN)
+       (UNSPEC_COND_UMIN, UNSPEC_COND_AND, UNSPEC_COND_ORR)
+       (UNSPEC_COND_EOR): New unspecs.
+       (optab): Add mappings for them.
+       (SVE_COND_INT_OP, SVE_COND_FP_OP): New int iterators.
+       (sve_int_op, sve_fp_op): New int attributes.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       PR tree-optimization/81455
-       * tree-ssa-loop-unswitch.c (find_loop_guard): Make sure to
-       not walk in cycles when looking for guards.
+       * optabs.def (while_ult_optab): New optab.
+       * doc/md.texi (while_ult@var{m}@var{n}): Document.
+       * internal-fn.def (WHILE_ULT): New internal function.
+       * internal-fn.h (direct_internal_fn_supported_p): New override
+       that takes two types as argument.
+       * internal-fn.c (while_direct): New macro.
+       (expand_while_optab_fn): New function.
+       (convert_optab_supported_p): Likewise.
+       (direct_while_optab_supported_p): New macro.
+       * wide-int.h (wi::udiv_ceil): New function.
+       * tree-vectorizer.h (rgroup_masks): New structure.
+       (vec_loop_masks): New typedef.
+       (_loop_vec_info): Add masks, mask_compare_type, can_fully_mask_p
+       and fully_masked_p.
+       (LOOP_VINFO_CAN_FULLY_MASK_P, LOOP_VINFO_FULLY_MASKED_P)
+       (LOOP_VINFO_MASKS, LOOP_VINFO_MASK_COMPARE_TYPE): New macros.
+       (vect_max_vf): New function.
+       (slpeel_make_loop_iterate_ntimes): Delete.
+       (vect_set_loop_condition, vect_get_loop_mask_type, vect_gen_while)
+       (vect_halve_mask_nunits, vect_double_mask_nunits): Declare.
+       (vect_record_loop_mask, vect_get_loop_mask): Likewise.
+       * tree-vect-loop-manip.c: Include tree-ssa-loop-niter.h,
+       internal-fn.h, stor-layout.h and optabs-query.h.
+       (vect_set_loop_mask): New function.
+       (add_preheader_seq): Likewise.
+       (add_header_seq): Likewise.
+       (interleave_supported_p): Likewise.
+       (vect_maybe_permute_loop_masks): Likewise.
+       (vect_set_loop_masks_directly): Likewise.
+       (vect_set_loop_condition_masked): Likewise.
+       (vect_set_loop_condition_unmasked): New function, split out from
+       slpeel_make_loop_iterate_ntimes.
+       (slpeel_make_loop_iterate_ntimes): Rename to...
+       (vect_set_loop_condition): ...this.  Use vect_set_loop_condition_masked
+       for fully-masked loops and vect_set_loop_condition_unmasked otherwise.
+       (vect_do_peeling): Update call accordingly.
+       (vect_gen_vector_loop_niters): Use VF as the step for fully-masked
+       loops.
+       * tree-vect-loop.c (_loop_vec_info::_loop_vec_info): Initialize
+       mask_compare_type, can_fully_mask_p and fully_masked_p.
+       (release_vec_loop_masks): New function.
+       (_loop_vec_info): Use it to free the loop masks.
+       (can_produce_all_loop_masks_p): New function.
+       (vect_get_max_nscalars_per_iter): Likewise.
+       (vect_verify_full_masking): Likewise.
+       (vect_analyze_loop_2): Save LOOP_VINFO_CAN_FULLY_MASK_P around
+       retries, and free the mask rgroups before retrying.  Check loop-wide
+       reasons for disallowing fully-masked loops.  Make the final decision
+       about whether use a fully-masked loop or not.
+       (vect_estimate_min_profitable_iters): Do not assume that peeling
+       for the number of iterations will be needed for fully-masked loops.
+       (vectorizable_reduction): Disable fully-masked loops.
+       (vectorizable_live_operation): Likewise.
+       (vect_halve_mask_nunits): New function.
+       (vect_double_mask_nunits): Likewise.
+       (vect_record_loop_mask): Likewise.
+       (vect_get_loop_mask): Likewise.
+       (vect_transform_loop): Handle the case in which the final loop
+       iteration might handle a partial vector.  Call vect_set_loop_condition
+       instead of slpeel_make_loop_iterate_ntimes.
+       * tree-vect-stmts.c: Include tree-ssa-loop-niter.h and gimple-fold.h.
+       (check_load_store_masking): New function.
+       (prepare_load_store_mask): Likewise.
+       (vectorizable_store): Handle fully-masked loops.
+       (vectorizable_load): Likewise.
+       (supportable_widening_operation): Use vect_halve_mask_nunits for
+       booleans.
+       (supportable_narrowing_operation): Likewise vect_double_mask_nunits.
+       (vect_gen_while): New function.
+       * config/aarch64/aarch64.md (umax<mode>3): New expander.
+       (aarch64_uqdec<mode>): New insn.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  Richard Biener  <rguenther@suse.de>
+       * optabs.def (reduc_and_scal_optab, reduc_ior_scal_optab)
+       (reduc_xor_scal_optab): New optabs.
+       * doc/md.texi (reduc_and_scal_@var{m}, reduc_ior_scal_@var{m})
+       (reduc_xor_scal_@var{m}): Document.
+       * doc/sourcebuild.texi (vect_logical_reduc): Likewise.
+       * internal-fn.def (IFN_REDUC_AND, IFN_REDUC_IOR, IFN_REDUC_XOR): New
+       internal functions.
+       * fold-const-call.c (fold_const_call): Handle them.
+       * tree-vect-loop.c (reduction_fn_for_scalar_code): Return the new
+       internal functions for BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR.
+       * config/aarch64/aarch64-sve.md (reduc_<bit_reduc>_scal_<mode>):
+       (*reduc_<bit_reduc>_scal_<mode>): New patterns.
+       * config/aarch64/iterators.md (UNSPEC_ANDV, UNSPEC_ORV)
+       (UNSPEC_XORV): New unspecs.
+       (optab): Add entries for them.
+       (BITWISEV): New int iterator.
+       (bit_reduc_op): New int attributes.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       PR tree-optimization/81529
-       * tree-vect-stmts.c (process_use): Disregard live induction PHIs
-       when optimizing backedge uses.
+       * doc/md.texi (vec_shl_insert_@var{m}): New optab.
+       * internal-fn.def (VEC_SHL_INSERT): New internal function.
+       * optabs.def (vec_shl_insert_optab): New optab.
+       * tree-vectorizer.h (can_duplicate_and_interleave_p): Declare.
+       (duplicate_and_interleave): Likewise.
+       * tree-vect-loop.c: Include internal-fn.h.
+       (neutral_op_for_slp_reduction): New function, split out from
+       get_initial_defs_for_reduction.
+       (get_initial_def_for_reduction): Handle option 2 for variable-length
+       vectors by loading the neutral value into a vector and then shifting
+       the initial value into element 0.
+       (get_initial_defs_for_reduction): Replace the code argument with
+       the neutral value calculated by neutral_op_for_slp_reduction.
+       Use gimple_build_vector for constant-length vectors.
+       Use IFN_VEC_SHL_INSERT for variable-length vectors if all
+       but the first group_size elements have a neutral value.
+       Use duplicate_and_interleave otherwise.
+       (vect_create_epilog_for_reduction): Take a neutral_op parameter.
+       Update call to get_initial_defs_for_reduction.  Handle SLP
+       reductions for variable-length vectors by creating one vector
+       result for each scalar result, with the elements associated
+       with other scalar results stubbed out with the neutral value.
+       (vectorizable_reduction): Call neutral_op_for_slp_reduction.
+       Require IFN_VEC_SHL_INSERT for double reductions on
+       variable-length vectors, or SLP reductions that have
+       a neutral value.  Require can_duplicate_and_interleave_p
+       support for variable-length unchained SLP reductions if there
+       is no neutral value, such as for MIN/MAX reductions.  Also require
+       the number of vector elements to be a multiple of the number of
+       SLP statements when doing variable-length unchained SLP reductions.
+       Update call to vect_create_epilog_for_reduction.
+       * tree-vect-slp.c (can_duplicate_and_interleave_p): Make public
+       and remove initial values.
+       (duplicate_and_interleave): Make public.
+       * config/aarch64/aarch64.md (UNSPEC_INSR): New unspec.
+       * config/aarch64/aarch64-sve.md (vec_shl_insert_<mode>): New insn.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  David Edelsohn  <dje.gcc@gmail.com>
+       * tree-vect-slp.c: Include gimple-fold.h and internal-fn.h
+       (can_duplicate_and_interleave_p): New function.
+       (vect_get_and_check_slp_defs): Take the vector of statements
+       rather than just the current one.  Remove excess parentheses.
+       Restrict rejection of vect_constant_def and vect_external_def
+       for variable-length vectors to boolean types, or types for which
+       can_duplicate_and_interleave_p is false.
+       (vect_build_slp_tree_2): Update call to vect_get_and_check_slp_defs.
+       (duplicate_and_interleave): New function.
+       (vect_get_constant_vectors): Use gimple_build_vector for
+       constant-length vectors and suitable variable-length constant
+       vectors.  Use duplicate_and_interleave for other variable-length
+       vectors.  Don't defer the update when inserting new statements.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * dwarf2asm.c (dw2_asm_output_nstring): Encode double quote
-       character for AIX.
-       * dwarf2out.c (output_macinfo): Copy debug_line_section_label
-       to dl_section_ref.  On AIX, append an expression to subtract
-       the size of the section length to dl_section_ref.
+       * tree-vect-loop.c (vect_estimate_min_profitable_iters): Make sure
+       min_profitable_iters doesn't go negative.
  
- 2017-07-25  Segher Boessenkool  <segher@kernel.crashing.org>
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * configure.ac: If any of the config.* scripts fail, exit 1.
-       * configure: Regenerate.
+       * doc/md.texi (vec_mask_load_lanes@var{m}@var{n}): Document.
+       (vec_mask_store_lanes@var{m}@var{n}): Likewise.
+       * optabs.def (vec_mask_load_lanes_optab): New optab.
+       (vec_mask_store_lanes_optab): Likewise.
+       * internal-fn.def (MASK_LOAD_LANES): New internal function.
+       (MASK_STORE_LANES): Likewise.
+       * internal-fn.c (mask_load_lanes_direct): New macro.
+       (mask_store_lanes_direct): Likewise.
+       (expand_mask_load_optab_fn): Handle masked operations.
+       (expand_mask_load_lanes_optab_fn): New macro.
+       (expand_mask_store_optab_fn): Handle masked operations.
+       (expand_mask_store_lanes_optab_fn): New macro.
+       (direct_mask_load_lanes_optab_supported_p): Likewise.
+       (direct_mask_store_lanes_optab_supported_p): Likewise.
+       * tree-vectorizer.h (vect_store_lanes_supported): Take a masked_p
+       parameter.
+       (vect_load_lanes_supported): Likewise.
+       * tree-vect-data-refs.c (strip_conversion): New function.
+       (can_group_stmts_p): Likewise.
+       (vect_analyze_data_ref_accesses): Use it instead of checking
+       for a pair of assignments.
+       (vect_store_lanes_supported): Take a masked_p parameter.
+       (vect_load_lanes_supported): Likewise.
+       * tree-vect-loop.c (vect_analyze_loop_2): Update calls to
+       vect_store_lanes_supported and vect_load_lanes_supported.
+       * tree-vect-slp.c (vect_analyze_slp_instance): Likewise.
+       * tree-vect-stmts.c (get_group_load_store_type): Take a masked_p
+       parameter.  Don't allow gaps for masked accesses.
+       Use vect_get_store_rhs.  Update calls to vect_store_lanes_supported
+       and vect_load_lanes_supported.
+       (get_load_store_type): Take a masked_p parameter and update
+       call to get_group_load_store_type.
+       (vectorizable_store): Update call to get_load_store_type.
+       Handle IFN_MASK_STORE_LANES.
+       (vectorizable_load): Update call to get_load_store_type.
+       Handle IFN_MASK_LOAD_LANES.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  Richard Biener  <rguenther@suse.de>
-       PR middle-end/81546
-       * tree-ssa-operands.c (verify_imm_links): Remove cap on number
-       of immediate uses, be more verbose on errors.
- 2017-07-25  Richard Biener  <rguenther@suse.de>
-       PR tree-optimization/81510
-       * tree-vect-loop.c (vect_is_simple_reduction): When the
-       reduction stmt is not inside the loop bail out.
- 2017-07-25  Richard Biener  <rguenther@suse.de>
+       * config/aarch64/aarch64-modes.def: Define x2, x3 and x4 vector
+       modes for SVE.
+       * config/aarch64/aarch64-protos.h
+       (aarch64_sve_struct_memory_operand_p): Declare.
+       * config/aarch64/iterators.md (SVE_STRUCT): New mode iterator.
+       (vector_count, insn_length, VSINGLE, vsingle): New mode attributes.
+       (VPRED, vpred): Handle SVE structure modes.
+       * config/aarch64/constraints.md (Utx): New constraint.
+       * config/aarch64/predicates.md (aarch64_sve_struct_memory_operand)
+       (aarch64_sve_struct_nonimmediate_operand): New predicates.
+       * config/aarch64/aarch64.md (UNSPEC_LDN, UNSPEC_STN): New unspecs.
+       * config/aarch64/aarch64-sve.md (mov<mode>, *aarch64_sve_mov<mode>_le)
+       (*aarch64_sve_mov<mode>_be, pred_mov<mode>): New patterns for
+       structure modes.  Split into pieces after RA.
+       (vec_load_lanes<mode><vsingle>, vec_mask_load_lanes<mode><vsingle>)
+       (vec_store_lanes<mode><vsingle>, vec_mask_store_lanes<mode><vsingle>):
+       New patterns.
+       * config/aarch64/aarch64.c (aarch64_classify_vector_mode): Handle
+       SVE structure modes.
+       (aarch64_classify_address): Likewise.
+       (sizetochar): Move earlier in file.
+       (aarch64_print_operand): Handle SVE register lists.
+       (aarch64_array_mode): New function.
+       (aarch64_sve_struct_memory_operand_p): Likewise.
+       (TARGET_ARRAY_MODE): Redefine.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       PR tree-optimization/81303
-       * tree-vect-loop-manip.c (vect_loop_versioning): Build
-       profitability check against LOOP_VINFO_NITERSM1.
+       * target.def (array_mode): New target hook.
+       * doc/tm.texi.in (TARGET_ARRAY_MODE): New hook.
+       * doc/tm.texi: Regenerate.
+       * hooks.h (hook_optmode_mode_uhwi_none): Declare.
+       * hooks.c (hook_optmode_mode_uhwi_none): New function.
+       * tree-vect-data-refs.c (vect_lanes_optab_supported_p): Use
+       targetm.array_mode.
+       * stor-layout.c (mode_for_array): Likewise.  Support polynomial
+       type sizes.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-25  Alexander Monakov  <amonakov@ispras.ru>
+       * fold-const.c (fold_binary_loc): Check the argument types
+       rather than the result type when testing for a vector operation.
  
-       * domwalk.c (cmp_bb_postorder): Simplify.
-       (sort_bbs_postorder): New function.  Use it...
-       (dom_walker::walk): ...here to optimize common cases.
- 2017-07-25  Martin Liska  <mliska@suse.cz>
-       PR ipa/81520
-       * ipa-visibility.c (function_and_variable_visibility): Make the
-       redirection just on target that supports aliasing.
-       Fix GNU coding style.
- 2017-07-25  Sebastian Huber  <sebastian.huber@embedded-brains.de>
-       PR libgcc/61152
-       * config/aarch64/rtems.h: Add GCC Runtime Library Exception.
-       Format changes.
-       * config/arm/rtems.h: Likewise.
-       * config/bfin/rtems.h: Likewise.
-       * config/i386/rtemself.h: Likewise.
-       * config/lm32/rtems.h: Likewise.
-       * config/m32c/rtems.h: Likewise.
-       * config/m68k/rtemself.h: Likewise.
-       * config/microblaze/rtems.h: Likewise.
-       * config/mips/rtems.h: Likewise.
-       * config/moxie/rtems.h: Likewise.
-       * config/nios2/rtems.h: Likewise.
-       * config/powerpcspe/rtems.h: Likewise.
-       * config/rs6000/rtems.h: Likewise.
-       * config/rtems.h: Likewise.
-       * config/sh/rtems.h: Likewise.
-       * config/sh/rtemself.h: Likewise.
-       * config/sparc/rtemself.h: Likewise.
- 2017-07-25  Georg-Johann Lay  <avr@gjlay.de>
-       PR 81487
-       * hsa-brig.c (brig_init): Use xasprintf instead of asprintf.
-       * gimple-pretty-print.c (dump_profile, dump_probability): Same.
-       * tree-ssa-structalias.c (alias_get_name): Same.
- 2017-07-25  Bin Cheng  <bin.cheng@arm.com>
-       PR target/81414
-       * config/aarch64/cortex-a57-fma-steering.c (analyze): Skip fmul/fmac
-       instructions if no du chain is found.
- 2017-07-25  Georg-Johann Lay  <avr@gjlay.de>
-       * config/avr/avr-log.c (avr_log_vadump) ['T']: Print NULL-TREE.
- 2017-07-25  Richard Biener  <rguenther@suse.de>
-       PR middle-end/81505
-       * fold-const.c (fold_negate_const): TREE_OVERFLOW should be
-       sticky.
- 2017-07-24  Michael Meissner  <meissner@linux.vnet.ibm.com>
-       * config/rs6000/rs6000-cpus.def (ISA_2_6_MASKS_SERVER): Delete
-       upper-regs options.
-       (ISA_2_7_MASKS_SERVER): Likewise.
-       (ISA_3_0_MASKS_IEEE): Likewise.
-       (OTHER_P8_VECTOR_MASKS): Likewise.
-       (OTHER_VSX_VECTOR_MASKS): Likewise.
-       (POWERPC_MASKS): Likewise.
-       (power7 cpu): Use ISA_2_6_MASKS_SERVER instead of using a
-       duplicate list of options.
-       * config/rs6000/rs6000-c.c (rs6000_target_modify_macros): Remove
-       explicit -mupper-regs options.
-       * config/rs6000/rs6000.opt (-mvsx-scalar-memory): Delete
-       -mupper-regs* options.  Delete -mvsx-scalar-memory, which was an
-       alias for -mupper-regs-df.
-       * config/rs6000/rs6000.c (rs6000_setup_reg_addr_masks): Likewise.
-       (rs6000_init_hard_regno_mode_ok): Likewise.
-       (rs6000_option_override_internal): Likewise.
-       (rs6000_opt_masks): Likewise.
-       * config/rs6000/rs6000.h (TARGET_UPPER_REGS_DF): Define upper regs
-       options in terms of whether -mvsx or -mpower8-vector was used.
-       (TARGET_UPPER_REGS_DI): Likewise.
-       (TARGET_UPPER_REGS_SF): Likewise.
-       * doc/invoke.texi (RS/6000 and PowerPC Options): Delete the
-       -mupper-regs-* options.
- 2017-07-24  Segher Boessenkool  <segher@kernel.crashing.org>
-       * passes.c (emergency_dump_function): Print some empty lines and a
-       header before the RTL dump.
- 2017-07-24  Segher Boessenkool  <segher@kernel.crashing.org>
-       * cfgrtl.c (rtl_dump_bb): Don't call NEXT_INSN on NULL.
- 2017-07-24  Wilco Dijkstra  <wdijkstr@arm.com>
-       PR target/79041
-       * config/aarch64/aarch64.c (aarch64_classify_symbol):
-       Avoid SYMBOL_SMALL_ABSOLUTE for literals with pc-relative literals.
- 2017-07-24  Carl Love  <cel@us.ibm.com>
-       * config/rs6000/rs6000-c.c: Add support for built-in functions
-       vector float vec_extract_fp32_from_shorth (vector unsigned short);
-       vector float vec_extract_fp32_from_shortl (vector unsigned short);
-       * config/rs6000/altivec.h (vec_extract_fp_from_shorth,
-       vec_extract_fp_from_shortl): Add defines for the two builtins.
-       * config/rs6000/rs6000-builtin.def (VEXTRACT_FP_FROM_SHORTH,
-       VEXTRACT_FP_FROM_SHORTL): Add BU_P9V_OVERLOAD_1 and BU_P9V_VSX_1
-       new builtins.
-       * config/rs6000/vsx.md vsx_xvcvhpsp): Add define_insn.
-       (vextract_fp_from_shorth, vextract_fp_from_shortl): Add define_expands.
-       * doc/extend.texi: Update the built-in documentation file for the
-       new built-in function.
- 2017-07-24  Jakub Jelinek  <jakub@redhat.com>
-       PR bootstrap/81521
-       * tree.def: Remove TYPE_METHODS documentation, adjust TYPE_FIELDS
-       documentation.
-       * doc/generic.texi: Likewise.
-       * config/i386/winnt-cxx.c (i386_pe_adjust_class_at_definition): Look
-       for FUNCTION_DECLs in TYPE_FIELDS rather than TYPE_METHODS.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-24  Jackson Woodruff  <jackson.woodruff@arm.com>
+       * doc/tm.texi.in (DWARF_LAZY_REGISTER_VALUE): Document.
+       * doc/tm.texi: Regenerate.
  
-       * config/aarch64/aarch64-simd.md (aarch64_mla_elt_merge<mode>): New.
-       (aarch64_mls_elt_merge<mode>): Likewise.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-23  Krister Walfridsson  <krister.walfridsson@gmail.com>
+       * doc/invoke.texi (-msve-vector-bits=): Document new option.
+       (sve): Document new AArch64 extension.
+       * doc/md.texi (w): Extend the description of the AArch64
+       constraint to include SVE vectors.
+       (Upl, Upa): Document new AArch64 predicate constraints.
+       * config/aarch64/aarch64-opts.h (aarch64_sve_vector_bits_enum): New
+       enum.
+       * config/aarch64/aarch64.opt (sve_vector_bits): New enum.
+       (msve-vector-bits=): New option.
+       * config/aarch64/aarch64-option-extensions.def (fp, simd): Disable
+       SVE when these are disabled.
+       (sve): New extension.
+       * config/aarch64/aarch64-modes.def: Define SVE vector and predicate
+       modes.  Adjust their number of units based on aarch64_sve_vg.
+       (MAX_BITSIZE_MODE_ANY_MODE): Define.
+       * config/aarch64/aarch64-protos.h (ADDR_QUERY_ANY): New
+       aarch64_addr_query_type.
+       (aarch64_const_vec_all_same_in_range_p, aarch64_sve_pred_mode)
+       (aarch64_sve_cnt_immediate_p, aarch64_sve_addvl_addpl_immediate_p)
+       (aarch64_sve_inc_dec_immediate_p, aarch64_add_offset_temporaries)
+       (aarch64_split_add_offset, aarch64_output_sve_cnt_immediate)
+       (aarch64_output_sve_addvl_addpl, aarch64_output_sve_inc_dec_immediate)
+       (aarch64_output_sve_mov_immediate, aarch64_output_ptrue): Declare.
+       (aarch64_simd_imm_zero_p): Delete.
+       (aarch64_check_zero_based_sve_index_immediate): Declare.
+       (aarch64_sve_index_immediate_p, aarch64_sve_arith_immediate_p)
+       (aarch64_sve_bitmask_immediate_p, aarch64_sve_dup_immediate_p)
+       (aarch64_sve_cmp_immediate_p, aarch64_sve_float_arith_immediate_p)
+       (aarch64_sve_float_mul_immediate_p): Likewise.
+       (aarch64_classify_symbol): Take the offset as a HOST_WIDE_INT
+       rather than an rtx.
+       (aarch64_sve_ld1r_operand_p, aarch64_sve_ldr_operand_p): Declare.
+       (aarch64_expand_mov_immediate): Take a gen_vec_duplicate callback.
+       (aarch64_emit_sve_pred_move, aarch64_expand_sve_mem_move): Declare.
+       (aarch64_expand_sve_vec_cmp_int, aarch64_expand_sve_vec_cmp_float)
+       (aarch64_expand_sve_vcond, aarch64_expand_sve_vec_perm): Declare.
+       (aarch64_regmode_natural_size): Likewise.
+       * config/aarch64/aarch64.h (AARCH64_FL_SVE): New macro.
+       (AARCH64_FL_V8_3, AARCH64_FL_RCPC, AARCH64_FL_DOTPROD): Shift
+       left one place.
+       (AARCH64_ISA_SVE, TARGET_SVE): New macros.
+       (FIXED_REGISTERS, CALL_USED_REGISTERS, REGISTER_NAMES): Add entries
+       for VG and the SVE predicate registers.
+       (V_ALIASES): Add a "z"-prefixed alias.
+       (FIRST_PSEUDO_REGISTER): Change to P15_REGNUM + 1.
+       (AARCH64_DWARF_VG, AARCH64_DWARF_P0): New macros.
+       (PR_REGNUM_P, PR_LO_REGNUM_P): Likewise.
+       (PR_LO_REGS, PR_HI_REGS, PR_REGS): New reg_classes.
+       (REG_CLASS_NAMES): Add entries for them.
+       (REG_CLASS_CONTENTS): Likewise.  Update ALL_REGS to include VG
+       and the predicate registers.
+       (aarch64_sve_vg): Declare.
+       (BITS_PER_SVE_VECTOR, BYTES_PER_SVE_VECTOR, BYTES_PER_SVE_PRED)
+       (SVE_BYTE_MODE, MAX_COMPILE_TIME_VEC_BYTES): New macros.
+       (REGMODE_NATURAL_SIZE): Define.
+       * config/aarch64/aarch64-c.c (aarch64_update_cpp_builtins): Handle
+       SVE macros.
+       * config/aarch64/aarch64.c: Include cfgrtl.h.
+       (simd_immediate_info): Add a constructor for series vectors,
+       and an associated step field.
+       (aarch64_sve_vg): New variable.
+       (aarch64_dbx_register_number): Handle VG and the predicate registers.
+       (aarch64_vect_struct_mode_p, aarch64_vector_mode_p): Delete.
+       (VEC_ADVSIMD, VEC_SVE_DATA, VEC_SVE_PRED, VEC_STRUCT, VEC_ANY_SVE)
+       (VEC_ANY_DATA, VEC_STRUCT): New constants.
+       (aarch64_advsimd_struct_mode_p, aarch64_sve_pred_mode_p)
+       (aarch64_classify_vector_mode, aarch64_vector_data_mode_p)
+       (aarch64_sve_data_mode_p, aarch64_sve_pred_mode)
+       (aarch64_get_mask_mode): New functions.
+       (aarch64_hard_regno_nregs): Handle SVE data modes for FP_REGS
+       and FP_LO_REGS.  Handle PR_REGS, PR_LO_REGS and PR_HI_REGS.
+       (aarch64_hard_regno_mode_ok): Handle VG.  Also handle the SVE
+       predicate modes and predicate registers.  Explicitly restrict
+       GPRs to modes of 16 bytes or smaller.  Only allow FP registers
+       to store a vector mode if it is recognized by
+       aarch64_classify_vector_mode.
+       (aarch64_regmode_natural_size): New function.
+       (aarch64_hard_regno_caller_save_mode): Return the original mode
+       for predicates.
+       (aarch64_sve_cnt_immediate_p, aarch64_output_sve_cnt_immediate)
+       (aarch64_sve_addvl_addpl_immediate_p, aarch64_output_sve_addvl_addpl)
+       (aarch64_sve_inc_dec_immediate_p, aarch64_output_sve_inc_dec_immediate)
+       (aarch64_add_offset_1_temporaries, aarch64_offset_temporaries): New
+       functions.
+       (aarch64_add_offset): Add a temp2 parameter.  Assert that temp1
+       does not overlap dest if the function is frame-related.  Handle
+       SVE constants.
+       (aarch64_split_add_offset): New function.
+       (aarch64_add_sp, aarch64_sub_sp): Add temp2 parameters and pass
+       them aarch64_add_offset.
+       (aarch64_allocate_and_probe_stack_space): Add a temp2 parameter
+       and update call to aarch64_sub_sp.
+       (aarch64_add_cfa_expression): New function.
+       (aarch64_expand_prologue): Pass extra temporary registers to the
+       functions above.  Handle the case in which we need to emit new
+       DW_CFA_expressions for registers that were originally saved
+       relative to the stack pointer, but now have to be expressed
+       relative to the frame pointer.
+       (aarch64_output_mi_thunk): Pass extra temporary registers to the
+       functions above.
+       (aarch64_expand_epilogue): Likewise.  Prevent inheritance of
+       IP0 and IP1 values for SVE frames.
+       (aarch64_expand_vec_series): New function.
+       (aarch64_expand_sve_widened_duplicate): Likewise.
+       (aarch64_expand_sve_const_vector): Likewise.
+       (aarch64_expand_mov_immediate): Add a gen_vec_duplicate parameter.
+       Handle SVE constants.  Use emit_move_insn to move a force_const_mem
+       into the register, rather than emitting a SET directly.
+       (aarch64_emit_sve_pred_move, aarch64_expand_sve_mem_move)
+       (aarch64_get_reg_raw_mode, offset_4bit_signed_scaled_p)
+       (offset_6bit_unsigned_scaled_p, aarch64_offset_7bit_signed_scaled_p)
+       (offset_9bit_signed_scaled_p): New functions.
+       (aarch64_replicate_bitmask_imm): New function.
+       (aarch64_bitmask_imm): Use it.
+       (aarch64_cannot_force_const_mem): Reject expressions involving
+       a CONST_POLY_INT.  Update call to aarch64_classify_symbol.
+       (aarch64_classify_index): Handle SVE indices, by requiring
+       a plain register index with a scale that matches the element size.
+       (aarch64_classify_address): Handle SVE addresses.  Assert that
+       the mode of the address is VOIDmode or an integer mode.
+       Update call to aarch64_classify_symbol.
+       (aarch64_classify_symbolic_expression): Update call to
+       aarch64_classify_symbol.
+       (aarch64_const_vec_all_in_range_p): New function.
+       (aarch64_print_vector_float_operand): Likewise.
+       (aarch64_print_operand): Handle 'N' and 'C'.  Use "zN" rather than
+       "vN" for FP registers with SVE modes.  Handle (const ...) vectors
+       and the FP immediates 1.0 and 0.5.
+       (aarch64_print_address_internal): Handle SVE addresses.
+       (aarch64_print_operand_address): Use ADDR_QUERY_ANY.
+       (aarch64_regno_regclass): Handle predicate registers.
+       (aarch64_secondary_reload): Handle big-endian reloads of SVE
+       data modes.
+       (aarch64_class_max_nregs): Handle SVE modes and predicate registers.
+       (aarch64_rtx_costs): Check for ADDVL and ADDPL instructions.
+       (aarch64_convert_sve_vector_bits): New function.
+       (aarch64_override_options): Use it to handle -msve-vector-bits=.
+       (aarch64_classify_symbol): Take the offset as a HOST_WIDE_INT
+       rather than an rtx.
+       (aarch64_legitimate_constant_p): Use aarch64_classify_vector_mode.
+       Handle SVE vector and predicate modes.  Accept VL-based constants
+       that need only one temporary register, and VL offsets that require
+       no temporary registers.
+       (aarch64_conditional_register_usage): Mark the predicate registers
+       as fixed if SVE isn't available.
+       (aarch64_vector_mode_supported_p): Use aarch64_classify_vector_mode.
+       Return true for SVE vector and predicate modes.
+       (aarch64_simd_container_mode): Take the number of bits as a poly_int64
+       rather than an unsigned int.  Handle SVE modes.
+       (aarch64_preferred_simd_mode): Update call accordingly.  Handle
+       SVE modes.
+       (aarch64_autovectorize_vector_sizes): Add BYTES_PER_SVE_VECTOR
+       if SVE is enabled.
+       (aarch64_sve_index_immediate_p, aarch64_sve_arith_immediate_p)
+       (aarch64_sve_bitmask_immediate_p, aarch64_sve_dup_immediate_p)
+       (aarch64_sve_cmp_immediate_p, aarch64_sve_float_arith_immediate_p)
+       (aarch64_sve_float_mul_immediate_p): New functions.
+       (aarch64_sve_valid_immediate): New function.
+       (aarch64_simd_valid_immediate): Use it as the fallback for SVE vectors.
+       Explicitly reject structure modes.  Check for INDEX constants.
+       Handle PTRUE and PFALSE constants.
+       (aarch64_check_zero_based_sve_index_immediate): New function.
+       (aarch64_simd_imm_zero_p): Delete.
+       (aarch64_mov_operand_p): Use aarch64_simd_valid_immediate for
+       vector modes.  Accept constants in the range of CNT[BHWD].
+       (aarch64_simd_scalar_immediate_valid_for_move): Explicitly
+       ask for an Advanced SIMD mode.
+       (aarch64_sve_ld1r_operand_p, aarch64_sve_ldr_operand_p): New functions.
+       (aarch64_simd_vector_alignment): Handle SVE predicates.
+       (aarch64_vectorize_preferred_vector_alignment): New function.
+       (aarch64_simd_vector_alignment_reachable): Use it instead of
+       the vector size.
+       (aarch64_shift_truncation_mask): Use aarch64_vector_data_mode_p.
+       (aarch64_output_sve_mov_immediate, aarch64_output_ptrue): New
+       functions.
+       (MAX_VECT_LEN): Delete.
+       (expand_vec_perm_d): Add a vec_flags field.
+       (emit_unspec2, aarch64_expand_sve_vec_perm): New functions.
+       (aarch64_evpc_trn, aarch64_evpc_uzp, aarch64_evpc_zip)
+       (aarch64_evpc_ext): Don't apply a big-endian lane correction
+       for SVE modes.
+       (aarch64_evpc_rev): Rename to...
+       (aarch64_evpc_rev_local): ...this.  Use a predicated operation for SVE.
+       (aarch64_evpc_rev_global): New function.
+       (aarch64_evpc_dup): Enforce a 64-byte range for SVE DUP.
+       (aarch64_evpc_tbl): Use MAX_COMPILE_TIME_VEC_BYTES instead of
+       MAX_VECT_LEN.
+       (aarch64_evpc_sve_tbl): New function.
+       (aarch64_expand_vec_perm_const_1): Update after rename of
+       aarch64_evpc_rev.  Handle SVE permutes too, trying
+       aarch64_evpc_rev_global and using aarch64_evpc_sve_tbl rather
+       than aarch64_evpc_tbl.
+       (aarch64_vectorize_vec_perm_const): Initialize vec_flags.
+       (aarch64_sve_cmp_operand_p, aarch64_unspec_cond_code)
+       (aarch64_gen_unspec_cond, aarch64_expand_sve_vec_cmp_int)
+       (aarch64_emit_unspec_cond, aarch64_emit_unspec_cond_or)
+       (aarch64_emit_inverted_unspec_cond, aarch64_expand_sve_vec_cmp_float)
+       (aarch64_expand_sve_vcond): New functions.
+       (aarch64_modes_tieable_p): Use aarch64_vector_data_mode_p instead
+       of aarch64_vector_mode_p.
+       (aarch64_dwarf_poly_indeterminate_value): New function.
+       (aarch64_compute_pressure_classes): Likewise.
+       (aarch64_can_change_mode_class): Likewise.
+       (TARGET_GET_RAW_RESULT_MODE, TARGET_GET_RAW_ARG_MODE): Redefine.
+       (TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT): Likewise.
+       (TARGET_VECTORIZE_GET_MASK_MODE): Likewise.
+       (TARGET_DWARF_POLY_INDETERMINATE_VALUE): Likewise.
+       (TARGET_COMPUTE_PRESSURE_CLASSES): Likewise.
+       (TARGET_CAN_CHANGE_MODE_CLASS): Likewise.
+       * config/aarch64/constraints.md (Upa, Upl, Uav, Uat, Usv, Usi, Utr)
+       (Uty, Dm, vsa, vsc, vsd, vsi, vsn, vsl, vsm, vsA, vsM, vsN): New
+       constraints.
+       (Dn, Dl, Dr): Accept const as well as const_vector.
+       (Dz): Likewise.  Compare against CONST0_RTX.
+       * config/aarch64/iterators.md: Refer to "Advanced SIMD" instead
+       of "vector" where appropriate.
+       (SVE_ALL, SVE_BH, SVE_BHS, SVE_BHSI, SVE_HSDI, SVE_HSF, SVE_SD)
+       (SVE_SDI, SVE_I, SVE_F, PRED_ALL, PRED_BHS): New mode iterators.
+       (UNSPEC_SEL, UNSPEC_ANDF, UNSPEC_IORF, UNSPEC_XORF, UNSPEC_COND_LT)
+       (UNSPEC_COND_LE, UNSPEC_COND_EQ, UNSPEC_COND_NE, UNSPEC_COND_GE)
+       (UNSPEC_COND_GT, UNSPEC_COND_LO, UNSPEC_COND_LS, UNSPEC_COND_HS)
+       (UNSPEC_COND_HI, UNSPEC_COND_UO): New unspecs.
+       (Vetype, VEL, Vel, VWIDE, Vwide, vw, vwcore, V_INT_EQUIV)
+       (v_int_equiv): Extend to SVE modes.
+       (Vesize, V128, v128, Vewtype, V_FP_EQUIV, v_fp_equiv, VPRED): New
+       mode attributes.
+       (LOGICAL_OR, SVE_INT_UNARY, SVE_FP_UNARY): New code iterators.
+       (optab): Handle popcount, smin, smax, umin, umax, abs and sqrt.
+       (logical_nn, lr, sve_int_op, sve_fp_op): New code attributes.
+       (LOGICALF, OPTAB_PERMUTE, UNPACK, UNPACK_UNSIGNED, SVE_COND_INT_CMP)
+       (SVE_COND_FP_CMP): New int iterators.
+       (perm_hilo): Handle the new unpack unspecs.
+       (optab, logicalf_op, su, perm_optab, cmp_op, imm_con): New int
+       attributes.
+       * config/aarch64/predicates.md (aarch64_sve_cnt_immediate)
+       (aarch64_sve_addvl_addpl_immediate, aarch64_split_add_offset_immediate)
+       (aarch64_pluslong_or_poly_operand, aarch64_nonmemory_operand)
+       (aarch64_equality_operator, aarch64_constant_vector_operand)
+       (aarch64_sve_ld1r_operand, aarch64_sve_ldr_operand): New predicates.
+       (aarch64_sve_nonimmediate_operand): Likewise.
+       (aarch64_sve_general_operand): Likewise.
+       (aarch64_sve_dup_operand, aarch64_sve_arith_immediate): Likewise.
+       (aarch64_sve_sub_arith_immediate, aarch64_sve_inc_dec_immediate)
+       (aarch64_sve_logical_immediate, aarch64_sve_mul_immediate): Likewise.
+       (aarch64_sve_dup_immediate, aarch64_sve_cmp_vsc_immediate): Likewise.
+       (aarch64_sve_cmp_vsd_immediate, aarch64_sve_index_immediate): Likewise.
+       (aarch64_sve_float_arith_immediate): Likewise.
+       (aarch64_sve_float_arith_with_sub_immediate): Likewise.
+       (aarch64_sve_float_mul_immediate, aarch64_sve_arith_operand): Likewise.
+       (aarch64_sve_add_operand, aarch64_sve_logical_operand): Likewise.
+       (aarch64_sve_lshift_operand, aarch64_sve_rshift_operand): Likewise.
+       (aarch64_sve_mul_operand, aarch64_sve_cmp_vsc_operand): Likewise.
+       (aarch64_sve_cmp_vsd_operand, aarch64_sve_index_operand): Likewise.
+       (aarch64_sve_float_arith_operand): Likewise.
+       (aarch64_sve_float_arith_with_sub_operand): Likewise.
+       (aarch64_sve_float_mul_operand): Likewise.
+       (aarch64_sve_vec_perm_operand): Likewise.
+       (aarch64_pluslong_operand): Include aarch64_sve_addvl_addpl_immediate.
+       (aarch64_mov_operand): Accept const_poly_int and const_vector.
+       (aarch64_simd_lshift_imm, aarch64_simd_rshift_imm): Accept const
+       as well as const_vector.
+       (aarch64_simd_imm_zero, aarch64_simd_imm_minus_one): Move earlier
+       in file.  Use CONST0_RTX and CONSTM1_RTX.
+       (aarch64_simd_or_scalar_imm_zero): Likewise.  Add match_codes.
+       (aarch64_simd_reg_or_zero): Accept const as well as const_vector.
+       Use aarch64_simd_imm_zero.
+       * config/aarch64/aarch64-sve.md: New file.
+       * config/aarch64/aarch64.md: Include it.
+       (VG_REGNUM, P0_REGNUM, P7_REGNUM, P15_REGNUM): New register numbers.
+       (UNSPEC_REV, UNSPEC_LD1_SVE, UNSPEC_ST1_SVE, UNSPEC_MERGE_PTRUE)
+       (UNSPEC_PTEST_PTRUE, UNSPEC_UNPACKSHI, UNSPEC_UNPACKUHI)
+       (UNSPEC_UNPACKSLO, UNSPEC_UNPACKULO, UNSPEC_PACK)
+       (UNSPEC_FLOAT_CONVERT, UNSPEC_WHILE_LO): New unspec constants.
+       (sve): New attribute.
+       (enabled): Disable instructions with the sve attribute unless
+       TARGET_SVE.
+       (movqi, movhi): Pass CONST_POLY_INT operands through
+       aarch64_expand_mov_immediate.
+       (*mov<mode>_aarch64, *movsi_aarch64, *movdi_aarch64): Handle
+       CNT[BHSD] immediates.
+       (movti): Split CONST_POLY_INT moves into two halves.
+       (add<mode>3): Accept aarch64_pluslong_or_poly_operand.
+       Split additions that need a temporary here if the destination
+       is the stack pointer.
+       (*add<mode>3_aarch64): Handle ADDVL and ADDPL immediates.
+       (*add<mode>3_poly_1): New instruction.
+       (set_clobber_cc): New expander.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+       * simplify-rtx.c (simplify_immed_subreg): Add an inner_bytes
+       parameter and use it instead of GET_MODE_SIZE (innermode).  Use
+       inner_bytes * BITS_PER_UNIT instead of GET_MODE_BITSIZE (innermode).
+       Use CEIL (inner_bytes, GET_MODE_UNIT_SIZE (innermode)) instead of
+       GET_MODE_NUNITS (innermode).  Also add a first_elem parameter.
+       Change innermode from fixed_mode_size to machine_mode.
+       (simplify_subreg): Update call accordingly.  Handle a constant-sized
+       subreg of a variable-length CONST_VECTOR.
+ 2018-01-13  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * config.gcc (*-*-netbsd*): Remove check for NetBSD versions not
-       having __cxa_atexit.
+       * tree-ssa-address.c (mem_ref_valid_without_offset_p): New function.
+       (add_offset_to_base): New function, split out from...
+       (create_mem_ref): ...here.  When handling a scale other than 1,
+       check first whether the address is valid without the offset.
+       Add it into the base if so, leaving the index and scale as-is.
  
- 2017-07-23  Michael Collison  <michael.collison@arm.com>
+ 2018-01-12  Jakub Jelinek  <jakub@redhat.com>
  
-       * config/arm/arm.c (arm_option_override): Deprecate
-       use of -mstructure-size-boundary.
-       * config/arm/arm.opt: Deprecate -mstructure-size-boundary.
-       * doc/invoke.texi: Deprecate -mstructure-size-boundary.
+       PR c++/83778
+       * config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin): Call
+       fold_for_warn before checking if arg2 is INTEGER_CST.
  
- 2017-07-23  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+ 2018-01-12  Segher Boessenkool  <segher@kernel.crashing.org>
  
-       PR target/80695
-       * config/rs6000/rs6000.c (rs6000_builtin_vectorization_cost):
-       Reduce cost estimate for direct moves.
+       * config/rs6000/predicates.md (load_multiple_operation): Delete.
+       (store_multiple_operation): Delete.
+       * config/rs6000/rs6000-cpus.def (601): Remove MASK_STRING.
+       * config/rs6000/rs6000-protos.h (rs6000_output_load_multiple): Delete.
+       * config/rs6000/rs6000-string.c (expand_block_move): Delete everything
+       guarded by TARGET_STRING.
+       (rs6000_output_load_multiple): Delete.
+       * config/rs6000/rs6000.c (rs6000_option_override_internal): Delete
+       OPTION_MASK_STRING / TARGET_STRING handling.
+       (print_operand) <'N', 'O'>: Add comment that these are unused now.
+       (const rs6000_opt_masks) <"string">: Change mask to 0.
+       * config/rs6000/rs6000.h (TARGET_DEFAULT): Remove MASK_STRING.
+       (MASK_STRING): Delete.
+       * config/rs6000/rs6000.md (*mov<mode>_string): Delete TARGET_STRING
+       parts.  Simplify.
+       (load_multiple): Delete.
+       (*ldmsi8): Delete.
+       (*ldmsi7): Delete.
+       (*ldmsi6): Delete.
+       (*ldmsi5): Delete.
+       (*ldmsi4): Delete.
+       (*ldmsi3): Delete.
+       (store_multiple): Delete.
+       (*stmsi8): Delete.
+       (*stmsi7): Delete.
+       (*stmsi6): Delete.
+       (*stmsi5): Delete.
+       (*stmsi4): Delete.
+       (*stmsi3): Delete.
+       (movmemsi_8reg): Delete.
+       (corresponding unnamed define_insn): Delete.
+       (movmemsi_6reg): Delete.
+       (corresponding unnamed define_insn): Delete.
+       (movmemsi_4reg): Delete.
+       (corresponding unnamed define_insn): Delete.
+       (movmemsi_2reg): Delete.
+       (corresponding unnamed define_insn): Delete.
+       (movmemsi_1reg): Delete.
+       (corresponding unnamed define_insn): Delete.
+       * config/rs6000/rs6000.opt (mno-string): New.
+       (mstring): Replace by deprecation warning stub.
+       * doc/invoke.texi (RS/6000 and PowerPC Options): Delete -mstring.
+ 2018-01-12  Jakub Jelinek  <jakub@redhat.com>
+       * regrename.c (regrename_do_replace): If replacing the same
+       reg multiple times, try to reuse last created gen_raw_REG.
+       PR debug/81155
+       * bb-reorder.c (pass_partition_blocks::gate): In lto don't partition
+       main to workaround a bug in GDB.
+ 2018-01-12  Tom de Vries  <tom@codesourcery.com>
+       PR target/83737
+       * config.gcc (nvptx*-*-*): Set use_gcc_stdint=wrap.
+ 2018-01-12  Vladimir Makarov  <vmakarov@redhat.com>
+       PR rtl-optimization/80481
+       * ira-color.c (get_cap_member): New function.
+       (allocnos_conflict_by_live_ranges_p): Use it.
+       (slot_coalesced_allocno_live_ranges_intersect_p): Add assert.
+       (setup_slot_coalesced_allocno_live_ranges): Ditto.
+ 2018-01-12  Uros Bizjak  <ubizjak@gmail.com>
+       PR target/83628
+       * config/alpha/alpha.md (*saddsi_1): New insn_and_split pattern.
+       (*saddl_se_1): Ditto.
+       (*ssubsi_1): Ditto.
+       (*ssubl_se_1): Ditto.
+ 2018-01-12  Richard Sandiford  <richard.sandiford@linaro.org>
+       * tree-predcom.c (aff_combination_dr_offset): Use wi::to_poly_widest
+       rather than wi::to_widest for DR_INITs.
+       * tree-vect-data-refs.c (vect_find_same_alignment_drs): Use
+       wi::to_poly_offset rather than wi::to_offset for DR_INIT.
+       (vect_analyze_data_ref_accesses): Require both DR_INITs to be
+       INTEGER_CSTs.
+       (vect_analyze_group_access_1): Note that here.
+ 2018-01-12  Richard Sandiford  <richard.sandiford@linaro.org>
+       * tree-vectorizer.c (get_vec_alignment_for_array_type): Handle
+       polynomial type sizes.
+ 2018-01-12  Richard Sandiford  <richard.sandiford@linaro.org>
+       * gimplify.c (gimple_add_tmp_var_fn): Allow variables to have a
+       poly_uint64 size, rather than requiring an unsigned HOST_WIDE_INT size.
+       (gimple_add_tmp_var): Likewise.
+ 2018-01-12  Martin Liska  <mliska@suse.cz>
+       * gimple.c (gimple_alloc_counts): Use uint64_t instead of int.
+       (gimple_alloc_sizes): Likewise.
+       (dump_gimple_statistics): Use PRIu64 in printf format.
+       * gimple.h: Change uint64_t to int.
+ 2018-01-12  Martin Liska  <mliska@suse.cz>
+       * tree-core.h: Use uint64_t instead of int.
+       * tree.c (tree_node_counts): Likewise.
+       (tree_node_sizes): Likewise.
+       (dump_tree_statistics): Use PRIu64 in printf format.
+ 2018-01-12  Martin Liska  <mliska@suse.cz>
+       * Makefile.in: As qsort_chk is implemented in vec.c, add
+       vec.o to linkage of gencfn-macros.
+       * tree.c (build_new_poly_int_cst): Add CXX_MEM_STAT_INFO as it's
+       passing the info to record_node_allocation_statistics.
+       (test_vector_cst_patterns): Add CXX_MEM_STAT_INFO to declaration
+       and pass the info.
+       * ggc-common.c (struct ggc_usage): Add operator== and use
+       it in operator< and compare function.
+       * mem-stats.h (struct mem_usage): Likewise.
+       * vec.c (struct vec_usage): Remove operator< and compare
+       function. Can be simply inherited.
+ 2018-01-12  Martin Jambor  <mjambor@suse.cz>
+       PR target/81616
+       * params.def: New parameter PARAM_AVOID_FMA_MAX_BITS.
+       * tree-ssa-math-opts.c: Include domwalk.h.
+       (convert_mult_to_fma_1): New function.
+       (fma_transformation_info): New type.
+       (fma_deferring_state): Likewise.
+       (cancel_fma_deferring): New function.
+       (result_of_phi): Likewise.
+       (last_fma_candidate_feeds_initial_phi): Likewise.
+       (convert_mult_to_fma): Added deferring logic, split actual
+       transformation to convert_mult_to_fma_1.
+       (math_opts_dom_walker): New type.
+       (math_opts_dom_walker::after_dom_children): New method, body moved
+       here from pass_optimize_widening_mul::execute, added deferring logic
+       bits.
+       (pass_optimize_widening_mul::execute): Moved most of code to
+       math_opts_dom_walker::after_dom_children.
+       * config/i386/x86-tune.def (X86_TUNE_AVOID_128FMA_CHAINS): New.
+       * config/i386/i386.c (ix86_option_override_internal): Added
+       maybe_setting of PARAM_AVOID_FMA_MAX_BITS.
+ 2018-01-12  Richard Biener  <rguenther@suse.de>
+       PR debug/83157
+       * dwarf2out.c (gen_variable_die): Do not reset old_die for
+       inline instance vars.
+ 2018-01-12  Oleg Endo  <olegendo@gcc.gnu.org>
+       PR target/81819
+       * config/rx/rx.c (rx_is_restricted_memory_address):
+       Handle SUBREG case.
+ 2018-01-12  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/80846
+       * target.def (split_reduction): New target hook.
+       * targhooks.c (default_split_reduction): New function.
+       * targhooks.h (default_split_reduction): Declare.
+       * tree-vect-loop.c (vect_create_epilog_for_reduction): If the
+       target requests first reduce vectors by combining low and high
+       parts.
+       * tree-vect-stmts.c (vect_gen_perm_mask_any): Adjust.
+       (get_vectype_for_scalar_type_and_size): Export.
+       * tree-vectorizer.h (get_vectype_for_scalar_type_and_size): Declare.
+       * doc/tm.texi.in (TARGET_VECTORIZE_SPLIT_REDUCTION): Document.
+       * doc/tm.texi: Regenerate.
+       * config/i386/i386.c (ix86_split_reduction): Implement
+       TARGET_VECTORIZE_SPLIT_REDUCTION.
+ 2018-01-12  Eric Botcazou  <ebotcazou@adacore.com>
+       PR target/83368
+       * config/sparc/sparc.h (PIC_OFFSET_TABLE_REGNUM): Set to INVALID_REGNUM
+       in PIC mode except for TARGET_VXWORKS_RTP.
+       * config/sparc/sparc.c: Include cfgrtl.h.
+       (TARGET_INIT_PIC_REG): Define.
+       (TARGET_USE_PSEUDO_PIC_REG): Likewise.
+       (sparc_pic_register_p): New predicate.
+       (sparc_legitimate_address_p): Use it.
+       (sparc_legitimize_pic_address): Likewise.
+       (sparc_delegitimize_address): Likewise.
+       (sparc_mode_dependent_address_p): Likewise.
+       (gen_load_pcrel_sym): Remove 4th parameter.
+       (load_got_register): Adjust call to above.  Remove obsolete stuff.
+       (sparc_expand_prologue): Do not call load_got_register here.
+       (sparc_flat_expand_prologue): Likewise.
+       (sparc_output_mi_thunk): Set the pic_offset_table_rtx object.
+       (sparc_use_pseudo_pic_reg): New function.
+       (sparc_init_pic_reg): Likewise.
+       * config/sparc/sparc.md (vxworks_load_got): Set the GOT register.
+       (builtin_setjmp_receiver): Enable only for TARGET_VXWORKS_RTP.
+ 2018-01-12  Christophe Lyon  <christophe.lyon@linaro.org>
  
- 2017-07-23  Uros Bizjak  <ubizjak@gmail.com>
+       * doc/sourcebuild.texi (Effective-Target Keywords, Other attributes):
+       Add item for branch_cost.
  
-       PR target/80569
-       * config/i386/i386.c (ix86_option_override_internal): Disable
-       BMI, BMI2 and TBM instructions for -m16.
+ 2018-01-12  Eric Botcazou  <ebotcazou@adacore.com>
  
- 2017-07-21  Carl Love  <cel@us.ibm.com>
+       PR rtl-optimization/83565
+       * rtlanal.c (nonzero_bits1): On WORD_REGISTER_OPERATIONS machines, do
+       not extend the result to a larger mode for rotate operations.
+       (num_sign_bit_copies1): Likewise.
  
-       * config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Add
-       ALTIVEC_BUILTIN_VMULESW, ALTIVEC_BUILTIN_VMULEUW,
-       ALTIVEC_BUILTIN_VMULOSW, ALTIVEC_BUILTIN_VMULOUW entries.
-       * config/rs6000/rs6000.c (rs6000_gimple_fold_builtin,
-       builtin_function_type): Add ALTIVEC_BUILTIN_* case statements.
-       * config/rs6000/altivec.md (VMULEUW, VMULESW, VMULOUW,
-       VMULOSW): New enum "unspec" values.
-       (altivec_vmuleuw, altivec_vmulesw, altivec_vmulouw,
-       altivec_vmulosw): New patterns.
-       * config/rs6000/rs6000-builtin.def (VMULEUW, VMULESW, VMULOUW,
-       VMULOSW): Add definitions.
+ 2018-01-12  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
  
- 2017-07-21  Jim Wilson  <jim.wilson@linaro.org>
+       PR target/40411
+       * config/sol2.h (STARTFILE_ARCH_SPEC): Don't use with -shared or
+       -symbolic.
+       Use values-Xc.o for -pedantic.
+       Link with values-xpg4.o for C90, values-xpg6.o otherwise.
  
-       * config/aarch64/aarch64-cores.def (falkor): Add AARCH64_FL_RDMA.
-       (qdf24xx): Likewise.
-       * config/aarch64/aarch64-option-extensions.def (rdma): New.
-       * config/aarch64/aarch64.h (AARCH64_FL_RDMA): New.
-       (AARCH64_FL_V8_1): Renumber.
-       (AARCH64_FL_FOR_ARCH8_1): Add AARCH64_FL_RDMA.
-       (AARCH64_ISA_RDMA): Use AARCH64_FL_RDMA.
-       * config/aarch64/arm_neon.h: Use +rdma instead of arch=armv8.1-a.
-       * doc/invoke.texi (AArch64 Options): Mention +rdma in -march docs.  Add
-       rdma to feature modifiers list.
+ 2018-01-12  Martin Liska  <mliska@suse.cz>
  
- 2017-07-21  Yury Gribov  <tetra2005@gmail.com>
+       PR ipa/83054
+       * ipa-devirt.c (final_warning_record::grow_type_warnings):
+       New function.
+       (possible_polymorphic_call_targets): Use it.
+       (ipa_devirt): Likewise.
  
-       PR middle-end/56727
-       * ipa-visibility (function_and_variable_visibility): Convert
-       recursive PLT call to direct call if appropriate.
+ 2018-01-12  Martin Liska  <mliska@suse.cz>
  
- 2017-07-21  Andrew Pinski  <apinski@cavium.com>
+       * profile-count.h (enum profile_quality): Use 0 as invalid
+       enum value of profile_quality.
  
-       * tree-ssa-sccvn.c (vn_nary_op_eq): Check BIT_INSERT_EXPR's
-       operand 1 to see if the types precision matches.
-       * fold-const.c (operand_equal_p): Likewise.
+ 2018-01-12  Chung-Ju Wu  <jasonwucj@gmail.com>
  
- 2017-07-21  Richard Biener  <rguenther@suse.de>
+       * doc/invoke.texi (NDS32 Options): Add -mext-perf, -mext-perf2 and
+       -mext-string options.
  
-       PR tree-optimization/81303
-       * tree-vect-data-refs.c (vect_get_peeling_costs_all_drs): Pass
-       in datarefs vector.  Allow NULL dr0 for no peeling cost estimate.
-       (vect_peeling_hash_get_lowest_cost): Adjust.
-       (vect_enhance_data_refs_alignment): Likewise.  Use
-       vect_get_peeling_costs_all_drs to compute the penalty for no
-       peeling to match up costs.
+ 2018-01-12  Richard Biener  <rguenther@suse.de>
  
- 2017-07-21  Richard Biener  <rguenther@suse.de>
+       * lto-streamer-out.c (DFS::DFS_write_tree_body): Process
+       DECL_DEBUG_EXPR conditional on DECL_HAS_DEBUG_EXPR_P.
+       * tree-streamer-in.c (lto_input_ts_decl_common_tree_pointers):
+       Likewise.
+       * tree-streamer-out.c (write_ts_decl_common_tree_pointers): Likewise.
  
-       PR tree-optimization/81500
-       * tree-vect-loop.c (vect_is_simple_reduction): Properly fail if
-       we didn't identify a reduction path.
+ 2018-01-11  Michael Meissner  <meissner@linux.vnet.ibm.com>
  
- 2017-07-21  Tom de Vries  <tom@codesourcery.com>
-           Cesar Philippidis  <cesar@codesourcery.com>
+       * configure.ac (--with-long-double-format): Add support for the
+       configuration option to change the default long double format on
+       PowerPC systems.
+       * config.gcc (powerpc*-linux*-*): Likewise.
+       * configure: Regenerate.
+       * config/rs6000/rs6000-c.c (rs6000_cpu_cpp_builtins): If long
+       double is IEEE, define __KC__ and __KF__ to allow floatn.h to be
+       used without modification.
+ 2018-01-11  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+       * config/rs6000/rs6000-builtin.def (BU_P7_MISC_X): New #define.
+       (SPEC_BARRIER): New instantiation of BU_P7_MISC_X.
+       * config/rs6000/rs6000.c (rs6000_expand_builtin): Handle
+       MISC_BUILTIN_SPEC_BARRIER.
+       (rs6000_init_builtins): Likewise.
+       * config/rs6000/rs6000.md (UNSPECV_SPEC_BARRIER): New UNSPECV
+       enum value.
+       (speculation_barrier): New define_insn.
+       * doc/extend.texi: Document __builtin_speculation_barrier.
+ 2018-01-11  Jakub Jelinek  <jakub@redhat.com>
+       PR target/83203
+       * config/i386/i386.c (ix86_expand_vector_init_one_nonzero): If one_var
+       is 0, for V{8,16}S[IF] and V[48]D[IF]mode use gen_vec_set<mode>_0.
+       * config/i386/sse.md (VI8_AVX_AVX512F, VI4F_256_512): New mode
+       iterators.
+       (ssescalarmodesuffix): Add 512-bit vectors.  Use "d" or "q" for
+       integral modes instead of "ss" and "sd".
+       (vec_set<mode>_0): New define_insns for 256-bit and 512-bit
+       vectors with 32-bit and 64-bit elements.
+       (vecdupssescalarmodesuffix): New mode attribute.
+       (vec_dup<mode>): Use it.
+ 2018-01-11  H.J. Lu  <hongjiu.lu@intel.com>
+       PR target/83330
+       * config/i386/i386.c (ix86_compute_frame_layout): Align stack
+       frame if argument is passed on stack.
+ 2018-01-11  Jakub Jelinek  <jakub@redhat.com>
+       PR target/82682
+       * ree.c (combine_reaching_defs): Optimize also
+       reg2=exp; reg1=reg2; reg2=any_extend(reg1); into
+       reg2=any_extend(exp); reg1=reg2;, formatting fix.
+ 2018-01-11  Jan Hubicka  <hubicka@ucw.cz>
+       PR middle-end/83189
+       * gimple-ssa-isolate-paths.c (isolate_path): Fix profile update.
+ 2018-01-11  Jan Hubicka  <hubicka@ucw.cz>
+       PR middle-end/83718
+       * tree-inline.c (copy_cfg_body): Adjust num&den for scaling
+       after they are computed.
+ 2018-01-11  Bin Cheng  <bin.cheng@arm.com>
+       PR tree-optimization/83695
+       * gimple-loop-linterchange.cc
+       (tree_loop_interchange::interchange_loops): Call scev_reset_htab to
+       reset cached scev information after interchange.
+       (pass_linterchange::execute): Remove call to scev_reset_htab.
+ 2018-01-11  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
+       * config/arm/arm_neon.h (vfmlal_lane_low_u32, vfmlal_lane_high_u32,
+       vfmlalq_laneq_low_u32, vfmlalq_lane_low_u32, vfmlal_laneq_low_u32,
+       vfmlalq_laneq_high_u32, vfmlalq_lane_high_u32, vfmlal_laneq_high_u32,
+       vfmlsl_lane_low_u32, vfmlsl_lane_high_u32, vfmlslq_laneq_low_u32,
+       vfmlslq_lane_low_u32, vfmlsl_laneq_low_u32, vfmlslq_laneq_high_u32,
+       vfmlslq_lane_high_u32, vfmlsl_laneq_high_u32): Define.
+       * config/arm/arm_neon_builtins.def (vfmal_lane_low,
+       vfmal_lane_lowv4hf, vfmal_lane_lowv8hf, vfmal_lane_high,
+       vfmal_lane_highv4hf, vfmal_lane_highv8hf, vfmsl_lane_low,
+       vfmsl_lane_lowv4hf, vfmsl_lane_lowv8hf, vfmsl_lane_high,
+       vfmsl_lane_highv4hf, vfmsl_lane_highv8hf): New sets of builtins.
+       * config/arm/iterators.md (VFMLSEL2, vfmlsel2): New mode attributes.
+       (V_lane_reg): Likewise.
+       * config/arm/neon.md (neon_vfm<vfml_op>l_lane_<vfml_half><VCVTF:mode>):
+       New define_expand.
+       (neon_vfm<vfml_op>l_lane_<vfml_half><vfmlsel2><mode>): Likewise.
+       (vfmal_lane_low<mode>_intrinsic,
+       vfmal_lane_low<vfmlsel2><mode>_intrinsic,
+       vfmal_lane_high<vfmlsel2><mode>_intrinsic,
+       vfmal_lane_high<mode>_intrinsic, vfmsl_lane_low<mode>_intrinsic,
+       vfmsl_lane_low<vfmlsel2><mode>_intrinsic,
+       vfmsl_lane_high<vfmlsel2><mode>_intrinsic,
+       vfmsl_lane_high<mode>_intrinsic): New define_insns.
+ 2018-01-11  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
+       * config/arm/arm-cpus.in (fp16fml): New feature.
+       (ALL_SIMD): Add fp16fml.
+       (armv8.2-a): Add fp16fml as an option.
+       (armv8.3-a): Likewise.
+       (armv8.4-a): Add fp16fml as part of fp16.
+       * config/arm/arm.h (TARGET_FP16FML): Define.
+       * config/arm/arm-c.c (arm_cpu_builtins): Define __ARM_FEATURE_FP16_FML
+       when appropriate.
+       * config/arm/arm-modes.def (V2HF): Define.
+       * config/arm/arm_neon.h (vfmlal_low_u32, vfmlsl_low_u32,
+       vfmlal_high_u32, vfmlsl_high_u32, vfmlalq_low_u32,
+       vfmlslq_low_u32, vfmlalq_high_u32, vfmlslq_high_u32): Define.
+       * config/arm/arm_neon_builtins.def (vfmal_low, vfmal_high,
+       vfmsl_low, vfmsl_high): New set of builtins.
+       * config/arm/iterators.md (PLUSMINUS): New code iterator.
+       (vfml_op): New code attribute.
+       (VFMLHALVES): New int iterator.
+       (VFML, VFMLSEL): New mode attributes.
+       (V_reg): Define mapping for V2HF.
+       (V_hi, V_lo): New mode attributes.
+       (VF_constraint): Likewise.
+       (vfml_half, vfml_half_selector): New int attributes.
+       * config/arm/neon.md (neon_vfm<vfml_op>l_<vfml_half><mode>): New
+       define_expand.
+       (vfmal_low<mode>_intrinsic, vfmsl_high<mode>_intrinsic,
+       vfmal_high<mode>_intrinsic, vfmsl_low<mode>_intrinsic):
+       New define_insn.
+       * config/arm/t-arm-elf (v8_fps): Add fp16fml.
+       * config/arm/t-multilib (v8_2_a_simd_variants): Add fp16fml.
+       * config/arm/unspecs.md (UNSPEC_VFML_LO, UNSPEC_VFML_HI): New unspecs.
+       * doc/invoke.texi (ARM Options): Document fp16fml.  Update armv8.4-a
+       documentation.
+       * doc/sourcebuild.texi (arm_fp16fml_neon_ok, arm_fp16fml_neon):
+       Document new effective target and option set.
  
-       PR gcov-profile/81442
-       * config/nvptx/nvptx.c (nvptx_goacc_reduction_init): Add missing edge
-       probabilities.
+ 2018-01-11  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
  
- 2017-07-21  Tom de Vries  <tom@codesourcery.com>
+       * config/arm/arm-cpus.in (armv8_4): New feature.
+       (ARMv8_4a): New fgroup.
+       (armv8.4-a): New arch.
+       * config/arm/arm-tables.opt: Regenerate.
+       * config/arm/t-aprofile: Add matching rules for -march=armv8.4-a.
+       * config/arm/t-arm-elf (all_v8_archs): Add armv8.4-a.
+       * config/arm/t-multilib (v8_4_a_simd_variants): New variable.
+       Add matching rules for -march=armv8.4-a and extensions.
+       * doc/invoke.texi (ARM Options): Document -march=armv8.4-a.
  
-       PR lto/81430
-       * config/nvptx/nvptx.c (nvptx_override_options_after_change): New
-       function.
-       (TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE): Define to
-       nvptx_override_options_after_change.
+ 2018-01-11  Oleg Endo  <olegendo@gcc.gnu.org>
  
- 2017-07-21  Ulrich Drepper  <drepper@redhat.com>
+       PR target/81821
+       * config/rx/rx.md (BW): New mode attribute.
+       (sync_lock_test_and_setsi): Add mode suffix to insn output.
  
-       * dwarf2out.c (output_file_names): Avoid double testing for
-       dwarf_version >= 5.
+ 2018-01-11  Richard Biener  <rguenther@suse.de>
  
- 2017-07-21  Georg-Johann Lay  <avr@gjlay.de>
+       PR tree-optimization/83435
+       * graphite.c (canonicalize_loop_form): Ignore fake loop exit edges.
+       * graphite-scop-detection.c (scop_detection::get_sese): Likewise.
+       * tree-vrp.c (add_assert_info): Drop TREE_OVERFLOW if they appear.
  
-       * doc/invoke.texi (AVR Built-in Functions): Re-layout section.
+ 2018-01-11  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-21  Jan Hubicka  <hubicka@ucw.cz>
+       * config/aarch64/aarch64.c (aarch64_address_info): Add a const_offset
+       field.
+       (aarch64_classify_address): Initialize it.  Track polynomial offsets.
+       (aarch64_print_address_internal): Use it to check for a zero offset.
  
-       * cfgcleanup.c (flow_find_cross_jump): Do not crossjump across
-       hot/cold regions.
-       (try_crossjump_to_edge): Do not punt on partitioned functions.
+ 2018-01-11  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-21  Jan Hubicka  <hubicka@ucw.cz>
+       * config/aarch64/aarch64-modes.def (NUM_POLY_INT_COEFFS): Set to 2.
+       * config/aarch64/aarch64-protos.h (aarch64_initial_elimination_offset):
+       Return a poly_int64 rather than a HOST_WIDE_INT.
+       (aarch64_offset_7bit_signed_scaled_p): Take the offset as a poly_int64
+       rather than a HOST_WIDE_INT.
+       * config/aarch64/aarch64.h (aarch64_frame): Protect with
+       HAVE_POLY_INT_H rather than HOST_WIDE_INT.  Change locals_offset,
+       hard_fp_offset, frame_size, initial_adjust, callee_offset and
+       final_offset from HOST_WIDE_INT to poly_int64.
+       * config/aarch64/aarch64-builtins.c (aarch64_simd_expand_args): Use
+       to_constant when getting the number of units in an Advanced SIMD
+       mode.
+       (aarch64_builtin_vectorized_function): Check for a constant number
+       of units.
+       * config/aarch64/aarch64-simd.md (mov<mode>): Handle polynomial
+       GET_MODE_SIZE.
+       (aarch64_ld<VSTRUCT:nregs>_lane<VALLDIF:mode>): Use the nunits
+       attribute instead of GET_MODE_NUNITS.
+       * config/aarch64/aarch64.c (aarch64_hard_regno_nregs)
+       (aarch64_class_max_nregs): Use the constant_lowest_bound of the
+       GET_MODE_SIZE for fixed-size registers.
+       (aarch64_const_vec_all_same_in_range_p): Use const_vec_duplicate_p.
+       (aarch64_hard_regno_call_part_clobbered, aarch64_classify_index)
+       (aarch64_mode_valid_for_sched_fusion_p, aarch64_classify_address)
+       (aarch64_legitimize_address_displacement, aarch64_secondary_reload)
+       (aarch64_print_operand, aarch64_print_address_internal)
+       (aarch64_address_cost, aarch64_rtx_costs, aarch64_register_move_cost)
+       (aarch64_short_vector_p, aapcs_vfp_sub_candidate)
+       (aarch64_simd_attr_length_rglist, aarch64_operands_ok_for_ldpstp):
+       Handle polynomial GET_MODE_SIZE.
+       (aarch64_hard_regno_caller_save_mode): Likewise.  Return modes
+       wider than SImode without modification.
+       (tls_symbolic_operand_type): Use strip_offset instead of split_const.
+       (aarch64_pass_by_reference, aarch64_layout_arg, aarch64_pad_reg_upward)
+       (aarch64_gimplify_va_arg_expr): Assert that we don't yet handle
+       passing and returning SVE modes.
+       (aarch64_function_value, aarch64_layout_arg): Use gen_int_mode
+       rather than GEN_INT.
+       (aarch64_emit_probe_stack_range): Take the size as a poly_int64
+       rather than a HOST_WIDE_INT, but call sorry if it isn't constant.
+       (aarch64_allocate_and_probe_stack_space): Likewise.
+       (aarch64_layout_frame): Cope with polynomial offsets.
+       (aarch64_save_callee_saves, aarch64_restore_callee_saves): Take the
+       start_offset as a poly_int64 rather than a HOST_WIDE_INT.  Track
+       polynomial offsets.
+       (offset_9bit_signed_unscaled_p, offset_12bit_unsigned_scaled_p)
+       (aarch64_offset_7bit_signed_scaled_p): Take the offset as a
+       poly_int64 rather than a HOST_WIDE_INT.
+       (aarch64_get_separate_components, aarch64_process_components)
+       (aarch64_expand_prologue, aarch64_expand_epilogue)
+       (aarch64_use_return_insn_p): Handle polynomial frame offsets.
+       (aarch64_anchor_offset): New function, split out from...
+       (aarch64_legitimize_address): ...here.
+       (aarch64_builtin_vectorization_cost): Handle polynomial
+       TYPE_VECTOR_SUBPARTS.
+       (aarch64_simd_check_vect_par_cnst_half): Handle polynomial
+       GET_MODE_NUNITS.
+       (aarch64_simd_make_constant, aarch64_expand_vector_init): Get the
+       number of elements from the PARALLEL rather than the mode.
+       (aarch64_shift_truncation_mask): Use GET_MODE_UNIT_BITSIZE
+       rather than GET_MODE_BITSIZE.
+       (aarch64_evpc_trn, aarch64_evpc_uzp, aarch64_evpc_ext)
+       (aarch64_evpc_rev, aarch64_evpc_dup, aarch64_evpc_zip)
+       (aarch64_expand_vec_perm_const_1): Handle polynomial
+       d->perm.length () and d->perm elements.
+       (aarch64_evpc_tbl): Likewise.  Use nelt rather than GET_MODE_NUNITS.
+       Apply to_constant to d->perm elements.
+       (aarch64_simd_valid_immediate, aarch64_vec_fpconst_pow_of_2): Handle
+       polynomial CONST_VECTOR_NUNITS.
+       (aarch64_move_pointer): Take amount as a poly_int64 rather
+       than an int.
+       (aarch64_progress_pointer): Avoid temporary variable.
+       * config/aarch64/aarch64.md (aarch64_<crc_variant>): Use
+       the mode attribute instead of GET_MODE.
+ 2018-01-11  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * bb-reorder.c (find_rarely_executed_basic_blocks_and_crossing_edges):
-       Put all BBs reachable only via paths crossing cold region to cold
-       region.
-       * cfgrtl.c (find_bbs_reachable_by_hot_paths): New function.
+       * config/aarch64/aarch64.c (aarch64_force_temporary): Assert that
+       x exists before using it.
+       (aarch64_add_constant_internal): Rename to...
+       (aarch64_add_offset_1): ...this.  Replace regnum with separate
+       src and dest rtxes.  Handle the case in which they're different,
+       including when the offset is zero.  Replace scratchreg with an rtx.
+       Use 2 additions if there is no spare register into which we can
+       move a 16-bit constant.
+       (aarch64_add_constant): Delete.
+       (aarch64_add_offset): Replace reg with separate src and dest
+       rtxes.  Take a poly_int64 offset instead of a HOST_WIDE_INT.
+       Use aarch64_add_offset_1.
+       (aarch64_add_sp, aarch64_sub_sp): Take the scratch register as
+       an rtx rather than an int.  Take the delta as a poly_int64
+       rather than a HOST_WIDE_INT.  Use aarch64_add_offset.
+       (aarch64_expand_mov_immediate): Update uses of aarch64_add_offset.
+       (aarch64_expand_prologue): Update calls to aarch64_sub_sp,
+       aarch64_allocate_and_probe_stack_space and aarch64_add_offset.
+       (aarch64_expand_epilogue): Update calls to aarch64_add_offset
+       and aarch64_add_sp.
+       (aarch64_output_mi_thunk): Use aarch64_add_offset rather than
+       aarch64_add_constant.
+ 2018-01-11  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-21  Richard Biener  <rguenther@suse.de>
+       * config/aarch64/aarch64.c (aarch64_reinterpret_float_as_int):
+       Use scalar_float_mode.
  
-       PR tree-optimization/81303
-       * tree-vect-loop.c (vect_estimate_min_profitable_iters): Take
-       into account prologue and epilogue iterations when raising
-       min_profitable_iters to sth at least covering one vector iteration.
+ 2018-01-11  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-21  Tamar Christina  <tamar.christina@arm.com>
+       * config/aarch64/aarch64-simd.md
+       (aarch64_fml<f16mac1>l<f16quad>_low<mode>): Avoid GET_MODE_NUNITS.
+       (aarch64_fml<f16mac1>l<f16quad>_high<mode>): Likewise.
+       (aarch64_fml<f16mac1>l_lane_lowv2sf): Likewise.
+       (aarch64_fml<f16mac1>l_lane_highv2sf): Likewise.
+       (aarch64_fml<f16mac1>lq_laneq_lowv4sf): Likewise.
+       (aarch64_fml<f16mac1>lq_laneq_highv4sf): Likewise.
+       (aarch64_fml<f16mac1>l_laneq_lowv2sf): Likewise.
+       (aarch64_fml<f16mac1>l_laneq_highv2sf): Likewise.
+       (aarch64_fml<f16mac1>lq_lane_lowv4sf): Likewise.
+       (aarch64_fml<f16mac1>lq_lane_highv4sf): Likewise.
  
-       * config/arm/arm.c (arm_test_cpu_arch_dat):
-       Check for overlap.
+ 2018-01-11  Prathamesh Kulkarni  <prathamesh.kulkarni@linaro.org>
  
- 2017-07-20  Nathan Sidwell  <nathan@acm.org>
+       PR target/83514
+       * config/arm/arm.c (arm_declare_function_name): Set arch_to_print if
+       targ_options->x_arm_arch_string is non NULL.
  
-       Remove TYPE_METHODS.
-       * tree.h (TYPE_METHODS): Delete.
-       * dwarf2out.c (gen_member_die): Member fns are on TYPE_FIELDS.
-       * dbxout.c (dbxout_type_fields): Ignore FUNCTION_DECLs.
-       (dbxout_type_methods): Scan TYPE_FIELDS.
-       (dbxout_type): Don't check TYPE_METHODS here.
-       * function.c (use_register_for_decl): Always ignore register for
-       class types when not optimizing.
-       * ipa-devirt.c (odr_types_equivalent_p): Delete TYPE_METHODS scan.
-       * tree.c (free_lang_data_in_type): Stitch out member functions and
-       templates from TYPE_FIELDS.
-       (build_distinct_type_copy, verify_type_variant,
-       verify_type): Member fns are on TYPE_FIELDS.
-       * tree-dump.c (dequeue_and_dump): No TYPE_METHODS.
-       * tree-pretty-print.c (dump_generic_node): Likewise.
+ 2018-01-11  Tamar Christina  <tamar.christina@arm.com>
  
- 2017-07-20  Jakub Jelinek  <jakub@redhat.com>
+       * config/aarch64/aarch64.h
+       (AARCH64_FL_FOR_ARCH8_4): Add AARCH64_FL_DOTPROD.
  
-       PR target/80846
-       * config/i386/i386.c (ix86_expand_vector_init_general): Handle
-       V2TImode and V4TImode.
-       (ix86_expand_vector_extract): Likewise.
-       * config/i386/sse.md (VMOVE): Enable V4TImode even for just
-       TARGET_AVX512F, instead of only for TARGET_AVX512BW.
-       (ssescalarmode): Handle V4TImode and V2TImode.
-       (VEC_EXTRACT_MODE): Add V4TImode and V2TImode.
-       (*vec_extractv2ti, *vec_extractv4ti): New insns.
-       (VEXTRACTI128_MODE): New mode iterator.
-       (splitter for *vec_extractv?ti first element): New.
-       (VEC_INIT_MODE): New mode iterator.
-       (vec_init<mode>): Consolidate 3 expanders into one using
-       VEC_INIT_MODE mode iterator.
+ 2018-01-11  Sudakshina Das  <sudi.das@arm.com>
  
- 2017-07-20  Alexander Monakov  <amonakov@ispras.ru>
+       PR target/82096
+       * expmed.c (emit_store_flag_force): Swap if const op0
+       and change VOIDmode to mode of op0.
  
-       * lra-assigns.c (pseudo_compare_func): Fix comparison step based on
-       non_spilled_static_chain_regno_p.
+ 2018-01-11  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-20  Alexander Monakov  <amonakov@ispras.ru>
+       PR rtl-optimization/83761
+       * caller-save.c (replace_reg_with_saved_mem): Pass bits rather
+       than bytes to mode_for_size.
  
-       * gimple-ssa-store-merging.c (sort_by_bitpos): Return 0 on equal bitpos.
+ 2018-01-10  Jan Hubicka  <hubicka@ucw.cz>
  
- 2017-07-20  Jan Hubicka  <hubicka@ucw.cz>
+       PR middle-end/83189
+       * gfortran.fortran-torture/compile/pr83189.f90: New testcase.
+       * tree-ssa-loop-manip.c (tree_transform_and_unroll_loop): Handle zero
+       profile.
  
-       * bb-reorder.c (connect_traces): Allow copying of blocks within
-       single partition.
+ 2018-01-10  Jan Hubicka  <hubicka@ucw.cz>
+       PR middle-end/83575
+       * cfgrtl.c (rtl_verify_edges): Only verify fixability of partition
+       when in layout mode.
+       (cfg_layout_finalize): Do not verify cfg before we are out of layout.
+       * cfgcleanup.c (try_optimize_cfg): Only verify flow info when doing
+       partition fixup.
+ 2018-01-10  Michael Collison  <michael.collison@arm.com>
+       * config/aarch64/aarch64-modes.def (V2HF): New VECTOR_MODE.
+       * config/aarch64/aarch64-option-extension.def: Add
+       AARCH64_OPT_EXTENSION of 'fp16fml'.
+       * config/aarch64/aarch64-c.c (aarch64_update_cpp_builtins):
+       (__ARM_FEATURE_FP16_FML): Define if TARGET_F16FML is true.
+       * config/aarch64/predicates.md (aarch64_lane_imm3): New predicate.
+       * config/aarch64/constraints.md (Ui7): New constraint.
+       * config/aarch64/iterators.md (VFMLA_W): New mode iterator.
+       (VFMLA_SEL_W): Ditto.
+       (f16quad): Ditto.
+       (f16mac1): Ditto.
+       (VFMLA16_LOW): New int iterator.
+       (VFMLA16_HIGH): Ditto.
+       (UNSPEC_FMLAL): New unspec.
+       (UNSPEC_FMLSL): Ditto.
+       (UNSPEC_FMLAL2): Ditto.
+       (UNSPEC_FMLSL2): Ditto.
+       (f16mac): New code attribute.
+       * config/aarch64/aarch64-simd-builtins.def
+       (aarch64_fmlal_lowv2sf): Ditto.
+       (aarch64_fmlsl_lowv2sf): Ditto.
+       (aarch64_fmlalq_lowv4sf): Ditto.
+       (aarch64_fmlslq_lowv4sf): Ditto.
+       (aarch64_fmlal_highv2sf): Ditto.
+       (aarch64_fmlsl_highv2sf): Ditto.
+       (aarch64_fmlalq_highv4sf): Ditto.
+       (aarch64_fmlslq_highv4sf): Ditto.
+       (aarch64_fmlal_lane_lowv2sf): Ditto.
+       (aarch64_fmlsl_lane_lowv2sf): Ditto.
+       (aarch64_fmlal_laneq_lowv2sf): Ditto.
+       (aarch64_fmlsl_laneq_lowv2sf): Ditto.
+       (aarch64_fmlalq_lane_lowv4sf): Ditto.
+       (aarch64_fmlsl_lane_lowv4sf): Ditto.
+       (aarch64_fmlalq_laneq_lowv4sf): Ditto.
+       (aarch64_fmlsl_laneq_lowv4sf): Ditto.
+       (aarch64_fmlal_lane_highv2sf): Ditto.
+       (aarch64_fmlsl_lane_highv2sf): Ditto.
+       (aarch64_fmlal_laneq_highv2sf): Ditto.
+       (aarch64_fmlsl_laneq_highv2sf): Ditto.
+       (aarch64_fmlalq_lane_highv4sf): Ditto.
+       (aarch64_fmlsl_lane_highv4sf): Ditto.
+       (aarch64_fmlalq_laneq_highv4sf): Ditto.
+       (aarch64_fmlsl_laneq_highv4sf): Ditto.
+       * config/aarch64/aarch64-simd.md:
+       (aarch64_fml<f16mac1>l<f16quad>_low<mode>): New pattern.
+       (aarch64_fml<f16mac1>l<f16quad>_high<mode>): Ditto.
+       (aarch64_simd_fml<f16mac1>l<f16quad>_low<mode>): Ditto.
+       (aarch64_simd_fml<f16mac1>l<f16quad>_high<mode>): Ditto.
+       (aarch64_fml<f16mac1>l_lane_lowv2sf): Ditto.
+       (aarch64_fml<f16mac1>l_lane_highv2sf): Ditto.
+       (aarch64_simd_fml<f16mac>l_lane_lowv2sf): Ditto.
+       (aarch64_simd_fml<f16mac>l_lane_highv2sf): Ditto.
+       (aarch64_fml<f16mac1>lq_laneq_lowv4sf): Ditto.
+       (aarch64_fml<f16mac1>lq_laneq_highv4sf): Ditto.
+       (aarch64_simd_fml<f16mac>lq_laneq_lowv4sf): Ditto.
+       (aarch64_simd_fml<f16mac>lq_laneq_highv4sf): Ditto.
+       (aarch64_fml<f16mac1>l_laneq_lowv2sf): Ditto.
+       (aarch64_fml<f16mac1>l_laneq_highv2sf): Ditto.
+       (aarch64_simd_fml<f16mac>l_laneq_lowv2sf): Ditto.
+       (aarch64_simd_fml<f16mac>l_laneq_highv2sf): Ditto.
+       (aarch64_fml<f16mac1>lq_lane_lowv4sf): Ditto.
+       (aarch64_fml<f16mac1>lq_lane_highv4sf): Ditto.
+       (aarch64_simd_fml<f16mac>lq_lane_lowv4sf): Ditto.
+       (aarch64_simd_fml<f16mac>lq_lane_highv4sf): Ditto.
+       * config/aarch64/arm_neon.h (vfmlal_low_u32): New intrinsic.
+       (vfmlsl_low_u32): Ditto.
+       (vfmlalq_low_u32): Ditto.
+       (vfmlslq_low_u32): Ditto.
+       (vfmlal_high_u32): Ditto.
+       (vfmlsl_high_u32): Ditto.
+       (vfmlalq_high_u32): Ditto.
+       (vfmlslq_high_u32): Ditto.
+       (vfmlal_lane_low_u32): Ditto.
+       (vfmlsl_lane_low_u32): Ditto.
+       (vfmlal_laneq_low_u32): Ditto.
+       (vfmlsl_laneq_low_u32): Ditto.
+       (vfmlalq_lane_low_u32): Ditto.
+       (vfmlslq_lane_low_u32): Ditto.
+       (vfmlalq_laneq_low_u32): Ditto.
+       (vfmlslq_laneq_low_u32): Ditto.
+       (vfmlal_lane_high_u32): Ditto.
+       (vfmlsl_lane_high_u32): Ditto.
+       (vfmlal_laneq_high_u32): Ditto.
+       (vfmlsl_laneq_high_u32): Ditto.
+       (vfmlalq_lane_high_u32): Ditto.
+       (vfmlslq_lane_high_u32): Ditto.
+       (vfmlalq_laneq_high_u32): Ditto.
+       (vfmlslq_laneq_high_u32): Ditto.
+       * config/aarch64/aarch64.h (AARCH64_FL_F16SML): New flag.
+       (AARCH64_FL_FOR_ARCH8_4): New.
+       (AARCH64_ISA_F16FML): New ISA flag.
+       (TARGET_F16FML): New feature flag for fp16fml.
+       (doc/invoke.texi): Document new fp16fml option.
+ 2018-01-10  Michael Collison  <michael.collison@arm.com>
+       * config/aarch64/aarch64-builtins.c:
+       (aarch64_types_ternopu_imm_qualifiers, TYPES_TERNOPUI): New.
+       * config/aarch64/aarch64-c.c (aarch64_update_cpp_builtins):
+       (__ARM_FEATURE_SHA3): Define if TARGET_SHA3 is true.
+       * config/aarch64/aarch64.h (AARCH64_FL_SHA3): New flags.
+       (AARCH64_ISA_SHA3): New ISA flag.
+       (TARGET_SHA3): New feature flag for sha3.
+       * config/aarch64/iterators.md (sha512_op): New int attribute.
+       (CRYPTO_SHA512): New int iterator.
+       (UNSPEC_SHA512H): New unspec.
+       (UNSPEC_SHA512H2): Ditto.
+       (UNSPEC_SHA512SU0): Ditto.
+       (UNSPEC_SHA512SU1): Ditto.
+       * config/aarch64/aarch64-simd-builtins.def
+       (aarch64_crypto_sha512hqv2di): New builtin.
+       (aarch64_crypto_sha512h2qv2di): Ditto.
+       (aarch64_crypto_sha512su0qv2di): Ditto.
+       (aarch64_crypto_sha512su1qv2di): Ditto.
+       (aarch64_eor3qv8hi): Ditto.
+       (aarch64_rax1qv2di): Ditto.
+       (aarch64_xarqv2di): Ditto.
+       (aarch64_bcaxqv8hi): Ditto.
+       * config/aarch64/aarch64-simd.md:
+       (aarch64_crypto_sha512h<sha512_op>qv2di): New pattern.
+       (aarch64_crypto_sha512su0qv2di): Ditto.
+       (aarch64_crypto_sha512su1qv2di): Ditto.
+       (aarch64_eor3qv8hi): Ditto.
+       (aarch64_rax1qv2di): Ditto.
+       (aarch64_xarqv2di): Ditto.
+       (aarch64_bcaxqv8hi): Ditto.
+       * config/aarch64/arm_neon.h (vsha512hq_u64): New intrinsic.
+       (vsha512h2q_u64): Ditto.
+       (vsha512su0q_u64): Ditto.
+       (vsha512su1q_u64): Ditto.
+       (veor3q_u16): Ditto.
+       (vrax1q_u64): Ditto.
+       (vxarq_u64): Ditto.
+       (vbcaxq_u16): Ditto.
+       * config/arm/types.md (crypto_sha512): New type attribute.
+       (crypto_sha3): Ditto.
+       (doc/invoke.texi): Document new sha3 option.
+ 2018-01-10  Michael Collison  <michael.collison@arm.com>
+       * config/aarch64/aarch64-builtins.c:
+       (aarch64_types_quadopu_imm_qualifiers, TYPES_QUADOPUI): New.
+       * config/aarch64/aarch64-c.c (aarch64_update_cpp_builtins):
+       (__ARM_FEATURE_SM3): Define if TARGET_SM4 is true.
+       (__ARM_FEATURE_SM4): Define if TARGET_SM4 is true.
+       * config/aarch64/aarch64.h (AARCH64_FL_SM4): New flags.
+       (AARCH64_ISA_SM4): New ISA flag.
+       (TARGET_SM4): New feature flag for sm4.
+       * config/aarch64/aarch64-simd-builtins.def
+       (aarch64_sm3ss1qv4si): Ditto.
+       (aarch64_sm3tt1aq4si): Ditto.
+       (aarch64_sm3tt1bq4si): Ditto.
+       (aarch64_sm3tt2aq4si): Ditto.
+       (aarch64_sm3tt2bq4si): Ditto.
+       (aarch64_sm3partw1qv4si): Ditto.
+       (aarch64_sm3partw2qv4si): Ditto.
+       (aarch64_sm4eqv4si): Ditto.
+       (aarch64_sm4ekeyqv4si): Ditto.
+       * config/aarch64/aarch64-simd.md:
+       (aarch64_sm3ss1qv4si): Ditto.
+       (aarch64_sm3tt<sm3tt_op>qv4si): Ditto.
+       (aarch64_sm3partw<sm3part_op>qv4si): Ditto.
+       (aarch64_sm4eqv4si): Ditto.
+       (aarch64_sm4ekeyqv4si): Ditto.
+       * config/aarch64/iterators.md (sm3tt_op): New int iterator.
+       (sm3part_op): Ditto.
+       (CRYPTO_SM3TT): Ditto.
+       (CRYPTO_SM3PART): Ditto.
+       (UNSPEC_SM3SS1): New unspec.
+       (UNSPEC_SM3TT1A): Ditto.
+       (UNSPEC_SM3TT1B): Ditto.
+       (UNSPEC_SM3TT2A): Ditto.
+       (UNSPEC_SM3TT2B): Ditto.
+       (UNSPEC_SM3PARTW1): Ditto.
+       (UNSPEC_SM3PARTW2): Ditto.
+       (UNSPEC_SM4E): Ditto.
+       (UNSPEC_SM4EKEY): Ditto.
+       * config/aarch64/constraints.md (Ui2): New constraint.
+       * config/aarch64/predicates.md (aarch64_imm2): New predicate.
+       * config/arm/types.md (crypto_sm3): New type attribute.
+       (crypto_sm4): Ditto.
+       * config/aarch64/arm_neon.h (vsm3ss1q_u32): New intrinsic.
+       (vsm3tt1aq_u32): Ditto.
+       (vsm3tt1bq_u32): Ditto.
+       (vsm3tt2aq_u32): Ditto.
+       (vsm3tt2bq_u32): Ditto.
+       (vsm3partw1q_u32): Ditto.
+       (vsm3partw2q_u32): Ditto.
+       (vsm4eq_u32): Ditto.
+       (vsm4ekeyq_u32): Ditto.
+       (doc/invoke.texi): Document new sm4 option.
+ 2018-01-10  Michael Collison  <michael.collison@arm.com>
+       * config/aarch64/aarch64-arches.def (armv8.4-a): New architecture.
+       * config/aarch64/aarch64.h (AARCH64_ISA_V8_4): New ISA flag.
+       (AARCH64_FL_FOR_ARCH8_4): New.
+       (AARCH64_FL_V8_4): New flag.
+       (doc/invoke.texi): Document new armv8.4-a option.
+ 2018-01-10  Michael Collison  <michael.collison@arm.com>
+       * config/aarch64/aarch64-c.c (aarch64_update_cpp_builtins):
+       (__ARM_FEATURE_AES): Define if TARGET_AES is true.
+       (__ARM_FEATURE_SHA2): Define if TARGET_SHA2 is true.
+       * config/aarch64/aarch64-option-extension.def: Add
+       AARCH64_OPT_EXTENSION of 'sha2'.
+       (aes): Add AARCH64_OPT_EXTENSION of 'aes'.
+       (crypto): Disable sha2 and aes if crypto disabled.
+       (crypto): Enable aes and sha2 if enabled.
+       (simd): Disable sha2 and aes if simd disabled.
+       * config/aarch64/aarch64.h (AARCH64_FL_AES, AARCH64_FL_SHA2):
+       New flags.
+       (AARCH64_ISA_AES, AARCH64_ISA_SHA2): New ISA flags.
+       (TARGET_SHA2): New feature flag for sha2.
+       (TARGET_AES): New feature flag for aes.
+       * config/aarch64/aarch64-simd.md:
+       (aarch64_crypto_aes<aes_op>v16qi): Make pattern
+       conditional on TARGET_AES.
+       (aarch64_crypto_aes<aesmc_op>v16qi): Ditto.
+       (aarch64_crypto_sha1hsi): Make pattern conditional
+       on TARGET_SHA2.
+       (aarch64_crypto_sha1hv4si): Ditto.
+       (aarch64_be_crypto_sha1hv4si): Ditto.
+       (aarch64_crypto_sha1su1v4si): Ditto.
+       (aarch64_crypto_sha1<sha1_op>v4si): Ditto.
+       (aarch64_crypto_sha1su0v4si): Ditto.
+       (aarch64_crypto_sha256h<sha256_op>v4si): Ditto.
+       (aarch64_crypto_sha256su0v4si): Ditto.
+       (aarch64_crypto_sha256su1v4si): Ditto.
+       (doc/invoke.texi): Document new aes and sha2 options.
+ 2018-01-10  Martin Sebor  <msebor@redhat.com>
+       PR tree-optimization/83781
+       * gimple-fold.c (get_range_strlen): Avoid treating arrays of pointers
+       as string arrays.
+ 2018-01-11  Martin Sebor  <msebor@gmail.com>
+           Prathamesh Kulkarni  <prathamesh.kulkarni@linaro.org>
+       PR tree-optimization/83501
+       PR tree-optimization/81703
+       * tree-ssa-strlen.c (get_string_cst): Rename...
+       (get_string_len): ...to this.  Handle global constants.
+       (handle_char_store): Adjust.
+ 2018-01-10  Kito Cheng  <kito.cheng@gmail.com>
+           Jim Wilson  <jimw@sifive.com>
+       * config/riscv/riscv-protos.h (riscv_output_return): New.
+       * config/riscv/riscv.c (struct machine_function): New naked_p field.
+       (riscv_attribute_table, riscv_output_return),
+       (riscv_handle_fndecl_attribute, riscv_naked_function_p),
+       (riscv_allocate_stack_slots_for_args, riscv_warn_func_return): New.
+       (riscv_compute_frame_info): Only compute frame->mask if not a naked
+       function.
+       (riscv_expand_prologue): Add early return for naked function.
+       (riscv_expand_epilogue): Likewise.
+       (riscv_function_ok_for_sibcall): Return false for naked function.
+       (riscv_set_current_function): New.
+       (TARGET_SET_CURRENT_FUNCTION, TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS),
+       (TARGET_ATTRIBUTE_TABLE, TARGET_WARN_FUNC_RETURN): New.
+       * config/riscv/riscv.md (simple_return): Call riscv_output_return.
+       * doc/extend.texi (RISC-V Function Attributes): New.
+ 2018-01-10  Michael Meissner  <meissner@linux.vnet.ibm.com>
+       * config/rs6000/rs6000.c (is_complex_IBM_long_double): Explicitly
+       check for 128-bit long double before checking TCmode.
+       * config/rs6000/rs6000.h (FLOAT128_IEEE_P): Explicitly check for
+       128-bit long doubles before checking TFmode or TCmode.
+       (FLOAT128_IBM_P): Likewise.
+ 2018-01-10  Martin Sebor  <msebor@redhat.com>
+       PR tree-optimization/83671
+       * builtins.c (c_strlen): Unconditionally return zero for the empty
+       string.
+       Use -Warray-bounds for warnings.
+       * gimple-fold.c (get_range_strlen): Handle non-constant lengths
+       for non-constant array indices with COMPONENT_REF, arrays of
+       arrays, and pointers to arrays.
+       (gimple_fold_builtin_strlen): Determine and set length range for
+       non-constant character arrays.
+ 2018-01-10  Aldy Hernandez  <aldyh@redhat.com>
+       PR middle-end/81897
+       * tree-ssa-uninit.c (convert_control_dep_chain_into_preds): Skip
+       empty blocks.
+ 2018-01-10  Eric Botcazou  <ebotcazou@adacore.com>
+       * dwarf2out.c (dwarf2out_var_location): Do not pass NULL to fprintf.
+ 2018-01-10  Peter Bergner  <bergner@vnet.ibm.com>
+       PR target/83399
+       * config/rs6000/rs6000.c (print_operand) <'y'>: Use
+       VECTOR_MEM_ALTIVEC_OR_VSX_P.
+       * config/rs6000/vsx.md (*vsx_le_perm_load_<mode> for VSX_D): Use
+       indexed_or_indirect_operand predicate.
+       (*vsx_le_perm_load_<mode> for VSX_W): Likewise.
+       (*vsx_le_perm_load_v8hi): Likewise.
+       (*vsx_le_perm_load_v16qi): Likewise.
+       (*vsx_le_perm_store_<mode> for VSX_D): Likewise.
+       (*vsx_le_perm_store_<mode> for VSX_W): Likewise.
+       (*vsx_le_perm_store_v8hi): Likewise.
+       (*vsx_le_perm_store_v16qi): Likewise.
+       (eight unnamed splitters): Likewise.
+ 2018-01-10  Peter Bergner  <bergner@vnet.ibm.com>
+       * config/rs6000/x86intrin.h: Change #warning to #error. Update message.
+       * config/rs6000/emmintrin.h: Likewise.
+       * config/rs6000/mmintrin.h: Likewise.
+       * config/rs6000/xmmintrin.h: Likewise.
+ 2018-01-10  David Malcolm  <dmalcolm@redhat.com>
+       PR c++/43486
+       * tree-core.h: Document EXPR_LOCATION_WRAPPER_P's usage of
+       "public_flag".
+       * tree.c (tree_nop_conversion): Return true for location wrapper
+       nodes.
+       (maybe_wrap_with_location): New function.
+       (selftest::check_strip_nops): New function.
+       (selftest::test_location_wrappers): New function.
+       (selftest::tree_c_tests): Call it.
+       * tree.h (STRIP_ANY_LOCATION_WRAPPER): New macro.
+       (maybe_wrap_with_location): New decl.
+       (EXPR_LOCATION_WRAPPER_P): New macro.
+       (location_wrapper_p): New inline function.
+       (tree_strip_any_location_wrapper): New inline function.
+ 2018-01-10  H.J. Lu  <hongjiu.lu@intel.com>
+       PR target/83735
+       * config/i386/i386.c (ix86_compute_frame_layout): Always adjust
+       stack_realign_offset for the largest alignment of stack slot
+       actually used.
+       (ix86_find_max_used_stack_alignment): New function.
+       (ix86_finalize_stack_frame_flags): Use it.  Set
+       max_used_stack_alignment if we don't realign stack.
+       * config/i386/i386.h (machine_function): Add
+       max_used_stack_alignment.
  
- 2017-07-20  Richard Biener  <rguenther@suse.de>
+ 2018-01-10  Christophe Lyon  <christophe.lyon@linaro.org>
  
-       * gimple.h (gimple_phi_result): Add gphi * overload.
-       (gimple_phi_result_ptr): Likewise.
-       (gimple_phi_arg): Likewise.  Adjust index assert to only
-       allow actual argument accesses rather than all slots available
-       by capacity.
-       (gimple_phi_arg_def): Add gphi * overload.
-       * tree-phinodes.c (make_phi_node): Initialize only actual
-       arguments.
-       (resize_phi_node): Clear memory not covered by old node,
-       do not initialize excess argument slots.
-       (reserve_phi_args_for_new_edge): Initialize new argument slot
-       completely.
+       * config/arm/arm.opt (-mbranch-cost): New option.
+       * config/arm/arm.h (BRANCH_COST): Take arm_branch_cost into
+       account.
  
- 2017-07-20  Bin Cheng  <bin.cheng@arm.com>
+ 2018-01-10  Segher Boessenkool  <segher@kernel.crashing.org>
  
-       PR tree-optimization/81388
-       Revert r238585:
-       2016-07-21  Bin Cheng  <bin.cheng@arm.com>
+       PR target/83629
+       * config/rs6000/rs6000.md (load_toc_v4_PIC_2, load_toc_v4_PIC_3b,
+       load_toc_v4_PIC_3c): Wrap const term in CONST RTL.
  
-       * tree-ssa-loop-niter.c (number_of_iterations_lt_to_ne): Clean up
-       by removing computation of may_be_zero.
+ 2018-01-10  Richard Biener  <rguenther@suse.de>
  
- 2017-07-18  Jan Hubicka  <hubicka@ucw.cz>
-           Tom de Vries  <tom@codesourcery.com>
+       PR debug/83765
+       * dwarf2out.c (gen_subprogram_die): Hoist old_die && declaration
+       early out so it also covers the case where we have a non-NULL
+       origin.
  
-       PR middle-end/81030
-       * cfgbuild.c (find_many_sub_basic_blocks): Update REG_BR_PROB note
-       when gimple level profile disagrees with what RTL expander did.
+ 2018-01-10  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-20  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/83753
+       * tree-vect-stmts.c (get_group_load_store_type): Use VMAT_CONTIGUOUS
+       for non-strided grouped accesses if the number of elements is 1.
  
-       PR tree-optimization/61171
-       * tree-vectorizer.h (slp_instance): Add reduc_phis member.
-       (vect_analyze_stmt): Add slp instance parameter.
-       (vectorizable_reduction): Likewise.
-       * tree-vect-loop.c (vect_analyze_loop_operations): Adjust.
-       (vect_is_simple_reduction): Deal with chains not detected
-       as SLP reduction chain, specifically not properly associated
-       chains containing a mix of plus/minus.
-       (get_reduction_op): Remove.
-       (get_initial_defs_for_reduction): Simplify, pass in whether
-       this is a reduction chain, pass in the SLP node for the PHIs.
-       (vect_create_epilog_for_reduction): Get the SLP instance as
-       arg and adjust.
-       (vectorizable_reduction): Get the SLP instance as arg.
-       During analysis remember the SLP node with the PHIs in the
-       instance.  Simplify getting at the vectorized reduction PHIs.
-       * tree-vect-slp.c (vect_slp_analyze_node_operations): Pass
-       through SLP instance.
-       (vect_slp_analyze_operations): Likewise.
-       * tree-vect-stmts.c (vect_analyze_stmt): Likewise.
-       (vect_transform_stmt): Likewise.
+ 2018-01-10  Jan Hubicka  <hubicka@ucw.cz>
  
- 2017-07-20  Tom de Vries  <tom@codesourcery.com>
+       PR target/81616
+       * i386.c (ix86_vectorize_builtin_gather): Check TARGET_USE_GATHER.
+       * i386.h (TARGET_USE_GATHER): Define.
+       * x86-tune.def (X86_TUNE_USE_GATHER): New.
  
-       PR tree-optimization/81489
-       * gimple-ssa-isolate-paths.c (find_implicit_erroneous_behavior): Move
-       read of phi arg location to before loop that modifies phi.
+ 2018-01-10  Martin Liska  <mliska@suse.cz>
  
- 2017-07-20  Naveen H.S  <Naveen.Hurugalawadi@cavium.com>
+       PR bootstrap/82831
+       * basic-block.h (CLEANUP_NO_PARTITIONING): New define.
+       * bb-reorder.c (pass_reorder_blocks::execute): Do not clean up
+       partitioning.
+       * cfgcleanup.c (try_optimize_cfg): Fix up partitioning if
+       CLEANUP_NO_PARTITIONING is not set.
  
-       * match.pd (((m1 >/</>=/<= m2) * d -> (m1 >/</>=/<= m2) ? d : 0):
-       New pattern.
+ 2018-01-10  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-19  Jan Hubicka  <hubicka@ucw.cz>
+       * doc/rtl.texi: Remove documentation of (const ...) wrappers
+       for vectors, as a partial revert of r254296.
+       * rtl.h (const_vec_p): Delete.
+       (const_vec_duplicate_p): Don't test for vector CONSTs.
+       (unwrap_const_vec_duplicate, const_vec_series_p): Likewise.
+       * expmed.c (make_tree): Likewise.
  
-       PR middle-end/81331
-       * except.c (execute): Fix ordering issue.
+       Revert:
+       * common.md (E, F): Use CONSTANT_P instead of checking for
+       CONST_VECTOR.
+       * emit-rtl.c (gen_lowpart_common): Use const_vec_p instead of
+       checking for CONST_VECTOR.
+ 2018-01-09  Jan Hubicka  <hubicka@ucw.cz>
+       PR middle-end/83575
+       * predict.c (force_edge_cold): Handle in more sane way edges
+       with no prediction.
+ 2018-01-09  Carl Love  <cel@us.ibm.com>
+       * config/rs6000/altivec.md (p8_vmrgow): Add support for V2DI, V2DF,
+       V4SI, V4SF types.
+       (p8_vmrgew): Add support for V2DI, V2DF, V4SF types.
+       * config/rs6000/rs6000-builtin.def: Add definitions for FLOAT2_V2DF,
+       VMRGEW_V2DI, VMRGEW_V2DF, VMRGEW_V4SF, VMRGOW_V4SI, VMRGOW_V4SF,
+       VMRGOW_V2DI, VMRGOW_V2DF.  Remove definition for VMRGOW.
+       * config/rs6000/rs6000-c.c (VSX_BUILTIN_VEC_FLOAT2,
+       P8V_BUILTIN_VEC_VMRGEW, P8V_BUILTIN_VEC_VMRGOW):  Add definitions.
+       * config/rs6000/rs6000-protos.h: Add extern definition for
+       rs6000_generate_float2_double_code.
+       * config/rs6000/rs6000.c (rs6000_generate_float2_double_code): Add
+       function.
+       * config/rs6000/vsx.md (vsx_xvcdpsp): Add define_insn.
+       (float2_v2df): Add define_expand.
  
- 2017-07-19  Segher Boessenkool  <segher@kernel.crashing.org>
+ 2018-01-09  Uros Bizjak  <ubizjak@gmail.com>
  
-       PR rtl-optimization/81423
-       * combine.c (make_compound_operation_int): Don't try to optimize
-       the AND of a SUBREG of an LSHIFTRT if that SUBREG is paradoxical.
+       PR target/83628
+       * combine.c (force_int_to_mode) <case ASHIFT>: Use mode instead of
+       op_mode in the force_to_mode call.
  
- 2017-07-19  Segher Boessenkool  <segher@kernel.crashing.org>
+ 2018-01-09  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       PR rtl-optimization/81423
-       * simplify-rtx.c (simplify_truncation): Handle truncating an IOR
-       with a constant that is -1 in the truncated to mode.
+       * config/aarch64/aarch64.c (aarch64_evpc_trn): Use d.perm.series_p
+       instead of checking each element individually.
+       (aarch64_evpc_uzp): Likewise.
+       (aarch64_evpc_zip): Likewise.
+       (aarch64_evpc_ext): Likewise.
+       (aarch64_evpc_rev): Likewise.
+       (aarch64_evpc_dup): Test the encoding for a single duplicated element,
+       instead of checking each element individually.  Return true without
+       generating rtl if
+       (aarch64_vectorize_vec_perm_const): Use all_from_input_p to test
+       whether all selected elements come from the same input, instead of
+       checking each element individually.  Remove calls to gen_rtx_REG,
+       start_sequence and end_sequence and instead assert that no rtl is
+       generated.
  
- 2017-07-19  Jan Hubicka  <hubicka@ucw.cz>
+ 2018-01-09  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       * predict.c (propagate_unlikely_bbs_forward): Break out from ...
-       (determine_unlikely_bbs): ... here.
-       * predict.h (propagate_unlikely_bbs_forward): Declare.
-       * cfgexpand.c (pass_expand::execute): Use it.
-       * bb-reorder.c (sanitize_hot_paths): Do not consider known to be
-       unlikely edges.
-       (find_rarely_executed_basic_blocks_and_crossing_edges): Use
-       propagate_unlikely_bbs_forward.
+       * config/aarch64/aarch64.c (aarch64_legitimate_constant_p): Fix
+       order of HIGH and CONST checks.
  
- 2017-07-19  Jan Hubicka  <hubicka@ucw.cz>
+ 2018-01-09  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       PR middle-end/81331
-       * except.c (maybe_add_nop_after_section_switch): New function.
-       (execute): Use it.
+       * tree-vect-stmts.c (permute_vec_elements): Create a fresh variable
+       if the destination isn't an SSA_NAME.
  
- 2017-07-19  Tom de Vries  <tom@codesourcery.com>
+ 2018-01-09  Richard Biener  <rguenther@suse.de>
  
-       * gimple.h (gimple_phi_set_arg): Make assert more strict.
+       PR tree-optimization/83668
+       * graphite.c (canonicalize_loop_closed_ssa): Add edge argument,
+       move prologue...
+       (canonicalize_loop_form): ... here, renamed from ...
+       (canonicalize_loop_closed_ssa_form): ... this and amended to
+       swap successor edges for loop exit blocks to make us use
+       the RPO order we need for initial schedule generation.
  
- 2017-07-19  Tom de Vries  <tom@codesourcery.com>
+ 2018-01-09  Joseph Myers  <joseph@codesourcery.com>
  
-       * gimple.h (gimple_phi_arg): Make assert more strict.
+       PR tree-optimization/64811
+       * match.pd: When optimizing comparisons with Inf, avoid
+       introducing or losing exceptions from comparisons with NaN.
  
- 2017-07-19  Steven Munroe  <munroesj@gcc.gnu.org>
+ 2018-01-09  Martin Liska  <mliska@suse.cz>
  
-       * config.gcc (powerpc*-*-*): Add mmintrin.h.
-       * config/rs6000/mmintrin.h: New file.
-       * config/rs6000/x86intrin.h [__ALTIVEC__]: Include mmintrin.h.
+       PR sanitizer/82517
+       * asan.c (shadow_mem_size): Add gcc_assert.
  
- 2017-07-19  Jakub Jelinek  <jakub@redhat.com>
+ 2018-01-09  Georg-Johann Lay  <avr@gjlay.de>
  
-       PR tree-optimization/81346
-       * match.pd: Optimize (X - 1U) <= INT_MAX-1U into (int) X > 0.
+       Don't save registers in main().
  
- 2017-07-19  Tom de Vries  <tom@codesourcery.com>
+       PR target/83738
+       * doc/invoke.texi (AVR Options) [-mmain-is-OS_task]: Document it.
+       * config/avr/avr.opt (-mmain-is-OS_task): New target option.
+       * config/avr/avr.c (avr_set_current_function): Don't error if
+       naked, OS_task or OS_main are specified at the same time.
+       (avr_function_ok_for_sibcall): Don't disable sibcalls for OS_task,
+       OS_main.
+       (avr_insert_attributes) [-mmain-is-OS_task] <main>: Add OS_task
+       attribute.
+       * common/config/avr/avr-common.c (avr_option_optimization_table):
+       Switch on -mmain-is-OS_task for optimizing compilations.
  
-       * config/nvptx/nvptx.md (VECIM): Add V2DI.
+ 2018-01-09  Richard Biener  <rguenther@suse.de>
  
- 2017-07-19  Tom de Vries  <tom@codesourcery.com>
+       PR tree-optimization/83572
+       * graphite.c: Include cfganal.h.
+       (graphite_transform_loops): Connect infinite loops to exit
+       and remove fake edges at the end.
  
-       * config/nvptx/nvptx-modes.def: Add V2DImode.
-       * config/nvptx/nvptx-protos.h (nvptx_data_alignment): Declare.
-       * config/nvptx/nvptx.c (nvptx_ptx_type_from_mode): Handle V2DImode.
-       (nvptx_output_mov_insn): Handle lack of mov.b128.
-       (nvptx_print_operand): Handle 'H' and 'L' codes.
-       (nvptx_vector_mode_supported): Allow V2DImode.
-       (nvptx_preferred_simd_mode): New function.
-       (nvptx_data_alignment): New function.
-       (TARGET_VECTORIZE_PREFERRED_SIMD_MODE): Redefine to
-       nvptx_preferred_simd_mode.
-       * config/nvptx/nvptx.h (STACK_BOUNDARY, BIGGEST_ALIGNMENT): Change from
-       64 to 128 bits.
-       (DATA_ALIGNMENT): Define.  Set to nvptx_data_alignment.
+ 2018-01-09  Jan Hubicka  <hubicka@ucw.cz>
  
- 2017-07-19  Tom de Vries  <tom@codesourcery.com>
+       * ipa-inline.c (edge_badness): Revert accidental checkin.
  
-       * config/nvptx/nvptx-modes.def: New file.  Add V2SImode.
-       * config/nvptx/nvptx.c (nvptx_ptx_type_from_mode): Handle V2SImode.
-       (nvptx_vector_mode_supported): New function.  Allow V2SImode.
-       (TARGET_VECTOR_MODE_SUPPORTED_P): Redefine to nvptx_vector_mode_supported.
-       * config/nvptx/nvptx.md (VECIM): New mode iterator. Add V2SI.
-       (mov<VECIM>_insn): New define_insn.
-       (define_expand "mov<VECIM>): New define_expand.
+ 2018-01-09  Jan Hubicka  <hubicka@ucw.cz>
  
- 2017-07-19  Tom de Vries  <tom@codesourcery.com>
+       PR ipa/80763
+       * ipa-comdats.c (set_comdat_group): Only set comdat group of real
+       symbols; not inline clones.
  
-       * config/nvptx/nvptx.c (nvptx_print_operand): Handle v2 vector mode.
+ 2018-01-09  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-07-19  Jakub Jelinek  <jakub@redhat.com>
+       PR target/83507
+       * modulo-sched.c (schedule_reg_moves): Punt if we'd need to move
+       hard registers.  Formatting fixes.
  
-       PR tree-optimization/81346
-       * fold-const.h (fold_div_compare, range_check_type): Declare.
-       * fold-const.c (range_check_type): New function.
-       (build_range_check): Use range_check_type.
-       (fold_div_compare): No longer static, rewritten into
-       a match.pd helper function.
-       (fold_comparison): Don't call fold_div_compare here.
-       * match.pd (X / C1 op C2): New optimization using fold_div_compare
-       as helper function.
+       PR preprocessor/83722
+       * gcc.c (try_generate_repro): Pass
+       &temp_stderr_files[RETRY_ICE_ATTEMPTS - 1] rather than
+       &temp_stdout_files[RETRY_ICE_ATTEMPTS - 1] as last argument to
+       do_report_bug.
  
- 2017-07-19  Nathan Sidwell  <nathan@acm.org>
+ 2018-01-08  Monk Chiang  <sh.chiang04@gmail.com>
+           Kito Cheng  <kito.cheng@gmail.com>
  
-       * tree.h (TYPE_MINVAL, TYPE_MAXVAL): Rename to ...
-       (TYPE_MIN_VALUE_RAW, TYPE_MAX_VALUE_RAW): ... these.
-       * tree.c (find_decls_types_r, verify_type): Use
-       TYPE_{MIN,MAX}_VALUE_RAW.
-       * lto-streamer-out.c (DFS::DFS_write_tree_body): Likewise.
-       (hash_tree): Likewise.
-       * tree-streamer-in.c (lto_input_ts_type_non_common_tree_pointers):
-       Likewise.
-       * tree-streamer-out.c (write_ts_type_non_common_tree_pointers):
-       Likewise.
+       * config/riscv/riscv.c (machine_function::is_leaf): Remove field.
+       (riscv_leaf_function_p): Delete.
+       (riscv_function_ok_for_sibcall): Return false when TARGET_SAVE_RESTORE.
  
- 2017-07-18  Tom de Vries  <tom@codesourcery.com>
+ 2018-01-08  Aaron Sawdey  <acsawdey@linux.vnet.ibm.com>
  
-       PR middle-end/81464
-       * omp-expand.c (expand_omp_for_static_chunk): Handle
-       equal-argument loop exit phi.
+       * config/rs6000/rs6000-string.c (do_load_for_compare_from_addr): New
+       function.
+       (do_ifelse): New function.
+       (do_isel): New function.
+       (do_sub3): New function.
+       (do_add3): New function.
+       (do_load_mask_compare): New function.
+       (do_overlap_load_compare): New function.
+       (expand_compare_loop): New function.
+       (expand_block_compare): Call expand_compare_loop() when appropriate.
+       * config/rs6000/rs6000.opt (-mblock-compare-inline-limit): Change
+       option description.
+       (-mblock-compare-inline-loop-limit): New option.
+ 2018-01-08  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+       PR target/83677
+       * config/rs6000/altivec.md (*altivec_vpermr_<mode>_internal):
+       Reverse order of second and third operands in first alternative.
+       * config/rs6000/rs6000.c (rs6000_expand_vector_set): Reverse order
+       of first and second elements in UNSPEC_VPERMR vector.
+       (altivec_expand_vec_perm_le): Likewise.
+ 2018-01-08  Jeff Law  <law@redhat.com>
+       PR rtl-optimization/81308
+       * tree-switch-conversion.c (cfg_altered): New file scoped static.
+       (process_switch): If group_case_labels makes a change, then set
+       cfg_altered.
+       (pass_convert_switch::execute): If a switch is converted, then
+       set cfg_altered.  Return TODO_cfg_cleanup if cfg_altered is true.
  
- 2017-07-18  Uros Bizjak  <ubizjak@gmail.com>
+       PR rtl-optimization/81308
+       * recog.c (split_all_insns): Conditionally cleanup the CFG after
+       splitting insns.
  
-       PR target/81471
-       * config/i386/i386.md (rorx_immediate_operand): New mode attribute.
-       (*bmi2_rorx<mode>3_1): Use rorx_immediate_operand as
-       operand 2 predicate.
-       (*bmi2_rorxsi3_1_zext): Use const_0_to_31_operand as
-       operand 2 predicate.
-       (ror,rol -> rorx splitters): Use const_int_operand as
-       operand 2 predicate.
+ 2018-01-08  Vidya Praveen  <vidyapraveen@arm.com>
+       PR target/83663 - Revert r255946
+       * config/aarch64/aarch64.c (aarch64_expand_vector_init): Modify code
+       generation for cases where splatting a value is not useful.
+       * simplify-rtx.c (simplify_ternary_operation): Simplify vec_merge
+       across a vec_duplicate and a paradoxical subreg forming a vector
+       mode to a vec_concat.
+ 2018-01-08  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
+       * config/arm/t-aprofile (MULTILIB_MATCHES): Add mapping rules for
+       -march=armv8.3-a variants.
+       * config/arm/t-multilib: Likewise.
+       * config/arm/t-arm-elf: Likewise.  Handle dotprod extension.
+ 2018-01-08  Aaron Sawdey  <acsawdey@linux.vnet.ibm.com>
+       * config/rs6000/rs6000.md (cceq_ior_compare): Remove * so I can use it
+       to generate rtl.
+       (cceq_ior_compare_complement): Give it a name so I can use it, and
+       change boolean_or_operator predicate to boolean_operator so it can
+       be used to generate a crand.
+       (eqne): New code iterator.
+       (bd/bd_neg): New code_attrs.
+       (<bd>_<mode>): New name for ctr<mode>_internal[12] now combined into
+       a single define_insn.
+       (<bd>tf_<mode>): A new insn pattern for the conditional form branch
+       decrement (bdnzt/bdnzf/bdzt/bdzf).
+       * config/rs6000/rs6000.c (rs6000_legitimate_combined_insn): Updated
+       with the new names of the branch decrement patterns, and added the
+       names of the branch decrement conditional patterns.
+ 2018-01-08  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/83563
+       * graphite.c (canonicalize_loop_closed_ssa_form): Reset the SCEV
+       cache.
+ 2018-01-08  Richard Biener  <rguenther@suse.de>
+       PR middle-end/83713
+       * convert.c (do_narrow): Properly guard TYPE_OVERFLOW_WRAPS checks.
+ 2018-01-08  Richard Biener  <rguenther@suse.de>
+       PR tree-optimization/83685
+       * tree-ssa-pre.c (create_expression_by_pieces): Do not insert
+       references to abnormals.
+ 2018-01-08  Richard Biener  <rguenther@suse.de>
+       PR lto/83719
+       * dwarf2out.c (output_indirect_strings): Handle empty
+       skeleton_debug_str_hash.
+       (dwarf2out_early_finish): Index strings for -gsplit-dwarf.
+ 2018-01-08  Claudiu Zissulescu  <claziss@synopsys.com>
+       * config/arc/arc.c (TARGET_TRAMPOLINE_ADJUST_ADDRESS): Delete.
+       (emit_store_direct): Likewise.
+       (arc_trampoline_adjust_address): Likewise.
+       (arc_asm_trampoline_template): New function.
+       (arc_initialize_trampoline): Use asm_trampoline_template.
+       (TARGET_ASM_TRAMPOLINE_TEMPLATE): Define.
+       * config/arc/arc.h (TRAMPOLINE_SIZE): Adjust to 16.
+       * config/arc/arc.md (flush_icache): Delete pattern.
+ 2018-01-08  Claudiu Zissulescu  <claziss@synopsys.com>
+       * config/arc/arc-c.def (__ARC_UNALIGNED__): New define.
+       * config/arc/arc.h (STRICT_ALIGNMENT): Control this macro using
+       munaligned-access.
+ 2018-01-08  Sebastian Huber  <sebastian.huber@embedded-brains.de>
+       PR target/83681
+       * config/epiphany/epiphany.h (make_pass_mode_switch_use): Guard
+       by not USED_FOR_TARGET.
+       (make_pass_resolve_sw_modes): Likewise.
+ 2018-01-08  Sebastian Huber  <sebastian.huber@embedded-brains.de>
+       * config/nios2/nios2.h (nios2_section_threshold): Guard by not
+       USED_FOR_TARGET.
+ 2018-01-08  Richard Biener  <rguenther@suse.de>
+       PR middle-end/83580
+       * tree-data-ref.c (split_constant_offset): Remove STRIP_NOPS.
+ 2018-01-08  Richard Biener  <rguenther@suse.de>
+       PR middle-end/83517
+       * match.pd ((t * 2) / 2) -> t): Add missing :c.
+ 2018-01-06  Aldy Hernandez  <aldyh@redhat.com>
+       PR middle-end/81897
+       * tree-ssa-uninit.c (compute_control_dep_chain): Do not bail on
+       basic blocks with a small number of successors.
+       (convert_control_dep_chain_into_preds): Improve handling of
+       forwarder blocks.
+       (dump_predicates): Split apart into...
+       (dump_pred_chain): ...here...
+       (dump_pred_info): ...and here.
+       (can_one_predicate_be_invalidated_p): Add debugging printfs.
+       (can_chain_union_be_invalidated_p): Improve check for invalidation
+       of paths.
+       (uninit_uses_cannot_happen): Avoid unnecessary if
+       convert_control_dep_chain_into_preds yielded nothing.
+ 2018-01-06  Martin Sebor  <msebor@redhat.com>
+       PR tree-optimization/83640
+       * gimple-ssa-warn-restrict.c (builtin_access::builtin_access): Avoid
+       subtracting negative offset from size.
+       (builtin_access::overlap): Adjust offset bounds of the access to fall
+       within the size of the object if possible.
+ 2018-01-06  Richard Sandiford  <richard.sandiford@linaro.org>
+       PR rtl-optimization/83699
+       * expmed.c (extract_bit_field_1): Restrict the vector usage of
+       extract_bit_field_as_subreg to cases in which the extracted
+       value is also a vector.
+       * lra-constraints.c (process_alt_operands): Test for the equivalence
+       substitutions when detecting a possible reload cycle.
+ 2018-01-06  Jakub Jelinek  <jakub@redhat.com>
+       PR debug/83480
+       * toplev.c (process_options): Don't enable debug_nonbind_markers_p
+       by default if flag_selective_scheduling{,2}.  Formatting fixes.
+       PR rtl-optimization/83682
+       * rtl.h (const_vec_duplicate_p): Only return true for VEC_DUPLICATE
+       if it has non-VECTOR_MODE element mode.
+       (vec_duplicate_p): Likewise.
+       PR middle-end/83694
+       * cfgexpand.c (expand_debug_expr): Punt if mode1 is VOIDmode
+       and bitsize might be greater than MAX_BITSIZE_MODE_ANY_INT.
+ 2018-01-05  Jakub Jelinek  <jakub@redhat.com>
+       PR target/83604
+       * config/i386/i386-builtin.def
+       (__builtin_ia32_vgf2p8affineinvqb_v64qi,
+       __builtin_ia32_vgf2p8affineqb_v64qi, __builtin_ia32_vgf2p8mulb_v64qi):
+       Require also OPTION_MASK_ISA_AVX512F in addition to
+       OPTION_MASK_ISA_GFNI.
+       (__builtin_ia32_vgf2p8affineinvqb_v16qi_mask,
+       __builtin_ia32_vgf2p8affineqb_v16qi_mask): Require
+       OPTION_MASK_ISA_AVX512VL instead of OPTION_MASK_ISA_SSE in addition
+       to OPTION_MASK_ISA_GFNI.
+       (__builtin_ia32_vgf2p8mulb_v32qi_mask): Require
+       OPTION_MASK_ISA_AVX512VL in addition to OPTION_MASK_ISA_GFNI and
+       OPTION_MASK_ISA_AVX512BW.
+       (__builtin_ia32_vgf2p8mulb_v16qi_mask): Require
+       OPTION_MASK_ISA_AVX512VL instead of OPTION_MASK_ISA_AVX512BW in
+       addition to OPTION_MASK_ISA_GFNI.
+       (__builtin_ia32_vgf2p8affineinvqb_v16qi,
+       __builtin_ia32_vgf2p8affineqb_v16qi, __builtin_ia32_vgf2p8mulb_v16qi):
+       Require OPTION_MASK_ISA_SSE2 instead of OPTION_MASK_ISA_SSE in addition
+       to OPTION_MASK_ISA_GFNI.
+       * config/i386/i386.c (def_builtin): Change to builtin isa/isa2 being
+       a requirement for all ISAs rather than any of them with a few
+       exceptions.
+       (ix86_add_new_builtins): Clear OPTION_MASK_ISA_64BIT from isa before
+       processing.
+       (ix86_expand_builtin): Require all ISAs from builtin's isa and isa2
+       bitmasks to be enabled with 3 exceptions, instead of requiring any
+       enabled ISA with lots of exceptions.
+       * config/i386/sse.md (vgf2p8affineinvqb_<mode><mask_name>,
+       vgf2p8affineqb_<mode><mask_name>, vgf2p8mulb_<mode><mask_name>):
+       Change avx512bw in isa attribute to avx512f.
+       * config/i386/sgxintrin.h: Add license boilerplate.
+       * config/i386/vaesintrin.h: Likewise.  Fix macro spelling __AVX512F
+       to __AVX512F__ and __AVX512VL to __AVX512VL__.
+       (_mm256_aesdec_epi128, _mm256_aesdeclast_epi128, _mm256_aesenc_epi128,
+       _mm256_aesenclast_epi128): Enable temporarily avx if __AVX__ is not
+       defined.
+       * config/i386/gfniintrin.h (_mm_gf2p8mul_epi8,
+       _mm_gf2p8affineinv_epi64_epi8, _mm_gf2p8affine_epi64_epi8): Enable
+       temporarily sse2 rather than sse if not enabled already.
  
- 2017-07-18  Richard Biener  <rguenther@suse.de>
+       PR target/83604
+       * config/i386/sse.md (VI248_VLBW): Rename to ...
+       (VI248_AVX512VL): ... this.  Don't guard V32HI with TARGET_AVX512BW.
+       (vpshrd_<mode><mask_name>, vpshld_<mode><mask_name>,
+       vpshrdv_<mode>, vpshrdv_<mode>_mask, vpshrdv_<mode>_maskz,
+       vpshrdv_<mode>_maskz_1, vpshldv_<mode>, vpshldv_<mode>_mask,
+       vpshldv_<mode>_maskz, vpshldv_<mode>_maskz_1): Use VI248_AVX512VL
+       mode iterator instead of VI248_VLBW.
  
-       PR tree-optimization/81410
-       * tree-vect-stmts.c (vectorizable_load): Properly adjust for
-       the gap in the ! slp_perm SLP case after each group.
+ 2018-01-05  Jan Hubicka  <hubicka@ucw.cz>
  
- 2017-07-18  Jan Hubicka  <hubicka@ucw.cz>
+       * ipa-fnsummary.c (record_modified_bb_info): Add OP.
+       (record_modified): Skip clobbers; add debug output.
+       (param_change_prob): Use sreal frequencies.
  
-       PR middle-end/81463
-       * cfgloopmanip.c (scale_loop_profile): Watch out for zero frequency
-       again.
+ 2018-01-05  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-18  Jan Hubicka  <hubicka@ucw.cz>
+       * tree-vect-data-refs.c (vect_compute_data_ref_alignment): Don't
+       punt for user-aligned variables.
  
-       PR middle-end/81462
-       * predict.c (set_even_probabilities): Cleanup; do not affect
-       probabilities that are already known.
-       (combine_predictions_for_bb): Call even when count is set.
+ 2018-01-05  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-18  Nathan Sidwell  <nathan@acm.org>
+       * tree-chrec.c (chrec_contains_symbols): Return true for
+       POLY_INT_CST.
  
-       * tree-parloops.c (try_transform_to_exit_first_loop_alt): Use
-       TYPE_MAX_VALUE.
+ 2018-01-05  Sudakshina Das  <sudi.das@arm.com>
  
- 2017-07-18  Bin Cheng  <bin.cheng@arm.com>
+       PR target/82439
+       * simplify-rtx.c (simplify_relational_operation_1): Add simplifications
+       of (x|y) == x for BICS pattern.
  
-       PR target/81408
-       * tree-ssa-loop-niter.c (number_of_iterations_exit): Dump missed
-       optimization for loop niter analysis.
+ 2018-01-05  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-07-18  Georg-Johann Lay  <avr@gjlay.de>
+       PR tree-optimization/83605
+       * gimple-ssa-strength-reduction.c: Include tree-eh.h.
+       (find_candidates_dom_walker::before_dom_children): Ignore stmts that
+       can throw.
  
-       PR target/81473
-       * config/avr/avr.c (avr_optimize_casesi): Don't use
-       INT8_MIN, INT8_MAX, UINT8_MAX, INT16_MIN, INT16_MAX, UINT16_MAX.
+ 2018-01-05  Sebastian Huber  <sebastian.huber@embedded-brains.de>
  
- 2017-07-18  Robin Dapp  <rdapp@linux.vnet.ibm.com>
+       * config.gcc (epiphany-*-elf*): Add (epiphany-*-rtems*) configuration.
+       * config/epiphany/rtems.h: New file.
  
-       * tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Remove
-       body_cost_vec from _vect_peel_extended_info.
-       (vect_peeling_hash_get_lowest_cost): Do not set body_cost_vec.
-       (vect_peeling_hash_choose_best_peeling): Remove body_cost_vec and
-       npeel.
+ 2018-01-04  Jakub Jelinek  <jakub@redhat.com>
+           Uros Bizjak  <ubizjak@gmail.com>
  
- 2017-07-18  Bin Cheng  <bin.cheng@arm.com>
+       PR target/83554
+       * config/i386/i386.md (*<rotate_insn>hi3_1 splitter): Use
+       QIreg_operand instead of register_operand predicate.
+       * config/i386/i386.c (ix86_rop_should_change_byte_p,
+       set_rop_modrm_reg_bits, ix86_mitigate_rop): Use -mmitigate-rop in
+       comments instead of -fmitigate[-_]rop.
  
-       * config/arm/arm.c (emit_unlikely_jump): Remove unused var.
+ 2018-01-04  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
  
- 2017-07-18  Richard Biener  <rguenther@suse.de>
+       PR bootstrap/81926
+       * cgraphunit.c (symbol_table::compile): Switch to text_section
+       before calling assembly_start debug hook.
+       * run-rtl-passes.c (run_rtl_passes): Likewise.
+       Include output.h.
  
-       PR tree-optimization/80620
-       PR tree-optimization/81403
-       * tree-ssa-pre.c (phi_translate_1): Clear range and points-to
-       info when re-using a VN table entry.
+ 2018-01-04  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-18  Richard Biener  <rguenther@suse.de>
+       * tree-vrp.c (extract_range_from_binary_expr_1): Check
+       range_int_cst_p rather than !symbolic_range_p before calling
+       extract_range_from_multiplicative_op_1.
  
-       PR tree-optimization/81418
-       * tree-vect-loop.c (vectorizable_reduction): Properly compute
-       vectype_in.  Verify that with lane-reducing reduction operations
-       we have a single def-use cycle.
+ 2018-01-04  Jeff Law  <law@redhat.com>
  
- 2017-07-17  Carl Love  <cel@us.ibm.com>
+       * tree-ssa-math-opts.c (execute_cse_reciprocals_1): Remove
+       redundant test in assertion.
  
      Revert commit r249424   2017-06-20  Carl Love  <cel@us.ibm.com>
2018-01-04  Richard Sandiford  <richard.sandiford@linaro.org>
  
-       * config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Add
-       ALTIVEC_BUILTIN_VMULESW, ALTIVEC_BUILTIN_VMULEUW,
-       ALTIVEC_BUILTIN_VMULOSW, ALTIVEC_BUILTIN_VMULOUW entries.
-       * config/rs6000/rs6000.c (rs6000_gimple_fold_builtin,
-       builtin_function_type): Add ALTIVEC_BUILTIN_* case statements.
-       * config/rs6000/altivec.md (VMULEUW, VMULESW, VMULOUW,
-       VMULOSW): New enum "unspec" values.
-       (vec_widen_umult_even_v4si, vec_widen_smult_even_v4si,
-       vec_widen_umult_odd_v4si, vec_widen_smult_odd_v4si,
-       altivec_vmuleuw, altivec_vmulesw, altivec_vmulouw,
-       altivec_vmulosw): New patterns.
-       * config/rs6000/rs6000-builtin.def (VMULEUW, VMULESW, VMULOUW,
-       VMULOSW): Add definitions.
- 2017-07-17  Uros Bizjak  <ubizjak@gmail.com>
+       * doc/rtl.texi: Document machine_mode wrapper classes.
  
-       * config/alpha/alpha.c: Include predict.h.
+ 2018-01-04  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-17  Yury Gribov  <tetra2005@gmail.com>
+       * fold-const.c (fold_ternary_loc): Check tree_fits_uhwi_p before
+       using tree_to_uhwi.
  
-       * tree-vrp.c (compare_assert_loc): Fix comparison function
-       to return predictable results.
+ 2018-01-04  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-17  Claudiu Zissulescu  <claziss@synopsys.com>
+       * tree-ssa-forwprop.c (is_combined_permutation_identity): Allow
+       the VEC_PERM_EXPR fold to fail.
  
-       * config/arc/arc.md (adddi3): Remove support for mexpand-adddi
-       option.
-       (subdi3): Likewise.
-       * config/arc/arc.opt (mexpand-adddi): Deprecate it.
-       * doc/invoke.texi (mexpand-adddi): Update text.
+ 2018-01-04  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-07-17  Claudiu Zissulescu  <claziss@synopsys.com>
+       PR debug/83585
+       * bb-reorder.c (insert_section_boundary_note): Set has_bb_partition
+       to switched_sections.
  
-       * config/arc/arc.md (clzsi2): Expand to an arc_clzsi2 instruction
-       that also clobbers the CC register. The old expand code is moved
-       to ...
-       (*arc_clzsi2): ... here.
-       (ctzsi2): Expand to an arc_ctzsi2 instruction that also clobbers
-       the CC register. The old expand code is moved to ...
-       (arc_ctzsi2): ... here.
+ 2018-01-04  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-17  Claudiu Zissulescu  <claziss@synopsys.com>
+       PR target/83680
+       * config/arm/arm.c (arm_vectorize_vec_perm_const): Fix inverted
+       test for d.testing.
  
-       * config/arc/arc.opt (mindexed-loads): Use initial value
-       TARGET_INDEXED_LOADS_DEFAULT.
-       (mauto-modify-reg): Use initial value
-       TARGET_AUTO_MODIFY_REG_DEFAULT.
-       * config/arc/elf.h (TARGET_INDEXED_LOADS_DEFAULT): Define.
-       (TARGET_AUTO_MODIFY_REG_DEFAULT): Likewise.
-       * config/arc/linux.h (TARGET_INDEXED_LOADS_DEFAULT): Define.
-       (TARGET_AUTO_MODIFY_REG_DEFAULT): Likewise.
+ 2018-01-04  Peter Bergner  <bergner@vnet.ibm.com>
  
- 2017-07-17  Martin Liska  <mliska@suse.cz>
+       PR target/83387
+       * config/rs6000/rs6000.c (rs6000_discover_homogeneous_aggregate): Do not
+       allow arguments in FP registers if TARGET_HARD_FLOAT is false.
  
-       PR sanitizer/81302
-       * opts.c (finish_options): Do not allow -fgnu-tm
-       w/ -fsanitize={kernel-,}address.  Say sorry.
+ 2018-01-04  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-07-17  Bin Cheng  <bin.cheng@arm.com>
+       PR debug/83666
+       * cfgexpand.c (expand_debug_expr) <case BIT_FIELD_REF>: Punt if mode
+       is BLKmode and bitpos not zero or mode change is needed.
  
-       PR target/81369
-       * tree-loop-distribution.c (classify_partition): Only assert on
-       numer of iterations.
-       (merge_dep_scc_partitions): Delete parameter.  Update function call.
-       (distribute_loop): Remove code handling loop with unknown niters.
-       (pass_loop_distribution::execute): Skip loop with unknown niters.
+ 2018-01-04  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-17  Bin Cheng  <bin.cheng@arm.com>
+       PR target/83675
+       * config/sparc/sparc.c (sparc_vectorize_vec_perm_const): Require
+       TARGET_VIS2.
  
-       PR target/81369
-       * tree-loop-distribution.c (merge_dep_scc_partitions): Sink call to
-       function sort_partitions_by_post_order.
+ 2018-01-04  Uros Bizjak  <ubizjak@gmail.com>
  
- 2017-07-17  Bin Cheng  <bin.cheng@arm.com>
+       PR target/83628
+       * config/alpha/alpha.md (*sadd<modesuffix>): Use ASHIFT
+       instead of MULT rtx.  Update all corresponding splitters.
+       (*saddl_se): Ditto.
+       (*ssub<modesuffix>): Ditto.
+       (*ssubl_se): Ditto.
+       (*cmp_sadd_di): Update split patterns.
+       (*cmp_sadd_si): Ditto.
+       (*cmp_sadd_sidi): Ditto.
+       (*cmp_ssub_di): Ditto.
+       (*cmp_ssub_si): Ditto.
+       (*cmp_ssub_sidi): Ditto.
+       * config/alpha/predicates.md (const23_operand): New predicate.
+       * config/alpha/alpha.c (alpha_rtx_costs) [PLUS, MINUS]:
+       Look for ASHIFT, not MULT inner operand.
+       (alpha_split_conditional_move): Update for *sadd<modesuffix> change.
  
-       PR tree-optimization/81374
-       * tree-loop-distribution.c (pass_loop_distribution::execute): Record
-       the max index of basic blocks, rather than number of basic blocks.
+ 2018-01-04  Martin Liska  <mliska@suse.cz>
  
- 2017-07-17  Claudiu Zissulescu  <claziss@synopsys.com>
+       PR gcov-profile/83669
+       * gcov.c (output_intermediate_file): Add version to intermediate
+       gcov file.
+       * doc/gcov.texi: Document new field 'version' in intermediate
+       file format. Fix location of '-k' option of gcov command.
  
-       * config/arc/arc-protos.h (arc_legitimate_pc_offset_p): Remove
-       proto.
-       (arc_legitimate_pic_operand_p): Likewise.
-       * config/arc/arc.c (arc_legitimate_pic_operand_p): Remove
-       function.
-       (arc_needs_pcl_p): Likewise.
-       (arc_legitimate_pc_offset_p): Likewise.
-       (arc_legitimate_pic_addr_p): Remove LABEL_REF case, as this
-       function is also used in constrains.md.
-       (arc_legitimate_constant_p): Use arc_legitimate_pic_addr_p to
-       validate pic constants. Handle CONST_INT, CONST_DOUBLE, MINUS and
-       PLUS.  Only return true/false in known cases, otherwise assert.
-       (arc_legitimate_address_p): Remove arc_legitimate_pic_addr_p as it
-       is already called in arc_legitimate_constant_p.
-       * config/arc/arc.h (CONSTANT_ADDRESS_P): Consider also LABEL for
-       pic addresses.
-       (LEGITIMATE_PIC_OPERAND_P): Use
-       arc_raw_symbolic_reference_mentioned_p function.
-       * config/arc/constraints.md (Cpc): Use arc_legitimate_pic_addr_p
-       function.
-       (Cal): Likewise.
-       (C32): Likewise.
- 2017-07-17  Claudiu Zissulescu  <claziss@synopsys.com>
-       Andrew Burgess  <andrew.burgess@embecosm.com>
-       * config/arc/arc-protos.h (arc_compute_function_type): Change prototype.
-       (arc_return_address_register): New function.
-       * config/arc/arc.c (arc_handle_fndecl_attribute): New function.
-       (arc_handle_fndecl_attribute): Add naked attribute.
-       (TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS): Define.
-       (TARGET_WARN_FUNC_RETURN): Likewise.
-       (arc_allocate_stack_slots_for_args): New function.
-       (arc_warn_func_return): Likewise.
-       (machine_function): Change type fn_type.
-       (arc_compute_function_type): Consider new naked function type,
-       change function return type.
-       (arc_must_save_register): Adapt to handle new
-       arc_compute_function_type's return type.
-       (arc_expand_prologue): Likewise.
-       (arc_expand_epilogue): Likewise.
-       (arc_return_address_regs): Delete.
-       (arc_return_address_register): New function.
-       (arc_epilogue_uses): Use above function.
-       * config/arc/arc.h (arc_return_address_regs): Delete prototype.
-       (arc_function_type): Change encoding, add naked type.
-       (ARC_INTERRUPT_P): Change to handle the new encoding.
-       (ARC_FAST_INTERRUPT_P): Likewise.
-       (ARC_NORMAL_P): Define.
-       (ARC_NAKED_P): Likewise.
-       (arc_compute_function_type): Delete prototype.
-       * config/arc/arc.md (in_ret_delay_slot): Use
-       arc_return_address_register function.
-       (simple_return): Likewise.
-       (p_return_i): Likewise.
+ 2018-01-04  Martin Liska  <mliska@suse.cz>
  
- 2017-07-17  Jakub Jelinek  <jakub@redhat.com>
+       PR ipa/82352
+       * ipa-icf.c (sem_function::merge): Do not cross comdat boundary.
  
-       PR tree-optimization/81428
-       * match.pd (X / X -> one): Don't optimize _Fract divisions, as 1
-       can't be built for those types.
+ 2018-01-04  Jakub Jelinek  <jakub@redhat.com>
  
- 2017-07-17  Georg-Johann Lay  <avr@gjlay.de>
-       Remove stuff dead since r239246.
-       * config/avr/avr-arch.h (avr_inform_devices): Remove dead proto.
-       * config/avr/avr-devices.c (mcu_name, comparator, avr_mcus_str)
-       (avr_inform_devices): Remove dead stuff.
- 2017-07-17  Tamar Christina  <tamar.christina@arm.com>
-       * config/arm/arm_neon.h: Fix softp typo.
- 2017-07-17  Jakub Jelinek  <jakub@redhat.com>
-       PR tree-optimization/81365
-       * tree-ssa-phiprop.c (propagate_with_phi): When considering hoisting
-       aggregate moves onto bb predecessor edges, make sure there are no
-       loads that could alias the lhs in between the start of bb and the
-       loads from *phi.
- 2017-07-17  Georg-Johann Lay  <avr@gjlay.de>
-       PR 80929
-       * config/avr/avr.c (avr_mul_highpart_cost): New static function.
-       (avr_rtx_costs_1) [TRUNCATE]: Use it to compute mul_highpart cost.
-       [LSHIFTRT, outer_code = TRUNCATE]: Same.
- 2017-07-17  Jakub Jelinek  <jakub@redhat.com>
-       PR tree-optimization/81396
-       * tree-ssa-math-opts.c (struct symbolic_number): Add n_ops field.
-       (init_symbolic_number): Initialize it to 1.
-       (perform_symbolic_merge): Add n_ops from both operands into the new
-       n_ops.
-       (find_bswap_or_nop): Don't consider n->n == cmpnop computations
-       without base_addr as useless if they need more than one operation.
-       (bswap_replace): Handle !bswap case for NULL base_addr.
- 2017-07-17  Tom de Vries  <tom@codesourcery.com>
-       PR target/81069
-       * config/nvptx/nvptx.c (nvptx_single): Insert diverging branch as late
-       as possible.
- 2017-07-17  Sebastian Huber  <sebastian.huber@embedded-brains.de>
-       * config/sparc/rtemself.h (TARGET_OS_CPP_BUILTINS): Add
-       conditional builtin define __FIX_LEON3FT_B2BST.
- 2017-07-17  Daniel Cederman  <cederman@gaisler.com>
-       * config/sparc/t-rtems: Add mfix-gr712rc multilibs. Replace
-       MULTILIB_EXCEPTIONS with MULTILIB_REQUIRED. Match -mfix-gr712rc
-       with -mfix-ut700.
- 2017-07-16  Eric Botcazou  <ebotcazou@adacore.com>
-       PR rtl-optimization/81424
-       * optabs.c (prepare_cmp_insn): Use copy_to_reg instead of force_reg
-       to remove potential trapping from operands if -fnon-call-exceptions.
- 2017-07-16  Jan Hubicka  <hubicka@ucw.cz>
-       * tree-ssa-loop-manip.c (tree_transform_and_unroll_loop): Use
-       profile_probability for scaling.
-       * tree-vect-loop.c (scale_profile_for_vect_loop): Likewise.
- 2017-07-16  Jan Hubicka  <hubicka@ucw.cz>
-       * cgraph.c (cgraph_edge::redirect_call_stmt_to_caller): Cleanup.
- 2017-07-16  Jan Hubicka  <hubicka@ucw.cz>
-       * cfgloopmanip.c (scale_loop_profile): Avoid use of REG_BR_PROB_BASE
-       fixpoint arithmetics.
- 2017-07-16  Jan Hubicka  <hubicka@ucw.cz>
-       * tree-ssa-loop-unswitch.c (hoist_guard): Avoid use of REG_BR_PROB_BASE
-       fixpoint arithmetics.
- 2017-07-16  Jan Hubicka  <hubicka@ucw.cz>
-       * asan.c (create_cond_insert_point): Avoid use of REG_BR_PROB_BASE
-       fixpoint arithmetics.
- 2017-07-16  Jan Hubicka  <hubicka@ucw.cz>
-       * profile-count.h (profile_probability::from_reg_br_prob_note,
-       profile_probability::to_reg_br_prob_note): New functions.
-       * doc/rtl.texi (REG_BR_PROB_NOTE): Update documentation.
-       * reg-notes.h (REG_BR_PROB, REG_BR_PRED): Update docs.
-       * predict.c (probability_reliable_p): Update.
-       (edge_probability_reliable_p): Update.
-       (br_prob_note_reliable_p): Update.
-       (invert_br_probabilities): Update.
-       (add_reg_br_prob_note): New function.
-       (combine_predictions_for_insn): Update.
-       * asan.c (asan_clear_shadow): Update.
-       * cfgbuild.c (compute_outgoing_frequencies): Update.
-       * cfgrtl.c (force_nonfallthru_and_redirect): Update.
-       (update_br_prob_note): Update.
-       (rtl_verify_edges): Update.
-       (purge_dead_edges): Update.
-       (fixup_reorder_chain): Update.
-       * emit-rtl.c (try_split): Update.
-       * ifcvt.c (cond_exec_process_insns): Update.
-       (cond_exec_process_if_block): Update.
-       (dead_or_predicable): Update.
-       * internal-fn.c (expand_addsub_overflow): Update.
-       (expand_neg_overflow): Update.
-       (expand_mul_overflow): Update.
-       * loop-doloop.c (doloop_modify): Update.
-       * loop-unroll.c (compare_and_jump_seq): Update.
-       * optabs.c (emit_cmp_and_jump_insn_1): Update.
-       * predict.h: Update.
-       * reorg.c (mostly_true_jump): Update.
-       * rtl.h: Update.
-       * config/aarch64/aarch64.c (aarch64_emit_unlikely_jump): Update.
-       * config/alpha/alpha.c (emit_unlikely_jump): Update.
-       * config/arc/arc.c: (emit_unlikely_jump): Update.
-       * config/arm/arm.c: (emit_unlikely_jump): Update.
-       * config/bfin/bfin.c (cbranch_predicted_taken_p): Update.
-       * config/frv/frv.c (frv_print_operand_jump_hint): Update.
-       * config/i386/i386.c (ix86_expand_split_stack_prologue): Update.
-       (ix86_print_operand): Update.
-       (ix86_split_fp_branch): Update.
-       (predict_jump): Update.
-       * config/ia64/ia64.c (ia64_print_operand): Update.
-       * config/mmix/mmix.c (mmix_print_operand): Update.
-       * config/powerpcspe/powerpcspe.c (output_cbranch): Update.
-       (rs6000_expand_split_stack_prologue): Update.
-       * config/rs6000/rs6000.c: Update.
-       * config/s390/s390.c (s390_expand_vec_strlen): Update.
-       (s390_expand_vec_movstr): Update.
-       (s390_expand_cs_tdsi): Update.
-       (s390_expand_split_stack_prologue): Update.
-       * config/sh/sh.c (sh_print_operand): Update.
-       (expand_cbranchsi4): Update.
-       (expand_cbranchdi4): Update.
-       * config/sparc/sparc.c (output_v9branch): Update.
-       * config/spu/spu.c (get_branch_target): Update.
-       (ea_load_store_inline): Update.
-       * config/tilegx/tilegx.c (cbranch_predicted_p): Update.
-       * config/tilepro/tilepro.c: Update.
- 2017-07-16  Eric Botcazou  <ebotcazou@adacore.com>
-       * gimplify.c (mostly_copy_tree_r): Revert latest change.
-       (gimplify_save_expr): Likewise.
- 2017-07-07  Jan Hubicka  <hubicka@ucw.cz>
-       * ipa-visibility.c (function_and_variable_visibility): Fix pasto.
- 2017-07-07  Jan Hubicka  <hubicka@ucw.cz>
-       * ipa-fnsummary.c (pass_data_ipa_fn_summary): Use
-       TV_IPA_FNSUMMARY.
-       * timevar.def (TV_IPA_FNSUMMARY): Define.
- 2017-07-16  Daniel Cederman  <cederman@gaisler.com>
-       * config/sparc/sparc.md (divdf3_fix): Add NOP to prevent back
-       to back store errata sensitive sequence from being generated.
-       (sqrtdf2_fix): Likewise.
- 2017-07-07  Jan Hubicka  <hubicka@ucw.cz>
-       * tree-ssa-threadupdate.c (compute_path_counts,
-       update_joiner_offpath_counts): Use profile_probability.
- 2017-07-15  Thomas Preud'homme  <thomas.preudhomme@arm.com>
+       * gimple-ssa-sprintf.c (parse_directive): Cast second dir.len to uhwi.
  
-       Revert:
-       2017-07-14  Thomas Preud'homme  <thomas.preudhomme@arm.com>
-       * config/arm/arm-c.c (arm_cpu_builtins): Define
-       __ARM_FEATURE_NUMERIC_MAXMIN solely based on TARGET_VFP5.
- 2017-07-14  Kelvin Nilsen  <kelvin@gcc.gnu.org>
-       * config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Add
-       array entries to represent __ieee128 versions of the
-       scalar_test_data_class, scalar_test_neg, scalar_extract_exp,
-       scalar_extract_sig, and scalar_insert_exp built-in functions.
-       (altivec_resolve_overloaded_builtin): Add special case handling
-       for the __builtin_scalar_insert_exp function, as represented by
-       the P9V_BUILTIN_VEC_VSIEDP constant.
-       * config/rs6000/rs6000-builtin.def (VSEEQP): Add scalar extract
-       exponent support for __ieee128 argument.
-       (VSESQP): Add scalar extract signature support for __ieee128
-       argument.
-       (VSTDCNQP): Add scalar test negative support for __ieee128
-       argument.
-       (VSIEQP): Add scalar insert exponent support for __int128 argument
-       with __ieee128 result.
-       (VSIEQPF): Add scalar insert exponent support for __ieee128
-       argument with __ieee128 result.
-       (VSTDCQP): Add scalar test data class support for __ieee128
-       argument.
-       (VSTDCNQP): Add overload support for scalar test negative with
-       __ieee128 argument.
-       (VSTDCQP): Add overload support for scalar test data class
-       __ieee128 argument.
-       * config/rs6000/vsx.md (UNSPEC_VSX_SXSIG) Replace
-       UNSPEC_VSX_SXSIGDP.
-       (UNSPEC_VSX_SIEXPQP): New constant.
-       (xsxexpqp): New insn for VSX scalar extract exponent quad
-       precision.
-       (xsxsigqp): New insn for VSX scalar extract significand quad
-       precision.
-       (xsiexpqpf): New insn for VSX scalar insert exponent quad
-       precision with floating point argument.
-       (xststdcqp): New expand for VSX scalar test data class quad
-       precision.
-       (xststdcnegqp): New expand for VSX scalar test negative quad
-       precision.
-       (xststdcqp): New insn to match expansions for VSX scalar test data
-       class quad precision and VSX scalar test negative quad precision.
-       * config/rs6000/rs6000.c (rs6000_expand_binop_builtin): Add
-       special case operand checking to enforce that second operand of
-       VSX scalar test data class with quad precision argument is a 7-bit
-       unsigned literal.
-       * doc/extend.texi (PowerPC AltiVec Built-in Functions): Add
-       prototypes and descriptions of __ieee128 versions of
-       scalar_extract_exp, scalar_extract_sig, scalar_insert_exp,
-       scalar_test_data_class, and scalar_test_neg built-in functions.
- 2016-07-14  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
-       PR tree-optimization/81162
-       * gimple-ssa-strength-reduction.c (replace_mult_candidate): Don't
-       replace a negate with an add.
- 2017-07-14  James Greenhalgh  <james.greenhalgh@arm.com>
-       * doc/invoke.texi (arm/-mcpu): Document +crypto.
- 2017-07-14  Thomas Preud'homme  <thomas.preudhomme@arm.com>
-       * config/arm/arm-c.c (arm_cpu_builtins): Define
-       __ARM_FEATURE_NUMERIC_MAXMIN solely based on TARGET_VFP5.
- 2017-07-14  Thomas Preud'homme  <thomas.preudhomme@arm.com>
-       * config/arm/arm-cpus.in (cortex-r52): Add new entry.
-       (armv8-r): Set ARM Cortex-R52 as default CPU.
-       * config/arm/arm-tables.opt: Regenerate.
-       * config/arm/arm-tune.md: Regenerate.
-       * config/arm/driver-arm.c (arm_cpu_table): Add entry for ARM
-       Cortex-R52.
-       * doc/invoke.texi: Mention -mtune=cortex-r52 and availability of fp.dp
-       extension for -mcpu=cortex-r52.
- 2017-07-14  Thomas Preud'homme  <thomas.preudhomme@arm.com>
-       * config/arm/arm-isa.h (isa_bit_FP_ARMv8): Delete enumerator.
-       (ISA_FP_ARMv8): Define as ISA_FPv5 and ISA_FP_D32.
-       * config/arm/arm-cpus.in (armv8-r): Define fp.sp as enabling FPv5.
-       (fp-armv8): Define it as FP_ARMv8 only.
-       config/arm/arm.h (TARGET_FPU_ARMV8): Delete.
-       (TARGET_VFP_FP16INST): Define using TARGET_VFP5 rather than
-       TARGET_FPU_ARMV8.
-       config/arm/arm.c (arm_rtx_costs_internal): Replace checks against
-       TARGET_FPU_ARMV8 by checks against TARGET_VFP5.
-       * config/arm/arm-builtins.c (arm_builtin_vectorized_function): Define
-       first ARM_CHECK_BUILTIN_MODE definition using TARGET_VFP5 rather
-       than TARGET_FPU_ARMV8.
-       * config/arm/arm-c.c (arm_cpu_builtins): Likewise for
-       __ARM_FEATURE_NUMERIC_MAXMIN macro definition.
-       * config/arm/arm.md (cmov<mode>): Condition on TARGET_VFP5 rather than
-       TARGET_FPU_ARMV8.
-       * config/arm/neon.md (neon_vrint): Likewise.
-       (neon_vcvt): Likewise.
-       (neon_<fmaxmin_op><mode>): Likewise.
-       (<fmaxmin><mode>3): Likewise.
-       * config/arm/vfp.md (l<vrint_pattern><su_optab><mode>si2): Likewise.
-       * config/arm/predicates.md (arm_cond_move_operator): Check against
-       TARGET_VFP5 rather than TARGET_FPU_ARMV8 and fix spacing.
- 2017-07-14  Jackson Woodruff  <jackson.woodruff@arm.com>
-       * config/aarch64/aarch64.c (aarch64_print_operand): Move comments
-       to top of function.
- 2017-07-14  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
-       * gimple-ssa-store-merging.c (clear_bit_region): Replace reference to
-       loop in comment with memset.
- 2017-07-14  Martin Liska  <mliska@suse.cz>
-       * cfgexpand.c (expand_gimple_basic_block): Remove dead comment.
-       * dwarf2out.c (is_java): Remove the function.
-       (output_pubname): Remove usage of the function.
-       (lower_bound_default): Remove usage of DW_LANG_Java.
-       (gen_compile_unit_die): Likewise.
-       * gcc.c: Remove compiler defaults for .java and .zip files.
-       * gimple-expr.c (remove_suffix): Change as there's no longer
-       extension than 4-letter one.
-       * gimplify.c (mostly_copy_tree_r): Remove Java-special part.
-       (gimplify_save_expr): Likewise.
-       * ipa-utils.h (polymorphic_type_binfo_p): Remove the comment
-       as it's possible even for other languages than Java.
-       * langhooks.h (struct lang_hooks): Remove Java from a comment.
-       * lto-opts.c (lto_write_options): Remove reference to Java.
-       * opts.c (strip_off_ending): Update file extension handling.
-       * tree-cfg.c (verify_gimple_call): Remove comment with Java.
-       * tree-eh.c (lower_resx): Likewise.
-       * tree.c (free_lang_data_in_type): Remove dead code.
-       (find_decls_types_r): Likewise.
-       (build_common_builtin_nodes): Remove Java from a comment.
-       (verify_type): Remove dead code.
-       * varasm.c (assemble_external): Remove Java from a comment.
- 2017-07-14  Martin Liska  <mliska@suse.cz>
-       * opts.c (finish_options): Add quotes.
-       (common_handle_option): Likewise.
- 2017-07-14  Martin Liska  <mliska@suse.cz>
-       * dbxout.c (get_lang_number): Do not handle GNU Pascal.
-       * dbxout.h (extern void dbxout_stab_value_internal_label_diff):
-       Remove N_SO_PASCAL.
-       * dwarf2out.c (lower_bound_default): Do not handle
-       DW_LANG_Pascal83.
-       (gen_compile_unit_die): Likewise.
-       * gcc.c: Remove default extension binding for GNU Pascal.
-       * stmt.c: Remove Pascal language from a comment.
-       * xcoffout.c: Likewise.
+ 2018-01-03  Martin Sebor  <msebor@redhat.com>
  
- 2017-07-13  David Malcolm  <dmalcolm@redhat.com>
+       PR tree-optimization/83655
+       * gimple-ssa-warn-restrict.c (wrestrict_dom_walker::check_call): Avoid
+       checking calls with invalid arguments.
  
-       PR c/81405
-       * diagnostic-show-locus.c (fixit_cmp): New function.
-       (layout::layout): Sort m_fixit_hints.
-       (column_range::column_range): Assert that the values are valid.
-       (struct char_span): New struct.
-       (correction::overwrite): New method.
-       (struct source_line): New struct.
-       (line_corrections::add_hint): Add assertions.  Reimplement memcpy
-       calls in terms of classes source_line and char_span, and
-       correction::overwrite.
-       (selftest::test_overlapped_fixit_printing_2): New function.
-       (selftest::diagnostic_show_locus_c_tests): Call it.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-13  Will Schmidt  <will_schmidt@vnet.ibm.com>
+       * tree-vect-stmts.c (vect_get_store_rhs): New function.
+       (vectorizable_mask_load_store): Delete.
+       (vectorizable_call): Return false for masked loads and stores.
+       (vectorizable_store): Handle IFN_MASK_STORE.  Use vect_get_store_rhs
+       instead of gimple_assign_rhs1.
+       (vectorizable_load): Handle IFN_MASK_LOAD.
+       (vect_transform_stmt): Don't set is_store for call_vec_info_type.
  
-       * config/rs6000/rs6000.c (rs6000_gimple_fold_builtin): Return
-       early if there is no lhs.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-13  Martin Liska  <mliska@suse.cz>
+       * tree-vect-stmts.c (vect_build_gather_load_calls): New function,
+       split out from...
+       (vectorizable_mask_load_store): ...here.
+       (vectorizable_load): ...and here.
  
-       * dwarf2out.c (gen_pointer_type_die): Remove dead code.
-       (gen_reference_type_die): Likewise.
-       * stor-layout.c: Remove Pascal-related comment.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-13  Martin Liska  <mliska@suse.cz>
+       * tree-vect-stmts.c (vect_build_all_ones_mask)
+       (vect_build_zero_merge_argument): New functions, split out from...
+       (vectorizable_load): ...here.
  
-       * opts.c (finish_options): Add quotes to error messages.
-       (parse_sanitizer_options): Likewise.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-13  Thomas Preud'homme  <thomas.preudhomme@arm.com>
+       * tree-vect-stmts.c (vect_check_store_rhs): New function,
+       split out from...
+       (vectorizable_mask_load_store): ...here.
+       (vectorizable_store): ...and here.
  
-       * doc/invoke.texi (armv8-r): Document +fp.sp ARMv8-R extension.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-13  Richard Earnshaw  <rearnsha@arm.com>
+       * tree-vect-stmts.c (vect_check_load_store_mask): New function,
+       split out from...
+       (vectorizable_mask_load_store): ...here.
  
-       * config/arm/vxworks.h (TARGET_ENDIAN_DEFAULT): Define.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-13  Maxim Ostapenko  <m.ostapenko@samsung.com>
+       * tree-vectorizer.h (vec_load_store_type): Moved from tree-vec-stmts.c
+       (vect_model_store_cost): Take a vec_load_store_type instead of a
+       vect_def_type.
+       * tree-vect-stmts.c (vec_load_store_type): Move to tree-vectorizer.h.
+       (vect_model_store_cost): Take a vec_load_store_type instead of a
+       vect_def_type.
+       (vectorizable_mask_load_store): Update accordingly.
+       (vectorizable_store): Likewise.
+       * tree-vect-slp.c (vect_analyze_slp_cost_1): Update accordingly.
  
-       * asan.c (asan_emit_allocas_unpoison): Use ptr_mode for arguments
-       during expansion.
-       * builtins.c (expand_asan_emit_allocas_unpoison): Likewise.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-12  Michael Meissner  <meissner@linux.vnet.ibm.com>
+       * tree-vect-loop.c (vect_transform_loop): Stub out scalar
+       IFN_MASK_LOAD calls here rather than...
+       * tree-vect-stmts.c (vectorizable_mask_load_store): ...here.
  
-       PR target/81193
-       * config/rs6000/rs6000-c.c (rs6000_cpu_cpp_builtins): If GLIBC
-       provides the hardware capability bits, define the macro
-       __BUILTIN_CPU_SUPPORTS__.
-       * config/rs6000/rs6000.c (cpu_expand_builtin): Generate a warning
-       if GLIBC does not provide the hardware capability bits.  Add a
-       gcc_unreachable call if the built-in cpu function is neither
-       __builtin_cpu_is nor __builtin_cpu_supports.
-       (rs6000_get_function_versions_dispatcher): Change the warning
-       that an old GLIBC is used which does not export the capability
-       bits to be an error.
-       * doc/extend.texi (target_clones attribute): Document the
-       restriction that GLIBC 2.23 or newer is needed on the PowerPC.
-       (PowerPC built-in functions): Document that GLIBC 2.23 or newer is
-       needed by __builtin_cpu_is and __builtin_cpu_supports.  Document
-       the macros defined by GCC if the newer GLIBC is available.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-12  Jeff Law  <law@redhat.com>
+       * expmed.c (extract_bit_field_1): For vector extracts,
+       fall back to extract_bit_field_as_subreg if vec_extract
+       isn't available.
  
-       * config/riscv/riscv.c: Remove unnecessary includes.  Reorder
-       remaining includes slightly.
-       * config/riscv/riscv-builtins.c: Include profile-count.h.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-12  Georg-Johann Lay  <avr@gjlay.de>
+       * lra-spills.c (pseudo_reg_slot_compare): Sort slots by whether
+       they are variable or constant sized.
+       (assign_stack_slot_num_and_sort_pseudos): Don't reuse variable-sized
+       slots for constant-sized data.
  
-       PR target/79883
-       * config/avr/avr.c (avr_set_current_function): In diagnostic
-       messages: Quote keywords and (parts of) identifiers.
-       [WITH_AVRLIBC]: Warn for functions named "ISR", "SIGNAL" or
-       "INTERUPT".
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-12  Carl Love  <cel@us.ibm.com>
+       * tree-vect-patterns.c (vect_recog_mask_conversion_pattern): When
+       handling COND_EXPRs with boolean comparisons, try to find a better
+       basis for the mask type than the boolean itself.
  
-       * config/rs6000/rs6000-c.c: Add support for built-in functions
-       vector bool char vec_revb (vector bool char);
-       vector bool short vec_revb (vector short char);
-       vector bool int vec_revb (vector bool int);
-       vector bool long long vec_revb (vector bool long long);
-       * doc/extend.texi: Update the built-in documentation file for the
-       new built-in functions.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
  
- 2017-07-12  Andreas Krebbel  <krebbel@linux.vnet.ibm.com>
+       * doc/rtl.texi (MAX_BITSIZE_MODE_ANY_MODE): Describe how the default
+       is calculated and how it can be overridden.
+       * genmodes.c (max_bitsize_mode_any_mode): New variable.
+       (create_modes): Initialize it from MAX_BITSIZE_MODE_ANY_MODE,
+       if defined.
+       (emit_max_int): Use it to set the output MAX_BITSIZE_MODE_ANY_MODE,
+       if nonzero.
  
-       * config/s390/s390.md: Remove movcc splitter.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-12  Andreas Krebbel  <krebbel@linux.vnet.ibm.com>
+       * config/aarch64/aarch64-protos.h (aarch64_output_simd_mov_immediate):
+       Remove the mode argument.
+       (aarch64_simd_valid_immediate): Remove the mode and inverse
+       arguments.
+       * config/aarch64/iterators.md (bitsize): New iterator.
+       * config/aarch64/aarch64-simd.md (*aarch64_simd_mov<mode>, and<mode>3)
+       (ior<mode>3): Update calls to aarch64_output_simd_mov_immediate.
+       * config/aarch64/constraints.md (Do, Db, Dn): Update calls to
+       aarch64_simd_valid_immediate.
+       * config/aarch64/predicates.md (aarch64_reg_or_orr_imm): Likewise.
+       (aarch64_reg_or_bic_imm): Likewise.
+       * config/aarch64/aarch64.c (simd_immediate_info): Replace mvn
+       with an insn_type enum and msl with a modifier_type enum.
+       Replace element_width with a scalar_mode.  Change the shift
+       to unsigned int.  Add constructors for scalar_float_mode and
+       scalar_int_mode elements.
+       (aarch64_vect_float_const_representable_p): Delete.
+       (aarch64_can_const_movi_rtx_p)
+       (aarch64_simd_scalar_immediate_valid_for_move)
+       (aarch64_simd_make_constant): Update call to
+       aarch64_simd_valid_immediate.
+       (aarch64_advsimd_valid_immediate_hs): New function.
+       (aarch64_advsimd_valid_immediate): Likewise.
+       (aarch64_simd_valid_immediate): Remove mode and inverse
+       arguments.  Rewrite to use the above.  Use const_vec_duplicate_p
+       to detect duplicated constants and use aarch64_float_const_zero_rtx_p
+       and aarch64_float_const_representable_p on the result.
+       (aarch64_output_simd_mov_immediate): Remove mode argument.
+       Update call to aarch64_simd_valid_immediate and use of
+       simd_immediate_info.
+       (aarch64_output_scalar_simd_mov_immediate): Update call
+       accordingly.
  
-       * config/s390/s390.c (s390_rtx_costs): Return proper costs for
-       load/store on condition.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
- 2017-07-12  Georg-Johann Lay  <avr@gjlay.de>
+       * machmode.h (mode_precision): Prefix with CONST_MODE_PRECISION.
+       (mode_nunits): Likewise CONST_MODE_NUNITS.
+       * machmode.def (ADJUST_NUNITS): Document.
+       * genmodes.c (mode_data::need_nunits_adj): New field.
+       (blank_mode): Update accordingly.
+       (adj_nunits): New variable.
+       (print_maybe_const_decl): Replace CATEGORY with a NEEDS_ADJ
+       parameter.
+       (emit_mode_size_inline): Set need_bytesize_adj for all modes
+       listed in adj_nunits.
+       (emit_mode_nunits_inline): Set need_nunits_adj for all modes
+       listed in adj_nunits.  Don't emit case statements for such modes.
+       (emit_insn_modes_h): Emit definitions of CONST_MODE_NUNITS
+       and CONST_MODE_PRECISION.  Make CONST_MODE_SIZE expand to
+       nothing if adj_nunits is nonnull.
+       (emit_mode_precision, emit_mode_nunits): Use print_maybe_const_decl.
+       (emit_mode_unit_size, emit_mode_base_align, emit_mode_ibit)
+       (emit_mode_fbit): Update use of print_maybe_const_decl.
+       (emit_move_size): Likewise.  Treat the array as non-const
+       if adj_nunits.
+       (emit_mode_adjustments): Handle adj_nunits.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
+       * machmode.def (VECTOR_MODES_WITH_PREFIX): Document.
+       * genmodes.c (VECTOR_MODES_WITH_PREFIX): New macro.
+       (VECTOR_MODES): Use it.
+       (make_vector_modes): Take the prefix as an argument.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       PR target/81407
-       * config/avr/avr.c (avr_encode_section_info)
-       [progmem && !TREE_READONLY]: Error if progmem object needs
-       constructing.
- 2017-07-11  Michael Collison  <michael.collison@arm.com>
+       * mode-classes.def (MODE_VECTOR_BOOL): New mode class.
+       * machmode.h (INTEGRAL_MODE_P, VECTOR_MODE_P): Return true
+       for MODE_VECTOR_BOOL.
+       * machmode.def (VECTOR_BOOL_MODE): Document.
+       * genmodes.c (VECTOR_BOOL_MODE): New macro.
+       (make_vector_bool_mode): New function.
+       (complete_mode, emit_mode_wider, emit_mode_adjustments): Handle
+       MODE_VECTOR_BOOL.
+       * lto-streamer-in.c (lto_input_mode_table): Likewise.
+       * rtx-vector-builder.c (rtx_vector_builder::find_cached_value):
+       Likewise.
+       * stor-layout.c (int_mode_for_mode): Likewise.
+       * tree.c (build_vector_type_for_mode): Likewise.
+       * varasm.c (output_constant_pool_2): Likewise.
+       * emit-rtl.c (init_emit_once): Make sure that CONST1_RTX (BImode) and
+       CONSTM1_RTX (BImode) are the same thing.  Initialize const_tiny_rtx
+       for MODE_VECTOR_BOOL.
+       * expr.c (expand_expr_real_1): Use VECTOR_MODE_P instead of a list
+       of mode class checks.
+       * tree-vect-generic.c (expand_vector_operation): Use VECTOR_MODE_P
+       instead of a list of mode class checks.
+       (expand_vector_scalar_condition): Likewise.
+       (type_for_widest_vector_mode): Handle BImode as an inner mode.
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * config/aarch64/aarch64-simd.md (aarch64_sub<mode>_compare0):
-       New pattern.
+       * machmode.h (mode_size): Change from unsigned short to
+       poly_uint16_pod.
+       (mode_to_bytes): Return a poly_uint16 rather than an unsigned short.
+       (GET_MODE_SIZE): Return a constant if ONLY_FIXED_SIZE_MODES,
+       or if measurement_type is not polynomial.
+       (fixed_size_mode::includes_p): Check for constant-sized modes.
+       * genmodes.c (emit_mode_size_inline): Make mode_size_inline
+       return a poly_uint16 rather than an unsigned short.
+       (emit_mode_size): Change the type of mode_size from unsigned short
+       to poly_uint16_pod.  Use ZERO_COEFFS for the initializer.
+       (emit_mode_adjustments): Cope with polynomial vector sizes.
+       * lto-streamer-in.c (lto_input_mode_table): Use bp_unpack_poly_value
+       for GET_MODE_SIZE.
+       * lto-streamer-out.c (lto_write_mode_table): Use bp_pack_poly_value
+       for GET_MODE_SIZE.
+       * auto-inc-dec.c (try_merge): Treat GET_MODE_SIZE as polynomial.
+       * builtins.c (expand_ifn_atomic_compare_exchange_into_call): Likewise.
+       * caller-save.c (setup_save_areas): Likewise.
+       (replace_reg_with_saved_mem): Likewise.
+       * calls.c (emit_library_call_value_1): Likewise.
+       * combine-stack-adj.c (combine_stack_adjustments_for_block): Likewise.
+       * combine.c (simplify_set, make_extraction, simplify_shift_const_1)
+       (gen_lowpart_for_combine): Likewise.
+       * convert.c (convert_to_integer_1): Likewise.
+       * cse.c (equiv_constant, cse_insn): Likewise.
+       * cselib.c (autoinc_split, cselib_hash_rtx): Likewise.
+       (cselib_subst_to_values): Likewise.
+       * dce.c (word_dce_process_block): Likewise.
+       * df-problems.c (df_word_lr_mark_ref): Likewise.
+       * dwarf2cfi.c (init_one_dwarf_reg_size): Likewise.
+       * dwarf2out.c (multiple_reg_loc_descriptor, mem_loc_descriptor)
+       (concat_loc_descriptor, concatn_loc_descriptor, loc_descriptor)
+       (rtl_for_decl_location): Likewise.
+       * emit-rtl.c (gen_highpart, widen_memory_access): Likewise.
+       * expmed.c (extract_bit_field_1, extract_integral_bit_field): Likewise.
+       * expr.c (emit_group_load_1, clear_storage_hints): Likewise.
+       (emit_move_complex, emit_move_multi_word, emit_push_insn): Likewise.
+       (expand_expr_real_1): Likewise.
+       * function.c (assign_parm_setup_block_p, assign_parm_setup_block)
+       (pad_below): Likewise.
+       * gimple-fold.c (optimize_atomic_compare_exchange_p): Likewise.
+       * gimple-ssa-store-merging.c (rhs_valid_for_store_merging_p): Likewise.
+       * ira.c (get_subreg_tracking_sizes): Likewise.
+       * ira-build.c (ira_create_allocno_objects): Likewise.
+       * ira-color.c (coalesced_pseudo_reg_slot_compare): Likewise.
+       (ira_sort_regnos_for_alter_reg): Likewise.
+       * ira-costs.c (record_operand_costs): Likewise.
+       * lower-subreg.c (interesting_mode_p, simplify_gen_subreg_concatn)
+       (resolve_simple_move): Likewise.
+       * lra-constraints.c (get_reload_reg, operands_match_p): Likewise.
+       (process_addr_reg, simplify_operand_subreg, curr_insn_transform)
+       (lra_constraints): Likewise.
+       (CONST_POOL_OK_P): Reject variable-sized modes.
+       * lra-spills.c (slot, assign_mem_slot, pseudo_reg_slot_compare)
+       (add_pseudo_to_slot, lra_spill): Likewise.
+       * omp-low.c (omp_clause_aligned_alignment): Likewise.
+       * optabs-query.c (get_best_extraction_insn): Likewise.
+       * optabs-tree.c (expand_vec_cond_expr_p): Likewise.
+       * optabs.c (expand_vec_perm_var, expand_vec_cond_expr): Likewise.
+       (expand_mult_highpart, valid_multiword_target_p): Likewise.
+       * recog.c (offsettable_address_addr_space_p): Likewise.
+       * regcprop.c (maybe_mode_change): Likewise.
+       * reginfo.c (choose_hard_reg_mode, record_subregs_of_mode): Likewise.
+       * regrename.c (build_def_use): Likewise.
+       * regstat.c (dump_reg_info): Likewise.
+       * reload.c (complex_word_subreg_p, push_reload, find_dummy_reload)
+       (find_reloads, find_reloads_subreg_address): Likewise.
+       * reload1.c (eliminate_regs_1): Likewise.
+       * rtlanal.c (for_each_inc_dec_find_inc_dec, rtx_cost): Likewise.
+       * simplify-rtx.c (avoid_constant_pool_reference): Likewise.
+       (simplify_binary_operation_1, simplify_subreg): Likewise.
+       * targhooks.c (default_function_arg_padding): Likewise.
+       (default_hard_regno_nregs, default_class_max_nregs): Likewise.
+       * tree-cfg.c (verify_gimple_assign_binary): Likewise.
+       (verify_gimple_assign_ternary): Likewise.
+       * tree-inline.c (estimate_move_cost): Likewise.
+       * tree-ssa-forwprop.c (simplify_vector_constructor): Likewise.
+       * tree-ssa-loop-ivopts.c (add_autoinc_candidates): Likewise.
+       (get_address_cost_ainc): Likewise.
+       * tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Likewise.
+       (vect_supportable_dr_alignment): Likewise.
+       * tree-vect-loop.c (vect_determine_vectorization_factor): Likewise.
+       (vectorizable_reduction): Likewise.
+       * tree-vect-stmts.c (vectorizable_assignment, vectorizable_shift)
+       (vectorizable_operation, vectorizable_load): Likewise.
+       * tree.c (build_same_sized_truth_vector_type): Likewise.
+       * valtrack.c (cleanup_auto_inc_dec): Likewise.
+       * var-tracking.c (emit_note_insn_var_location): Likewise.
+       * config/arc/arc.h (ASM_OUTPUT_CASE_END): Use as_a <scalar_int_mode>.
+       (ADDR_VEC_ALIGN): Likewise.
  
- 2017-07-11  Carl Love  <cel@us.ibm.com>
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * config/rs6000/rs6000-c.c: Add support for builtins
-       vector unsigned int vec_parity_lsbb (vector signed int);
-       vector unsigned int vec_parity_lsbb (vector unsigned int);
-       vector unsigned __int128 vec_parity_lsbb (vector signed __int128);
-       vector unsigned __int128 vec_parity_lsbb (vector unsigned __int128);
-       vector unsigned long long vec_parity_lsbb (vector signed long long);
-       vector unsigned long long vec_parity_lsbb (vector unsigned long long);
-       * config/rs6000/rs6000-builtin.def (VPARITY_LSBB): Add BU_P9V_OVERLOAD1.
-       * config/rs6000/altivec.h (vec_parity_lsbb): Add define.
-       * doc/extend.texi: Update the built-in documentation file for the
-       new built-in functions.
+       * machmode.h (mode_to_bits): Return a poly_uint16 rather than an
+       unsigned short.
+       (GET_MODE_BITSIZE): Return a constant if ONLY_FIXED_SIZE_MODES,
+       or if measurement_type is polynomial.
+       * calls.c (shift_return_value): Treat GET_MODE_BITSIZE as polynomial.
+       * combine.c (make_extraction): Likewise.
+       * dse.c (find_shift_sequence): Likewise.
+       * dwarf2out.c (mem_loc_descriptor): Likewise.
+       * expmed.c (store_integral_bit_field, extract_bit_field_1): Likewise.
+       (extract_bit_field, extract_low_bits): Likewise.
+       * expr.c (convert_move, convert_modes, emit_move_insn_1): Likewise.
+       (optimize_bitfield_assignment_op, expand_assignment): Likewise.
+       (store_expr_with_bounds, store_field, expand_expr_real_1): Likewise.
+       * fold-const.c (optimize_bit_field_compare, merge_ranges): Likewise.
+       * gimple-fold.c (optimize_atomic_compare_exchange_p): Likewise.
+       * reload.c (find_reloads): Likewise.
+       * reload1.c (alter_reg): Likewise.
+       * stor-layout.c (bitwise_mode_for_mode, compute_record_mode): Likewise.
+       * targhooks.c (default_secondary_memory_needed_mode): Likewise.
+       * tree-if-conv.c (predicate_mem_writes): Likewise.
+       * tree-ssa-strlen.c (handle_builtin_memcmp): Likewise.
+       * tree-vect-patterns.c (adjust_bool_pattern): Likewise.
+       * tree-vect-stmts.c (vectorizable_simd_clone_call): Likewise.
+       * valtrack.c (dead_debug_insert_temp): Likewise.
+       * varasm.c (mergeable_constant_section): Likewise.
+       * config/sh/sh.h (LOCAL_ALIGNMENT): Use as_a <fixed_size_mode>.
  
- 2017-07-11  David Malcolm  <dmalcolm@redhat.com>
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       * diagnostic-show-locus.c: Include "gcc-rich-location.h".
-       (layout::m_primary_loc): New field.
-       (layout::layout): Initialize new field.  Move location filtering
-       logic from here to...
-       (layout::maybe_add_location_range): ...this new method.  Add
-       support for filtering to just the lines already specified by other
-       locations.
-       (layout::will_show_line_p): New method.
-       (gcc_rich_location::add_location_if_nearby): New method.
-       (selftest::test_add_location_if_nearby): New test function.
-       (selftest::diagnostic_show_locus_c_tests): Call it.
-       * gcc-rich-location.h (gcc_rich_location::add_location_if_nearby):
-       New method.
- 2017-07-11  Tom de Vries  <tom@codesourcery.com>
-       * config/nvptx/nvptx.c (WORKAROUND_PTXJIT_BUG): New macro.
-       (bb_first_real_insn): New function.
-       (nvptx_single): Add extra initialization of broadcasted condition
-       variables.
- 2017-07-11  Nathan Sidwell  <nathan@acm.org>
-       * dwarf2out.c (gen_member_die): Remove useless check for anon ctors.
- 2017-07-11  Georg-Johann Lay  <avr@gjlay.de>
-       * doc/extend.texi (AVR Function Attributes): Remove weblink to
-       Binutils doc as TEXI will mess them up.
-       * doc/invoke.texi (AVR Options): Same here.
- 2017-07-11  Daniel Cederman  <cederman@gaisler.com>
-       * config/sparc/sparc.opt (mfix-ut700): New option.
-       (mfix-gr712rc): Likewise.
-       (sparc_fix_b2bst): New variable.
-       * doc/invoke.texi (SPARC options): Document them.
-       (ARM options): Fix warnings.
-       * config/sparc/sparc.c (sparc_do_work_around_errata): Insert NOP
-       instructions to prevent sequences that can trigger the store-store
-       errata for certain LEON3FT processors.
-       (pass_work_around_errata::gate): Also test sparc_fix_b2bst.
-       (sparc_option_override): Set sparc_fix_b2bst appropriately.
-       * config/sparc/sparc.md (fix_b2bst): New attribute.
-       (in_branch_delay): Prevent stores in delay slot if fix_b2bst.
- 2017-07-10  Uros Bizjak  <ubizjak@gmail.com>
-       PR target/81375
-       * config/i386/i386.md (divsf3): Add TARGET_SSE to TARGET_SSE_MATH.
-       (rcpps): Ditto.
-       (*rsqrtsf2_sse): Ditto.
-       (rsqrtsf2): Ditto.
-       (div<mode>3): Macroize insn from divdf3 and divsf3
-       using MODEF mode iterator.
- 2017-07-10  Martin Sebor  <msebor@redhat.com>
-       PR tree-optimization/80397
-       * gimple-ssa-sprintf.c (format_integer): Use INTEGRAL_TYPE_P()
-       instead of testing for equality to INTEGER_TYPE.
- 2017-07-10  Vineet Gupta <vgupta@synopsys.com>
-       * config.gcc: Remove uclibc from arc target spec.
- 2017-07-10  Claudiu Zissulescu  <claziss@synopsys.com>
-       * config/arc/arc.h (ADDITIONAL_REGISTER_NAMES): Define.
- 2017-07-07  Jan Hubicka  <hubicka@ucw.cz>
+       * expr.c (expand_assignment): Cope with polynomial mode sizes
+       when assigning to a CONCAT.
  
-       PR lto/80838
-       * lto-wrapper.c (remove_option): New function.
-       (merge_and_complain): Merge PIC/PIE options more realistically.
- 2017-07-10  Georg-Johann Lay  <avr@gjlay.de>
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
+           Alan Hayward  <alan.hayward@arm.com>
+           David Sherwood  <david.sherwood@arm.com>
  
-       Better ISR prologues by supporting GASes __gcc_isr pseudo insn.
+       * machmode.h (mode_precision): Change from unsigned short to
+       poly_uint16_pod.
+       (mode_to_precision): Return a poly_uint16 rather than an unsigned
+       short.
+       (GET_MODE_PRECISION): Return a constant if ONLY_FIXED_SIZE_MODES,
+       or if measurement_type is not polynomial.
+       (HWI_COMPUTABLE_MODE_P): Turn into a function.  Optimize the case
+       in which the mode is already known to be a scalar_int_mode.
+       * genmodes.c (emit_mode_precision): Change the type of mode_precision
+       from unsigned short to poly_uint16_pod.  Use ZERO_COEFFS for the
+       initializer.
+       * lto-streamer-in.c (lto_input_mode_table): Use bp_unpack_poly_value
+       for GET_MODE_PRECISION.
+       * lto-streamer-out.c (lto_write_mode_table): Use bp_pack_poly_value
+       for GET_MODE_PRECISION.
+       * combine.c (update_rsp_from_reg_equal): Treat GET_MODE_PRECISION
+       as polynomial.
+       (try_combine, find_split_point, combine_simplify_rtx): Likewise.
+       (expand_field_assignment, make_extraction): Likewise.
+       (make_compound_operation_int, record_dead_and_set_regs_1): Likewise.
+       (get_last_value): Likewise.
+       * convert.c (convert_to_integer_1): Likewise.
+       * cse.c (cse_insn): Likewise.
+       * expr.c (expand_expr_real_1): Likewise.
+       * lra-constraints.c (simplify_operand_subreg): Likewise.
+       * optabs-query.c (can_atomic_load_p): Likewise.
+       * optabs.c (expand_atomic_load): Likewise.
+       (expand_atomic_store): Likewise.
+       * ree.c (combine_reaching_defs): Likewise.
+       * rtl.h (partial_subreg_p, paradoxical_subreg_p): Likewise.
+       * rtlanal.c (nonzero_bits1, lsb_bitfield_op_p): Likewise.
+       * tree.h (type_has_mode_precision_p): Likewise.
+       * ubsan.c (instrument_si_overflow): Likewise.
  
-       PR target/20296
-       PR target/81268
-       * configure.ac [target=avr]: Add GAS check for -mgcc-isr.
-       (HAVE_AS_AVR_MGCCISR_OPTION):  If so, AC_DEFINE it.
-       * config.in: Regenerate.
-       * configure: Regenerate.
-       * doc/extend.texi (AVR Function Attributes) <no_gccisr>: Document it.
-       * doc/invoke.texi (AVR Options) <-mgas-isr-prologues>: Document it.
-       * config/avr/avr.opt (-mgas-isr-prologues): New option and...
-       (TARGET_GASISR_PROLOGUES): ...target mask.
-       * common/config/avr/avr-common.c
-       (avr_option_optimization_table) [OPT_LEVELS_1_PLUS_NOT_DEBUG]:
-       Set -mgas-isr-prologues.
-       * config/avr/avr-passes.def (avr_pass_pre_proep): Add
-       INSERT_PASS_BEFORE for it.
-       * config/avr/avr-protos.h (make_avr_pass_pre_proep): New proto.
-       * config/avr/avr.c (avr_option_override)
-       [!HAVE_AS_AVR_MGCCISR_OPTION]: Unset TARGET_GASISR_PROLOGUES.
-       (avr_no_gccisr_function_p, avr_hregs_split_reg): New static functions.
-       (avr_attribute_table) <no_gccisr>: Add new function attribute.
-       (avr_set_current_function) <is_no_gccisr>: Init machine field.
-       (avr_pass_data_pre_proep, avr_pass_pre_proep): New pass data
-       and rtl_opt_pass.
-       (make_avr_pass_pre_proep): New function.
-       (emit_push_sfr) <treg>: Add argument to function and use it
-       instead of TMP_REG.
-       (avr_expand_prologue) [machine->gasisr.maybe]: Emit gasisr insn
-       and set machine->gasisr.yes.
-       (avr_expand_epilogue) [machine->gasisr.yes]: Similar.
-       (avr_asm_function_end_prologue) [machine->gasisr.yes]: Add
-       __gcc_isr.n_pushed to .L__stack_usage.
-       (TARGET_ASM_FINAL_POSTSCAN_INSN): Define to...
-       (avr_asm_final_postscan_insn): ...this new static function.
-       * config/avr/avr.h (machine_function)
-       <is_no_gccisr, use_L__stack_usage>: New fields.
-       <gasisr, gasisr.yes, gasisr.maybe, gasisr.regno>: New fields.
-       * config/avr/avr.md (UNSPECV_GASISR): Add unspecv enum.
-       (GASISR_Prologue, GASISR_Epilogue, GASISR_Done): New define_constants.
-       (gasisr, *gasisr): New expander and insn.
-       * config/avr/gen-avr-mmcu-specs.c (print_mcu)
-       [HAVE_AS_AVR_MGCCISR_OPTION]: Print asm_gccisr spec.
-       * config/avr/specs.h (ASM_SPEC) <asm_gccisr>: Add sub spec.
- 2017-07-10  Richard Earnshaw  <rearnsha@arm.com>
-       * config/arm/parsecpu.awk (gen_comm_data): Do not escape single quotes
-       in quoted strings.
- 2017-07-10  Georg-Johann Lay  <avr@gjlay.de>
-       Move jump-tables out of .text again.
-       PR target/81075
-       * config/avr/avr.c (ASM_OUTPUT_ADDR_VEC_ELT): Remove function.
-       (ASM_OUTPUT_ADDR_VEC): New function.
-       (avr_adjust_insn_length) [JUMP_TABLE_DATA_P]: Return 0.
-       (avr_final_prescan_insn) [avr_log.insn_addresses]: Dump
-       INSN_ADDRESSes as asm comment.
-       * config/avr/avr.h (JUMP_TABLES_IN_TEXT_SECTION): Adjust comment.
-       (ASM_OUTPUT_ADDR_VEC_ELT): Remove define.
-       (ASM_OUTPUT_ADDR_VEC): Define to avr_output_addr_vec.
-       * config/avr/avr.md (*tablejump): Adjust comment.
-       * config/avr/elf.h (ASM_OUTPUT_BEFORE_CASE_LABEL): Remove.
-       * config/avr/avr-log.c (avr_log_set_avr_log) <insn_addresses>:
-       New detail.
-       * config/avr/avr-protos.h (avr_output_addr_vec_elt): Remove proto.
-       (avr_output_addr_vec): New proto.
-       (avr_log_t) <insn_addresses>: New field.
- 2017-07-09  H.J. Lu  <hongjiu.lu@intel.com>
-       PR target/81313
-       * config/i386/i386.c (ix86_function_arg_advance): Set
-       outgoing_args_on_stack to true if there are outgoing arguments
-       on stack.
-       (ix86_function_arg): Likewise.
-       (ix86_get_drap_rtx): Use DRAP only if there are outgoing
-       arguments on stack and ACCUMULATE_OUTGOING_ARGS is false.
-       * config/i386/i386.h (machine_function): Add
-       outgoing_args_on_stack.
- 2017-07-09  Krister Walfridsson  <krister.walfridsson@gmail.com>
-       * config.gcc (*-*-netbsd*): Remove check for NetBSD versions not
-       supporting pthreads.
-       * config/netbsd.h (NETBSD_LIBGCC_SPEC): Always enable pthreads.
- 2017-07-08  Richard Sandiford  <richard.sandiford@linaro.org>
-       * Makefile.in (HOOKS_H, RTL_BASE_H, FUNCTION_H, EXPR_H, REGS_H)
-       (REAL_H): Remove $(MACHMODE_H).
-       (FIXED_VALUE_H, TREE_CORE_H, CFGLOOP_H): Remove $(MACHMODE_H) and
-       double-int.h.
-       (CORETYPES_H): Add signop.h, wide-int.h, wide-int-print.h,
-       $(MACHMODE_H) and double-int.h.
-       (build/min-insn-modes.o): Depend on $(CORETYPES_H) rather than
-       $(MACHMODE_H).
-       (gengtype-state.o, gengtype.o, build/gengtype.o): Don't depend on
-       double-int.h.
- 2017-07-07  Andrew Pinski  <apinski@cavium.com>
-       * config/aarch64/aarch64.c (aarch_macro_fusion_pair_p): Check
-       prev_set and curr_set for AARCH64_FUSE_ALU_BRANCH.
- 2017-07-07  Michael Meissner  <meissner@linux.vnet.ibm.com>
-       * config/rs6000/rs6000.c (rs6000_get_function_versions_dispatcher):
-       Add warning if GCC was not configured to link against a GLIBC that
-       exports the hardware capability bits.
-       (make_resolver_func): Make resolver function private and not a
-       COMDAT function.  Create the name with clone_function_name instead
-       of make_unique_name.
-       PR target/81348
-       * config/rs6000/rs6000.md (HI sign_extend splitter): Use the
-       correct operand in doing the split.
- 2017-07-07  Carl Love  <cel@us.ibm.com>
-       * config/rs6000/rs6000-c: Add support for built-in function
-       vector unsigned short vec_pack_to_short_fp32 (vector float,
-                                                     vector float).
-       * config/rs6000/rs6000-builtin.def (CONVERT_4F32_8I16): Add
-       BU_P9V_AV_2 and BU_P9V_OVERLOAD_2 definitions.
-       * config/rs6000/altivec.h (vec_pack_to_short_fp32): Add define.
-       * config/rs6000/altivec.md(UNSPEC_CONVERT_4F32_8I16): Add UNSPEC.
-       (convert_4f32_8i16): Add define_expand.
-       * doc/extend.texi: Update the built-in documentation file for the
-       new built-in function.
- 2017-07-07  Jose E. Marchesi  <jose.marchesi@oracle.com>
-       * config/sparc/m8.md: New file.
-       * config/sparc/sparc.md: Include m8.md.
- 2017-07-07  Jose E. Marchesi  <jose.marchesi@oracle.com>
-       * config/sparc/sparc.opt: New option -mvis4b.
-       * config/sparc/sparc.c (dump_target_flag_bits): Handle MASK_VIS4B.
-       (sparc_option_override): Handle VIS4B.
-       (enum sparc_builtins): Define
-       SPARC_BUILTIN_DICTUNPACK{8,16,32},
-       SPARC_BUILTIN_FPCMP{LE,GT,EQ,NE}{8,16,32}SHL,
-       SPARC_BUILTIN_FPCMPU{LE,GT}{8,16,32}SHL,
-       SPARC_BUILTIN_FPCMPDE{8,16,32}SHL and
-       SPARC_BUILTIN_FPCMPUR{8,16,32}SHL.
-       (check_constant_argument): New function.
-       (sparc_vis_init_builtins): Define builtins
-       __builtin_vis_dictunpack{8,16,32},
-       __builtin_vis_fpcmp{le,gt,eq,ne}{8,16,32}shl,
-       __builtin_vis_fpcmpu{le,gt}{8,16,32}shl,
-       __builtin_vis_fpcmpde{8,16,32}shl and
-       __builtin_vis_fpcmpur{8,16,32}shl.
-       (sparc_expand_builtin): Check that the constant operands to
-       __builtin_vis_fpcmp*shl and _builtin_vis_dictunpack* are indeed
-       constant and in range.
-       * config/sparc/sparc-c.c (sparc_target_macros): Handle
-       TARGET_VIS4B.
-       * config/sparc/sparc.h (SPARC_IMM2_P): Define.
-       (SPARC_IMM5_P): Likewise.
-       * config/sparc/sparc.md (cpu_feature): Add new feagure "vis4b".
-       (enabled): Handle vis4b.
-       (UNSPEC_DICTUNPACK): New unspec.
-       (UNSPEC_FPCMPSHL): Likewise.
-       (UNSPEC_FPUCMPSHL): Likewise.
-       (UNSPEC_FPCMPDESHL): Likewise.
-       (UNSPEC_FPCMPURSHL): Likewise.
-       (cpu_feature): New CPU feature `vis4b'.
-       (dictunpack{8,16,32}): New insns.
-       (FPCSMODE): New mode iterator.
-       (fpcscond): New code iterator.
-       (fpcsucond): Likewise.
-       (fpcmp{le,gt,eq,ne}{8,16,32}{si,di}shl): New insns.
-       (fpcmpu{le,gt}{8,16,32}{si,di}shl): Likewise.
-       (fpcmpde{8,16,32}{si,di}shl): Likewise.
-       (fpcmpur{8,16,32}{si,di}shl): Likewise.
-       * config/sparc/constraints.md: Define constraints `q' for unsigned
-       2-bit integer constants and `t' for unsigned 5-bit integer
-       constants.
-       * config/sparc/predicates.md (imm5_operand_dictunpack8): New
-       predicate.
-       (imm5_operand_dictunpack16): Likewise.
-       (imm5_operand_dictunpack32): Likewise.
-       (imm2_operand): Likewise.
-       * doc/invoke.texi (SPARC Options): Document -mvis4b.
-       * doc/extend.texi (SPARC VIS Built-in Functions): Document the
-       ditunpack* and fpcmp*shl builtins.
- 2017-07-07  Jose E. Marchesi  <jose.marchesi@oracle.com>
-       * config.gcc: Handle m8 in --with-{cpu,tune} options.
-       * config.in: Add HAVE_AS_SPARC6 define.
-       * config/sparc/driver-sparc.c (cpu_names): Add entry for the SPARC
-       M8.
-       * config/sparc/sol2.h (CPP_CPU64_DEFAULT_SPEC): Define for
-       TARGET_CPU_m8.
-       (ASM_CPU32_DEFAULT_SPEC): Likewise.
-       (CPP_CPU_SPEC): Handle m8.
-       (ASM_CPU_SPEC): Likewise.
-       * config/sparc/sparc-opts.h (enum processor_type): Add
-       PROCESSOR_M8.
-       * config/sparc/sparc.c (m8_costs): New struct.
-       (sparc_option_override): Handle TARGET_CPU_m8.
-       (sparc32_initialize_trampoline): Likewise.
-       (sparc64_initialize_trampoline): Likewise.
-       (sparc_issue_rate): Likewise.
-       (sparc_register_move_cost): Likewise.
-       * config/sparc/sparc.h (TARGET_CPU_m8): Define.
-       (CPP_CPU64_DEFAULT_SPEC): Define for M8.
-       (ASM_CPU64_DEFAULT_SPEC): Likewise.
-       (CPP_CPU_SPEC): Handle M8.
-       (ASM_CPU_SPEC): Likewise.
-       (AS_M8_FLAG): Define.
-       * config/sparc/sparc.md: Add m8 to the cpu attribute.
-       * config/sparc/sparc.opt: New option -mcpu=m8 for sparc targets.
-       * configure.ac (HAVE_AS_SPARC6): Check for assembler support for
-       M8 instructions.
-       * configure: Regenerate.
-       * doc/invoke.texi (SPARC Options): Document -mcpu=m8 and
-       -mtune=m8.
- 2017-07-07  Jose E. Marchesi  <jose.marchesi@oracle.com>
-       * config/sparc/niagara7.md: Rework the DFA scheduler to use insn
-       subtypes.
-       * config/sparc/sparc.md: Remove the `v3pipe' insn attribute.
-       ("*movdi_insn_sp32"): Do not set v3pipe.
-       ("*movsi_insn"): Likewise.
-       ("*movdi_insn_sp64"): Likewise.
-       ("*movsf_insn"): Likewise.
-       ("*movdf_insn_sp32"): Likewise.
-       ("*movdf_insn_sp64"): Likewise.
-       ("*zero_extendsidi2_insn_sp64"): Likewise.
-       ("*sign_extendsidi2_insn"): Likewise.
-       ("*mov<VM32:mode>_insn"): Likewise.
-       ("*mov<VM64:mode>_insn_sp64"): Likewise.
-       ("*mov<VM64:mode>_insn_sp32"): Likewise.
-       ("<plusminus_insn><VADDSUB:mode>3"): Likewise.
-       ("<vlop:code><VL:mode>3"): Likewise.
-       ("*not_<vlop:code><VL:mode>3"): Likewise.
-       ("*nand<VL:mode>_vis"): Likewise.
-       ("*<vlnotop:code>_not1<VL:mode>_vis"): Likewise.
-       ("*<vlnotop:code>_not2<VL:mode>_vis"): Likewise.
-       ("one_cmpl<VL:mode>2"): Likewise.
-       ("faligndata<VM64:mode>_vis"): Likewise.
-       ("alignaddrsi_vis"): Likewise.
-       ("alignaddrdi_vis"): Likweise.
-       ("alignaddrlsi_vis"): Likewise.
-       ("alignaddrldi_vis"): Likewise.
-       ("fcmp<gcond:code><GCM:gcm_name><P:mode>_vis"): Likewise.
-       ("bmaskdi_vis"): Likewise.
-       ("bmasksi_vis"): Likewise.
-       ("bshuffle<VM64:mode>_vis"): Likewise.
-       ("cmask8<P:mode>_vis"): Likewise.
-       ("cmask16<P:mode>_vis"): Likewise.
-       ("cmask32<P:mode>_vis"): Likewise.
-       ("pdistn<P:mode>_vis"): Likewise.
-       ("<vis3_addsub_ss_patname><VASS:mode>3"): Likewise.
- 2017-07-07  Jose E. Marchesi  <jose.marchesi@oracle.com>
-       * config/sparc/sparc.md ("subtype"): New insn attribute.
-       ("*wrgsr_sp64"): Set insn subtype.
-       ("*rdgsr_sp64"): Likewise.
-       ("alignaddrsi_vis"): Likewise.
-       ("alignaddrdi_vis"): Likewise.
-       ("alignaddrlsi_vis"): Likewise.
-       ("alignaddrldi_vis"): Likewise.
-       ("<plusminus_insn><VADDSUB:mode>3"): Likewise.
-       ("fexpand_vis"): Likewise.
-       ("fpmerge_vis"): Likewise.
-       ("faligndata<VM64:mode>_vis"): Likewise.
-       ("bshuffle<VM64:mode>_vis"): Likewise.
-       ("cmask8<P:mode>_vis"): Likewise.
-       ("cmask16<P:mode>_vis"): Likewise.
-       ("cmask32<P:mode>_vis"): Likewise.
-       ("fchksm16_vis"): Likewise.
-       ("v<vis3_shift_patname><GCM:mode>3"): Likewise.
-       ("fmean16_vis"): Likewise.
-       ("fp<plusminus_insn>64_vis"): Likewise.
-       ("<plusminus_insn>v8qi3"): Likewise.
-       ("<vis3_addsub_ss_patname><VASS:mode>3"): Likewise.
-       ("<vis4_minmax_patname><VMMAX:mode>3"): Likewise.
-       ("<vis4_uminmax_patname><VMMAX:mode>3"): Likewise.
-       ("<vis3_addsub_ss_patname>v8qi3"): Likewise.
-       ("<vis4_addsub_us_patname><VAUS:mode>3"): Likewise.
-       ("*movqi_insn"): Likewise.
-       ("*movhi_insn"): Likewise.
-       ("*movsi_insn"): Likewise.
-       ("movsi_pic_gotdata_op"): Likewise.
-       ("*movdi_insn_sp32"): Likewise.
-       ("*movdi_insn_sp64"): Likewise.
-       ("movdi_pic_gotdata_op"): Likewise.
-       ("*movsf_insn"): Likewise.
-       ("*movdf_insn_sp32"): Likewise.
-       ("*movdf_insn_sp64"): Likewise.
-       ("*zero_extendhisi2_insn"): Likewise.
-       ("*zero_extendqihi2_insn"): Likewise.
-       ("*zero_extendqisi2_insn"): Likewise.
-       ("*zero_extendqidi2_insn"): Likewise.
-       ("*zero_extendhidi2_insn"): Likewise.
-       ("*zero_extendsidi2_insn_sp64"): Likewise.
-       ("ldfsr"): Likewise.
-       ("prefetch_64"): Likewise.
-       ("prefetch_32"): Likewise.
-       ("tie_ld32"): Likewise.
-       ("tie_ld64"): Likewise.
-       ("*tldo_ldub_sp32"): Likewise.
-       ("*tldo_ldub1_sp32"): Likewise.
-       ("*tldo_ldub2_sp32"): Likewise.
-       ("*tldo_ldub_sp64"): Likewise.
-       ("*tldo_ldub1_sp64"): Likewise.
-       ("*tldo_ldub2_sp64"): Likewise.
-       ("*tldo_ldub3_sp64"): Likewise.
-       ("*tldo_lduh_sp32"): Likewise.
-       ("*tldo_lduh1_sp32"): Likewise.
-       ("*tldo_lduh_sp64"): Likewise.
-       ("*tldo_lduh1_sp64"): Likewise.
-       ("*tldo_lduh2_sp64"): Likewise.
-       ("*tldo_lduw_sp32"): Likewise.
-       ("*tldo_lduw_sp64"): Likewise.
-       ("*tldo_lduw1_sp64"): Likewise.
-       ("*tldo_ldx_sp64"): Likewise.
-       ("*mov<VM32:mode>_insn"): Likewise.
-       ("*mov<VM64:mode>_insn_sp64"): Likewise.
-       ("*mov<VM64:mode>_insn_sp32"): Likewise.
- 2017-07-07  Jose E. Marchesi  <jose.marchesi@oracle.com>
-       * config/sparc/sparc.md ("type"): New insn type viscmp.
-       ("fcmp<gcond:code><GCM:gcm_name><P:mode>_vis"): Set insn type to
-       viscmp.
-       ("fpcmp<gcond:code>8<P:mode>_vis"): Likewise.
-       ("fucmp<gcond:code>8<P:mode>_vis"): Likewise.
-       ("fpcmpu<gcond:code><GCM:gcm_name><P:mode>_vis"): Likewise.
-       * config/sparc/niagara7.md ("n7_vis_logical_v3pipe"): Handle
-       viscmp.
-       ("n7_vis_logical_11cycle"): Likewise.
-       * config/sparc/niagara4.md ("n4_vis_logical"): Likewise.
-       * config/sparc/niagara2.md ("niag3_vis": Likewise.
-       * config/sparc/niagara.md ("niag_vis"): Likewise.
-       * config/sparc/ultra3.md ("us3_fga"): Likewise.
-       * config/sparc/ultra1_2.md ("us1_fga_double"): Likewise.
- 2017-07-07  Jose E. Marchesi  <jose.marchesi@oracle.com>
-       * config/sparc/sparc.md: New instruction type `bmask'.
-       (bmaskdi_vis): Use the `bmask' type.
-       (bmasksi_vis): Likewise.
-       * config/sparc/ultra3.md (us3_array): Likewise.
-       * config/sparc/niagara7.md (n7_array): Likewise.
-       * config/sparc/niagara4.md (n4_array): Likewise.
-       * config/sparc/niagara2.md (niag2_vis): Likewise.
-       (niag3_vis): Likewise.
-       * config/sparc/niagara.md (niag_vis): Likewise.
- 2017-07-06  Jan Hubicka  <hubicka@ucw.cz>
-       * ipa-comdats.c: Remove optimize check from gate.
-       * ipa-fnsummary.c (ipa_fn_summary_generate): do not generate summary
-       for functions not optimized.
-       (ipa_fn_summary_read): Skip optimize check.
-       (ipa_fn_summary_write): Likewise.
-       * ipa-inline-analysis.c (do_estimate_growth_1): Check that caller
-       is optimized.
-       * ipa-inline.c (can_inline_edge_p): Not optimized functions are
-       uninlinable.
-       (can_inline_edge_p): Check flag_pcc_struct_return for match.
-       (check_callers): Give up on caller which is not optimized.
-       (inline_small_functions): Likewise.
-       (ipa_inline): Do not give up when not optimizing.
-       * ipa-visbility.c (function_and_variable_visibility): Do not optimize
-       away unoptimizes cdtors.
-       (whole_program_function_and_variable_visibility): Do
-       ipa_discover_readonly_nonaddressable_vars in LTO mode.
-       * ipa.c (process_references): Do not check optimize.
-       (symbol_table::remove_unreachable_nodes): Update optimize check.
-       (set_writeonly_bit): Update optimize check.
-       (pass_ipa_cdtor_merge::gate): Do not check optimize.
-       (pass_ipa_single_use::gate): Remove.
- 2017-07-06  Aaron Sawdey  <acsawdey@linux.vnet.ibm.com>
-       * config/rs6000/rs6000.c (union_defs, union_uses, insn_is_load_p,
-       insn_is_store_p, insn_is_swap_p, const_load_sequence_p, v2df_reduction_p,
-       rtx_is_swappable_p, insn_is_swappable_p, chain_contains_only_swaps,
-       mark_swaps_for_removal, swap_const_vector_halves, adjust_subreg_index,
-       permute_load, permute_store, adjust_extract, adjust_splat,
-       adjust_xxpermdi, adjust_concat, adjust_vperm, handle_special_swappables,
-       replace_swap_with_copy, dump_swap_insn_table,
-       alignment_with_canonical_addr, alignment_mask, find_alignment_op,
-       recombine_lvx_pattern, recombine_stvx_pattern,
-       recombine_lvx_stvx_patterns, rs6000_analyze_swaps,
-       make_pass_analyze_swaps): Move all code related to p8 swap optimizations
-       to file rs6000-p8swap.c.
-       * config/rs6000/rs6000-p8swap.c: New file.
-       * config/rs6000/t-rs6000: Add rule to build rs6000-p8swap.o.
-       * config.gcc: Add rs6000-p8swap.o to extra_objs for powerpc*-*-*
-       and rs6000*-*-* targets.
- 2017-07-06  David Malcolm  <dmalcolm@redhat.com>
-       * Makefile.in (selftest): Remove dependency on s-selftest-c++.
- 2017-07-06  Jan Hubicka  <hubicka@ucw.cz>
-       * lto-wrapper.c (merge_and_complain): Do not merge
-       fexceptions, fnon_call_exceptions, ftrapv, ffp_contract_, fmath_errno,
-       fsigned_zeros, ftrapping_math, fwrapv.
-       (append_compiler_options): Do not track these options.
-       (append_linker_options): Likewie
- 2017-07-06  Jan Hubicka  <hubicka@ucw.cz>
-       * cgraphunit.c (cgraph_node::finalize_function): When
-       !flag_toplevel_reorde set no_reorder flag.
-       (varpool_node::finalize_decl): Likewise.
-       (symbol_table::compile): Drop no toplevel reorder path.
- 2017-07-06  Jan Hubicka  <hubicka@ucw.cz>
-       * bb-reorder.c (better_edge_p): Do not build traces across abnormal/eh
-       edges; zero probability is not better than uninitialized.
- 2017-07-06  Maxim Ostapenko  <m.ostapenko@samsung.com>
-       * asan.h (asan_sanitize_allocas_p): Declare.
-       * asan.c (asan_sanitize_allocas_p): New function.
-       (handle_builtin_stack_restore): Bail out if !asan_sanitize_allocas_p.
-       (handle_builtin_alloca): Likewise.
-       * cfgexpand.c (expand_used_vars): Do not add allocas unpoisoning stuff
-       if !asan_sanitize_allocas_p.
-       * params.def (asan-instrument-allocas): Add new option.
-       * params.h (ASAN_PROTECT_ALLOCAS): Define.
-       * opts.c (common_handle_option): Disable allocas sanitization for
-       KASan by default.
- 2017-07-06  Maxim Ostapenko  <m.ostapenko@samsung.com>
-       * asan.c: Include gimple-fold.h.
-       (get_last_alloca_addr): New function.
-       (handle_builtin_stackrestore): Likewise.
-       (handle_builtin_alloca): Likewise.
-       (asan_emit_allocas_unpoison): Likewise.
-       (get_mem_refs_of_builtin_call): Add new parameter, remove const
-       quallifier from first paramerer. Handle BUILT_IN_ALLOCA,
-       BUILT_IN_ALLOCA_WITH_ALIGN and BUILT_IN_STACK_RESTORE builtins.
-       (instrument_builtin_call): Pass gimple iterator to
-       get_mem_refs_of_builtin_call.
-       (last_alloca_addr): New global.
-       * asan.h (asan_emit_allocas_unpoison): Declare.
-       * builtins.c (expand_asan_emit_allocas_unpoison): New function.
-       (expand_builtin): Handle BUILT_IN_ASAN_ALLOCAS_UNPOISON.
-       * cfgexpand.c (expand_used_vars): Call asan_emit_allocas_unpoison
-       if function calls alloca.
-       * gimple-fold.c (replace_call_with_value): Remove static keyword.
-       * gimple-fold.h (replace_call_with_value): Declare.
-       * internal-fn.c: Include asan.h.
-       * sanitizer.def (BUILT_IN_ASAN_ALLOCA_POISON,
-       BUILT_IN_ASAN_ALLOCAS_UNPOISON): New builtins.
- 2017-07-06  David Malcolm  <dmalcolm@redhat.com>
-       * Makefile.in (SELFTEST_FLAGS): Drop "-x c", moving it to...
-       (C_SELFTEST_FLAGS): New.
-       (CPP_SELFTEST_FLAGS): New.
-       (SELFTEST_DEPS): New, from deps of s-selftest.
-       (C_SELFTEST_DEPS): New, from deps of s-selftest.
-       (CPP_SELFTEST_DEPS): New.
-       (selftest): Add dependency on s-selftest-c++.
-       (s-selftest): Rename to...
-       (s-selftest-c): ...this, moving deps to SELFTEST_DEPS
-       and C_SELFTEST_DEPS, and using C_SELFTEST_FLAGS rather
-       than SELFTEST_FLAGS.
-       (selftest-gdb): Rename to...
-       (selftest-c-gdb): ...this, using C_SELFTEST_DEPS and
-       C_SELFTEST_FLAGS.
-       (selftest-gdb): Reintroduce as an alias for selftest-c-gdb.
-       (selftest-valgrind): Rename to...
-       (selftest-c-valgrind): ...this, using C_SELFTEST_DEPS and
-       C_SELFTEST_FLAGS.
-       (selftest-valgrind): Reintroduce as an alias for
-       selftest-c-valgrind.
-       (s-selftest-c++): New.
-       (selftest-c++-gdb): New.
-       (selftest-c++-valgrind): New.
- 2017-07-06  Olivier Hainque  <hainque@adacore.com>
-       * gcc.c (process_command): When deciding if undefined variables
-       should be ignored when processing specs, accept "gcc -v" as well.
- 2017-07-06  Jan Hubicka  <hubicka@ucw.cz>
-       * auto-profile.c (afdo_set_bb_count, afdo_propagate_edge,
-       afdo_annotate_cfg): Set counts/probabilities as determined by afdo.
- 2017-07-06  Thomas Preud'homme  <thomas.preudhomme@arm.com>
-       * config/arm/arm-cpus.in (armv8-r): Add new entry.
-       * config/arm/arm-isa.h (ISA_ARMv8r): Define macro.
-       * config/arm/arm-tables.opt: Regenerate.
-       * config/arm/arm.h (enum base_architecture): Add BASE_ARCH_8R
-       enumerator.
-       * doc/invoke.texi: Mention -march=armv8-r and its extensions.
- 2017-07-06  Carl Love  <cel@us.ibm.com>
-       * ChangeLog: Clean up from mid air collision
- 2017-07-06  Carl Love  <cel@us.ibm.com>
-       * config/rs6000/rs6000-c.c: Add support for built-in functions
-       vector signed int vec_subc (vector signed int, vector signed int);
-       vector signed __int128 vec_subc (vector signed __int128,
-                                        vector signed __int128);
-       vector unsigned __int128 vec_subc (vector unsigned __int128,
-                                          vector unsigned __int128);
-       vector signed int vec_sube (vector signed int, vector signed int,
-                                   vector signed int);
-       vector unsigned int vec_sube (vector unsigned int,
-                                     vector unsigned int,
-                                     vector unsigned int);
-       vector signed __int128 vec_sube (vector signed __int128,
-                                        vector signed __int128,
-                                        vector signed__int128);
-       vector unsigned __int128 vec_sube (vector unsigned __int128,
-                                          vector unsigned __int128,
-                                          vector unsigned __int128);
-       vector signed int vec_subec (vector signed int, vector signed int,
-                                    vector signed int);
-       vector unsigned int vec_subec (vector unsigned int,
-                                      vector unsigned int,
-                                      vector unsigned int);
-       vector signed __int128 vec_subec (vector signed __int128,
-                                         vector signed __int128,
-                                         vector signed__int128);
-       vector unsigned __int128 vec_subec (vector unsigned __int128,
-                                           vector unsigned __int128,
-                                           vector unsigned __int128);
-       * config/rs6000/rs6000.c (ALTIVEC_BUILTIN_VEC_SUBE,
-       ALTIVEC_BUILTIN_VEC_SUBEC): Add ef_builtins.
-       * config/rs6000/rs6000-builtin.def (SUBE, SUBEC): Add
-       BU_ALTIVEC_OVERLOAD_X definitions.
-       * config/rs6000/altivec.h (vec_sube, vec_subec): Add builtin defines.
-       * doc/extend.texi: Update the built-in documentation file for the new
-       built-in functions.
- 2017-07-06  David Malcolm  <dmalcolm@redhat.com>
-       PR c++/79300
-       * diagnostic-show-locus.c (layout::layout): Use start and finish
-       spelling location for the start and finish of each range.
-       * genmatch.c (linemap_client_expand_location_to_spelling_point):
-       Add unused aspect param.
-       * input.c (expand_location_1): Add "aspect" param, and use it
-       to access the correct part of the location.
-       (expand_location): Pass LOCATION_ASPECT_CARET to new param of
-       expand_location_1.
-       (expand_location_to_spelling_point): Likewise.
-       (linemap_client_expand_location_to_spelling_point): Add "aspect"
-       param, and pass it to expand_location_1.
- 2017-07-06  Sebastian Peryt  <sebastian.peryt@intel.com>
-       * config/i386/avx512fintrin.h (_mm_mask_getexp_round_ss,
-       _mm_maskz_getexp_round_ss, _mm_mask_getexp_round_sd,
-       _mm_maskz_getexp_round_sd, _mm_mask_getmant_round_sd,
-       _mm_maskz_getmant_round_sd, _mm_mask_getmant_round_ss,
-       _mm_maskz_getmant_round_ss, _mm_mask_getexp_ss, _mm_maskz_getexp_ss,
-       _mm_mask_getexp_sd, _mm_maskz_getexp_sd, _mm_mask_getmant_sd,
-       _mm_maskz_getmant_sd, _mm_mask_getmant_ss,
-       _mm_maskz_getmant_ss): New intrinsics.
-       (__builtin_ia32_getexpss128_mask): Changed to ...
-       __builtin_ia32_getexpss128_round ... this.
-       (__builtin_ia32_getexpsd128_mask): Changed to ...
-       __builtin_ia32_getexpsd128_round ... this.
-       * config/i386/i386-builtin-types.def
-       ((V2DF, V2DF, V2DF, INT, V2DF, UQI, INT),
-       (V4SF, V4SF, V4SF, INT, V4SF, UQI, INT)): New function type aliases.
-       * config/i386/i386-builtin.def (__builtin_ia32_getexpsd_mask_round,
-       __builtin_ia32_getexpss_mask_round,     __builtin_ia32_getmantsd_mask_round,
-       __builtin_ia32_getmantss_mask_round): New builtins.
-       * config/i386/i386.c (V2DF_FTYPE_V2DF_V2DF_INT_V2DF_UQI_INT,
-       V4SF_FTYPE_V4SF_V4SF_INT_V4SF_UQI_INT): Handle new types.
-       (CODE_FOR_avx512f_vgetmantv2df_mask_round,
-       CODE_FOR_avx512f_vgetmantv4sf_mask_round): New cases.
-       * config/i386/sse.md
-       (avx512f_sgetexp<mode><round_saeonly_name>): Changed to ...
-       avx512f_sgetexp<mode><mask_scalar_name>
-       <round_saeonly_scalar_name> ... this.
-       (vgetexp<ssescalarmodesuffix>\t{<round_saeonly_op3>%2, %1, %0|
-       %0, %1, %2<round_saeonly_op3>}): Changed to ...
-       vgetexp<ssescalarmodesuffix>
-       \t{<round_saeonly_scalar_mask_op3>%2, %1, %0<mask_scalar_operand3>|
-       %0<mask_scalar_operand3>, %1, %2<round_saeonly_scalar_mask_op3>} ... this.
-       (avx512f_vgetmant<mode><round_saeonly_name>): Changed to ...
-       avx512f_vgetmant<mode><mask_scalar_name>
-       <round_saeonly_scalar_name> ... this.
-       (vgetmant<ssescalarmodesuffix>\t{%3, <round_saeonly_op4>%2, %1, %0|
-       %0, %1, %2<round_saeonly_op4>, %3}): Changed to ...
-       vgetmant<ssescalarmodesuffix>
-       \t{%3, <round_saeonly_scalar_mask_op4>%2, %1, %0<mask_scalar_operand4>|
-       %0<mask_scalar_operand4>, %1, %2
-       <round_saeonly_scalar_mask_op4>, %3} ... this.
-       * config/i386/subst.md (mask_scalar_operand4,
-       round_saeonly_scalar_mask_operand4,     round_saeonly_scalar_mask_op4,
-       round_saeonly_scalar_nimm_predicate): New subst attributes.
- 2017-07-06  Julia Koval  <julia.koval@intel.com>
-       * config/i386/i386.c (ix86_erase_embedded_rounding):
-       Remove code for old rounding pattern.
- 2017-07-06  Richard Earnshaw  <rearnsha@arm.com>
-       * config/arm/t-arm (GTM_H): Add arm-cpu.h.
- 2017-07-06  Christophe Lyon  <christophe.lyon@linaro.org>
-       * doc/sourcebuild.texi (Test Directives, Variants of
-       dg-require-support): Add documentation for dg-require-stack-check.
- 2017-07-05  Sebastian Peryt  <sebastian.peryt@intel.com>
-       * config/i386/subst.md (mask_scalar, round_scalar,
-       round_saeonly_scalar): New meta-templates.
-       (mask_scalar_name, mask_scalar_operand3, round_scalar_name,
-       round_scalar_mask_operand3, round_scalar_mask_op3,
-       round_scalar_constraint, round_scalar_prefix, round_saeonly_scalar_name,
-       round_saeonly_scalar_mask_operand3, round_saeonly_scalar_mask_op3,
-       round_saeonly_scalar_constraint,
-       round_saeonly_scalar_prefix): New subst attribute.
-       * config/i386/sse.md
-       (<sse>_vm<plusminus_insn><mode>3<mask_name><round_name>): Renamed to ...
-       <sse>_vm<plusminus_insn><mode>3<mask_scalar_name>
-       <round_scalar_name> ... this.
-       (<sse>_vm<multdiv_mnemonic><mode>3<mask_name><round_name>): Renamed to ...
-       <sse>_vm<multdiv_mnemonic><mode>3<mask_scalar_name>
-       <round_scalar_name> ... this.
-       (<sse>_vm<code><mode>3<mask_name><round_saeonly_name>): Renamed to ...
-       <sse>_vm<code><mode>3<mask_scalar_name>
-       <round_saeonly_scalar_name> ... this.
-       (v<plusminus_mnemonic><ssescalarmodesuffix>
-       \t{<round_mask_op3>%2, %1, %0<mask_operand3>|
-       %0<mask_operand3>, %1, %<iptr>2<round_mask_op3>}): Changed to ...
-       v<plusminus_mnemonic><ssescalarmodesuffix>
-       \t{<round_scalar_mask_op3>%2, %1, %0<mask_scalar_operand3>|
-       %0<mask_scalar_operand3>, %1, %<iptr>2<round_scalar_mask_op3>} ... this.
-       (v<multdiv_mnemonic><ssescalarmodesuffix>
-       \t{<round_mask_op3>%2, %1, %0<mask_operand3>|
-       %0<mask_operand3>, %1, %<iptr>2<round_mask_op3>}): Changed to ...
-       v<multdiv_mnemonic><ssescalarmodesuffix>
-       \t{<round_scalar_mask_op3>%2, %1, %0<mask_scalar_operand3>|
-       %0<mask_scalar_operand3>, %1, %<iptr>2<round_scalar_mask_op3>} ... this.
-       (v<maxmin_float><ssescalarmodesuffix>
-       \t{<round_saeonly_mask_op3>%2, %1, %0<mask_operand3>|
-       %0<mask_operand3>, %1, %<iptr>2<round_saeonly_mask_op3>}): Changed to ...
-       v<maxmin_float><ssescalarmodesuffix>
-       \t{<round_saeonly_scalar_mask_op3>%2, %1, %0<mask_scalar_operand3>|
-       %0<mask_scalar_operand3>, %1, %<iptr>2
-       <round_saeonly_scalar_mask_op3>} ... this.
- 2017-07-05  Richard Earnshaw  <rearnsha@arm.com>
-       * config/arm/arm.c (arm_fixed_condition_code_regs): New function.
-       (TARGET_FIXED_CONDITION_CODE_REGS): Redefine.
- 2017-07-05  Richard Sandiford  <richard.sandiford@linaro.org>
+ 2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
            Alan Hayward  <alan.hayward@arm.com>
            David Sherwood  <david.sherwood@arm.com>
  
diff --cc gcc/Makefile.in
Simple merge
diff --cc gcc/builtins.c
index c2defaabe85703b2c43e0d677e80dda7d723eca1,487d9d58db2354958420e23308b5d5c3fd03da35..62754ac8bfff79cf435f95698fa8a2a5b1848918
@@@ -3078,20 -3107,18 +3137,18 @@@ check_access (tree exp, tree, tree, tre
        return true;
  
        /* Otherwise, when the length of the source sequence is known
-        (as with with strlen), set SIZE to it.  */
+        (as with strlen), set DSTWRITE to it.  */
        if (!range[0])
-       size = slen;
+       dstwrite = slen;
      }
  
-   if (!objsize)
-     objsize = maxobjsize;
+   if (!dstsize)
+     dstsize = maxobjsize;
  
-   /* The SIZE is exact if it's non-null, constant, and in range of
-      unsigned HOST_WIDE_INT.  */
-   bool exactsize = size && tree_fits_uhwi_p (size);
+   if (dstwrite)
 -    get_size_range (dstwrite, range);
++    get_size_range (dstwrite, range, /*range_starts_at=*/0);
  
-   if (size)
-     get_size_range (size, range, /*range_starts_at=*/0);
+   tree func = get_callee_fndecl (exp);
  
    /* First check the number of bytes to be written against the maximum
       object size.  */
  compute_objsize (tree dest, int ostype)
  {
    unsigned HOST_WIDE_INT size;
-   if (compute_builtin_object_size (dest, ostype & 3, &size))
+   /* Only the two least significant bits are meaningful.  */
+   ostype &= 3;
+   if (compute_builtin_object_size (dest, ostype, &size))
      return build_int_cst (sizetype, size);
  
 -            enum value_range_type rng = get_range_info (off, &min, &max);
 -
 -            if (rng == VR_RANGE)
+   if (TREE_CODE (dest) == SSA_NAME)
+     {
+       gimple *stmt = SSA_NAME_DEF_STMT (dest);
+       if (!is_gimple_assign (stmt))
+       return NULL_TREE;
+       dest = gimple_assign_rhs1 (stmt);
+       tree_code code = gimple_assign_rhs_code (stmt);
+       if (code == POINTER_PLUS_EXPR)
+       {
+         /* compute_builtin_object_size fails for addresses with
+            non-constant offsets.  Try to determine the range of
+            such an offset here and use it to adjus the constant
+            size.  */
+         tree off = gimple_assign_rhs2 (stmt);
+         if (TREE_CODE (off) == SSA_NAME
+             && INTEGRAL_TYPE_P (TREE_TYPE (off)))
+           {
+             wide_int min, max;
++            if (get_range_info (off, &min, &max))
+               {
+                 if (tree size = compute_objsize (dest, ostype))
+                   {
+                     wide_int wisiz = wi::to_wide (size);
+                     /* Ignore negative offsets for now.  For others,
+                        use the lower bound as the most optimistic
+                        estimate of the (remaining)size.  */
+                     if (wi::sign_mask (min))
+                       ;
+                     else if (wi::ltu_p (min, wisiz))
+                       return wide_int_to_tree (TREE_TYPE (size),
+                                                wi::sub (wisiz, min));
+                     else
+                       return size_zero_node;
+                   }
+               }
+           }
+       }
+       else if (code != ADDR_EXPR)
+       return NULL_TREE;
+     }
+   /* Unless computing the largest size (for memcpy and other raw memory
+      functions), try to determine the size of the object from its type.  */
+   if (!ostype)
+     return NULL_TREE;
+   if (TREE_CODE (dest) != ADDR_EXPR)
+     return NULL_TREE;
+   tree type = TREE_TYPE (dest);
+   if (TREE_CODE (type) == POINTER_TYPE)
+     type = TREE_TYPE (type);
+   type = TYPE_MAIN_VARIANT (type);
+   if (TREE_CODE (type) == ARRAY_TYPE
+       && !array_at_struct_end_p (dest))
+     {
+       /* Return the constant size unless it's zero (that's a zero-length
+        array likely at the end of a struct).  */
+       tree size = TYPE_SIZE_UNIT (type);
+       if (size && TREE_CODE (size) == INTEGER_CST
+         && !integer_zerop (size))
+       return size;
+     }
    return NULL_TREE;
  }
  
diff --cc gcc/calls.c
index 6acf2e257b904624c6e58b481eead088f001fee9,4dcfef77a5a53cfec20b7cbfd355b9fac0839866..3cdc8a8199e0dc40d806bb564860ee564765a55b
@@@ -1265,15 -1293,17 +1294,19 @@@ alloc_max_size (void
  }
  
  /* Return true when EXP's range can be determined and set RANGE[] to it
-    after adjusting it if necessary to make EXP a valid size argument to
-    an allocation function declared with attribute alloc_size (whose
-    argument may be signed), or to a string manipulation function like
-    memset.
+    after adjusting it if necessary to make EXP a represents a valid size
+    of object, or a valid size argument to an allocation function declared
+    with attribute alloc_size (whose argument may be signed), or to a string
+    manipulation function like memset.  When ALLOW_ZERO is true, allow
+    returning a range of [0, 0] for a size in an anti-range [1, N] where
+    N > PTRDIFF_MAX.  A zero range is a (nearly) invalid argument to
+    allocation functions like malloc but it is a valid argument to
 -   functions like memset.  */
++   functions like memset.
 +
 +   RANGE_STARTS_AT is the number where the range will start from.  */
  
  bool
- get_size_range (tree exp, tree range[2], unsigned range_starts_at)
 -get_size_range (tree exp, tree range[2], bool allow_zero /* = false */)
++get_size_range (tree exp, tree range[2], unsigned range_starts_at /* = 1*/)
  {
    if (tree_fits_uhwi_p (exp))
      {
        return true;
      }
  
 -
+   tree exptype = TREE_TYPE (exp);
+   bool integral = INTEGRAL_TYPE_P (exptype);
    wide_int min, max;
 -  enum value_range_type range_type;
 -
 -  if (TREE_CODE (exp) == SSA_NAME && integral)
 -    range_type = get_range_info (exp, &min, &max);
 -  else
 -    range_type = VR_VARYING;
 -
 -  if (range_type == VR_VARYING)
 +  if (TREE_CODE (exp) != SSA_NAME
-       || !INTEGRAL_TYPE_P (TREE_TYPE (exp))
++      || !integral
 +      || !get_range_info (exp, &min, &max))
      {
-       /* No range information available.  */
+       if (integral)
+       {
+         /* Use the full range of the type of the expression when
+            no value range information is available.  */
+         range[0] = TYPE_MIN_VALUE (exptype);
+         range[1] = TYPE_MAX_VALUE (exptype);
+         return true;
+       }
        range[0] = NULL_TREE;
        range[1] = NULL_TREE;
        return false;
      }
-   irange ir (exp);
-   tree exptype = TREE_TYPE (exp);
  
 -  unsigned expprec = TYPE_PRECISION (exptype);
 -
 -  bool signed_p = !TYPE_UNSIGNED (exptype);
 -
 -  if (range_type == VR_ANTI_RANGE)
 +  /* Remove negative numbers from the range.  */
-   irange positives;
++  irange positives, ir (exp);
 +  range_positives (&positives, exptype, range_starts_at);
 +  if (!positives.intersect (ir).empty_p ())
      {
 -      if (signed_p)
 -      {
 -        if (wi::les_p (max, 0))
 -          {
 -            /* EXP is not in a strictly negative range.  That means
 -               it must be in some (not necessarily strictly) positive
 -               range which includes zero.  Since in signed to unsigned
 -               conversions negative values end up converted to large
 -               positive values, and otherwise they are not valid sizes,
 -               the resulting range is in both cases [0, TYPE_MAX].  */
 -            min = wi::zero (expprec);
 -            max = wi::to_wide (TYPE_MAX_VALUE (exptype));
 -          }
 -        else if (wi::les_p (min - 1, 0))
 -          {
 -            /* EXP is not in a negative-positive range.  That means EXP
 -               is either negative, or greater than max.  Since negative
 -               sizes are invalid make the range [MAX + 1, TYPE_MAX].  */
 -            min = max + 1;
 -            max = wi::to_wide (TYPE_MAX_VALUE (exptype));
 -          }
 -        else
 -          {
 -            max = min - 1;
 -            min = wi::zero (expprec);
 -          }
 -      }
 -      else if (wi::eq_p (0, min - 1))
 -      {
 -        /* EXP is unsigned and not in the range [1, MAX].  That means
 -           it's either zero or greater than MAX.  Even though 0 would
 -           normally be detected by -Walloc-zero, unless ALLOW_ZERO
 -           is true, set the range to [MAX, TYPE_MAX] so that when MAX
 -           is greater than the limit the whole range is diagnosed.  */
 -        if (allow_zero)
 -          min = max = wi::zero (expprec);
 -        else
 -          {
 -            min = max + 1;
 -            max = wi::to_wide (TYPE_MAX_VALUE (exptype));
 -          }
 -      }
 -      else
 -      {
 -        max = min - 1;
 -        min = wi::zero (expprec);
 -      }
 +      /* Remove the unknown parts of a multi-range.
 +       This will transform [5,10][20,MAX] into [5,10].  */
 +      if (positives.num_pairs () > 1
-         && positives.upper_bound () == wide_int (TYPE_MAX_VALUE (exptype)))
++        && positives.upper_bound () == wide_int (wi::to_wide
++                                                 (TYPE_MAX_VALUE (exptype))))
 +      positives.remove_pair (positives.num_pairs () - 1);
 +
 +      range[0] = wide_int_to_tree (exptype, positives.lower_bound ());
 +      range[1] = wide_int_to_tree (exptype, positives.upper_bound ());
 +    }
 +  else
 +    {
 +      /* If removing the negative numbers didn't give us anything
 +       back, the entire range was negative. Leave things as they
 +       were, and let the caller sort it out.  */
 +      range[0] = wide_int_to_tree (exptype, min);
 +      range[1] = wide_int_to_tree (exptype, max);
      }
 -
 -  range[0] = wide_int_to_tree (exptype, min);
 -  range[1] = wide_int_to_tree (exptype, max);
 -
    return true;
  }
  
diff --cc gcc/calls.h
index 2b204c7cb31f5d3fcfd57465437e8308529f27e5,302ccb5328ddb3e29c590f1b0653987763350261..db960d4e2148505834a3d6b89ee32bb3685dd063
@@@ -38,6 -38,9 +38,9 @@@ extern bool pass_by_reference (CUMULATI
  extern bool reference_callee_copied (CUMULATIVE_ARGS *, machine_mode,
                                     tree, bool);
  extern void maybe_warn_alloc_args_overflow (tree, tree, tree[2], int[2]);
 -extern bool get_size_range (tree, tree[2], bool = false);
+ extern tree get_attr_nonstring_decl (tree, tree * = NULL);
+ extern void maybe_warn_nonstring_arg (tree, tree);
 +extern bool get_size_range (tree, tree[2], unsigned range_starts_at = 1);
+ extern rtx rtx_for_static_chain (const_tree, bool);
  
  #endif // GCC_CALLS_H
Simple merge
Simple merge
Simple merge
diff --cc gcc/gengtype.c
Simple merge
index f1c9bc6f6222963c67a858c047cf68457d0b4fba,7771988b83782d0bdcf64d40fe3a68a9678591d5..521bd75fcd8a7c6e0c343234c4e34d6f46f38b83
@@@ -635,18 -642,35 +642,17 @@@ var_decl_component_p (tree var
  static bool
  size_must_be_zero_p (tree size)
  {
 -  if (integer_zerop (size))
 -    return true;
 -
 -  if (TREE_CODE (size) != SSA_NAME)
 -    return false;
 -
 -  wide_int min, max;
 -  enum value_range_type rtype = get_range_info (size, &min, &max);
 -  if (rtype != VR_ANTI_RANGE)
 -    return false;
 -
 -  tree type = TREE_TYPE (size);
 -  int prec = TYPE_PRECISION (type);
 -
 -  wide_int wone = wi::one (prec);
 -
 -  /* Compute the value of SSIZE_MAX, the largest positive value that
 -     can be stored in ssize_t, the signed counterpart of size_t.  */
 -  wide_int ssize_max = wi::lshift (wi::one (prec), prec - 1) - 1;
 -
 -  return wi::eq_p (min, wone) && wi::geu_p (max, ssize_max);
 +  return (integer_zerop (size)
 +        || (TREE_CODE (size) == SSA_NAME
 +            && irange (size).zero_p ()));
  }
  
- /* Fold function call to builtin mem{{,p}cpy,move}.  Return
-    false if no simplification can be made.
-    If ENDP is 0, return DEST (like memcpy).
-    If ENDP is 1, return DEST+LEN (like mempcpy).
-    If ENDP is 2, return DEST+LEN-1 (like stpcpy).
-    If ENDP is 3, return DEST, additionally *SRC and *DEST may overlap
-    (memmove).   */
+ /* Fold function call to builtin mem{{,p}cpy,move}.  Try to detect and
+    diagnose (otherwise undefined) overlapping copies without preventing
+    folding.  When folded, GCC guarantees that overlapping memcpy has
+    the same semantics as memmove.  Call to the library memcpy need not
+    provide the same guarantee.  Return false if no simplification can
+    be made.  */
  
  static bool
  gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi,
Simple merge
index df506bb8347e87e35fedc0de7eb07ace43aa856e,327c806ae11b877e18c0d45456b4f538a3492bc9..0c712bc8a0c0c42022c20b4c7abff1fa26bae436
@@@ -291,25 -295,78 +293,28 @@@ alloca_call_type (gimple *stmt, bool is
    // Check for the obviously bounded case.
    if (TREE_CODE (len) == INTEGER_CST)
      {
-       if (tree_to_uhwi (len) > max_user_size)
-       return alloca_type_and_limit (ALLOCA_BOUND_DEFINITELY_LARGE, len);
+       if (tree_to_uhwi (len) > max_size)
+       return alloca_type_and_limit (ALLOCA_BOUND_DEFINITELY_LARGE,
+                                     wi::to_wide (len));
        if (integer_zerop (len))
        return alloca_type_and_limit (ALLOCA_ARG_IS_ZERO);
-       ret = alloca_type_and_limit (ALLOCA_OK);
+       return alloca_type_and_limit (ALLOCA_OK);
      }
    // Check the range info if available.
-   else if (TREE_CODE (len) == SSA_NAME && get_range_info (len, &min, &max))
 -  if (TREE_CODE (len) == SSA_NAME)
++  if (TREE_CODE (len) == SSA_NAME && get_range_info (len, &min, &max))
      {
 -      value_range_type range_type = get_range_info (len, &min, &max);
 -      if (range_type == VR_RANGE)
 +      irange r (len);
-       if (wi::leu_p (max, max_user_size))
-       ret = alloca_type_and_limit (ALLOCA_OK);
++      if (wi::leu_p (max, max_size))
++      return alloca_type_and_limit (ALLOCA_OK);
 +      else if (is_max (TREE_TYPE (len), max)
 +             && !r.range_for_type_p ()
 +             && cast_from_signed_p (len, invalid_casted_type))
        {
 -        if (wi::leu_p (max, max_size))
 -          return alloca_type_and_limit (ALLOCA_OK);
 -        else
 -          {
 -            // A cast may have created a range we don't care
 -            // about.  For instance, a cast from 16-bit to
 -            // 32-bit creates a range of 0..65535, even if there
 -            // is not really a determinable range in the
 -            // underlying code.  In this case, look through the
 -            // cast at the original argument, and fall through
 -            // to look at other alternatives.
 -            //
 -            // We only look at through the cast when its from
 -            // unsigned to unsigned, otherwise we may risk
 -            // looking at SIGNED_INT < N, which is clearly not
 -            // what we want.  In this case, we'd be interested
 -            // in a VR_RANGE of [0..N].
 -            //
 -            // Note: None of this is perfect, and should all go
 -            // away with better range information.  But it gets
 -            // most of the cases.
 -            gimple *def = SSA_NAME_DEF_STMT (len);
 -            if (gimple_assign_cast_p (def))
 -              {
 -                tree rhs1 = gimple_assign_rhs1 (def);
 -                tree rhs1type = TREE_TYPE (rhs1);
 -
 -                // Bail if the argument type is not valid.
 -                if (!INTEGRAL_TYPE_P (rhs1type))
 -                  return alloca_type_and_limit (ALLOCA_OK);
 -
 -                if (TYPE_UNSIGNED (rhs1type))
 -                  {
 -                    len_casted = rhs1;
 -                    range_type = get_range_info (len_casted, &min, &max);
 -                  }
 -              }
 -            // An unknown range or a range of the entire domain is
 -            // really no range at all.
 -            if (range_type == VR_VARYING
 -                || (!len_casted && is_max (len, max))
 -                || (len_casted && is_max (len_casted, max)))
 -              {
 -                // Fall through.
 -              }
 -            else if (range_type == VR_ANTI_RANGE)
 -              return alloca_type_and_limit (ALLOCA_UNBOUNDED);
 -            else if (range_type != VR_VARYING)
 -              return alloca_type_and_limit (ALLOCA_BOUND_MAYBE_LARGE, max);
 -          }
 -      }
 -      else if (range_type == VR_ANTI_RANGE)
 -      {
 -        // There may be some wrapping around going on.  Catch it
 -        // with this heuristic.  Hopefully, this VR_ANTI_RANGE
 -        // nonsense will go away, and we won't have to catch the
 -        // sign conversion problems with this crap.
 +        // A cast from signed to unsigned may cause us to have very
 +        // large numbers that can be caught with the above
 +        // heuristic.
          //
          // This is here to catch things like:
          // void foo(signed int n) {
index 0000000000000000000000000000000000000000,e7a85fdcd13b246f3e9fc035c169fe02bde91c1c..814797262d2d7616eabb8ac7ca5b9ddb0e2abb75
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,1904 +1,1898 @@@
 -      value_range_type rng = get_range_info (offset, &min, &max);
 -      if (rng == VR_RANGE)
+ /* Pass to detect and issue warnings for violations of the restrict
+    qualifier.
+    Copyright (C) 2017-2018 Free Software Foundation, Inc.
+    Contributed by Martin Sebor <msebor@redhat.com>.
+    This file is part of GCC.
+    GCC is free software; you can redistribute it and/or modify it under
+    the terms of the GNU General Public License as published by the Free
+    Software Foundation; either version 3, or (at your option) any later
+    version.
+    GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+    WARRANTY; without even the implied warranty of MERCHANTABILITY or
+    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+    for more details.
+    You should have received a copy of the GNU General Public License
+    along with GCC; see the file COPYING3.  If not see
+    <http://www.gnu.org/licenses/>.  */
+ #include "config.h"
+ #include "system.h"
+ #include "coretypes.h"
+ #include "backend.h"
+ #include "tree.h"
+ #include "gimple.h"
+ #include "domwalk.h"
+ #include "tree-pass.h"
+ #include "builtins.h"
+ #include "ssa.h"
+ #include "gimple-pretty-print.h"
+ #include "gimple-ssa-warn-restrict.h"
+ #include "diagnostic-core.h"
+ #include "fold-const.h"
+ #include "gimple-iterator.h"
+ #include "tree-dfa.h"
+ #include "tree-ssa.h"
+ #include "params.h"
+ #include "tree-cfg.h"
+ #include "tree-object-size.h"
+ #include "calls.h"
+ #include "cfgloop.h"
+ #include "intl.h"
+ namespace {
+ const pass_data pass_data_wrestrict = {
+   GIMPLE_PASS,
+   "wrestrict",
+   OPTGROUP_NONE,
+   TV_NONE,
+   PROP_cfg, /* Properties_required.  */
+   0,      /* properties_provided.  */
+   0,      /* properties_destroyed.  */
+   0,      /* properties_start */
+   0,      /* properties_finish */
+ };
+ /* Pass to detect violations of strict aliasing requirements in calls
+    to built-in string and raw memory functions.  */
+ class pass_wrestrict : public gimple_opt_pass
+ {
+  public:
+   pass_wrestrict (gcc::context *ctxt)
+     : gimple_opt_pass (pass_data_wrestrict, ctxt)
+     { }
+   opt_pass *clone () { return new pass_wrestrict (m_ctxt); }
+   virtual bool gate (function *);
+   virtual unsigned int execute (function *);
+ };
+ bool
+ pass_wrestrict::gate (function *fun ATTRIBUTE_UNUSED)
+ {
+   return warn_array_bounds != 0 || warn_restrict != 0;
+ }
+ /* Class to walk the basic blocks of a function in dominator order.  */
+ class wrestrict_dom_walker : public dom_walker
+ {
+  public:
+   wrestrict_dom_walker () : dom_walker (CDI_DOMINATORS) {}
+   edge before_dom_children (basic_block) FINAL OVERRIDE;
+   bool handle_gimple_call (gimple_stmt_iterator *);
+  private:
+   void check_call (gcall *);
+ };
+ edge
+ wrestrict_dom_walker::before_dom_children (basic_block bb)
+ {
+   /* Iterate over statements, looking for function calls.  */
+   for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
+        gsi_next (&si))
+     {
+       gimple *stmt = gsi_stmt (si);
+       if (!is_gimple_call (stmt))
+       continue;
+       if (gcall *call = as_a <gcall *> (stmt))
+       check_call (call);
+     }
+   return NULL;
+ }
+ /* Execute the pass for function FUN, walking in dominator order.  */
+ unsigned
+ pass_wrestrict::execute (function *fun)
+ {
+   calculate_dominance_info (CDI_DOMINATORS);
+   wrestrict_dom_walker walker;
+   walker.walk (ENTRY_BLOCK_PTR_FOR_FN (fun));
+   return 0;
+ }
+ /* Description of a memory reference by a built-in function.  This
+    is similar to ao_ref but made especially suitable for -Wrestrict
+    and not for optimization.  */
+ struct builtin_memref
+ {
+   /* The original pointer argument to the built-in function.  */
+   tree ptr;
+   /* The referenced subobject or NULL if not available, and the base
+      object of the memory reference or NULL.  */
+   tree ref;
+   tree base;
+   /* The size of the BASE object, PTRDIFF_MAX if indeterminate,
+      and negative until (possibly lazily) initialized.  */
+   offset_int basesize;
+   /* The non-negative offset of the referenced subobject.  Used to avoid
+      warnings for (apparently) possibly but not definitively overlapping
+      accesses to member arrays.  Negative when unknown/invalid.  */
+   offset_int refoff;
+   /* The offset range relative to the base.  */
+   offset_int offrange[2];
+   /* The size range of the access to this reference.  */
+   offset_int sizrange[2];
+   /* True for "bounded" string functions like strncat, and strncpy
+      and their variants that specify either an exact or upper bound
+      on the size of the accesses they perform.  For strncat both
+      the source and destination references are bounded.  For strncpy
+      only the destination reference is.  */
+   bool strbounded_p;
+   builtin_memref (tree, tree);
+   tree offset_out_of_bounds (int, offset_int[2]) const;
+ private:
+   /* Ctor helper to set or extend OFFRANGE based on argument.  */
+   void extend_offset_range (tree);
+   /*  Ctor helper to determine BASE and OFFRANGE from argument.  */
+   void set_base_and_offset (tree);
+ };
+ /* Description of a memory access by a raw memory or string built-in
+    function involving a pair of builtin_memref's.  */
+ class builtin_access
+ {
+  public:
+   /* Destination and source memory reference.  */
+   builtin_memref* const dstref;
+   builtin_memref* const srcref;
+   /* The size range of the access.  It's the greater of the accesses
+      to the two references.  */
+   HOST_WIDE_INT sizrange[2];
+   /* The minimum and maximum offset of an overlap of the access
+      (if it does, in fact, overlap), and the size of the overlap.  */
+   HOST_WIDE_INT ovloff[2];
+   HOST_WIDE_INT ovlsiz[2];
+   /* True to consider valid only accesses to the smallest subobject
+      and false for raw memory functions.  */
+   bool strict () const
+   {
+     return detect_overlap != &builtin_access::generic_overlap;
+   }
+   builtin_access (gcall *, builtin_memref &, builtin_memref &);
+   /* Entry point to determine overlap.  */
+   bool overlap ();
+  private:
+   /* Implementation functions used to determine overlap.  */
+   bool generic_overlap ();
+   bool strcat_overlap ();
+   bool strcpy_overlap ();
+   bool no_overlap ()
+   {
+     return false;
+   }
+   offset_int overlap_size (const offset_int [2], const offset_int[2],
+                          offset_int [2]);
+  private:
+   /* Temporaries used to compute the final result.  */
+   offset_int dstoff[2];
+   offset_int srcoff[2];
+   offset_int dstsiz[2];
+   offset_int srcsiz[2];
+   /* Pointer to a member function to call to determine overlap.  */
+   bool (builtin_access::*detect_overlap) ();
+ };
+ /* Initialize a memory reference representation from a pointer EXPR and
+    a size SIZE in bytes.  If SIZE is NULL_TREE then the size is assumed
+    to be unknown.  */
+ builtin_memref::builtin_memref (tree expr, tree size)
+ : ptr (expr),
+   ref (),
+   base (),
+   basesize (-1),
+   refoff (HOST_WIDE_INT_MIN),
+   offrange (),
+   sizrange (),
+   strbounded_p ()
+ {
+   /* Unfortunately, wide_int default ctor is a no-op so array members
+      of the type must be set individually.  */
+   offrange[0] = offrange[1] = 0;
+   sizrange[0] = sizrange[1] = 0;
+   const offset_int maxobjsize = tree_to_shwi (max_object_size ());
+   /* Find the BASE object or pointer referenced by EXPR and set
+      the offset range OFFRANGE in the process.  */
+   set_base_and_offset (expr);
+   if (size)
+     {
+       tree range[2];
+       /* Determine the size range, allowing for the result to be [0, 0]
+        for SIZE in the anti-range ~[0, N] where N >= PTRDIFF_MAX.  */
+       get_size_range (size, range, true);
+       sizrange[0] = wi::to_offset (range[0]);
+       sizrange[1] = wi::to_offset (range[1]);
+       /* get_size_range returns SIZE_MAX for the maximum size.
+        Constrain it to the real maximum of PTRDIFF_MAX.  */
+       if (sizrange[1] > maxobjsize)
+       sizrange[1] = maxobjsize;
+     }
+   else
+     sizrange[1] = maxobjsize;
+   tree basetype = TREE_TYPE (base);
+   if (DECL_P (base) && TREE_CODE (basetype) == ARRAY_TYPE)
+     {
+       /* If the offset could be in range of the referenced object
+        constrain its bounds so neither exceeds those of the object.  */
+       if (offrange[0] < 0 && offrange[1] > 0)
+       offrange[0] = 0;
+       offset_int maxoff = maxobjsize;
+       if (ref && array_at_struct_end_p (ref))
+       ;   /* Use the maximum possible offset for last member arrays.  */
+       else if (tree basesize = TYPE_SIZE_UNIT (basetype))
+       maxoff = wi::to_offset (basesize);
+       if (offrange[0] >= 0)
+       {
+         if (offrange[1] < 0)
+           offrange[1] = offrange[0] <= maxoff ? maxoff : maxobjsize;
+         else if (offrange[0] <= maxoff && offrange[1] > maxoff)
+           offrange[1] = maxoff;
+       }
+     }
+ }
+ /* Ctor helper to set or extend OFFRANGE based on the OFFSET argument.  */
+ void
+ builtin_memref::extend_offset_range (tree offset)
+ {
+   const offset_int maxobjsize = tree_to_shwi (max_object_size ());
+   if (TREE_CODE (offset) == INTEGER_CST)
+     {
+       offset_int off = int_cst_value (offset);
+       if (off != 0)
+       {
+         offrange[0] += off;
+         offrange[1] += off;
+       }
+       return;
+     }
+   if (TREE_CODE (offset) == SSA_NAME)
+     {
+       wide_int min, max;
 -      else if (rng == VR_ANTI_RANGE)
 -      {
 -        offrange[0] += offset_int::from (max + 1, SIGNED);
 -        offrange[1] += offset_int::from (min - 1, SIGNED);
 -      }
++      if (get_range_info (offset, &min, &max))
+       {
+         offrange[0] += offset_int::from (min, SIGNED);
+         offrange[1] += offset_int::from (max, SIGNED);
+       }
+       else
+       {
+         gimple *stmt = SSA_NAME_DEF_STMT (offset);
+         tree type;
+         if (is_gimple_assign (stmt)
+             && gimple_assign_rhs_code (stmt) == NOP_EXPR
+             && (type = TREE_TYPE (gimple_assign_rhs1 (stmt)))
+             && INTEGRAL_TYPE_P (type))
+           {
+             /* Use the bounds of the type of the NOP_EXPR operand
+                even if it's signed.  The result doesn't trigger
+                warnings but makes their output more readable.  */
+             offrange[0] += wi::to_offset (TYPE_MIN_VALUE (type));
+             offrange[1] += wi::to_offset (TYPE_MAX_VALUE (type));
+           }
+         else
+           offrange[1] += maxobjsize;
+       }
+       return;
+     }
+   offrange[1] += maxobjsize;
+ }
+ /* Determines the base object or pointer of the reference EXPR
+    and the offset range from the beginning of the base.  */
+ void
+ builtin_memref::set_base_and_offset (tree expr)
+ {
+   const offset_int maxobjsize = tree_to_shwi (max_object_size ());
+   if (TREE_CODE (expr) == SSA_NAME)
+     {
+       /* Try to tease the offset out of the pointer.  */
+       gimple *stmt = SSA_NAME_DEF_STMT (expr);
+       if (!base
+         && gimple_assign_single_p (stmt)
+         && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
+       expr = gimple_assign_rhs1 (stmt);
+       else if (is_gimple_assign (stmt))
+       {
+         tree_code code = gimple_assign_rhs_code (stmt);
+         if (code == NOP_EXPR)
+           {
+             tree rhs = gimple_assign_rhs1 (stmt);
+             if (POINTER_TYPE_P (TREE_TYPE (rhs)))
+               expr = gimple_assign_rhs1 (stmt);
+             else
+               {
+                 base = expr;
+                 return;
+               }
+           }
+         else if (code == POINTER_PLUS_EXPR)
+           {
+             expr = gimple_assign_rhs1 (stmt);
+             tree offset = gimple_assign_rhs2 (stmt);
+             extend_offset_range (offset);
+           }
+         else
+           {
+             base = expr;
+             return;
+           }
+       }
+       else
+       {
+         base = expr;
+         return;
+       }
+     }
+   if (TREE_CODE (expr) == ADDR_EXPR)
+     expr = TREE_OPERAND (expr, 0);
+   /* Stash the reference for offset validation.  */
+   ref = expr;
+   poly_int64 bitsize, bitpos;
+   tree var_off;
+   machine_mode mode;
+   int sign, reverse, vol;
+   /* Determine the base object or pointer of the reference and
+      the constant bit offset from the beginning of the base.
+      If the offset has a non-constant component, it will be in
+      VAR_OFF.  MODE, SIGN, REVERSE, and VOL are write only and
+      unused here.  */
+   base = get_inner_reference (expr, &bitsize, &bitpos, &var_off,
+                             &mode, &sign, &reverse, &vol);
+   /* get_inner_reference is not expected to return null.  */
+   gcc_assert (base != NULL);
+   poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
+   /* Convert the poly_int64 offset to offset_int.  The offset
+      should be constant but be prepared for it not to be just in
+      case.  */
+   offset_int cstoff;
+   if (bytepos.is_constant (&cstoff))
+     {
+       offrange[0] += cstoff;
+       offrange[1] += cstoff;
+       /* Besides the reference saved above, also stash the offset
+        for validation.  */
+       if (TREE_CODE (expr) == COMPONENT_REF)
+       refoff = cstoff;
+     }
+   else
+     offrange[1] += maxobjsize;
+   if (var_off)
+     {
+       if (TREE_CODE (var_off) == INTEGER_CST)
+       {
+         cstoff = wi::to_offset (var_off);
+         offrange[0] += cstoff;
+         offrange[1] += cstoff;
+       }
+       else
+       offrange[1] += maxobjsize;
+     }
+   if (TREE_CODE (base) == MEM_REF)
+     {
+       tree memrefoff = TREE_OPERAND (base, 1);
+       extend_offset_range (memrefoff);
+       base = TREE_OPERAND (base, 0);
+     }
+   if (TREE_CODE (base) == SSA_NAME)
+     set_base_and_offset (base);
+ }
+ /* Return error_mark_node if the signed offset exceeds the bounds
+    of the address space (PTRDIFF_MAX).  Otherwise, return either
+    BASE or REF when the offset exceeds the bounds of the BASE or
+    REF object, and set OOBOFF to the past-the-end offset formed
+    by the reference, including its size.  When STRICT is non-zero
+    use REF size, when available, otherwise use BASE size.  When
+    STRICT is greater than 1, use the size of the last array member
+    as the bound, otherwise treat such a member as a flexible array
+    member.  Return NULL when the offset is in bounds.  */
+ tree
+ builtin_memref::offset_out_of_bounds (int strict, offset_int ooboff[2]) const
+ {
+   const offset_int maxobjsize = tree_to_shwi (max_object_size ());
+   /* A temporary, possibly adjusted, copy of the offset range.  */
+   offset_int offrng[2] = { offrange[0], offrange[1] };
+   if (DECL_P (base) && TREE_CODE (TREE_TYPE (base)) == ARRAY_TYPE)
+     {
+       /* Check for offset in an anti-range with a negative lower bound.
+        For such a range, consider only the non-negative subrange.  */
+       if (offrng[1] < offrng[0] && offrng[1] < 0)
+       offrng[1] = maxobjsize;
+     }
+   /* Conservative offset of the last byte of the referenced object.  */
+   offset_int endoff;
+   /* The bounds need not be ordered.  Set HIB to use as the index
+      of the larger of the bounds and LOB as the opposite.  */
+   bool hib = wi::les_p (offrng[0], offrng[1]);
+   bool lob = !hib;
+   if (basesize < 0)
+     {
+       endoff = offrng[lob] + sizrange[0];
+       /* For a reference through a pointer to an object of unknown size
+        all initial offsets are considered valid, positive as well as
+        negative, since the pointer itself can point past the beginning
+        of the object.  However, the sum of the lower bound of the offset
+        and that of the size must be less than or equal than PTRDIFF_MAX.  */
+       if (endoff > maxobjsize)
+       return error_mark_node;
+       return NULL_TREE;
+     }
+   /* A reference to an object of known size must be within the bounds
+      of the base object.  */
+   if (offrng[hib] < 0 || offrng[lob] > basesize)
+     return base;
+   /* The extent of the reference must also be within the bounds of
+      the base object (if known) or the maximum object size otherwise.  */
+   endoff = wi::smax (offrng[lob], 0) + sizrange[0];
+   if (endoff > maxobjsize)
+     return error_mark_node;
+   offset_int size = basesize;
+   tree obj = base;
+   if (strict
+       && DECL_P (obj)
+       && ref
+       && refoff >= 0
+       && TREE_CODE (ref) == COMPONENT_REF
+       && (strict > 1
+         || !array_at_struct_end_p (ref)))
+     {
+       /* If the reference is to a member subobject, the offset must
+        be within the bounds of the subobject.  */
+       tree field = TREE_OPERAND (ref, 1);
+       tree type = TREE_TYPE (field);
+       if (tree sz = TYPE_SIZE_UNIT (type))
+       if (TREE_CODE (sz) == INTEGER_CST)
+         {
+           size = refoff + wi::to_offset (sz);
+           obj = ref;
+         }
+     }
+   if (endoff <= size)
+     return NULL_TREE;
+   /* Set the out-of-bounds offset range to be one greater than
+      that delimited by the reference including its size.  */
+   ooboff[lob] = size + 1;
+   if (endoff > ooboff[lob])
+     ooboff[hib] = endoff;
+   else
+     ooboff[hib] = wi::smax (offrng[lob], 0) + sizrange[1];
+   return obj;
+ }
+ /* Create an association between the memory references DST and SRC
+    for access by a call EXPR to a memory or string built-in funtion.  */
+ builtin_access::builtin_access (gcall *call, builtin_memref &dst,
+                               builtin_memref &src)
+ : dstref (&dst), srcref (&src), sizrange (), ovloff (), ovlsiz (),
+   dstoff (), srcoff (), dstsiz (), srcsiz ()
+ {
+   /* Zero out since the offset_int ctors invoked above are no-op.  */
+   dstoff[0] = dstoff[1] = 0;
+   srcoff[0] = srcoff[1] = 0;
+   dstsiz[0] = dstsiz[1] = 0;
+   srcsiz[0] = srcsiz[1] = 0;
+   /* Object Size Type to use to determine the size of the destination
+      and source objects.  Overridden below for raw memory functions.  */
+   int ostype = 1;
+   /* True when the size of one reference depends on the offset of
+      itself or the other.  */
+   bool depends_p = true;
+   /* True when the size of the destination reference DSTREF has been
+      determined from SRCREF and so needs to be adjusted by the latter's
+      offset.  Only meaningful for bounded string functions like strncpy.  */
+   bool dstadjust_p = false;
+   /* The size argument number (depends on the built-in).  */
+   unsigned sizeargno = 2;
+   if (gimple_call_with_bounds_p (call))
+     sizeargno += 2;
+   tree func = gimple_call_fndecl (call);
+   switch (DECL_FUNCTION_CODE (func))
+     {
+     case BUILT_IN_MEMCPY:
+     case BUILT_IN_MEMCPY_CHK:
+     case BUILT_IN_MEMCPY_CHKP:
+     case BUILT_IN_MEMCPY_CHK_CHKP:
+     case BUILT_IN_MEMPCPY:
+     case BUILT_IN_MEMPCPY_CHK:
+     case BUILT_IN_MEMPCPY_CHKP:
+     case BUILT_IN_MEMPCPY_CHK_CHKP:
+       ostype = 0;
+       depends_p = false;
+       detect_overlap = &builtin_access::generic_overlap;
+       break;
+     case BUILT_IN_MEMMOVE:
+     case BUILT_IN_MEMMOVE_CHK:
+     case BUILT_IN_MEMMOVE_CHKP:
+     case BUILT_IN_MEMMOVE_CHK_CHKP:
+       /* For memmove there is never any overlap to check for.  */
+       ostype = 0;
+       depends_p = false;
+       detect_overlap = &builtin_access::no_overlap;
+       break;
+     case BUILT_IN_STPNCPY:
+     case BUILT_IN_STPNCPY_CHK:
+     case BUILT_IN_STRNCPY:
+     case BUILT_IN_STRNCPY_CHK:
+       dstref->strbounded_p = true;
+       detect_overlap = &builtin_access::strcpy_overlap;
+       break;
+     case BUILT_IN_STPCPY:
+     case BUILT_IN_STPCPY_CHK:
+     case BUILT_IN_STPCPY_CHKP:
+     case BUILT_IN_STPCPY_CHK_CHKP:
+     case BUILT_IN_STRCPY:
+     case BUILT_IN_STRCPY_CHK:
+     case BUILT_IN_STRCPY_CHKP:
+     case BUILT_IN_STRCPY_CHK_CHKP:
+       detect_overlap = &builtin_access::strcpy_overlap;
+       break;
+     case BUILT_IN_STRCAT:
+     case BUILT_IN_STRCAT_CHK:
+     case BUILT_IN_STRCAT_CHKP:
+     case BUILT_IN_STRCAT_CHK_CHKP:
+       detect_overlap = &builtin_access::strcat_overlap;
+       break;
+     case BUILT_IN_STRNCAT:
+     case BUILT_IN_STRNCAT_CHK:
+       dstref->strbounded_p = true;
+       srcref->strbounded_p = true;
+       detect_overlap = &builtin_access::strcat_overlap;
+       break;
+     default:
+       /* Handle other string functions here whose access may need
+        to be validated for in-bounds offsets and non-overlapping
+        copies.  (Not all _chkp functions have BUILT_IN_XXX_CHKP
+        macros so they need to be handled here.)  */
+       return;
+     }
+   const offset_int maxobjsize = tree_to_shwi (max_object_size ());
+   /* Try to determine the size of the base object.  compute_objsize
+      expects a pointer so create one if BASE is a non-pointer object.  */
+   tree addr;
+   if (dst.basesize < 0)
+     {
+       addr = dst.base;
+       if (!POINTER_TYPE_P (TREE_TYPE (addr)))
+       addr = build1 (ADDR_EXPR, (TREE_TYPE (addr)), addr);
+       if (tree dstsize = compute_objsize (addr, ostype))
+       dst.basesize = wi::to_offset (dstsize);
+       else if (POINTER_TYPE_P (TREE_TYPE (addr)))
+       dst.basesize = HOST_WIDE_INT_MIN;
+       else
+       dst.basesize = maxobjsize;
+     }
+   if (src.basesize < 0)
+     {
+       addr = src.base;
+       if (!POINTER_TYPE_P (TREE_TYPE (addr)))
+       addr = build1 (ADDR_EXPR, (TREE_TYPE (addr)), addr);
+       if (tree srcsize = compute_objsize (addr, ostype))
+       src.basesize = wi::to_offset (srcsize);
+       else if (POINTER_TYPE_P (TREE_TYPE (addr)))
+       src.basesize = HOST_WIDE_INT_MIN;
+       else
+       src.basesize = maxobjsize;
+     }
+   /* If there is no dependency between the references or the base
+      objects of the two references aren't the same there's nothing
+      else to do.  */
+   if (depends_p && dstref->base != srcref->base)
+     return;
+   /* ...otherwise, make adjustments for references to the same object
+      by string built-in functions to reflect the constraints imposed
+      by the function.  */
+   /* For bounded string functions determine the range of the bound
+      on the access.  For others, the range stays unbounded.  */
+   offset_int bounds[2] = { maxobjsize, maxobjsize };
+   if (dstref->strbounded_p)
+     {
+       tree size = gimple_call_arg (call, sizeargno);
+       tree range[2];
+       if (get_size_range (size, range, true))
+       {
+         bounds[0] = wi::to_offset (range[0]);
+         bounds[1] = wi::to_offset (range[1]);
+       }
+       /* If both references' size ranges are indeterminate use the last
+        (size) argument from the function call as a substitute.  This
+        may only be necessary for strncpy (but not for memcpy where
+        the size range would have been already determined this way).  */
+       if (dstref->sizrange[0] == 0 && dstref->sizrange[1] == maxobjsize
+         && srcref->sizrange[0] == 0 && srcref->sizrange[1] == maxobjsize)
+       {
+         dstref->sizrange[0] = bounds[0];
+         dstref->sizrange[1] = bounds[1];
+       }
+     }
+   /* The size range of one reference involving the same base object
+      can be determined from the size range of the other reference.
+      This makes it possible to compute accurate offsets for warnings
+      involving functions like strcpy where the length of just one of
+      the two arguments is known (determined by tree-ssa-strlen).  */
+   if (dstref->sizrange[0] == 0 && dstref->sizrange[1] == maxobjsize)
+     {
+       /* When the destination size is unknown set it to the size of
+        the source.  */
+       dstref->sizrange[0] = srcref->sizrange[0];
+       dstref->sizrange[1] = srcref->sizrange[1];
+     }
+   else if (srcref->sizrange[0] == 0 && srcref->sizrange[1] == maxobjsize)
+     {
+       /* When the source size is unknown set it to the size of
+        the destination.  */
+       srcref->sizrange[0] = dstref->sizrange[0];
+       srcref->sizrange[1] = dstref->sizrange[1];
+       if (depends_p)
+       {
+         if (dstref->strbounded_p)
+           {
+             /* Read access by strncpy is bounded.  */
+             if (bounds[0] < srcref->sizrange[0])
+               srcref->sizrange[0] = bounds[0];
+             if (bounds[1] < srcref->sizrange[1])
+               srcref->sizrange[1] = bounds[1];
+           }
+         /* For string functions, adjust the size range of the source
+            reference by the inverse boundaries of the offset (because
+            the higher the offset into the string the shorter its
+            length).  */
+         if (srcref->offrange[1] >= 0
+             && srcref->offrange[1] < srcref->sizrange[0])
+           srcref->sizrange[0] -= srcref->offrange[1];
+         else
+           srcref->sizrange[0] = 0;
+         if (srcref->offrange[0] > 0)
+           {
+             if (srcref->offrange[0] < srcref->sizrange[1])
+               srcref->sizrange[1] -= srcref->offrange[0];
+             else
+               srcref->sizrange[1] = 0;
+           }
+         dstadjust_p = true;
+       }
+     }
+   if (detect_overlap == &builtin_access::generic_overlap)
+     {
+       if (dstref->strbounded_p)
+       {
+         dstref->sizrange[0] = bounds[0];
+         dstref->sizrange[1] = bounds[1];
+         if (dstref->sizrange[0] < srcref->sizrange[0])
+           srcref->sizrange[0] = dstref->sizrange[0];
+         if (dstref->sizrange[1] < srcref->sizrange[1])
+           srcref->sizrange[1] = dstref->sizrange[1];
+       }
+     }
+   else if (detect_overlap == &builtin_access::strcpy_overlap)
+     {
+       if (!dstref->strbounded_p)
+       {
+         /* For strcpy, adjust the destination size range to match that
+            of the source computed above.  */
+         if (depends_p && dstadjust_p)
+           {
+             dstref->sizrange[0] = srcref->sizrange[0];
+             dstref->sizrange[1] = srcref->sizrange[1];
+           }
+       }
+     }
+   if (dstref->strbounded_p)
+     {
+       /* For strncpy, adjust the destination size range to match that
+        of the source computed above.  */
+       dstref->sizrange[0] = bounds[0];
+       dstref->sizrange[1] = bounds[1];
+       if (bounds[0] < srcref->sizrange[0])
+       srcref->sizrange[0] = bounds[0];
+       if (bounds[1] < srcref->sizrange[1])
+       srcref->sizrange[1] = bounds[1];
+     }
+ }
+ offset_int
+ builtin_access::overlap_size (const offset_int a[2], const offset_int b[2],
+                             offset_int *off)
+ {
+   const offset_int *p = a;
+   const offset_int *q = b;
+   /* Point P at the bigger of the two ranges and Q at the smaller.  */
+   if (wi::lts_p (a[1] - a[0], b[1] - b[0]))
+     {
+       p = b;
+       q = a;
+     }
+   if (p[0] < q[0])
+     {
+       if (p[1] < q[0])
+       return 0;
+       *off = q[0];
+       return wi::smin (p[1], q[1]) - q[0];
+     }
+   if (q[1] < p[0])
+     return 0;
+   off[0] = p[0];
+   return q[1] - p[0];
+ }
+ /* Return true if the bounded memory (memcpy and similar) or string function
+    access (strncpy and similar) ACS overlaps.  */
+ bool
+ builtin_access::generic_overlap ()
+ {
+   builtin_access &acs = *this;
+   const builtin_memref *dstref = acs.dstref;
+   const builtin_memref *srcref = acs.srcref;
+   /* This function is only meaningful for accesses to the same object.  */
+   gcc_assert (dstref->base == srcref->base);
+   const offset_int maxobjsize = tree_to_shwi (max_object_size ());
+   /* Cap offsets by the size of the base object when known, otherwise
+      by the maximum object size.  */
+   offset_int maxsize = dstref->basesize < 0 ? maxobjsize : dstref->basesize;
+   gcc_assert (maxsize <= maxobjsize);
+   /* Adjust the larger bounds of the offsets (which may be the first
+      element if the lower bound is larger than the upper bound) to
+      make them valid for the smallest access (if possible) but no smaller
+      than the smaller bounds.  */
+   gcc_assert (wi::les_p (acs.dstoff[0], acs.dstoff[1]));
+   if (maxsize < acs.dstoff[1] + acs.dstsiz[0])
+     acs.dstoff[1] = maxsize - acs.dstsiz[0];
+   if (acs.dstoff[1] < acs.dstoff[0])
+     acs.dstoff[1] = acs.dstoff[0];
+   gcc_assert (wi::les_p (acs.srcoff[0], acs.srcoff[1]));
+   if (maxsize < acs.srcoff[1] + acs.srcsiz[0])
+     acs.srcoff[1] = maxsize - acs.srcsiz[0];
+   if (acs.srcoff[1] < acs.srcoff[0])
+     acs.srcoff[1] = acs.srcoff[0];
+   /* Determine the minimum and maximum space for the access given
+      the offsets.  SPACE is the distance between the two references;
+      the extremes are found from the pairwise distances between the
+      offset bounds.  */
+   offset_int space[2];
+   space[0] = wi::abs (acs.dstoff[0] - acs.srcoff[0]);
+   space[1] = space[0];
+   offset_int d = wi::abs (acs.dstoff[0] - acs.srcoff[1]);
+   if (acs.srcsiz[0] > 0)
+     {
+       if (d < space[0])
+       space[0] = d;
+       if (space[1] < d)
+       space[1] = d;
+     }
+   else
+     space[1] = acs.dstsiz[1];
+   d = wi::abs (acs.dstoff[1] - acs.srcoff[0]);
+   if (d < space[0])
+     space[0] = d;
+   if (space[1] < d)
+     space[1] = d;
+   /* Treat raw memory functions both of whose references are bounded
+      as special and permit uncertain overlaps to go undetected.  For
+      all kinds of constant offset and constant size accesses, if
+      overlap isn't certain it is not possible.  */
+   bool overlap_possible = space[0] < acs.dstsiz[1];
+   if (!overlap_possible)
+     return false;
+   /* Overlap is certain when even the maximum distance between the
+      references is smaller than the smallest destination access.  */
+   bool overlap_certain = space[1] < acs.dstsiz[0];
+   /* True when the size of one reference depends on the offset of
+      the other.  */
+   bool depends_p = detect_overlap != &builtin_access::generic_overlap;
+   if (!overlap_certain)
+     {
+       if (!dstref->strbounded_p && !depends_p)
+       /* Memcpy only considers certain overlap.  */
+       return false;
+       /* There's no way to distinguish an access to the same member
+        of a structure from one to two distinct members of the same
+        structure.  Give up to avoid excessive false positives.  */
+       tree basetype = TREE_TYPE (dstref->base);
+       if (POINTER_TYPE_P (basetype))
+       basetype = TREE_TYPE (basetype);
+       else
+       while (TREE_CODE (basetype) == ARRAY_TYPE)
+         basetype = TREE_TYPE (basetype);
+       if (RECORD_OR_UNION_TYPE_P (basetype))
+       return false;
+     }
+   /* True for stpcpy and strcpy.  */
+   bool stxcpy_p = (!dstref->strbounded_p
+                  && detect_overlap == &builtin_access::strcpy_overlap);
+   if (dstref->refoff >= 0
+       && srcref->refoff >= 0
+       && dstref->refoff != srcref->refoff
+       && (stxcpy_p || dstref->strbounded_p || srcref->strbounded_p))
+     return false;
+   /* SIZ tracks the smallest and largest overlap seen below; the
+      initializers are replaced by the first candidate.  */
+   offset_int siz[2] = { maxobjsize + 1, 0 };
+   ovloff[0] = HOST_WIDE_INT_MAX;
+   ovloff[1] = HOST_WIDE_INT_MIN;
+   /* Adjustment to the lower bound of the offset of the overlap to
+      account for a subset of unbounded string calls where the size
+      of the destination string depends on the length of the source
+      which in turn depends on the offset into it.  */
+   bool sub1;
+   if (stxcpy_p)
+     {
+       sub1 = acs.dstoff[0] <= acs.srcoff[0];
+       /* Iterate over the extreme locations (on the horizontal axis formed
+        by their offsets) and sizes of two regions and find their smallest
+        and largest overlap and the corresponding offsets.  */
+       for (unsigned i = 0; i != 2; ++i)
+       {
+         const offset_int a[2] = {
+           acs.dstoff[i], acs.dstoff[i] + acs.dstsiz[!i]
+         };
+         const offset_int b[2] = {
+           acs.srcoff[i], acs.srcoff[i] + acs.srcsiz[!i]
+         };
+         offset_int off;
+         offset_int sz = overlap_size (a, b, &off);
+         if (sz < siz[0])
+           siz[0] = sz;
+         if (siz[1] <= sz)
+           siz[1] = sz;
+         if (sz != 0)
+           {
+             if (wi::lts_p (off, ovloff[0]))
+               ovloff[0] = off.to_shwi ();
+             if (wi::lts_p (ovloff[1], off))
+               ovloff[1] = off.to_shwi ();
+           }
+       }
+     }
+   else
+     {
+       sub1 = !depends_p;
+       /* Iterate over the extreme locations (on the horizontal axis
+        formed by their offsets) and sizes of two regions and find
+        their smallest and largest overlap and the corresponding
+        offsets.  */
+       for (unsigned io = 0; io != 2; ++io)
+       for (unsigned is = 0; is != 2; ++is)
+         {
+           const offset_int a[2] = {
+             acs.dstoff[io], acs.dstoff[io] + acs.dstsiz[is]
+           };
+           for (unsigned jo = 0; jo != 2; ++jo)
+             for (unsigned js = 0; js != 2; ++js)
+               {
+                 if (depends_p)
+                   {
+                     /* For st{p,r}ncpy the size of the source sequence
+                        depends on the offset into it.  */
+                     if (js)
+                       break;
+                     js = !jo;
+                   }
+                 const offset_int b[2] = {
+                   acs.srcoff[jo], acs.srcoff[jo] + acs.srcsiz[js]
+                 };
+                 offset_int off;
+                 offset_int sz = overlap_size (a, b, &off);
+                 if (sz < siz[0])
+                   siz[0] = sz;
+                 if (siz[1] <= sz)
+                   siz[1] = sz;
+                 if (sz != 0)
+                   {
+                     if (wi::lts_p (off, ovloff[0]))
+                       ovloff[0] = off.to_shwi ();
+                     if (wi::lts_p (ovloff[1], off))
+                       ovloff[1] = off.to_shwi ();
+                   }
+               }
+         }
+     }
+   /* Convert the overlap size range to host ints and adjust the lower
+      bound of the offset for a possible (but not certain) overlap.  */
+   ovlsiz[0] = siz[0].to_shwi ();
+   ovlsiz[1] = siz[1].to_shwi ();
+   if (ovlsiz[0] == 0 && ovlsiz[1] > 1)
+     ovloff[0] = ovloff[1] + ovlsiz[1] - 1 - sub1;
+   return true;
+ }
+ /* Return true if the strcat-like access overlaps.  */
+ bool
+ builtin_access::strcat_overlap ()
+ {
+   builtin_access &acs = *this;
+   const builtin_memref *dstref = acs.dstref;
+   const builtin_memref *srcref = acs.srcref;
+   gcc_assert (dstref->base == srcref->base);
+   const offset_int maxobjsize = tree_to_shwi (max_object_size ());
+   gcc_assert (dstref->base && dstref->base == srcref->base);
+   /* Adjust for strcat-like accesses.  */
+   /* As a special case for strcat, set the DSTREF offsets to the length
+      of the source string since the function starts writing at the first
+      nul, and set the size to 1 for the length of the nul.  */
+   acs.dstoff[0] += acs.dstsiz[0];
+   acs.dstoff[1] += acs.dstsiz[1];
+   /* A zero lower bound with a nonzero upper bound means the length
+      of the destination isn't known.  */
+   bool strfunc_unknown_args = acs.dstsiz[0] == 0 && acs.dstsiz[1] != 0;
+   /* The lower bound is zero when the size is unknown because then
+      overlap is not certain.  */
+   acs.dstsiz[0] = strfunc_unknown_args ? 0 : 1;
+   acs.dstsiz[1] = 1;
+   offset_int maxsize = dstref->basesize < 0 ? maxobjsize : dstref->basesize;
+   gcc_assert (maxsize <= maxobjsize);
+   /* For references to the same base object, determine if there's a pair
+      of valid offsets into the two references such that access between
+      them doesn't overlap.  Adjust both upper bounds to be valid for
+      the smaller size (i.e., at most MAXSIZE - SIZE).  */
+   if (maxsize < acs.dstoff[1] + acs.dstsiz[0])
+     acs.dstoff[1] = maxsize - acs.dstsiz[0];
+   if (maxsize < acs.srcoff[1] + acs.srcsiz[0])
+     acs.srcoff[1] = maxsize - acs.srcsiz[0];
+   /* Check to see if there's enough space for both accesses without
+      overlap.  Determine the optimistic (maximum) amount of available
+      space.  */
+   offset_int space;
+   if (acs.dstoff[0] <= acs.srcoff[0])
+     {
+       if (acs.dstoff[1] < acs.srcoff[1])
+       space = acs.srcoff[1] + acs.srcsiz[0] - acs.dstoff[0];
+       else
+       space = acs.dstoff[1] + acs.dstsiz[0] - acs.srcoff[0];
+     }
+   else
+     space = acs.dstoff[1] + acs.dstsiz[0] - acs.srcoff[0];
+   /* Overlap is certain if the distance between the farthest offsets
+      of the opposite accesses is less than the sum of the lower bounds
+      of the sizes of the two accesses.  */
+   bool overlap_certain = space < acs.dstsiz[0] + acs.srcsiz[0];
+   /* For a constant-offset, constant size access, consider the largest
+      distance between the offset bounds and the lower bound of the access
+      size.  If the overlap isn't certain return success.  */
+   if (!overlap_certain
+       && acs.dstoff[0] == acs.dstoff[1]
+       && acs.srcoff[0] == acs.srcoff[1]
+       && acs.dstsiz[0] == acs.dstsiz[1]
+       && acs.srcsiz[0] == acs.srcsiz[1])
+     return false;
+   /* Overlap is not certain but may be possible.  */
+   offset_int access_min = acs.dstsiz[0] + acs.srcsiz[0];
+   /* Determine the conservative (minimum) amount of space.  */
+   space = wi::abs (acs.dstoff[0] - acs.srcoff[0]);
+   offset_int d = wi::abs (acs.dstoff[0] - acs.srcoff[1]);
+   if (d < space)
+     space = d;
+   d = wi::abs (acs.dstoff[1] - acs.srcoff[0]);
+   if (d < space)
+     space = d;
+   /* For a strict test (used for strcpy and similar with unknown or
+      variable bounds or sizes), consider the smallest distance between
+      the offset bounds and either the upper bound of the access size
+      if known, or the lower bound otherwise.  */
+   if (access_min <= space && (access_min != 0 || !strfunc_unknown_args))
+     return false;
+   /* When strcat overlap is certain it is always a single byte:
+      the terminating NUL, regardless of offsets and sizes.  When
+      overlap is only possible its range is [0, 1].  */
+   acs.ovlsiz[0] = dstref->sizrange[0] == dstref->sizrange[1] ? 1 : 0;
+   acs.ovlsiz[1] = 1;
+   /* Compute the range of offsets at which the overlap (the NUL) can
+      occur, clamped to the maximum object size.  */
+   offset_int endoff = dstref->offrange[0] + dstref->sizrange[0];
+   if (endoff <= srcref->offrange[0])
+     acs.ovloff[0] = wi::smin (maxobjsize, srcref->offrange[0]).to_shwi ();
+   else
+     acs.ovloff[0] = wi::smin (maxobjsize, endoff).to_shwi ();
+   acs.sizrange[0] = wi::smax (wi::abs (endoff - srcref->offrange[0]) + 1,
+                             srcref->sizrange[0]).to_shwi ();
+   if (dstref->offrange[0] == dstref->offrange[1])
+     {
+       if (srcref->offrange[0] == srcref->offrange[1])
+       acs.ovloff[1] = acs.ovloff[0];
+       else
+       acs.ovloff[1]
+         = wi::smin (maxobjsize,
+                     srcref->offrange[1] + srcref->sizrange[1]).to_shwi ();
+     }
+   else
+     acs.ovloff[1]
+       = wi::smin (maxobjsize,
+                 dstref->offrange[1] + dstref->sizrange[1]).to_shwi ();
+   if (acs.sizrange[0] == 0)
+     acs.sizrange[0] = 1;
+   acs.sizrange[1] = wi::smax (acs.dstsiz[1], srcref->sizrange[1]).to_shwi ();
+   return true;
+ }
+ /* Return true if the strcpy-like access overlaps.  The work is done
+    by generic_overlap, which distinguishes strcpy-like calls by
+    comparing the detect_overlap member pointer against this function.
+    This wrapper must remain a distinct function for that comparison
+    to work.  */
+ bool
+ builtin_access::strcpy_overlap ()
+ {
+   return generic_overlap ();
+ }
+ /* Return true if DSTREF and SRCREF describe accesses that either overlap
+    one another or that, in order not to overlap, would imply that the size
+    of the referenced object(s) exceeds the maximum size of an object.  Set
+    the overlap offset and size members when an overlap is detected.
+    Otherwise, if DSTREF and SRCREF do not definitely overlap (even though
+    they may overlap in a way that's not apparent from the available data),
+    return false.  */
+ bool
+ builtin_access::overlap ()
+ {
+   builtin_access &acs = *this;
+   const offset_int maxobjsize = tree_to_shwi (max_object_size ());
+   /* Start with the larger of the two size ranges; the overlap
+      detectors below may refine this.  */
+   acs.sizrange[0] = wi::smax (dstref->sizrange[0],
+                             srcref->sizrange[0]).to_shwi ();
+   acs.sizrange[1] = wi::smax (dstref->sizrange[1],
+                             srcref->sizrange[1]).to_shwi ();
+   /* Check to see if the two references refer to regions that are
+      too large not to overlap in the address space (whose maximum
+      size is PTRDIFF_MAX).  */
+   offset_int size = dstref->sizrange[0] + srcref->sizrange[0];
+   if (maxobjsize < size)
+     {
+       acs.ovloff[0] = (maxobjsize - dstref->sizrange[0]).to_shwi ();
+       acs.ovlsiz[0] = (size - maxobjsize).to_shwi ();
+       return true;
+     }
+   /* If both base objects aren't known return the maximum possible
+      offset that would make them not overlap.  */
+   if (!dstref->base || !srcref->base)
+     return false;
+   /* Set the access offsets.  */
+   acs.dstoff[0] = dstref->offrange[0];
+   acs.dstoff[1] = dstref->offrange[1];
+   /* If the base object is an array adjust the bounds of the offset
+      to be non-negative and within the bounds of the array if possible.  */
+   if (dstref->base
+       && TREE_CODE (TREE_TYPE (dstref->base)) == ARRAY_TYPE)
+     {
+       if (acs.dstoff[0] < 0 && acs.dstoff[1] >= 0)
+       acs.dstoff[0] = 0;
+       if (acs.dstoff[1] < acs.dstoff[0])
+       {
+         if (tree size = TYPE_SIZE_UNIT (TREE_TYPE (dstref->base)))
+           acs.dstoff[1] = wi::umin (acs.dstoff[1], wi::to_offset (size));
+         else
+           acs.dstoff[1] = wi::umin (acs.dstoff[1], maxobjsize);
+       }
+     }
+   acs.srcoff[0] = srcref->offrange[0];
+   acs.srcoff[1] = srcref->offrange[1];
+   /* NOTE(review): the clamping logic here is structured differently
+      from the DSTREF block above (the TYPE_SIZE_UNIT test is not
+      guarded by srcoff[1] < srcoff[0]) — confirm the asymmetry is
+      intentional.  */
+   if (srcref->base
+       && TREE_CODE (TREE_TYPE (srcref->base)) == ARRAY_TYPE)
+     {
+       if (acs.srcoff[0] < 0 && acs.srcoff[1] >= 0)
+       acs.srcoff[0] = 0;
+       if (tree size = TYPE_SIZE_UNIT (TREE_TYPE (srcref->base)))
+       acs.srcoff[1] = wi::umin (acs.srcoff[1], wi::to_offset (size));
+       else if (acs.srcoff[1] < acs.srcoff[0])
+       acs.srcoff[1] = wi::umin (acs.srcoff[1], maxobjsize);
+     }
+   /* When the upper bound of the offset is less than the lower bound
+      the former is the result of a negative offset being represented
+      as a large positive value or vice versa.  The resulting range is
+      a union of two subranges: [MIN, UB] and [LB, MAX].  Since such
+      a union is not representable using the current data structure
+      replace it with the full range of offsets.  */
+   if (acs.dstoff[1] < acs.dstoff[0])
+     {
+       acs.dstoff[0] = -maxobjsize - 1;
+       acs.dstoff[1] = maxobjsize;
+     }
+   /* Validate the offset and size of each reference on its own first.
+      This is independent of whether or not the base objects are the
+      same.  Normally, this would have already been detected and
+      diagnosed by -Warray-bounds, unless it has been disabled.  */
+   offset_int maxoff = acs.dstoff[0] + dstref->sizrange[0];
+   if (maxobjsize < maxoff)
+     {
+       acs.ovlsiz[0] = (maxoff - maxobjsize).to_shwi ();
+       acs.ovloff[0] = acs.dstoff[0].to_shwi () - acs.ovlsiz[0];
+       return true;
+     }
+   /* Repeat the same as above but for the source offsets.  */
+   if (acs.srcoff[1] < acs.srcoff[0])
+     {
+       acs.srcoff[0] = -maxobjsize - 1;
+       acs.srcoff[1] = maxobjsize;
+     }
+   maxoff = acs.srcoff[0] + srcref->sizrange[0];
+   if (maxobjsize < maxoff)
+     {
+       acs.ovlsiz[0] = (maxoff - maxobjsize).to_shwi ();
+       acs.ovlsiz[1] = (acs.srcoff[0] + srcref->sizrange[1]
+                      - maxobjsize).to_shwi ();
+       acs.ovloff[0] = acs.srcoff[0].to_shwi () - acs.ovlsiz[0];
+       return true;
+     }
+   /* Accesses to distinct objects cannot overlap.  */
+   if (dstref->base != srcref->base)
+     return false;
+   acs.dstsiz[0] = dstref->sizrange[0];
+   acs.dstsiz[1] = dstref->sizrange[1];
+   acs.srcsiz[0] = srcref->sizrange[0];
+   acs.srcsiz[1] = srcref->sizrange[1];
+   /* Call the appropriate function to determine the overlap.  */
+   if ((this->*detect_overlap) ())
+     {
+       if (!sizrange[1])
+       {
+         /* Unless the access size range has already been set, do so here.  */
+         sizrange[0] = wi::smax (acs.dstsiz[0], srcref->sizrange[0]).to_shwi ();
+         sizrange[1] = wi::smax (acs.dstsiz[1], srcref->sizrange[1]).to_shwi ();
+       }
+       return true;
+     }
+   return false;
+ }
+ /* Attempt to detect and diagnose an overlapping copy in the call
+    statement CALL involving an access ACS to a built-in memory or string
+    function.  LOC is the location to issue the diagnostic at.
+    Return true when one has been detected, false otherwise.  */
+ static bool
+ maybe_diag_overlap (location_t loc, gcall *call, builtin_access &acs)
+ {
+   if (!acs.overlap ())
+     return false;
+   /* For convenience.  */
+   const builtin_memref &dstref = *acs.dstref;
+   const builtin_memref &srcref = *acs.srcref;
+   /* Determine the range of offsets and sizes of the overlap if it
+      exists and issue diagnostics.  */
+   HOST_WIDE_INT *ovloff = acs.ovloff;
+   HOST_WIDE_INT *ovlsiz = acs.ovlsiz;
+   HOST_WIDE_INT *sizrange = acs.sizrange;
+   tree func = gimple_call_fndecl (call);
+   /* To avoid a combinatorial explosion of diagnostics format the offsets
+      or their ranges as strings and use them in the warning calls below.
+      OFFSTR[0] is the destination offset, OFFSTR[1] the source offset,
+      and OFFSTR[2] the offset of the overlap.  */
+   char offstr[3][64];
+   if (dstref.offrange[0] == dstref.offrange[1]
+       || dstref.offrange[1] > HOST_WIDE_INT_MAX)
+     sprintf (offstr[0], HOST_WIDE_INT_PRINT_DEC,
+            dstref.offrange[0].to_shwi ());
+   else
+     sprintf (offstr[0],
+            "[" HOST_WIDE_INT_PRINT_DEC ", " HOST_WIDE_INT_PRINT_DEC "]",
+            dstref.offrange[0].to_shwi (),
+            dstref.offrange[1].to_shwi ());
+   if (srcref.offrange[0] == srcref.offrange[1]
+       || srcref.offrange[1] > HOST_WIDE_INT_MAX)
+     sprintf (offstr[1],
+            HOST_WIDE_INT_PRINT_DEC,
+            srcref.offrange[0].to_shwi ());
+   else
+     sprintf (offstr[1],
+            "[" HOST_WIDE_INT_PRINT_DEC ", " HOST_WIDE_INT_PRINT_DEC "]",
+            srcref.offrange[0].to_shwi (),
+            srcref.offrange[1].to_shwi ());
+   if (ovloff[0] == ovloff[1] || !ovloff[1])
+     sprintf (offstr[2], HOST_WIDE_INT_PRINT_DEC, ovloff[0]);
+   else
+     sprintf (offstr[2],
+            "[" HOST_WIDE_INT_PRINT_DEC ", " HOST_WIDE_INT_PRINT_DEC "]",
+            ovloff[0], ovloff[1]);
+   const offset_int maxobjsize = tree_to_shwi (max_object_size ());
+   /* A nonzero lower bound on the overlap size means the overlap is
+      unavoidable; diagnose it with definitive wording.  */
+   bool must_overlap = ovlsiz[0] > 0;
+   if (ovlsiz[1] == 0)
+     ovlsiz[1] = ovlsiz[0];
+   if (must_overlap)
+     {
+       /* Issue definitive "overlaps" diagnostic in this block.  */
+       if (sizrange[0] == sizrange[1])
+       {
+         if (ovlsiz[0] == ovlsiz[1])
+           warning_at (loc, OPT_Wrestrict,
+                       sizrange[0] == 1
+                       ? (ovlsiz[0] == 1
+                          ? G_("%G%qD accessing %wu byte at offsets %s "
+                               "and %s overlaps %wu byte at offset %s")
+                          :  G_("%G%qD accessing %wu byte at offsets %s "
+                                "and %s overlaps %wu bytes at offset "
+                                "%s"))
+                       : (ovlsiz[0] == 1
+                          ? G_("%G%qD accessing %wu bytes at offsets %s "
+                               "and %s overlaps %wu byte at offset %s")
+                          : G_("%G%qD accessing %wu bytes at offsets %s "
+                               "and %s overlaps %wu bytes at offset "
+                               "%s")),
+                       call, func, sizrange[0],
+                       offstr[0], offstr[1], ovlsiz[0], offstr[2]);
+         else if (ovlsiz[1] >= 0 && ovlsiz[1] < maxobjsize.to_shwi ())
+           warning_n (loc, OPT_Wrestrict, sizrange[0],
+                      "%G%qD accessing %wu byte at offsets %s "
+                      "and %s overlaps between %wu and %wu bytes "
+                      "at offset %s",
+                      "%G%qD accessing %wu bytes at offsets %s "
+                      "and %s overlaps between %wu and %wu bytes "
+                      "at offset %s",
+                      call, func, sizrange[0], offstr[0], offstr[1],
+                      ovlsiz[0], ovlsiz[1], offstr[2]);
+         else
+           warning_n (loc, OPT_Wrestrict, sizrange[0],
+                      "%G%qD accessing %wu byte at offsets %s and "
+                      "%s overlaps %wu or more bytes at offset %s",
+                      "%G%qD accessing %wu bytes at offsets %s and "
+                      "%s overlaps %wu or more bytes at offset %s",
+                      call, func, sizrange[0],
+                      offstr[0], offstr[1], ovlsiz[0], offstr[2]);
+         return true;
+       }
+       if (sizrange[1] >= 0 && sizrange[1] < maxobjsize.to_shwi ())
+       {
+         if (ovlsiz[0] == ovlsiz[1])
+           warning_n (loc, OPT_Wrestrict, ovlsiz[0],
+                      "%G%qD accessing between %wu and %wu bytes "
+                      "at offsets %s and %s overlaps %wu byte at "
+                      "offset %s",
+                      "%G%qD accessing between %wu and %wu bytes "
+                      "at offsets %s and %s overlaps %wu bytes "
+                      "at offset %s",
+                      call, func, sizrange[0], sizrange[1],
+                      offstr[0], offstr[1], ovlsiz[0], offstr[2]);
+         else if (ovlsiz[1] >= 0 && ovlsiz[1] < maxobjsize.to_shwi ())
+           warning_at (loc, OPT_Wrestrict,
+                       "%G%qD accessing between %wu and %wu bytes at "
+                       "offsets %s and %s overlaps between %wu and %wu "
+                       "bytes at offset %s",
+                       call, func, sizrange[0], sizrange[1],
+                       offstr[0], offstr[1], ovlsiz[0], ovlsiz[1],
+                       offstr[2]);
+         else
+           warning_at (loc, OPT_Wrestrict,
+                       "%G%qD accessing between %wu and %wu bytes at "
+                       "offsets %s and %s overlaps %wu or more bytes "
+                       "at offset %s",
+                       call, func, sizrange[0], sizrange[1],
+                       offstr[0], offstr[1], ovlsiz[0], offstr[2]);
+         return true;
+       }
+       if (ovlsiz[0] != ovlsiz[1])
+       ovlsiz[1] = maxobjsize.to_shwi ();
+       if (ovlsiz[0] == ovlsiz[1])
+       warning_n (loc, OPT_Wrestrict, ovlsiz[0],
+                  "%G%qD accessing %wu or more bytes at offsets "
+                  "%s and %s overlaps %wu byte at offset %s",
+                  "%G%qD accessing %wu or more bytes at offsets "
+                  "%s and %s overlaps %wu bytes at offset %s",
+                  call, func, sizrange[0], offstr[0], offstr[1],
+                  ovlsiz[0], offstr[2]);
+       else if (ovlsiz[1] >= 0 && ovlsiz[1] < maxobjsize.to_shwi ())
+       warning_at (loc, OPT_Wrestrict,
+                   "%G%qD accessing %wu or more bytes at offsets %s "
+                   "and %s overlaps between %wu and %wu bytes "
+                   "at offset %s",
+                   call, func, sizrange[0], offstr[0], offstr[1],
+                   ovlsiz[0], ovlsiz[1], offstr[2]);
+       else
+       warning_at (loc, OPT_Wrestrict,
+                   "%G%qD accessing %wu or more bytes at offsets %s "
+                   "and %s overlaps %wu or more bytes at offset %s",
+                   call, func, sizrange[0], offstr[0], offstr[1],
+                   ovlsiz[0], offstr[2]);
+       return true;
+     }
+   /* The overlap is only possible from here on.  Use more concise
+      wording when one of the offsets is unbounded to avoid confusing
+      the user with large and mostly meaningless numbers.  */
+   bool open_range;
+   if (DECL_P (dstref.base) && TREE_CODE (TREE_TYPE (dstref.base)) == ARRAY_TYPE)
+     open_range = ((dstref.offrange[0] == 0
+                  && dstref.offrange[1] == maxobjsize)
+                 || (srcref.offrange[0] == 0
+                     && srcref.offrange[1] == maxobjsize));
+   else
+     open_range = ((dstref.offrange[0] == -maxobjsize - 1
+                  && dstref.offrange[1] == maxobjsize)
+                 || (srcref.offrange[0] == -maxobjsize - 1
+                     && srcref.offrange[1] == maxobjsize));
+   if (sizrange[0] == sizrange[1] || sizrange[1] == 1)
+     {
+       if (ovlsiz[1] == 1)
+       {
+         if (open_range)
+           warning_n (loc, OPT_Wrestrict, sizrange[1],
+                      "%G%qD accessing %wu byte may overlap "
+                      "%wu byte",
+                      "%G%qD accessing %wu bytes may overlap "
+                      "%wu byte",
+                      call, func, sizrange[1], ovlsiz[1]);
+         else
+           warning_n (loc, OPT_Wrestrict, sizrange[1],
+                      "%G%qD accessing %wu byte at offsets %s "
+                      "and %s may overlap %wu byte at offset %s",
+                      "%G%qD accessing %wu bytes at offsets %s "
+                      "and %s may overlap %wu byte at offset %s",
+                      call, func, sizrange[1], offstr[0], offstr[1],
+                      ovlsiz[1], offstr[2]);
+         return true;
+       }
+       if (open_range)
+       warning_n (loc, OPT_Wrestrict, sizrange[1],
+                  "%G%qD accessing %wu byte may overlap "
+                  "up to %wu bytes",
+                  "%G%qD accessing %wu bytes may overlap "
+                  "up to %wu bytes",
+                  call, func, sizrange[1], ovlsiz[1]);
+       else
+       warning_n (loc, OPT_Wrestrict, sizrange[1],
+                  "%G%qD accessing %wu byte at offsets %s and "
+                  "%s may overlap up to %wu bytes at offset %s",
+                  "%G%qD accessing %wu bytes at offsets %s and "
+                  "%s may overlap up to %wu bytes at offset %s",
+                  call, func, sizrange[1], offstr[0], offstr[1],
+                  ovlsiz[1], offstr[2]);
+       return true;
+     }
+   if (sizrange[1] >= 0 && sizrange[1] < maxobjsize.to_shwi ())
+     {
+       if (open_range)
+       warning_n (loc, OPT_Wrestrict, ovlsiz[1],
+                  "%G%qD accessing between %wu and %wu bytes "
+                  "may overlap %wu byte",
+                  "%G%qD accessing between %wu and %wu bytes "
+                  "may overlap up to %wu bytes",
+                  call, func, sizrange[0], sizrange[1], ovlsiz[1]);
+       else
+       warning_n (loc, OPT_Wrestrict, ovlsiz[1],
+                  "%G%qD accessing between %wu and %wu bytes "
+                  "at offsets %s and %s may overlap %wu byte "
+                  "at offset %s",
+                  "%G%qD accessing between %wu and %wu bytes "
+                  "at offsets %s and %s may overlap up to %wu "
+                  "bytes at offset %s",
+                  call, func, sizrange[0], sizrange[1],
+                  offstr[0], offstr[1], ovlsiz[1], offstr[2]);
+       return true;
+     }
+   warning_n (loc, OPT_Wrestrict, ovlsiz[1],
+            "%G%qD accessing %wu or more bytes at offsets %s "
+            "and %s may overlap %wu byte at offset %s",
+            "%G%qD accessing %wu or more bytes at offsets %s "
+            "and %s may overlap up to %wu bytes at offset %s",
+            call, func, sizrange[0], offstr[0], offstr[1],
+            ovlsiz[1], offstr[2]);
+   return true;
+ }
+ /* Validate REF offsets in an EXPRession passed as an argument to a CALL
+    to a built-in function FUNC to make sure they are within the bounds
+    of the referenced object if its size is known, or PTRDIFF_MAX otherwise.
+    Both initial values of the offsets and their final value computed by
+    the function by incrementing the initial value by the size are
+    validated.  STRICT is passed through to offset_out_of_bounds.
+    Return true if the offsets are not valid and a diagnostic
+    has been issued.  */
+ static bool
+ maybe_diag_offset_bounds (location_t loc, gcall *call, tree func, int strict,
+                         tree expr, const builtin_memref &ref)
+ {
+   if (!warn_array_bounds)
+     return false;
+   offset_int ooboff[] = { ref.offrange[0], ref.offrange[1] };
+   tree oobref = ref.offset_out_of_bounds (strict, ooboff);
+   if (!oobref)
+     return false;
+   /* Prefer the location of the expression itself, and point at the
+      expansion point for calls in system headers.  */
+   if (EXPR_HAS_LOCATION (expr))
+     loc = EXPR_LOCATION (expr);
+   loc = expansion_point_location_if_in_system_header (loc);
+   tree type;
+   /* RANGESTR[0] formats the out-of-bounds offset (or its range),
+      RANGESTR[1] the size range.  */
+   char rangestr[2][64];
+   if (ooboff[0] == ooboff[1]
+       || (ooboff[0] != ref.offrange[0]
+         && ooboff[0].to_shwi () >= ooboff[1].to_shwi ()))
+     sprintf (rangestr[0], "%lli", (long long) ooboff[0].to_shwi ());
+   else
+     sprintf (rangestr[0], "[%lli, %lli]",
+            (long long) ooboff[0].to_shwi (),
+            (long long) ooboff[1].to_shwi ());
+   if (oobref == error_mark_node)
+     {
+       /* The combination of offset and size overflows.  */
+       if (ref.sizrange[0] == ref.sizrange[1])
+       sprintf (rangestr[1], "%lli", (long long) ref.sizrange[0].to_shwi ());
+       else
+       sprintf (rangestr[1], "[%lli, %lli]",
+                (long long) ref.sizrange[0].to_shwi (),
+                (long long) ref.sizrange[1].to_shwi ());
+       if (DECL_P (ref.base)
+         && TREE_CODE (type = TREE_TYPE (ref.base)) == ARRAY_TYPE)
+       {
+         if (warning_at (loc, OPT_Warray_bounds,
+                         "%G%qD pointer overflow between offset %s "
+                         "and size %s accessing array %qD with type %qT",
+                         call, func, rangestr[0], rangestr[1], ref.base, type))
+           inform (DECL_SOURCE_LOCATION (ref.base),
+                   "array %qD declared here", ref.base);
+         else
+           warning_at (loc, OPT_Warray_bounds,
+                       "%G%qD pointer overflow between offset %s "
+                       "and size %s",
+                       call, func, rangestr[0], rangestr[1]);
+       }
+       else
+       warning_at (loc, OPT_Warray_bounds,
+                   "%G%qD pointer overflow between offset %s "
+                   "and size %s",
+                   call, func, rangestr[0], rangestr[1]);
+     }
+   else if (oobref == ref.base)
+     {
+       /* The offset is out of the bounds of the base object.  */
+       const offset_int maxobjsize = tree_to_shwi (max_object_size ());
+       /* True when the offset formed by an access to the reference
+        is out of bounds, rather than the initial offset which is
+        in bounds.  This implies access past the end.  */
+       bool form = ooboff[0] != ref.offrange[0];
+       if (DECL_P (ref.base))
+       {
+         if ((ref.basesize < maxobjsize
+              && warning_at (loc, OPT_Warray_bounds,
+                             form
+                             ? G_("%G%qD forming offset %s is out of "
+                                  "the bounds [0, %wu] of object %qD with "
+                                  "type %qT")
+                             : G_("%G%qD offset %s is out of the bounds "
+                                  "[0, %wu] of object %qD with type %qT"),
+                             call, func, rangestr[0], ref.basesize.to_uhwi (),
+                             ref.base, TREE_TYPE (ref.base)))
+             || warning_at (loc, OPT_Warray_bounds,
+                            form
+                            ? G_("%G%qD forming offset %s is out of "
+                                 "the bounds of object %qD with type %qT")
+                            : G_("%G%qD offset %s is out of the bounds "
+                                 "of object %qD with type %qT"),
+                            call, func, rangestr[0],
+                            ref.base, TREE_TYPE (ref.base)))
+           inform (DECL_SOURCE_LOCATION (ref.base),
+                   "%qD declared here", ref.base);
+       }
+       else if (ref.basesize < maxobjsize)
+       warning_at (loc, OPT_Warray_bounds,
+                   form
+                   ? G_("%G%qD forming offset %s is out of the bounds "
+                        "[0, %wu]")
+                   : G_("%G%qD offset %s is out of the bounds [0, %wu]"),
+                   call, func, rangestr[0], ref.basesize.to_uhwi ());
+       else
+       warning_at (loc, OPT_Warray_bounds,
+                   form
+                   ? G_("%G%qD forming offset %s is out of bounds")
+                   : G_("%G%qD offset %s is out of bounds"),
+                   call, func, rangestr[0]);
+     }
+   else if (TREE_CODE (ref.ref) == MEM_REF)
+     {
+       tree type = TREE_TYPE (TREE_OPERAND (ref.ref, 0));
+       if (POINTER_TYPE_P (type))
+       type = TREE_TYPE (type);
+       type = TYPE_MAIN_VARIANT (type);
+       warning_at (loc, OPT_Warray_bounds,
+                 "%G%qD offset %s from the object at %qE is out "
+                 "of the bounds of %qT",
+                 call, func, rangestr[0], ref.base, type);
+     }
+   else
+     {
+       /* The offset is out of the bounds of a subobject.  */
+       type = TYPE_MAIN_VARIANT (TREE_TYPE (ref.ref));
+       warning_at (loc, OPT_Warray_bounds,
+               "%G%qD offset %s from the object at %qE is out "
+               "of the bounds of referenced subobject %qD with type %qT "
+               "at offset %wu",
+               call, func, rangestr[0], ref.base, TREE_OPERAND (ref.ref, 1),
+               type, ref.refoff.to_uhwi ());
+     }
+   return true;
+ }
+ /* Check a CALL statement for restrict-violations and issue warnings
+    if/when appropriate.  */
+ void
+ wrestrict_dom_walker::check_call (gcall *call)
+ {
+   /* Avoid checking the call if it has already been diagnosed for
+      some reason.  */
+   if (gimple_no_warning_p (call))
+     return;
+   tree func = gimple_call_fndecl (call);
+   if (!func || DECL_BUILT_IN_CLASS (func) != BUILT_IN_NORMAL)
+     return;
+   bool with_bounds = gimple_call_with_bounds_p (call);
+   /* Argument number to extract from the call (depends on the built-in
+      and its kind).  */
+   unsigned dst_idx = -1;
+   unsigned src_idx = -1;
+   unsigned bnd_idx = -1;
+   /* Is this CALL to a string function (as opposed to one to a raw
+      memory function).  */
+   bool strfun = true;
+   switch (DECL_FUNCTION_CODE (func))
+     {
+     case BUILT_IN_MEMCPY:
+     case BUILT_IN_MEMCPY_CHK:
+     case BUILT_IN_MEMCPY_CHKP:
+     case BUILT_IN_MEMCPY_CHK_CHKP:
+     case BUILT_IN_MEMPCPY:
+     case BUILT_IN_MEMPCPY_CHK:
+     case BUILT_IN_MEMPCPY_CHKP:
+     case BUILT_IN_MEMPCPY_CHK_CHKP:
+     case BUILT_IN_MEMMOVE:
+     case BUILT_IN_MEMMOVE_CHK:
+     case BUILT_IN_MEMMOVE_CHKP:
+     case BUILT_IN_MEMMOVE_CHK_CHKP:
+       strfun = false;
+       /* Fall through.  */
+     case BUILT_IN_STPNCPY:
+     case BUILT_IN_STPNCPY_CHK:
+     case BUILT_IN_STRNCAT:
+     case BUILT_IN_STRNCAT_CHK:
+     case BUILT_IN_STRNCPY:
+     case BUILT_IN_STRNCPY_CHK:
+       dst_idx = 0;
+       src_idx = 1 + with_bounds;
+       bnd_idx = 2 + 2 * with_bounds;
+       break;
+     case BUILT_IN_STPCPY:
+     case BUILT_IN_STPCPY_CHK:
+     case BUILT_IN_STPCPY_CHKP:
+     case BUILT_IN_STPCPY_CHK_CHKP:
+     case BUILT_IN_STRCPY:
+     case BUILT_IN_STRCPY_CHK:
+     case BUILT_IN_STRCPY_CHKP:
+     case BUILT_IN_STRCPY_CHK_CHKP:
+     case BUILT_IN_STRCAT:
+     case BUILT_IN_STRCAT_CHK:
+     case BUILT_IN_STRCAT_CHKP:
+     case BUILT_IN_STRCAT_CHK_CHKP:
+       dst_idx = 0;
+       src_idx = 1 + with_bounds;
+       break;
+     default:
+       /* Handle other string functions here whose access may need
+        to be validated for in-bounds offsets and non-overlapping
+        copies.  (Not all _chkp functions have BUILT_IN_XXX_CHKP
+        macros so they need to be handled here.)  */
+       return;
+     }
+   unsigned nargs = gimple_call_num_args (call);
+   tree dst = dst_idx < nargs ? gimple_call_arg (call, dst_idx) : NULL_TREE;
+   tree src = src_idx < nargs ? gimple_call_arg (call, src_idx) : NULL_TREE;
+   tree dstwr = bnd_idx < nargs ? gimple_call_arg (call, bnd_idx) : NULL_TREE;
+   /* For string functions with an unspecified or unknown bound,
+      assume the size of the access is one.  */
+   if (!dstwr && strfun)
+     dstwr = size_one_node;
+   /* DST and SRC can be null for a call with an insufficient number
+      of arguments to a built-in function declared without a protype.  */
+   if (!dst || !src)
+     return;
+   /* DST, SRC, or DSTWR can also have the wrong type in a call to
+      a function declared without a prototype.  Avoid checking such
+      invalid calls.  */
+   if (TREE_CODE (TREE_TYPE (dst)) != POINTER_TYPE
+       || TREE_CODE (TREE_TYPE (src)) != POINTER_TYPE
+       || (dstwr && !INTEGRAL_TYPE_P (TREE_TYPE (dstwr))))
+     return;
+   if (check_bounds_or_overlap (call, dst, src, dstwr, NULL_TREE))
+     return;
+   /* Avoid diagnosing the call again.  */
+   gimple_set_no_warning (call, true);
+ }
+ } /* anonymous namespace */
+ /* Attempt to detect and diagnose invalid offset bounds and (except for
+    memmove) overlapping copy in a call expression EXPR from SRC to DST
+    and DSTSIZE and SRCSIZE bytes, respectively.  Both DSTSIZE and
+    SRCSIZE may be NULL.  Return false when one or the other has been
+    detected and diagnosed, true otherwise.  */
+ bool
+ check_bounds_or_overlap (gcall *call, tree dst, tree src, tree dstsize,
+                        tree srcsize, bool bounds_only /* = false */)
+ {
+   location_t loc = gimple_location (call);
+   if (tree block = gimple_block (call))
+     if (location_t *pbloc = block_nonartificial_location (block))
+       loc = *pbloc;
+   loc = expansion_point_location_if_in_system_header (loc);
+   tree func = gimple_call_fndecl (call);
+   builtin_memref dstref (dst, dstsize);
+   builtin_memref srcref (src, srcsize);
+   builtin_access acs (call, dstref, srcref);
+   /* Set STRICT to the value of the -Warray-bounds=N argument for
+      string functions or when N > 1.  */
+   int strict = (acs.strict () || warn_array_bounds > 1 ? warn_array_bounds : 0);
+   /* Validate offsets first to make sure they are within the bounds
+      of the destination object if its size is known, or PTRDIFF_MAX
+      otherwise.  */
+   if (maybe_diag_offset_bounds (loc, call, func, strict, dst, dstref)
+       || maybe_diag_offset_bounds (loc, call, func, strict, src, srcref))
+     {
+       gimple_set_no_warning (call, true);
+       return false;
+     }
+   bool check_overlap
+     = (warn_restrict
+        && (bounds_only
+          || (DECL_FUNCTION_CODE (func) != BUILT_IN_MEMMOVE
+              && DECL_FUNCTION_CODE (func) != BUILT_IN_MEMMOVE_CHK)));
+   if (!check_overlap)
+     return true;
+   if (operand_equal_p (dst, src, 0))
+     {
+       warning_at (loc, OPT_Wrestrict,
+                 "%G%qD source argument is the same as destination",
+                 call, func);
+       gimple_set_no_warning (call, true);
+       return false;
+     }
+   /* Return false when overlap has been detected.  */
+   if (maybe_diag_overlap (loc, call, acs))
+     {
+       gimple_set_no_warning (call, true);
+       return false;
+     }
+   return true;
+ }
+ gimple_opt_pass *
+ make_pass_warn_restrict (gcc::context *ctxt)
+ {
+   return new pass_wrestrict (ctxt);
+ }
Simple merge
diff --cc gcc/ipa-cp.c
Simple merge
diff --cc gcc/ipa-prop.c
Simple merge
diff --cc gcc/passes.c
Simple merge
diff --cc gcc/range.c
index e17aa9327fd759387b346862c1ac380f211f9335,0000000000000000000000000000000000000000..2ae08d792df12c0d3c8b9d155df6b538da381f19
mode 100644,000000..100644
--- /dev/null
@@@ -1,1541 -1,0 +1,1541 @@@
-       wide_int lbound = fold_convert (const_cast<tree> (type),
-                                     TYPE_MIN_VALUE (new_type));
 +/* SSA range analysis implementation. -*- C++ -*-
 +   Copyright (C) 2017 Free Software Foundation, Inc.
 +   Contributed by Aldy Hernandez <aldyh@redhat.com>.
 +
 +This file is part of GCC.
 +
 +GCC is free software; you can redistribute it and/or modify it under
 +the terms of the GNU General Public License as published by the Free
 +Software Foundation; either version 3, or (at your option) any later
 +version.
 +
 +GCC is distributed in the hope that it will be useful, but WITHOUT ANY
 +WARRANTY; without even the implied warranty of MERCHANTABILITY or
 +FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 + for more details.
 +
 +You should have received a copy of the GNU General Public License
 +along with GCC; see the file COPYING3.  If not see
 +<http://www.gnu.org/licenses/>.  */
 +
 +#include "config.h"
 +#include "system.h"
 +#include "coretypes.h"
 +#include "backend.h"
 +#include "tree.h"
 +#include "gimple.h"
 +#include "gimple-pretty-print.h"
 +#include "fold-const.h"
 +#include "ssa.h"
 +#include "range.h"
 +#include "selftest.h"
 +
 +static bool
 +range_compatible_p (const_tree t1, const_tree t2)
 +{
 +  if (POINTER_TYPE_P (t1) && POINTER_TYPE_P (t2))
 +    return true;
 +  return types_compatible_p (const_cast <tree> (t1), const_cast <tree> (t2));
 +}
 +
 +/* Subtract 1 from X and set OVERFLOW if the operation overflows.  */
 +
 +static wide_int inline
 +subtract_one (const wide_int &x, const_tree type, bool &overflow)
 +{
 +  /* A signed 1-bit bit-field, has a range of [-1,0] so subtracting +1
 +     overflows, since +1 is unrepresentable.  This is why we have an
 +     addition of -1 here.  */
 +  if (TYPE_SIGN (type) == SIGNED)
 +    return wi::add (x, -1 , SIGNED, &overflow);
 +  else
 +    return wi::sub (x, 1, UNSIGNED, &overflow);
 +}
 +
 +/* Set range from a TYPE and some bounds (LBOUND and UBOUND).
 +
 +   RT is PLAIN if it is a normal range, or INVERSE if it is an inverse
 +   range.  */
 +
 +void
 +irange::set_range (const_tree typ, const wide_int &lbound,
 +                 const wide_int &ubound, kind rt)
 +{
 +  gcc_assert (INTEGRAL_TYPE_P (typ) || POINTER_TYPE_P (typ));
 +  gcc_assert (TYPE_PRECISION (typ) == lbound.get_precision ());
 +  gcc_assert (lbound.get_precision () == ubound.get_precision ());
 +  overflow = false;
 +  type = typ;
 +  gcc_assert (wi::le_p (lbound, ubound, TYPE_SIGN (type)));
 +  if (rt == INVERSE)
 +    {
 +      /* We calculate INVERSE([I,J]) as [-MIN, I-1][J+1, +MAX].  */
 +      bool ovf;
 +      nitems = 0;
 +      wide_int min = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
 +      wide_int max = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
 +
 +      /* If we will overflow, don't bother.  This will handle unsigned
 +       underflow which doesn't set the overflow bit.
 +
 +       Note: Perhaps all these &ovf checks are unecessary since we
 +       are manually checking for overflow with the if() below.  */
 +      if (lbound != min)
 +      {
 +        bounds[nitems++] = min;
 +        bounds[nitems++] = subtract_one (lbound, type, ovf);
 +        if (ovf)
 +          nitems = 0;
 +      }
 +      /* If we will overflow, don't bother.  This will handle unsigned
 +       overflow which doesn't set the overflow bit.  */
 +      if (ubound != max)
 +      {
 +        bounds[nitems++] = wi::add (ubound, 1, TYPE_SIGN (type), &ovf);
 +        if (ovf)
 +          nitems--;
 +        else
 +          bounds[nitems++] = max;
 +      }
 +
 +      /* If we get here with N==0, it means we tried to calculate the
 +       inverse of [-MIN, +MAX] which is actually the empty set, and
 +       N==0 maps nicely to the empty set :).  */
 +    }
 +  else
 +    {
 +      nitems = 2;
 +      bounds[0] = lbound;
 +      bounds[1] = ubound;
 +    }
 +  gcc_assert (!CHECKING_P || (valid_p () && !empty_p ()));
 +}
 +
 +/* Set range from type T and integer bounds X, Y.
 +
 +   RT is PLAIN if it is a normal range, or INVERSE if it is an inverse
 +   range.  */
 +
 +void
 +irange::set_range (const_tree t, int x, int y, kind rt)
 +{
 +  int precision = TYPE_PRECISION (t);
 +  wide_int xi, yi;
 +  if (TYPE_UNSIGNED (t))
 +    {
 +      xi = wi::uhwi (x, precision);
 +      yi = wi::uhwi (y, precision);
 +    }
 +  else
 +    {
 +      xi = wi::shwi (x, precision);
 +      yi = wi::shwi (y, precision);
 +    }
 +  set_range (t, xi, yi, rt);
 +}
 +
 +// Set range from an IRANGE_STORAGE and TYPE.
 +
 +void
 +irange::set_range (const irange_storage *storage, const_tree typ)
 +{
 +  overflow = false;
 +  type = typ;
 +  nitems = 0;
 +  unsigned i = 0;
 +  unsigned precision = wi::get_precision (storage->trailing_bounds[0]);
 +  gcc_assert (precision == TYPE_PRECISION (typ));
 +  while (i < max_pairs * 2)
 +    {
 +      wide_int lo = storage->trailing_bounds[i];
 +      wide_int hi = storage->trailing_bounds[i + 1];
 +      // A nonsensical sub-range of [1,0] marks the end of valid ranges.
 +      if (lo == wi::one (precision) && hi == wi::zero (precision))
 +      break;
 +      bounds[i] = lo;
 +      bounds[i + 1] = hi;
 +      i += 2;
 +    }
 +  nitems = i;
 +  gcc_assert (!CHECKING_P || valid_p ());
 +}
 +
 +/* Set range from an SSA_NAME's available range.  If there is no
 +   available range, build a range for its entire domain.  */
 +
 +void
 +irange::set_range (const_tree ssa)
 +{
 +  tree t = TREE_TYPE (ssa);
 +  gcc_assert (TREE_CODE (ssa) == SSA_NAME && (INTEGRAL_TYPE_P (t)
 +                                            || POINTER_TYPE_P (t)));
 +  if (!SSA_NAME_RANGE_INFO (ssa)
 +      /* Pointers do not have range info in SSA_NAME_RANGE_INFO, so
 +       just return range_for_type in this case.  */
 +      || POINTER_TYPE_P (t))
 +    {
 +      set_range_for_type (t);
 +      return;
 +    }
 +  irange_storage *storage = SSA_NAME_RANGE_INFO (ssa);
 +  set_range (storage, t);
 +}
 +
 +/* Set range from the full domain of type T.  */
 +
 +void
 +irange::set_range_for_type (const_tree t)
 +{
 +  gcc_assert (TYPE_P (t));
 +  gcc_assert (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t));
 +  wide_int min = wi::min_value (TYPE_PRECISION (t), TYPE_SIGN (t));
 +  wide_int max = wi::max_value (TYPE_PRECISION (t), TYPE_SIGN (t));
 +  set_range (t, min, max);
 +}
 +
 +irange::irange (const irange &r)
 +{
 +  type = r.type;
 +  overflow = false;
 +  nitems = r.nitems;
 +  for (unsigned i = 0; i < nitems; ++i)
 +    bounds[i] = r.bounds[i];
 +  gcc_assert (!CHECKING_P || valid_p ());
 +}
 +
 +bool
 +irange::operator== (const irange &r) const
 +{
 +  if (!range_compatible_p (type, r.type)
 +      || nitems != r.nitems || overflow != r.overflow)
 +    return false;
 +  for (unsigned i = 0; i < nitems; ++i)
 +    if (!wi::eq_p (bounds[i], r.bounds[i]))
 +      return false;
 +  return true;
 +}
 +
 +irange&
 +irange::operator= (const irange &r)
 +{
 +  type = r.type;
 +  nitems = r.nitems;
 +  overflow = r.overflow;
 +  for (unsigned i = 0; i < nitems; ++i)
 +    bounds[i] = r.bounds[i];
 +  return *this;
 +}
 +
 +
 +irange&
 +irange::operator= (const_tree t)
 +{
 +  set_range (t);
 +  return *this;
 +}
 +
 +// Return true if this range is the full range for it's type
 +
 +bool
 +irange::range_for_type_p () const
 +{
 +  irange tmp;
 +  tmp.set_range_for_type (type);
 +  return (*this == tmp);
 +}
 +
 +
 +bool
 +irange::valid_p () const
 +{
 +  if (type == NULL_TREE
 +      || nitems % 2
 +      || nitems > max_pairs * 2)
 +    return false;
 +
 +  /* An empty range is valid, as long as it has a type.  */
 +  if (!nitems)
 +    return true;
 +
 +  /* Check that the bounds are in the right order.
 +
 +     So for [a,b][c,d][e,f] we must have:
 +     a <= b < c <= d < e <= f.  */
 +  if (wi::gt_p (bounds[0], bounds[1], TYPE_SIGN (type)))
 +    return false;
 +  for (unsigned i = 2; i < nitems; i += 2)
 +    {
 +      if (wi::le_p (bounds[i], bounds[i-1], TYPE_SIGN (type)))
 +      return false;
 +      if (wi::gt_p (bounds[i], bounds[i+1], TYPE_SIGN (type)))
 +      return false;
 +    }
 +  return true;
 +}
 +
 +/* Convert the current range in place into a range of type NEW_TYPE.
 +   The type of the original range is changed to the new type.  */
 +
 +void
 +irange::cast (const_tree new_type)
 +{
 +  if (!nitems)
 +    {
 +      type = new_type;
 +      return;
 +    }
 +  bool sign_change = TYPE_SIGN (new_type) != TYPE_SIGN (type);
 +  unsigned new_precision = TYPE_PRECISION (new_type);
 +
 +  /* If nothing changed, this may be a useless type conversion between
 +     two variants of the same type.  */
 +  if (!sign_change && TYPE_PRECISION (type) == new_precision)
 +    {
 +      type = new_type;
 +      gcc_assert (!CHECKING_P || valid_p ());
 +      return;
 +    }
 +
 +  /* If any of the old bounds are outside of the representable range
 +     for the new type, conservatively default to the entire range of
 +     the new type.  */
 +  if (new_precision < TYPE_PRECISION (type))
 +    {
 +      /* NOTE: There are some const_cast<> sprinkled throughout
 +       because the fold_convert machinery is not properly
 +       constified.  */
 +      /* Get the extreme bounds for the new type, but within the old type,
 +       so we can properly compare them.  */
-       = fold_convert (const_cast <tree> (type),
-                       TYPE_MAX_VALUE (new_type));
++      wide_int lbound = wi::to_wide (fold_convert (const_cast<tree> (type),
++                                                 TYPE_MIN_VALUE (new_type)));
 +      wide_int ubound
-         bounds[i] = b0;
-         bounds[i + 1] = b1;
++      = wi::to_wide (fold_convert (const_cast <tree> (type),
++                                   TYPE_MAX_VALUE (new_type)));
 +
 +      if (wi::lt_p (bounds[0], lbound, TYPE_SIGN (type))
 +        || wi::gt_p (bounds[nitems - 1], ubound, TYPE_SIGN (type)))
 +      {
 +        bounds[0] = wide_int::from (lbound, new_precision,
 +                                    TYPE_SIGN (new_type));
 +        bounds[1] = wide_int::from (ubound, new_precision,
 +                                    TYPE_SIGN (new_type));
 +        type = new_type;
 +        nitems = 2;
 +        gcc_assert (!CHECKING_P || valid_p ());
 +        return;
 +      }
 +    }
 +
 +  wide_int orig_low = lower_bound ();
 +  wide_int orig_high = upper_bound ();
 +  wide_int min = wi::min_value (new_precision, TYPE_SIGN (new_type));
 +  wide_int max = wi::max_value (new_precision, TYPE_SIGN (new_type));
 +  for (unsigned i = 0; i < nitems; i += 2)
 +    {
 +      tree b0
 +      = fold_convert (const_cast<tree> (new_type),
 +                      wide_int_to_tree (const_cast<tree> (type),
 +                                        bounds[i]));
 +      tree b1
 +      = fold_convert (const_cast<tree> (new_type),
 +                      wide_int_to_tree (const_cast<tree> (type),
 +                                        bounds[i+1]));
 +      bool sbit0 = bounds[i].sign_mask () < 0;
 +      bool sbit1 = bounds[i + 1].sign_mask () < 0;
 +
 +      /* If we're not doing a sign change, or we are moving to a
 +       higher precision, we can just blindly chop off bits.  */
 +      if (!sign_change
 +        || (TYPE_UNSIGNED (type)
 +            && !TYPE_UNSIGNED (new_type)
 +            && new_precision > TYPE_PRECISION (type))
 +        || sbit0 == sbit1)
 +      {
-         if ((wide_int) b1 == max)
++        bounds[i] = wi::to_wide (b0);
++        bounds[i + 1] = wi::to_wide (b1);
 +      }
 +      else
 +      {
 +        /* If we're about to go over the maximum number of ranges
 +           allowed, convert to something conservative and cast
 +           again.  */
 +        if (nitems >= max_pairs * 2)
 +          {
 +            bounds[0] = orig_low;
 +            bounds[1] = orig_high;
 +            nitems = 2;
 +            cast (new_type);
 +            return;
 +          }
 +        /*  If we're about to construct [MIN, b1==MAX].  That's just
 +            the entire range.  */
-             bounds[i + 1] = b1;
-             bounds[nitems++] = b0;
++        if (wi::to_wide (b1) == max)
 +          {
 +            bounds[0] = min;
 +            bounds[1] = max;
 +            nitems = 2;
 +            type = new_type;
 +            gcc_assert (!CHECKING_P || valid_p ());
 +            return;
 +          }
 +        /* From no sign bit to sign bit: [15, 150]
 +           => [15,127][-128,-106].  */
 +        if (!sbit0 && sbit1)
 +          {
 +            bounds[i] = min;
-             bounds[i + 1] = b1;
-             bounds[nitems++] = b0;
++            bounds[i + 1] = wi::to_wide (b1);
++            bounds[nitems++] = wi::to_wide (b0);
 +            bounds[nitems++] = max;
 +          }
 +        /* From sign bit to no sign bit: [-5, 5]
 +           => [251,255][0,5].  */
 +        else
 +          {
 +            bounds[i] = min;
-   wide_int wi = t;
++            bounds[i + 1] = wi::to_wide (b1);
++            bounds[nitems++] = wi::to_wide (b0);
 +            bounds[nitems++] = max;
 +          }
 +      }
 +    }
 +  type = new_type;
 +  if (sign_change)
 +    canonicalize ();
 +  gcc_assert (!CHECKING_P || (valid_p () && !empty_p ()));
 +}
 +
 +// Return TRUE if the current range contains ELEMENT.
 +
 +bool
 +irange::contains_p (const wide_int &element) const
 +{
 +  for (unsigned i = 0; i < nitems; i += 2)
 +    if (wi::ge_p (element, bounds[i], TYPE_SIGN (type))
 +      && wi::le_p (element, bounds[i + 1], TYPE_SIGN (type)))
 +      return true;
 +  return false;
 +}
 +
 +// Like above, but ELEMENT can be an INTEGER_CST of any type.
 +
 +bool
 +irange::contains_p (const_tree element) const
 +{
 +  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (element)));
 +  tree t = fold_convert (const_cast <tree> (type),
 +                       const_cast <tree> (element));
 +  if (TREE_OVERFLOW (t))
 +    return false;
-   r1 = irange (integer_type_node, (wide_int) INT(5), (wide_int) INT(10));
++  wide_int wi = wi::to_wide (t);
 +  return contains_p (wi);
 +}
 +
 +// Like above, but element is an int.
 +
 +bool
 +irange::contains_p (int element) const
 +{
 +  if (TYPE_UNSIGNED (type))
 +    return contains_p (wi::uhwi (element, TYPE_PRECISION (type)));
 +  else
 +    return contains_p (wi::shwi (element, TYPE_PRECISION (type)));
 +}
 +
 +// Canonicalize the current range.
 +
 +void
 +irange::canonicalize ()
 +{
 +  if (nitems < 2)
 +    return;
 +
 +  /* Fix any out of order ranges: [10,20][-5,5] into [-5,5][10,20].  */
 +  for (unsigned i = 0; i < (unsigned) nitems; i += 2)
 +    for (unsigned j = i + 2; j < (unsigned) nitems; j += 2)
 +      if (wi::gt_p (bounds[i], bounds[j], TYPE_SIGN (type)))
 +      {
 +        wide_int t1 = bounds[i];
 +        wide_int t2 = bounds[i + 1];
 +        bounds[i] = bounds[j];
 +        bounds[i + 1] = bounds[j + 1];
 +        bounds[j] = t1;
 +        bounds[j + 1] = t2;
 +      }
 +
 +  /* Merge any edges that touch.
 +     [9,10][11,20] => [9,20].  */
 +  for (unsigned i = 1; i < (unsigned) (nitems - 2); i += 2)
 +    {
 +      bool ovf;
 +      wide_int x = wi::add (bounds[i], 1, TYPE_SIGN (type), &ovf);
 +      /* No need to check for overflow here for the +1, since the
 +       middle ranges cannot have MAXINT.  */
 +      if (x == bounds[i + 1])
 +      {
 +        bounds[i] = bounds[i + 2];
 +        remove (i + 1, i + 2);
 +      }
 +    }
 +  /* See note before for(;;).  */
 +  gcc_assert (!CHECKING_P || valid_p ());
 +}
 +
 +/* Insert [x,y] into position POS.  There must be enough space to hold
 +   the new sub-range, otherwise this function will abort.  */
 +
 +void
 +irange::insert (const wide_int &x, const wide_int &y, unsigned pos)
 +{
 +  /* Make sure it will fit.  */
 +  gcc_assert (nitems < max_pairs * 2);
 +  /* Make sure we're inserting into a sane position.  */
 +  gcc_assert (pos <= nitems && pos % 2 == 0);
 +
 +  if (pos == nitems)
 +    return append (x, y);
 +
 +  for (unsigned i = nitems; i > pos; i -= 2)
 +    {
 +      bounds[i] = bounds[i - 2];
 +      bounds[i + 1] = bounds[i - 1];
 +    }
 +  bounds[pos] = x;
 +  bounds[pos + 1] = y;
 +  nitems += 2;
 +  canonicalize ();
 +}
 +
 +// Prepend [X,Y] into THIS.
 +
 +void
 +irange::prepend (const wide_int &x, const wide_int &y)
 +{
 +  /* If we have enough space, shift everything to the right and
 +     prepend.  */
 +  if (nitems < max_pairs * 2)
 +    return insert (x, y, 0);
 +  /* Otherwise, merge it with the first entry.  */
 +  else
 +    bounds[0] = x;
 +  canonicalize ();
 +}
 +
 +// Place [X,Y] at the end of THIS.
 +
 +void
 +irange::append (const wide_int &x, const wide_int &y)
 +{
 +  /* If we have enough space, make space at the end and append.  */
 +  if (nitems < max_pairs * 2)
 +    {
 +      bounds[nitems++] = x;
 +      bounds[nitems++] = y;
 +    }
 +  /* Otherwise, merge it with the last entry.  */
 +  else
 +    bounds[nitems - 1] = y;
 +  canonicalize ();
 +}
 +
 +// Remove the bound entries from [i..j].
 +
 +void
 +irange::remove (unsigned i, unsigned j)
 +{
 +  gcc_assert (i < nitems && i < j);
 +  unsigned dst = i;
 +  unsigned ndeleted = j - i + 1;
 +  for (++j; j < nitems; ++j)
 +    bounds[dst++] = bounds[j];
 +  nitems -= ndeleted;
 +}
 +
 +// THIS = THIS U [X,Y]
 +
 +irange &
 +irange::union_ (const wide_int &x, const wide_int &y)
 +{
 +  if (empty_p ())
 +    {
 +      bounds[0] = x;
 +      bounds[1] = y;
 +      nitems = 2;
 +      gcc_assert (!CHECKING_P || valid_p ());
 +      return *this;
 +    }
 +
 +  /* If [X,Y] comes before, put it at the front.  */
 +  if (wi::lt_p (y, bounds[0], TYPE_SIGN (type)))
 +    {
 +      prepend (x, y);
 +      gcc_assert (!CHECKING_P || valid_p ());
 +      return *this;
 +    }
 +  /* If [X,Y] comes after, put it at the end.  */
 +  if (wi::gt_p (x, bounds[nitems - 1], TYPE_SIGN (type)))
 +    {
 +      append (x, y);
 +      gcc_assert (!CHECKING_P || valid_p ());
 +      return *this;
 +    }
 +  /* Handle [X,Y] swalling up all of THIS.  */
 +  if (wi::le_p (x, bounds[0], TYPE_SIGN (type))
 +      && wi::ge_p (y, bounds[nitems - 1], TYPE_SIGN (type)))
 +    {
 +      bounds[0] = x;
 +      bounds[1] = y;
 +      nitems = 2;
 +      gcc_assert (!CHECKING_P || valid_p ());
 +      return *this;
 +    }
 +  /* Handle X starting before, while Y is within.
 +                Y
 +     X[a,b][c,d][e,f][g,h][i,j]
 +     ==> [X,Y][g,h][i,j].  */
 +  if (wi::lt_p (x, bounds[0], TYPE_SIGN (type)))
 +    {
 +      bounds[0] = x;
 +
 +      /*    Y
 +       X[a,b]   => [X,b].  */
 +      if (nitems == 2)
 +      {
 +        gcc_assert (!CHECKING_P || valid_p ());
 +        return *this;
 +      }
 +
 +      for (unsigned i = 1; i < nitems; i += 2)
 +      if (wi::le_p (y, bounds[i], TYPE_SIGN (type)))
 +        {
 +          if (y == bounds[i])
 +            bounds[1] = y;
 +          else
 +            bounds[1] = bounds[i];
 +          if (i >= 2)
 +            remove (2, i);
 +          gcc_assert (!CHECKING_P || valid_p ());
 +          return *this;
 +        }
 +      gcc_unreachable ();
 +    }
 +  /* Handle Y being outside, while X is within.
 +                X           Y
 +     [a,b][c,d][e,f][g,h][i,j]
 +     ==> [a,b][c,d][e,Y].  */
 +  if (wi::gt_p (y, bounds[nitems - 1], TYPE_SIGN (type)))
 +    {
 +      for (unsigned i = 0; i < nitems; i += 2)
 +      if (wi::ge_p (bounds[i + 1], x, TYPE_SIGN (type)))
 +        {
 +          bounds[i + 1] = y;
 +          nitems = i + 2;
 +          return *this;
 +        }
 +      gcc_unreachable ();
 +    }
 +
 +  /* At this point, [X,Y] must be completely inside.
 +      X           Y
 +     [a,b][c,d][e,f][g,h].  */
 +  gcc_assert (wi::ge_p (x, bounds[0], TYPE_SIGN (type))
 +            && wi::le_p (y, bounds[nitems - 1], TYPE_SIGN (type)));
 +
 +  /* Find X.  */
 +  gcc_assert (nitems >= 2);
 +  unsigned xpos = ~0U;
 +  unsigned i = nitems;
 +  do
 +    {
 +      i -= 2;
 +      if (wi::ge_p (x, bounds[i], TYPE_SIGN (type)))
 +      {
 +        xpos = i;
 +        break;
 +      }
 +    }
 +  while (i);
 +  gcc_assert (xpos != ~0U);
 +
 +  /* Handle [X,Y] fitting between two sub-ranges:
 +
 +     [a,b][X,Y][b,c].  */
 +  if (nitems < max_pairs * 2
 +      && wi::gt_p (x, bounds[xpos + 1], TYPE_SIGN (type))
 +      && wi::lt_p (y, bounds[xpos + 2], TYPE_SIGN (type)))
 +    {
 +      insert (x, y, xpos + 2);
 +      gcc_assert (!CHECKING_P || valid_p ());
 +      return *this;
 +    }
 +
 +  /* Find Y.  */
 +  unsigned ypos = ~0U;
 +  for (i = 1; i < nitems; i += 2)
 +    if (wi::le_p (y, bounds[i], TYPE_SIGN (type)))
 +      {
 +      ypos = i;
 +      break;
 +      }
 +  gcc_assert (ypos != ~0U);
 +
 +  /* If [x,y] is inside of subrange [xpos,ypos], there's nothing to do.  */
 +  if (xpos + 1 == ypos)
 +    {
 +      gcc_assert (!CHECKING_P || valid_p ());
 +      return *this;
 +    }
 +
 +  /* Squash the sub-ranges in between xpos and ypos.  */
 +  wide_int tmp = bounds[ypos];
 +  remove (xpos + 2, ypos);
 +  bounds[xpos + 1] = tmp;
 +
 +  gcc_assert (!CHECKING_P || valid_p ());
 +  return *this;
 +}
 +
 +// THIS = THIS U R
 +
 +irange &
 +irange::union_ (const irange &r)
 +{
 +  gcc_assert (range_compatible_p (type, r.type));
 +
 +  if (empty_p ())
 +    {
 +      *this = r;
 +      return *this;
 +    }
 +  else if (r.empty_p ())
 +    return *this;
 +
 +  /* FIXME: It would be nice to look at both THIS and R as a whole and
 +     optimize the case where they don't overlap and be easily appended
 +     or prepended.  That is, do the calculation in this function
 +     instead of doing it piecemeal below.
 +
 +     For example: [8,10][14,14] U [135,255].  */
 +  for (unsigned i = 0; i < r.nitems; i += 2)
 +    union_ (r.bounds[i], r.bounds[i + 1]);
 +
 +  /* There is no valid_p() check here because the calls to union_
 +     above would have called valid_p().  */
 +
 +  overflow |= r.overflow;
 +  return *this;
 +}
 +
 +// THIS = THIS ^ [X,Y].
 +
 +irange &
 +irange::intersect (const wide_int &x, const wide_int &y)
 +{
 +  unsigned pos = 0;
 +
 +  for (unsigned i = 0; i < nitems; i += 2)
 +    {
 +      wide_int newlo = wi::max (bounds[i], x, TYPE_SIGN (type));
 +      wide_int newhi = wi::min (bounds[i + 1], y, TYPE_SIGN (type));
 +      if (wi::gt_p (newlo, newhi, TYPE_SIGN (type)))
 +      {
 +        /* If the new sub-range doesn't make sense, it's an
 +           impossible range and must be kept out of the result.  */
 +      }
 +      else
 +      {
 +        bounds[pos++] = newlo;
 +        bounds[pos++] = newhi;
 +      }
 +    }
 +  nitems = pos;
 +  gcc_assert (!CHECKING_P || valid_p ());
 +  return *this;
 +}
 +
 +// THIS = THIS ^ R.
 +
 +irange &
 +irange::intersect (const irange &r)
 +{
 +  gcc_assert (range_compatible_p (type, r.type));
 +  irange orig_range (*this);
 +
 +  /* Intersection with an empty range is an empty range.  */
 +  clear ();
 +  if (orig_range.empty_p () || r.empty_p ())
 +    return *this;
 +
 +  /* The general algorithm is as follows.
 +
 +     Intersect each sub-range of R with all of ORIG_RANGE one at a time, and
 +     join/union the results of these intersections together.  I.e:
 +
 +     [10,20][30,40][50,60] ^ [15,25][38,51][55,70]
 +
 +     Step 1: [10,20][30,40][50,60] ^ [15,25] => [15,20]
 +     Step 2: [10,20][30,40][50,60] ^ [38,51] => [38,40]
 +     Step 3: [10,20][30,40][50,60] ^ [55,70] => [55,60]
 +     Final:  [15,20] U [38,40] U [55,60] => [15,20][38,40][55,60]
 +
 +     ?? We should probably stop making a copy of ORIG_RANGE at every step.  */
 +  for (unsigned i = 0; i < r.nitems; i += 2)
 +    union_ (irange (orig_range).intersect (r.bounds[i], r.bounds[i + 1]));
 +
 +  /* Overflow is sticky only if both ranges overflowed.  */
 +  overflow = (orig_range.overflow && r.overflow);
 +  /* There is no valid_p() check here because the calls to union_
 +     above would have called valid_p().  */
 +  return *this;
 +}
 +
 +// Set THIS to the inverse of its range.
 +
 +irange &
 +irange::invert ()
 +{
 +  /* We always need one more set of bounds to represent an inverse, so
 +     if we're at the limit, we can't properly represent things.
 +
 +     For instance, to represent the inverse of a 2 sub-range set
 +     [5, 10][20, 30], we would need a 3 sub-range set
 +     [-MIN, 4][11, 19][31, MAX].
 +
 +     In this case, return the most conservative thing.
 +
 +     However, if any of the extremes of the range are -MIN/+MAX, we
 +     know we will not need an extra bound.  For example:
 +
 +      INVERT([-MIN,20][30,40]) => [21,29][41,+MAX]
 +      INVERT([-MIN,20][30,MAX]) => [21,29]
 +  */
 +  wide_int min = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
 +  wide_int max = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
 +  if (nitems == max_pairs * 2
 +      && bounds[0] != min
 +      && bounds[nitems] != max)
 +    {
 +      bounds[1] = max;
 +      nitems = 2;
 +      return *this;
 +    }
 +
 +  /* The inverse of the empty set is the entire domain.  */
 +  if (empty_p ())
 +    {
 +      set_range_for_type (type);
 +      return *this;
 +    }
 +
 +  /* The algorithm is as follows.  To calculate INVERT ([a,b][c,d]), we
 +     generate [-MIN, a-1][b+1, c-1][d+1, MAX].
 +
 +     If there is an over/underflow in the calculation for any
 +     sub-range, we eliminate that subrange.  This allows us to easily
 +     calculate INVERT([-MIN, 5]) with: [-MIN, -MIN-1][6, MAX].  And since
 +     we eliminate the underflow, only [6, MAX] remains.  */
 +
 +  unsigned i = 0;
 +  bool ovf;
 +
 +  /* Construct leftmost range.  */
 +  irange orig_range (*this);
 +  nitems = 0;
 +  /* If this is going to underflow on the MINUS 1, don't even bother
 +     checking.  This also handles subtracting one from an unsigned 0,
 +     which doesn't set the underflow bit.  */
 +  if (min != orig_range.bounds[i])
 +    {
 +      bounds[nitems++] = min;
 +      bounds[nitems++] = subtract_one (orig_range.bounds[i], type, ovf);
 +      if (ovf)
 +      nitems = 0;
 +    }
 +  i++;
 +  /* Construct middle ranges if applicable.  */
 +  if (orig_range.nitems > 2)
 +    {
 +      unsigned j = i;
 +      for (; j < (unsigned) (orig_range.nitems - 2); j += 2)
 +      {
 +        /* The middle ranges cannot have MAX/MIN, so there's no need
 +           to check for unsigned overflow on the +1 and -1 here.  */
 +        bounds[nitems++]
 +          = wi::add (orig_range.bounds[j], 1, TYPE_SIGN (type), &ovf);
 +        bounds[nitems++]
 +          = subtract_one (orig_range.bounds[j + 1], type, ovf);
 +        if (ovf)
 +          nitems -= 2;
 +      }
 +      i = j;
 +    }
 +  /* Construct rightmost range.
 +
 +     However, if this will overflow on the PLUS 1, don't even bother.
 +     This also handles adding one to an unsigned MAX, which doesn't
 +     set the overflow bit.  */
 +  if (max != orig_range.bounds[i])
 +    {
 +      bounds[nitems++]
 +      = wi::add (orig_range.bounds[i], 1, TYPE_SIGN (type), &ovf);
 +      bounds[nitems++] = max;
 +      if (ovf)
 +      nitems -= 2;
 +    }
 +
 +  gcc_assert (!CHECKING_P || valid_p ());
 +  return *this;
 +}
 +
 +/* Returns the upper bound of PAIR.  */
 +
 +wide_int
 +irange::upper_bound (unsigned pair) const
 +{
 +  gcc_assert (nitems != 0 && pair <= num_pairs ());
 +  return bounds[pair * 2 + 1];
 +}
 +
 +/* Dump the current range onto BUFFER.  */
 +
 +void
 +irange::dump (pretty_printer *buffer) const
 +{
 +  if (POINTER_TYPE_P (type) && non_zero_p ())
 +    pp_string (buffer, "[ non-zero pointer ]");
 +  else
 +    for (unsigned i = 0; i < nitems; ++i)
 +      {
 +      if (i % 2 == 0)
 +        pp_character (buffer, '[');
 +
 +      /* Wide ints may be sign extended to the full extent of the
 +         underlying HWI storage, even if the precision we care about
 +         is smaller.  Chop off the excess bits for prettier output.  */
 +      signop sign = TYPE_UNSIGNED (type) ? UNSIGNED : SIGNED;
 +      widest_int val = widest_int::from (bounds[i], sign);
 +      val &= wi::mask<widest_int> (bounds[i].get_precision (), false);
 +
 +      if (val > 0xffff)
 +        print_hex (val, pp_buffer (buffer)->digit_buffer);
 +      else
 +        print_dec (bounds[i], pp_buffer (buffer)->digit_buffer, sign);
 +      pp_string (buffer, pp_buffer (buffer)->digit_buffer);
 +      if (i % 2 == 0)
 +        pp_string (buffer, ", ");
 +      else
 +        pp_character (buffer, ']');
 +      }
 +  if (!nitems)
 +    pp_string (buffer, "[]");
 +
 +  pp_character (buffer, ' ');
 +  dump_generic_node (buffer, const_cast <tree> (type), 0, 0, false);
 +  if (overflow)
 +    pp_string (buffer, " (overflow)");
 +  pp_newline_and_flush (buffer);
 +}
 +
 +/* Dump the current range onto FILE F.  */
 +
 +void
 +irange::dump (FILE *f) const
 +{
 +  pretty_printer buffer;
 +  buffer.buffer->stream = f;
 +  dump (&buffer);
 +}
 +
 +/* Like above but dump to STDERR.
 +
 +   ?? You'd think we could have a default parameter for dump(FILE),
 +   but gdb currently doesn't do default parameters gracefully-- or at
 +   all, and since this is a function we need to be callable from the
 +   debugger... */
 +
 +void
 +irange::dump () const
 +{
 +  dump (stderr);
 +}
 +
 +/* Initialize the current irange_storage to the irange in IR.  */
 +
 +void
 +irange_storage::set_irange (const irange &ir)
 +{
 +  unsigned precision = TYPE_PRECISION (ir.get_type ());
 +  trailing_bounds.set_precision (precision);
 +  unsigned i;
 +  for (i = 0; i < ir.num_pairs () * 2; ++i)
 +    trailing_bounds[i] = ir.bounds[i];
 +
 +  /* Build nonsensical [1,0] pairs for the remaining empty ranges.
 +     These will be recognized as empty when we read the structure
 +     back.  */
 +  for (; i < irange::max_pairs * 2; i += 2)
 +    {
 +      trailing_bounds[i] = wi::one (precision);
 +      trailing_bounds[i + 1] = wi::zero (precision);
 +    }
 +}
 +
 +bool
 +make_irange (irange *result, const_tree lb, const_tree ub, const_tree type)
 +{
 +  irange r (TREE_TYPE (lb), lb, ub);
 +  *result = r;
 +  if (result->valid_p () && !result->empty_p ())
 +    {
 +      if (type)
 +        result->cast (type);
 +      return true;
 +    }
 +  return false;
 +}
 +
 +bool
 +make_irange_not (irange *result, const_tree not_exp, const_tree type)
 +{
 +  irange r (TREE_TYPE (not_exp), not_exp, not_exp, irange::INVERSE);
 +  *result = r;
 +  if (result->valid_p () && !result->empty_p ())
 +    {
 +      if (type)
 +        result->cast (type);
 +      return true;
 +    }
 +  return false;
 +}
 +
 +void
 +range_one (irange *r, tree type)
 +{
 +  tree one = build_int_cst (type, 1);
 +  r->set_range (type, one, one);
 +}
 +
 +void
 +range_zero (irange *r, tree type)
 +{
 +  tree zero = build_int_cst (type, 0);
 +  r->set_range (type, zero, zero);
 +}
 +
 +bool
 +range_non_zero (irange *r, tree type)
 +{
 +  tree zero = build_int_cst (type, 0);
 +  return make_irange_not (r, zero, type);
 +}
 +
 +/* Set the range of R to the set of positive numbers starting at START.  */
 +
 +void
 +range_positives (irange *r, tree type, unsigned int start)
 +{
 +  r->set_range (type, build_int_cst (type, start), TYPE_MAX_VALUE (type));
 +}
 +
/* GCC defines CHECKING_P unconditionally (as 0 or 1), so guard with
   #if, not #ifdef -- otherwise the selftests are compiled into
   release builds too.  */
#if CHECKING_P
namespace selftest {

/* Helpers to build INTEGER_CST trees of the various standard types.
   Note INT64 uses the signed builder; long_long_integer_type_node is
   a signed type.  */
#define INT(N) build_int_cst (integer_type_node, (N))
#define UINT(N) build_int_cstu (unsigned_type_node, (N))
#define INT16(N) build_int_cst (short_integer_type_node, (N))
#define UINT16(N) build_int_cstu (short_unsigned_type_node, (N))
#define INT64(N) build_int_cst (long_long_integer_type_node, (N))
#define UINT64(N) build_int_cstu (long_long_unsigned_type_node, (N))
#define UINT128(N) build_int_cstu (u128_type, (N))
#define UCHAR(N) build_int_cstu (unsigned_char_type_node, (N))
#define SCHAR(N) build_int_cst (signed_char_type_node, (N))

/* Build the 3 sub-range set [A,B][C,D][E,F] into i1 (clobbering i1,
   i2 and i3), and yield i1.  */
#define RANGE3(A,B,C,D,E,F)				\
( i1 = irange (integer_type_node, INT (A), INT (B)),	\
  i2 = irange (integer_type_node, INT (C), INT (D)),	\
  i3 = irange (integer_type_node, INT (E), INT (F)),	\
  i1.union_ (i2),					\
  i1.union_ (i3),					\
  i1 )

// Run all of the selftests within this file.

void
irange_tests ()
{
  tree u128_type = build_nonstandard_integer_type (128, /*unsigned=*/1);
  irange i1, i2, i3;
  irange r0, r1, rold;
  ASSERT_FALSE (r0.valid_p ());

  /* Test that NOT(255) is [0..254] in 8-bit land.  */
  irange not_255;
  make_irange_not (&not_255, UCHAR(255), unsigned_char_type_node);
  ASSERT_TRUE (not_255 == irange (unsigned_char_type_node, 0, 254));

  /* Test that NOT(0) is [1..255] in 8-bit land.  */
  irange not_zero;
  range_non_zero (&not_zero, unsigned_char_type_node);
  ASSERT_TRUE (not_zero == irange (unsigned_char_type_node, 1, 255));

  /* Check that [0,127][0x..ffffff80,0x..ffffff]
     => ~[128, 0x..ffffff7f].  */
  r0 = irange (u128_type, 0, 127);
  tree high = build_minus_one_cst (u128_type);
  /* low = -1 - 127 => 0x..ffffff80.  */
  tree low = fold_build2 (MINUS_EXPR, u128_type, high, UINT128(127));
  r1 = irange (u128_type, low, high); // [0x..ffffff80, 0x..ffffffff]
  /* r0 = [0,127][0x..ffffff80,0x..fffffff].  */
  r0.union_ (r1);
  /* r1 = [128, 0x..ffffff7f].  */
  r1 = irange (u128_type,
	       UINT128(128),
	       fold_build2 (MINUS_EXPR, u128_type,
			    build_minus_one_cst (u128_type),
			    UINT128(128)));
  r0.invert ();
  ASSERT_TRUE (r0 == r1);

  /* Remember the extremes of the standard types for use below.  */
  r0.set_range_for_type (integer_type_node);
  tree minint = wide_int_to_tree (integer_type_node, r0.lower_bound ());
  tree maxint = wide_int_to_tree (integer_type_node, r0.upper_bound ());

  r0.set_range_for_type (short_integer_type_node);
  tree minshort = wide_int_to_tree (short_integer_type_node, r0.lower_bound ());
  tree maxshort = wide_int_to_tree (short_integer_type_node, r0.upper_bound ());

  r0.set_range_for_type (unsigned_type_node);
  tree maxuint = wide_int_to_tree (unsigned_type_node, r0.upper_bound ());

  /* Check that ~[0,5] => [6,MAX] for unsigned int.  */
  r0 = irange (unsigned_type_node, 0, 5);
  r0.invert ();
  ASSERT_TRUE (r0 == irange (unsigned_type_node, UINT(6), maxuint));

  /* Check that ~[10,MAX] => [0,9] for unsigned int.  */
  r0 = irange (unsigned_type_node, UINT(10), maxuint, irange::PLAIN);
  r0.invert ();
  ASSERT_TRUE (r0 == irange (unsigned_type_node, 0, 9));

  /* Check that ~[0,5] => [6,MAX] for unsigned 128-bit numbers.  */
  r0.set_range (u128_type, 0, 5, irange::INVERSE);
  r1 = irange (u128_type, UINT128(6), build_minus_one_cst (u128_type));
  ASSERT_TRUE (r0 == r1);

  /* Check that [~5] is really [-MIN,4][6,MAX].  */
  r0.set_range (integer_type_node, 5, 5, irange::INVERSE);
  r1 = irange (integer_type_node, minint, INT(4));
  ASSERT_FALSE (r1.union_ (irange (integer_type_node,
				   INT(6), maxint)).empty_p ());

  ASSERT_TRUE (r0 == r1);

  r1.set_range (integer_type_node, 5, 5);
  ASSERT_TRUE (r1.valid_p ());
  irange r2 (r1);
  ASSERT_TRUE (r1 == r2);

  r1 = irange (integer_type_node, 5, 10);
  ASSERT_TRUE (r1.valid_p ());

  r1 = irange (integer_type_node,
	       wi::to_wide (INT(5)), wi::to_wide (INT(10)));
  ASSERT_TRUE (r1.valid_p ());
  ASSERT_TRUE (r1.contains_p (INT (7)));
  ASSERT_TRUE (r1.contains_p (7));

  r1 = irange (signed_char_type_node, 0, 20);
  ASSERT_TRUE (r1.contains_p (INT(15)));
  ASSERT_FALSE (r1.contains_p (INT(300)));

  /* If a range is in any way outside of the range for the converted
     to range, default to the range for the new type.  */
  r1 = irange (integer_type_node, integer_zero_node, maxint);
  r1.cast (short_integer_type_node);
  ASSERT_TRUE (r1.lower_bound () == wi::to_wide (minshort)
	       && r1.upper_bound() == wi::to_wide (maxshort));

  /* (unsigned char)[-5,-1] => [251,255].  */
  r0 = rold = irange (signed_char_type_node, -5, -1);
  r0.cast (unsigned_char_type_node);
  ASSERT_TRUE (r0 == irange (unsigned_char_type_node, 251, 255));
  r0.cast (signed_char_type_node);
  ASSERT_TRUE (r0 == rold);

  /* (signed char)[15, 150] => [-128,-106][15,127].  */
  r0 = rold = irange (unsigned_char_type_node, 15, 150);
  r0.cast (signed_char_type_node);
  r1 = irange (signed_char_type_node, 15, 127);
  r2 = irange (signed_char_type_node, -128, -106);
  r1.union_ (r2);
  ASSERT_TRUE (r1 == r0);
  r0.cast (unsigned_char_type_node);
  ASSERT_TRUE (r0 == rold);

  /* (unsigned char)[-5, 5] => [0,5][251,255].  */
  r0 = rold = irange (signed_char_type_node, -5, 5);
  r0.cast (unsigned_char_type_node);
  r1 = irange (unsigned_char_type_node, 251, 255);
  r2 = irange (unsigned_char_type_node, 0, 5);
  r1.union_ (r2);
  ASSERT_TRUE (r0 == r1);
  r0.cast (signed_char_type_node);
  ASSERT_TRUE (r0 == rold);

  /* (unsigned char)[-5,5] => [0,255].  */
  r0 = irange (integer_type_node, -5, 5);
  r0.cast (unsigned_char_type_node);
  r1 = irange (unsigned_char_type_node,
	       TYPE_MIN_VALUE (unsigned_char_type_node),
	       TYPE_MAX_VALUE (unsigned_char_type_node));
  ASSERT_TRUE (r0 == r1);

  /* (unsigned char)[5U,1974U] => [0,255].  */
  r0 = irange (unsigned_type_node, 5, 1974);
  r0.cast (unsigned_char_type_node);
  ASSERT_TRUE (r0 == irange (unsigned_char_type_node, 0, 255));
  r0.cast (integer_type_node);
  /* Going to a wider range should not sign extend.  */
  ASSERT_TRUE (r0 == irange (integer_type_node, 0, 255));

  /* (unsigned char)[-350,15] => [0,255].  */
  r0 = irange (integer_type_node, -350, 15);
  r0.cast (unsigned_char_type_node);
  ASSERT_TRUE (r0 == irange (unsigned_char_type_node,
			     TYPE_MIN_VALUE (unsigned_char_type_node),
			     TYPE_MAX_VALUE (unsigned_char_type_node)));

  /* Casting [-120,20] from signed char to unsigned short.
	(unsigned)[(signed char)-120, (signed char)20]
     => (unsigned)[0, 0x14][0x88, 0xff]
     => [0,0x14][0xff88,0xffff].  */
  r0 = irange (signed_char_type_node, -120, 20);
  r0.cast (short_unsigned_type_node);
  r1 = irange (short_unsigned_type_node, 0, 0x14);
  r2 = irange (short_unsigned_type_node, 0xff88, 0xffff);
  r1.union_ (r2);
  ASSERT_TRUE (r0 == r1);
  /* Casting back to signed char (a smaller type), would be outside of
     the range, so it'll be the entire range of the signed char.  */
  r0.cast (signed_char_type_node);
  ASSERT_TRUE (r0 == irange (signed_char_type_node,
			     TYPE_MIN_VALUE (signed_char_type_node),
			     TYPE_MAX_VALUE (signed_char_type_node)));

  /* unsigned char -> signed short
	(signed short)[(unsigned char)25, (unsigned char)250]
     => [(signed short)25, (signed short)250].  */
  r0 = rold = irange (unsigned_char_type_node, 25, 250);
  r0.cast (short_integer_type_node);
  r1 = irange (short_integer_type_node, 25, 250);
  ASSERT_TRUE (r0 == r1);
  r0.cast (unsigned_char_type_node);
  ASSERT_TRUE (r0 == rold);

  /* Test casting a wider signed [-MIN,MAX] to a narrower unsigned.  */
  r0 = irange (long_long_integer_type_node,
	       TYPE_MIN_VALUE (long_long_integer_type_node),
	       TYPE_MAX_VALUE (long_long_integer_type_node));
  r0.cast (short_unsigned_type_node);
  r1 = irange (short_unsigned_type_node,
	       TYPE_MIN_VALUE (short_unsigned_type_node),
	       TYPE_MAX_VALUE (short_unsigned_type_node));
  ASSERT_TRUE (r0 == r1);

  /* Test that casting a range with MAX_PAIRS that changes sign is
     done conservatively.

	(unsigned short)[-5,5][20,30][40,50]...
     => (unsigned short)[-5,50]
     => [0,50][65531,65535].  */
  r0 = irange (short_integer_type_node, -5, 5);
  gcc_assert (r0.max_pairs * 2 * 10 + 10 < 32767);
  unsigned i;
  for (i = 2; i < r0.max_pairs * 2; i += 2)
    {
      r1 = irange (short_integer_type_node, i * 10, i * 10 + 10);
      r0.union_ (r1);
    }
  r0.cast (short_unsigned_type_node);
  r1 = irange (short_unsigned_type_node, 0, (i - 2) * 10 + 10);
  r2 = irange (short_unsigned_type_node, 65531, 65535);
  r1.union_ (r2);
  ASSERT_TRUE (r0 == r1);

  /* NOT([10,20]) ==> [-MIN,9][21,MAX].  */
  r0 = r1 = irange (integer_type_node, 10, 20);
  r2 = irange (integer_type_node, minint, INT(9));
  ASSERT_FALSE (r2.union_ (irange (integer_type_node,
				   INT(21), maxint)).empty_p ());
  r1.invert ();
  ASSERT_TRUE (r1 == r2);
  /* Test that NOT(NOT(x)) == x.  */
  r2.invert ();
  ASSERT_TRUE (r0 == r2);

  /* NOT(-MIN,+MAX) is the empty set and should return false.  */
  r0 = irange (integer_type_node, minint, maxint);
  ASSERT_TRUE (r0.invert ().empty_p ());
  r1.clear ();
  ASSERT_TRUE (r0 == r1);

  /* Test that booleans and their inverse work as expected.  */
  range_zero (&r0, boolean_type_node);
  ASSERT_TRUE (r0 == irange (boolean_type_node, 0, 0));
  r0.invert();
  ASSERT_TRUE (r0 == irange (boolean_type_node, 1, 1));

  /* Casting NONZERO to a narrower type will wrap/overflow so
     it's just the entire range for the narrower type.

     "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32].  This is
     is outside of the range of a smaller range, return the full
     smaller range.  */
  range_non_zero (&r0, integer_type_node);
  r0.cast (short_integer_type_node);
  r1 = irange (short_integer_type_node,
	       TYPE_MIN_VALUE (short_integer_type_node),
	       TYPE_MAX_VALUE (short_integer_type_node));
  ASSERT_TRUE (r0 == r1);

  /* Casting NONZERO from a narrower signed to a wider signed.

     NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
     Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].  */
  range_non_zero (&r0, short_integer_type_node);
  r0.cast (integer_type_node);
  r1 = irange (integer_type_node, -32768, -1);
  r2 = irange (integer_type_node, 1, 32767);
  r1.union_ (r2);
  ASSERT_TRUE (r0 == r1);

  if (irange::max_pairs > 2)
    {
      /* ([10,20] U [5,8]) U [1,3] ==> [1,3][5,8][10,20].  */
      r0 = irange (integer_type_node, 10, 20);
      r1 = irange (integer_type_node, 5, 8);
      r0.union_ (r1);
      r1 = irange (integer_type_node, 1, 3);
      r0.union_ (r1);
      ASSERT_TRUE (r0 == RANGE3 (1, 3, 5, 8, 10, 20));

      /* [1,3][5,8][10,20] U [-5,0] => [-5,3][5,8][10,20].  */
      r1 = irange (integer_type_node, -5, 0);
      r0.union_ (r1);
      ASSERT_TRUE (r0 == RANGE3 (-5, 3, 5, 8, 10, 20));
    }

  /* [10,20] U [30,40] ==> [10,20][30,40].  */
  r0 = irange (integer_type_node, 10, 20);
  r1 = irange (integer_type_node, 30, 40);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == irange_union (irange (integer_type_node, 10, 20),
				   irange (integer_type_node, 30, 40)));
  if (irange::max_pairs > 2)
    {
      /* [10,20][30,40] U [50,60] ==> [10,20][30,40][50,60].  */
      r1 = irange (integer_type_node, 50, 60);
      r0.union_ (r1);
      ASSERT_TRUE (r0 == RANGE3 (10, 20, 30, 40, 50, 60));
      /* [10,20][30,40][50,60] U [70, 80] ==> [10,20][30,40][50,60][70,80].  */
      r1 = irange (integer_type_node, 70, 80);
      r0.union_ (r1);

      r2 = RANGE3 (10, 20, 30, 40, 50, 60);
      r2.union_ (irange (integer_type_node, 70, 80));
      ASSERT_TRUE (r0 == r2);
    }

  /* Make sure NULL and non-NULL of pointer types work, and that
     inverses of them are consistent.  */
  tree voidp = build_pointer_type (void_type_node);
  range_zero (&r0, voidp);
  r1 = r0;
  r0.invert ();
  r0.invert ();
  ASSERT_TRUE (r0 == r1);

  if (irange::max_pairs > 2)
    {
      /* [10,20][30,40][50,60] U [6,35] => [6,40][50,60].  */
      r0 = RANGE3 (10, 20, 30, 40, 50, 60);
      r1 = irange (integer_type_node, 6, 35);
      r0.union_ (r1);
      ASSERT_TRUE (r0 == irange_union (irange (integer_type_node, 6, 40),
				       irange (integer_type_node, 50, 60)));

      /* [10,20][30,40][50,60] U [6,60] => [6,60] */
      r0 = RANGE3 (10, 20, 30, 40, 50, 60);
      r1 = irange (integer_type_node, 6, 60);
      r0.union_ (r1);
      ASSERT_TRUE (r0 == irange (integer_type_node, 6, 60));

      /* [10,20][30,40][50,60] U [6,70] => [6,70].  */
      r0 = RANGE3 (10, 20, 30, 40, 50, 60);
      r1 = irange (integer_type_node, 6, 70);
      r0.union_ (r1);
      ASSERT_TRUE (r0 == irange (integer_type_node, 6, 70));

      /* [10,20][30,40][50,60] U [35,70] => [10,20][30,70].  */
      r0 = RANGE3 (10, 20, 30, 40, 50, 60);
      r1 = irange (integer_type_node, 35, 70);
      r0.union_ (r1);
      ASSERT_TRUE (r0 == irange_union (irange (integer_type_node, 10, 20),
				       irange (integer_type_node, 30, 70)));
    }

  /* [10,20][30,40] U [25,70] => [10,70].  */
  r0 = irange_union (irange (integer_type_node, 10, 20),
		     irange (integer_type_node, 30, 40));
  r1 = irange (integer_type_node, 25, 70);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == irange_union (irange (integer_type_node, 10, 20),
				   irange (integer_type_node, 30, 70)));

  if (irange::max_pairs > 2)
    {
      /* [10,20][30,40][50,60] U [15,35] => [10,40][50,60].  */
      r0 = RANGE3 (10, 20, 30, 40, 50, 60);
      r1 = irange (integer_type_node, 15, 35);
      r0.union_ (r1);
      ASSERT_TRUE (r0 == irange_union (irange (integer_type_node, 10, 40),
				       irange (integer_type_node, 50, 60)));
    }

  /* [10,20] U [15, 30] => [10, 30].  */
  r0 = irange (integer_type_node, 10, 20);
  r1 = irange (integer_type_node, 15, 30);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == irange (integer_type_node, 10, 30));

  /* [10,20] U [25,25] => [10,20][25,25].  */
  r0 = irange (integer_type_node, 10, 20);
  r1 = irange (integer_type_node, 25, 25);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == irange_union (irange (integer_type_node, 10, 20),
				   irange (integer_type_node, 25, 25)));

  if (irange::max_pairs > 2)
    {
      /* [10,20][30,40][50,60] U [35,35] => [10,20][30,40][50,60].  */
      r0 = RANGE3 (10, 20, 30, 40, 50, 60);
      r1 = irange (integer_type_node, 35, 35);
      r0.union_ (r1);
      ASSERT_TRUE (r0 == RANGE3 (10, 20, 30, 40, 50, 60));
    }

  /* [15,40] U [] => [15,40].  */
  r0 = irange (integer_type_node, 15, 40);
  r1.clear ();
  r0.union_ (r1);
  ASSERT_TRUE (r0 == irange (integer_type_node, 15, 40));

  /* [10,20] U [10,10] => [10,20].  */
  r0 = irange (integer_type_node, 10, 20);
  r1 = irange (integer_type_node, 10, 10);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == irange (integer_type_node, 10, 20));

  /* [10,20] U [9,9] => [9,20].  */
  r0 = irange (integer_type_node, 10, 20);
  r1 = irange (integer_type_node, 9, 9);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == irange (integer_type_node, 9, 20));

  if (irange::max_pairs > 2)
    {
      /* [10,10][12,12][20,100] ^ [15,200].  */
      r0 = RANGE3 (10, 10, 12, 12, 20, 100);
      r1 = irange (integer_type_node, 15, 200);
      r0.intersect (r1);
      ASSERT_TRUE (r0 == irange (integer_type_node, 20,100));

      /* [10,20][30,40][50,60] ^ [15,25][38,51][55,70]
	 => [15,20][38,40][50,51][55,60].  */
      r0 = RANGE3 (10, 20, 30, 40, 50, 60);
      r1 = RANGE3 (15, 25, 38, 51, 55, 70);
      r0.intersect (r1);
      if (irange::max_pairs == 3)
	{
	  /* When pairs==3, we don't have enough space, so
	     conservatively handle things.  Thus, the ...[50,60].  */
	  ASSERT_TRUE (r0 == RANGE3 (15, 20, 38, 40, 50, 60));
	}
      else
	{
	  r2 = RANGE3 (15, 20, 38, 40, 50, 51);
	  r2.union_ (irange (integer_type_node, 55, 60));
	  ASSERT_TRUE (r0 == r2);
	}

      /* [15,20][30,40][50,60] ^ [15,35][40,90][100,200]
	 => [15,20][30,35][40,60].  */
      r0 = RANGE3 (15, 20, 30, 40, 50, 60);
      r1 = RANGE3 (15, 35, 40, 90, 100, 200);
      r0.intersect (r1);
      if (irange::max_pairs == 3)
	{
	  /* When pairs==3, we don't have enough space, so
	     conservatively handle things.  */
	  ASSERT_TRUE (r0 == RANGE3 (15, 20, 30, 35, 40, 60));
	}
      else
	{
	  r2 = RANGE3 (15, 20, 30, 35, 40, 40);
	  r2.union_ (irange (integer_type_node, 50, 60));
	  ASSERT_TRUE (r0 == r2);
	}

      /* Test cases where a union inserts a sub-range inside a larger
	 range.

	 [8,10][135,255] U [14,14] => [8,10][14,14][135,255].  */
      r0 = irange_union (irange (integer_type_node, 8, 10),
			 irange (integer_type_node, 135, 255));
      r1 = irange (integer_type_node, 14, 14);
      r0.union_ (r1);
      ASSERT_TRUE (r0 == RANGE3 (8, 10, 14, 14, 135, 255));
    }

  /* [10,20] ^ [15,30] => [15,20].  */
  r0 = irange (integer_type_node, 10, 20);
  r1 = irange (integer_type_node, 15, 30);
  r0.intersect (r1);
  ASSERT_TRUE (r0 == irange (integer_type_node, 15, 20));

  /* [10,20][30,40] ^ [40,50] => [40,40].  */
  r0 = irange_union (irange (integer_type_node, 10, 20),
		     irange (integer_type_node, 30, 40));
  r1 = irange (integer_type_node, 40, 50);
  r0.intersect (r1);
  ASSERT_TRUE (r0 == irange (integer_type_node, 40, 40));

  /* Test non-destructive intersection.  */
  r0 = rold = irange (integer_type_node, 10, 20);
  ASSERT_FALSE (irange_intersect (r0,
				  irange (integer_type_node, 15, 30)).empty_p ());
  ASSERT_TRUE (r0 == rold);

  /* Test the internal sanity of wide_int's wrt HWIs.  */
  ASSERT_TRUE (wi::max_value (TYPE_PRECISION (boolean_type_node),
			      TYPE_SIGN (boolean_type_node))
	       == wi::uhwi (1, TYPE_PRECISION (boolean_type_node)));

  /* Test irange_storage.  */
  r0.set_range (integer_type_node, 5, 10);
  irange_storage *stow = irange_storage::ggc_alloc_init (r0);
  stow->extract_irange (r1, integer_type_node);
  ASSERT_TRUE (r0 == r1);

  /* Test zero_p().  */
  r0.set_range (integer_type_node, 0, 0);
  ASSERT_TRUE (r0.zero_p ());

  /* Test non_zero_p().  */
  r0 = irange (integer_type_node, 0, 0);
  r0.invert ();
  ASSERT_TRUE (r0.non_zero_p ());
}

} // namespace selftest
#endif // CHECKING_P
diff --cc gcc/range.h
index aaab47283aba5d7ea3780b6567466aa9024022b8,0000000000000000000000000000000000000000..762f949629c70a6b628cfb71172a548dddddb01e
mode 100644,000000..100644
--- /dev/null
@@@ -1,315 -1,0 +1,321 @@@
 +/* Header file for range analysis.
 +   Copyright (C) 2017 Free Software Foundation, Inc.
 +   Contributed by Aldy Hernandez <aldyh@redhat.com>.
 +
 +This file is part of GCC.
 +
 +GCC is free software; you can redistribute it and/or modify it under
 +the terms of the GNU General Public License as published by the Free
 +Software Foundation; either version 3, or (at your option) any later
 +version.
 +
 +GCC is distributed in the hope that it will be useful, but WITHOUT ANY
 +WARRANTY; without even the implied warranty of MERCHANTABILITY or
 +FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 +for more details.
 +
 +You should have received a copy of the GNU General Public License
 +along with GCC; see the file COPYING3.  If not see
 +<http://www.gnu.org/licenses/>.  */
 +
 +#ifndef GCC_RANGE_H
 +#define GCC_RANGE_H
 +
 +class irange_storage;
 +
 +/* This is a class for working with ranges, currently integer ones.
 +   With it you can specify a range of [5,10] (5 through 10 inclusive),
 +   or even ranges including multi-part ranges [-10,5][30,40][50,60].
 +   This last one specifies the union of the different sub-ranges.
 +
 +   Inverse ranges are represented as an actual range.  For instance,
 +   the inverse of 0 is [-MIN,-1][1,+MAX] for a signed integer.
 +
 +   Methods are provided for intersecting and uniting ranges, as well
 +   as converting between them.  In performing any of these operations,
 +   when no efficient way can be computed, we may default to a more
 +   conservative range.
 +
 +   For example, the inverse of [5,10][15,20][30,40] is actually
 +   [-MIN,4][11,14][21,29][41,+MAX].  If this cannot be efficiently or
 +   quickly computed, we may opt to represent the inverse as
 +   [-MIN,4][41,+MAX] which is an equivalent conservative
 +   representation.
 +
 +   This class is not meant to live in long term storage (GC).
 +   Consequently, there are no GTY markers.  For long term storage, use
 +   the irange_storage class described later.  */
 +class irange
 +{
 +  friend class irange_storage;
 + public:
 +  /* Maximum number of pairs of ranges allowed.  */
 +  static const unsigned int max_pairs = 3;
 +
 + private:
 +  /* Number of items in bounds[].  */
 +  unsigned char nitems;
 +  /* Whether or not a set operation overflowed.  */
 +  bool overflow;
 +  /* The type of the range.  */
 +  const_tree type;
 +  /* The pairs of sub-ranges in the range.  */
 +  wide_int bounds[max_pairs * 2];
 +
 +  void insert (const wide_int &x, const wide_int &y, unsigned pos);
 +  void prepend (const wide_int &x, const wide_int &y);
 +  void append (const wide_int &x, const wide_int &y);
 +  void remove (unsigned i, unsigned j);
 +  void canonicalize ();
 +
 + public:
 +  /* When constructing a range, this specifies wether this is a
 +     regular range, or the inverse of a range.  */
 +  enum kind { PLAIN, INVERSE };
 +  irange () { type = NULL_TREE; nitems = 0; }
 +  explicit irange (const_tree t) { set_range (t); }
 +  irange (const_tree typ, const wide_int &lbound, const wide_int &ubound,
 +        kind rt = PLAIN)
 +    { set_range (typ, lbound, ubound, rt); }
++  irange (const_tree typ, const_tree lbound, const_tree ubound,
++        kind rt = PLAIN)
++    { set_range (typ, wi::to_wide (lbound), wi::to_wide (ubound), rt); }
 +  irange (const irange &);
 +  irange (const irange_storage *stor, tree typ) { set_range (stor, typ); }
 +  irange (const_tree t, int x, int y) { set_range (t, x, y, PLAIN); }
 +
 +  void set_range (const irange_storage *, const_tree);
 +  void set_range (const_tree);
 +  void set_range (const_tree, const wide_int &lbound, const wide_int &ubound,
 +                kind rt = PLAIN);
++  void set_range (const_tree typ, const_tree lbound, const_tree ubound,
++                kind rt = PLAIN)
++    { set_range (typ, wi::to_wide (lbound), wi::to_wide (ubound), rt);  }
 +  void set_range (const_tree t, int x, int y, kind rt = PLAIN);
 +  void set_range_for_type (const_tree);
 +
 +  bool overflow_p () const { return overflow && !TYPE_OVERFLOW_WRAPS (type); }
 +  void set_overflow () { overflow = true; }
 +  void clear_overflow () { overflow = false; }
 +
 +  unsigned num_pairs () const { return nitems / 2; }
 +  /* Returns the lower bound of PAIR.  */
 +  wide_int lower_bound (unsigned pair = 0) const
 +    {
 +      gcc_assert (nitems != 0 && pair <= num_pairs ());
 +      return bounds[pair * 2];
 +    }
 +  /* Returns the uppermost bound.  */
 +  wide_int upper_bound () const
 +    {
 +      gcc_assert (nitems != 0);
 +      return bounds[nitems - 1];
 +    }
 +  wide_int upper_bound (unsigned pair) const;
 +
 +  /* Remove a sub-range from a range.  PAIR is the zero-based
 +     sub-range to remove.  */
 +  void remove_pair (unsigned pair) { remove (pair * 2, pair * 2 + 1); }
 +  void clear () { nitems = 0; }
 +  void clear (const_tree t) { type = t; nitems = 0; overflow = false; }
 +  bool empty_p () const { return !nitems; }
 +  bool range_for_type_p () const;
 +  bool simple_range_p () const { return nitems == 2; }
 +  bool zero_p () const { return *this == irange (type, 0, 0); }
 +  bool non_zero_p () const
 +    {
 +      irange nz;
 +      nz.set_range (type, 0, 0, INVERSE);
 +      return *this == nz;
 +    }
 +  inline bool singleton_p (wide_int &) const;
 +
 +  void dump () const;
 +  void dump (pretty_printer *pp) const;
 +  void dump (FILE *) const;
 +
 +  bool valid_p () const;
 +  void cast (const_tree type);
 +  bool contains_p (const wide_int &element) const;
 +  bool contains_p (const_tree) const;
 +  bool contains_p (int) const;
 +
 +  const_tree get_type () const { return type; }
 +
 +  irange& operator= (const irange &r);
 +  irange& operator= (const_tree t);
 +
 +  bool operator== (const irange &r) const;
 +  bool operator!= (const irange &r) const { return !(*this == r); }
 +
 +  irange &union_ (const wide_int &x, const wide_int &y);
 +  irange &union_ (const irange &r);
 +  irange &intersect (const wide_int &x, const wide_int &y);
 +  irange &intersect (const irange &r);
 +  irange &invert ();
 +};
 +
 +/* Return TRUE if range contains exactly one element.  If so, set ELEM
 +   to said element.  */
 +
 +inline bool
 +irange::singleton_p (wide_int &elem) const
 +{
 +  if (num_pairs () == 1 && bounds[0] == bounds[1])
 +    {
 +      elem = bounds[0];
 +      return true;
 +    }
 +  return false;
 +}
 +
 +/* Return R1 U R2.  */
 +static inline
 +irange irange_union (const irange &r1, const irange &r2)
 +{
 +  return irange (r1).union_ (r2);
 +}
 +
 +/* Return R1 ^ R2.  */
 +static inline
 +irange irange_intersect (const irange &r1, const irange &r2)
 +{
 +  return irange (r1).intersect (r2);
 +}
 +
 +/* Return the inverse range of R1.  */
 +static inline
 +irange irange_invert (const irange &r1)
 +{
 +  return irange (r1).invert ();
 +}
 +
 +void range_zero (irange *r, tree type);
 +void range_one (irange *r, tree type);
 +bool range_non_zero (irange *r, tree type);
 +void range_positives (irange *r, tree type, unsigned int);
 +
 +// From ssa-range-gen.c.  
 +// class gori will control this  until we get globals set up properly.
 +bool get_global_ssa_range (irange& r, tree name);
 +void set_global_ssa_range (tree name, const irange&r);
 +
 +
 +/* An irange is inefficient when it comes to memory, so this class is
 +   used to store iranges in memory (off of an SSA_NAME likely).  It is
 +   a variable length structure that contains the sub-range pairs as
 +   well as the non-zero bitmask.  The number of entries are
 +   irange::max_pairs * 2 + 1 (to accommodate the non-zero bits).
 +
 +   To store an irange class X into this memory efficient irange_storage
 +   class use:
 +
 +      irange X;
 +      irange_storage *stow = irange_storage::ggc_alloc_init (X);
 +   or
 +      irange_storage *stow = irange_storage::ggc_alloc (precision);
 +      stow->set_irange (X);
 +
 +   To convert it back to an irange use:
 +
 +      tree type = ...;
 +      irange X (stow, type);
 +   or
 +      if (SSA_NAME_RANGE_INFO (ssa)) {
 +        irange X (ssa);
 +        ...
 +      }
 +   or
 +      irange x;
 +      stow->extract_irange (x, TYPE);
 +
 +   To get at the nonzero bits use:
 +
 +      irange_storage *stow = ...;
 +      stow->set_nonzero_bits();
 +      stow->get_nonzero_bits();
 +*/
 +
 +class GTY((variable_size)) irange_storage
 +{
 +  friend class irange;
 + public:
 +  /* These are the pair of subranges for the irange.  The last
 +     wide_int allocated is a mask representing which bits in an
 +     integer are known to be non-zero.  */
 +  trailing_wide_ints<irange::max_pairs * 2 + 1> trailing_bounds;
 +
 +  void set_irange (const irange &);
 +  /* Returns the size of an irange_storage with PRECISION.  */
 +  static size_t size (unsigned precision)
 +  { return sizeof (irange_storage)
 +      /* There is a +1 for the non-zero bits field.  */
 +      + trailing_wide_ints<irange::max_pairs * 2 + 1>::extra_size (precision);
 +  }
 +  /* Allocate GC memory for an irange_storage with PRECISION.
 +
 +     Note: The precision is set, but the irange_storage returned is
 +     otherwise uninitialized.  The caller must still call
 +     stow->set_irange().  */
 +  static irange_storage *ggc_alloc (unsigned precision)
 +  { irange_storage *stow = static_cast<irange_storage *> (ggc_internal_alloc
 +                                                        (size (precision)));
 +    stow->trailing_bounds.set_precision (precision);
 +    stow->set_nonzero_bits (wi::shwi (-1, precision));
 +    return stow;
 +  }
 +  /* Like irange_storage::ggc_alloc (), but initialize the storage to
 +     the range in IR.  */
 +  static irange_storage *ggc_alloc_init (const irange &ir)
 +  {
 +    unsigned precision = TYPE_PRECISION (ir.type);
 +    irange_storage *stow = static_cast<irange_storage *> (ggc_internal_alloc
 +                                                        (size (precision)));
 +    stow->set_irange (ir);
 +    stow->set_nonzero_bits (wi::shwi (-1, precision));
 +    return stow;
 +  }
 +  /* Extract the current range onto OUTPUT with a type of TYP.
 +     Returns the range.  */
 +  inline irange &extract_irange (irange &output, const_tree typ);
 +  /* Set the nonzero bit mask to WI.  */
 +  void set_nonzero_bits (const wide_int &wi)
 +  { trailing_bounds[irange::max_pairs * 2] = wi; }
 +  /* Return the nonzero bits in the range.  */
 +  wide_int get_nonzero_bits (void)
 +  { return trailing_bounds[irange::max_pairs * 2]; }
 +};
 +
 +/* Extract the range in THIS and store it in OUTPUT with a type of TYP.
 +   Returns OUTPUT.  */
 +
 +inline irange &
 +irange_storage::extract_irange (irange &output, const_tree typ)
 +{
 +  output.set_range (this, typ);
 +  return output;
 +}
 +
 +// ----------------------------------------------------------------------
 +
 +/* Return T if it is a valid type for irange to operate on.
 +   Otherwise return NULL_TREE.  */
 +static inline
 +tree valid_irange_type (tree t)
 +{
 +  if (t && (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t)))
 +    return t;
 +  return NULL_TREE;
 +}
 +
 +/* Return T if it is an SSA_NAME and a valid type for irange to operate on.
 +   Otherwise return NULL_TREE.  */
 +static inline
 +tree valid_irange_ssa (tree t)
 +{
 +  if (t && TREE_CODE (t) == SSA_NAME && valid_irange_type (TREE_TYPE (t)))
 +    return t;
 +  return NULL_TREE;
 +}
 +
 +#endif // GCC_RANGE_H
diff --cc gcc/selftest.h
index ef2cff71f90456e2cec3aa41a88728fbe55b3b23,e3117c6bfc425c124b26bdd9f2b92b7db246952b..9fd1a41c73f39bece2baf9c63d010831109aa3ef
@@@ -197,7 -214,8 +214,9 @@@ extern void unique_ptr_tests_cc_tests (
  extern void vec_c_tests ();
  extern void wide_int_cc_tests ();
  extern void predict_c_tests ();
 +extern void irange_tests ();
+ extern void simplify_rtx_c_tests ();
+ extern void vec_perm_indices_c_tests ();
  
  extern int num_passes;
  
diff --cc gcc/ssa.h
Simple merge
diff --cc gcc/tree-core.h
Simple merge
index 26387f86b873cdd687aaa447e79a362c4d6e2412,af35e41c745af3915a28ed53c3acb10a656dd92d..c0b079b581ae538ee109f8168109dcf4ea682827
@@@ -95,6 -95,10 +95,11 @@@ along with GCC; see the file COPYING3
  #include "tree-affine.h"
  #include "params.h"
  #include "builtins.h"
+ #include "stringpool.h"
+ #include "tree-vrp.h"
+ #include "tree-ssanames.h"
+ #include "tree-eh.h"
++#include "range.h"
  
  static struct datadep_stats
  {
@@@ -705,7 -709,47 +710,51 @@@ split_constant_offset_1 (tree type, tre
            && TYPE_PRECISION (type) >= TYPE_PRECISION (itype)
            && (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)))
          {
-           split_constant_offset (op0, &var0, off);
+           if (INTEGRAL_TYPE_P (itype) && TYPE_OVERFLOW_WRAPS (itype))
+             {
+               /* Split the unconverted operand and try to prove that
+                  wrapping isn't a problem.  */
+               tree tmp_var, tmp_off;
+               split_constant_offset (op0, &tmp_var, &tmp_off);
+               /* See whether we have an SSA_NAME whose range is known
+                  to be [A, B].  */
+               if (TREE_CODE (tmp_var) != SSA_NAME)
+                 return false;
 -              wide_int var_min, var_max;
 -              value_range_type vr_type = get_range_info (tmp_var, &var_min,
 -                                                         &var_max);
 -              wide_int var_nonzero = get_nonzero_bits (tmp_var);
 -              signop sgn = TYPE_SIGN (itype);
 -              if (intersect_range_with_nonzero_bits (vr_type, &var_min,
 -                                                     &var_max, var_nonzero,
 -                                                     sgn) != VR_RANGE)
++
++              if (!SSA_NAME_RANGE_INFO (tmp_var))
+                 return false;
++              irange var_range (tmp_var);
++              irange var_nonzero;
++              get_nonzero_bits_as_range (var_nonzero, tmp_var);
++              // FIXME: enable after get_nonzero_bits_as_range gets tested.
++              // var_range.intersect (var_nonzero);
++              // if (var_range.empty_p ())
++              //   return false;
++              wide_int var_min = var_range.lower_bound ();
++              wide_int var_max = var_range.upper_bound ();
++              signop sgn = TYPE_SIGN (itype);
+               /* See whether the range of OP0 (i.e. TMP_VAR + TMP_OFF)
+                  is known to be [A + TMP_OFF, B + TMP_OFF], with all
+                  operations done in ITYPE.  The addition must overflow
+                  at both ends of the range or at neither.  */
+               bool overflow[2];
+               unsigned int prec = TYPE_PRECISION (itype);
+               wide_int woff = wi::to_wide (tmp_off, prec);
+               wide_int op0_min = wi::add (var_min, woff, sgn, &overflow[0]);
+               wi::add (var_max, woff, sgn, &overflow[1]);
+               if (overflow[0] != overflow[1])
+                 return false;
+               /* Calculate (ssizetype) OP0 - (ssizetype) TMP_VAR.  */
+               widest_int diff = (widest_int::from (op0_min, sgn)
+                                  - widest_int::from (var_min, sgn));
+               var0 = tmp_var;
+               *off = wide_int_to_tree (ssizetype, diff);
+             }
+           else
+             split_constant_offset (op0, &var0, off);
            *var = fold_convert (type, var0);
            return true;
          }
@@@ -5263,3 -5367,90 +5372,90 @@@ free_data_refs (vec<data_reference_p> d
      free_data_ref (dr);
    datarefs.release ();
  }
 -        || get_range_info (step, &step_min, &step_max) != VR_RANGE)
+ /* Common routine implementing both dr_direction_indicator and
+    dr_zero_step_indicator.  Return USEFUL_MIN if the indicator is known
+    to be >= USEFUL_MIN and -1 if the indicator is known to be negative.
+    Return the step as the indicator otherwise.  */
+ static tree
+ dr_step_indicator (struct data_reference *dr, int useful_min)
+ {
+   tree step = DR_STEP (dr);
+   STRIP_NOPS (step);
+   /* Look for cases where the step is scaled by a positive constant
+      integer, which will often be the access size.  If the multiplication
+      doesn't change the sign (due to overflow effects) then we can
+      test the unscaled value instead.  */
+   if (TREE_CODE (step) == MULT_EXPR
+       && TREE_CODE (TREE_OPERAND (step, 1)) == INTEGER_CST
+       && tree_int_cst_sgn (TREE_OPERAND (step, 1)) > 0)
+     {
+       tree factor = TREE_OPERAND (step, 1);
+       step = TREE_OPERAND (step, 0);
+       /* Strip widening and truncating conversions as well as nops.  */
+       if (CONVERT_EXPR_P (step)
+         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (step, 0))))
+       step = TREE_OPERAND (step, 0);
+       tree type = TREE_TYPE (step);
+       /* Get the range of step values that would not cause overflow.  */
+       widest_int minv = (wi::to_widest (TYPE_MIN_VALUE (ssizetype))
+                        / wi::to_widest (factor));
+       widest_int maxv = (wi::to_widest (TYPE_MAX_VALUE (ssizetype))
+                        / wi::to_widest (factor));
+       /* Get the range of values that the unconverted step actually has.  */
+       wide_int step_min, step_max;
+       if (TREE_CODE (step) != SSA_NAME
++        || !get_range_info (step, &step_min, &step_max))
+       {
+         step_min = wi::to_wide (TYPE_MIN_VALUE (type));
+         step_max = wi::to_wide (TYPE_MAX_VALUE (type));
+       }
+       /* Check whether the unconverted step has an acceptable range.  */
+       signop sgn = TYPE_SIGN (type);
+       if (wi::les_p (minv, widest_int::from (step_min, sgn))
+         && wi::ges_p (maxv, widest_int::from (step_max, sgn)))
+       {
+         if (wi::ge_p (step_min, useful_min, sgn))
+           return ssize_int (useful_min);
+         else if (wi::lt_p (step_max, 0, sgn))
+           return ssize_int (-1);
+         else
+           return fold_convert (ssizetype, step);
+       }
+     }
+   return DR_STEP (dr);
+ }
+ /* Return a value that is negative iff DR has a negative step.  */
+ tree
+ dr_direction_indicator (struct data_reference *dr)
+ {
+   return dr_step_indicator (dr, 0);
+ }
+ /* Return a value that is zero iff DR has a zero step.  */
+ tree
+ dr_zero_step_indicator (struct data_reference *dr)
+ {
+   return dr_step_indicator (dr, 1);
+ }
+ /* Return true if DR is known to have a nonnegative (but possibly zero)
+    step.  */
+ bool
+ dr_known_forward_stride_p (struct data_reference *dr)
+ {
+   tree indicator = dr_direction_indicator (dr);
+   tree neg_step_val = fold_binary (LT_EXPR, boolean_type_node,
+                                  fold_convert (ssizetype, indicator),
+                                  ssize_int (0));
+   return neg_step_val && integer_zerop (neg_step_val);
+ }
index 8a4ae8b7ae4be4a7c82bce326dde40d79258f54c,fefc9de96af3da9080eb8bd32df1f1c27e09ae70..591d29a65af5d854dba02a4e659e0e29af72443c
@@@ -3329,19 -3154,19 +3154,19 @@@ iv_can_overflow_p (struct loop *loop, t
      return false;
  
    if (TREE_CODE (base) == INTEGER_CST)
-     base_min = base_max = base;
+     base_min = base_max = wi::to_wide (base);
    else if (TREE_CODE (base) == SSA_NAME
           && INTEGRAL_TYPE_P (TREE_TYPE (base))
 -         && get_range_info (base, &base_min, &base_max) == VR_RANGE)
 +         && get_range_info (base, &base_min, &base_max))
      ;
    else
      return true;
  
    if (TREE_CODE (step) == INTEGER_CST)
-     step_min = step_max = step;
+     step_min = step_max = wi::to_wide (step);
    else if (TREE_CODE (step) == SSA_NAME
           && INTEGRAL_TYPE_P (TREE_TYPE (step))
 -         && get_range_info (step, &step_min, &step_max) == VR_RANGE)
 +         && get_range_info (step, &step_min, &step_max))
      ;
    else
      return true;
Simple merge
index 49ae3d09cea356e2548ef5e5dcc7f8cc3e54aa00,fa49abf8d3bed9f1d959ccf7da7e95c013a35274..b4d3c596f696d0b9041a1e19e1eebca7478502ae
@@@ -3221,9 -3225,9 +3223,9 @@@ record_nonwrapping_iv (struct loop *loo
        if (TREE_CODE (orig_base) == SSA_NAME
          && TREE_CODE (high) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
 -        && (get_range_info (orig_base, &min, &max) == VR_RANGE
 +        && (get_range_info (orig_base, &min, &max)
              || get_cst_init_from_scev (orig_base, &max, false))
-         && wi::gts_p (high, max))
+         && wi::gts_p (wi::to_wide (high), max))
        base = wide_int_to_tree (unsigned_type, max);
        else if (TREE_CODE (base) != INTEGER_CST
               && dominated_by_p (CDI_DOMINATORS,
        if (TREE_CODE (orig_base) == SSA_NAME
          && TREE_CODE (low) == INTEGER_CST
          && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
 -        && (get_range_info (orig_base, &min, &max) == VR_RANGE
 +        && (get_range_info (orig_base, &min, &max)
              || get_cst_init_from_scev (orig_base, &min, true))
-         && wi::gts_p (min, low))
+         && wi::gts_p (min, wi::to_wide (low)))
        base = wide_int_to_tree (unsigned_type, min);
        else if (TREE_CODE (base) != INTEGER_CST
               && dominated_by_p (CDI_DOMINATORS,
index 9928f9c7cbbce9b7222f0271a06922a6e002da10,8e94f6a999ab8d48c88ee78a1cb138143b5a0259..1497902bd17b507fe1fbc6b1e51e542fcd09eed9
@@@ -1047,31 -1141,39 +1141,39 @@@ value_replacement (basic_block cond_bb
                                              cond_rhs, false, rhs2))))))
      {
        gsi = gsi_for_stmt (cond);
+       /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
+        def-stmt in:
+          if (n_5 != 0)
+            goto <bb 3>;
+          else
+            goto <bb 4>;
+          <bb 3>:
+          # RANGE [0, 4294967294]
+          u_6 = n_5 + 4294967295;
+          <bb 4>:
+          # u_3 = PHI <u_6(3), 4294967295(2)>  */
+       reset_flow_sensitive_info (lhs);
        if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
        {
-         /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
-            def-stmt in:
-            if (n_5 != 0)
-              goto <bb 3>;
-            else
-              goto <bb 4>;
-            <bb 3>:
-            # RANGE [0, 4294967294]
-            u_6 = n_5 + 4294967295;
-            <bb 4>:
-            # u_3 = PHI <u_6(3), 4294967295(2)>  */
-         SSA_NAME_RANGE_INFO (lhs) = NULL;
          /* If available, we can use VR of phi result at least.  */
          tree phires = gimple_phi_result (phi);
 -        struct range_info_def *phires_range_info
 +        irange_storage *phires_range_info
            = SSA_NAME_RANGE_INFO (phires);
          if (phires_range_info)
 -          duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
 -                                         phires_range_info);
 +          duplicate_ssa_name_range_info (lhs, phires_range_info,
 +                                         TREE_TYPE (phires));
        }
-       gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
+       gimple_stmt_iterator gsi_from;
+       for (int i = prep_cnt - 1; i >= 0; --i)
+       {
+         tree plhs = gimple_assign_lhs (prep_stmt[i]);
+         reset_flow_sensitive_info (plhs);
+         gsi_from = gsi_for_stmt (prep_stmt[i]);
+         gsi_move_before (&gsi_from, &gsi);
+       }
+       gsi_from = gsi_for_stmt (assign);
        gsi_move_before (&gsi_from, &gsi);
        replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
        return 2;
Simple merge
index 7583916911fce75a2c4f24aa3f7d0854c75cc7bb,9844bbbbcf361ad76c93cfb2d757d10e27f9d7e7..ac7d91d8646c66aa9b3502e6c00b1d98352c8ab9
@@@ -5137,3 -5165,868 +5157,868 @@@ vn_nary_may_trap (vn_nary_op_t nary
  
    return false;
  }
 -                                             VN_INFO_RANGE_TYPE (lhs),
 -                                             VN_INFO_RANGE_INFO (lhs));
+ class eliminate_dom_walker : public dom_walker
+ {
+ public:
+   eliminate_dom_walker (cdi_direction, bitmap);
+   ~eliminate_dom_walker ();
+   virtual edge before_dom_children (basic_block);
+   virtual void after_dom_children (basic_block);
+   tree eliminate_avail (tree op);
+   void eliminate_push_avail (tree op);
+   tree eliminate_insert (gimple_stmt_iterator *gsi, tree val);
+   bool do_pre;
+   unsigned int el_todo;
+   unsigned int eliminations;
+   unsigned int insertions;
+   /* SSA names that had their defs inserted by PRE if do_pre.  */
+   bitmap inserted_exprs;
+   /* Blocks with statements that have had their EH properties changed.  */
+   bitmap need_eh_cleanup;
+   /* Blocks with statements that have had their AB properties changed.  */
+   bitmap need_ab_cleanup;
+   auto_vec<gimple *> to_remove;
+   auto_vec<gimple *> to_fixup;
+   auto_vec<tree> avail;
+   auto_vec<tree> avail_stack;
+ };
+ eliminate_dom_walker::eliminate_dom_walker (cdi_direction direction,
+                                           bitmap inserted_exprs_)
+   : dom_walker (direction), do_pre (inserted_exprs_ != NULL),
+     el_todo (0), eliminations (0), insertions (0),
+     inserted_exprs (inserted_exprs_)
+ {
+   need_eh_cleanup = BITMAP_ALLOC (NULL);
+   need_ab_cleanup = BITMAP_ALLOC (NULL);
+ }
+ eliminate_dom_walker::~eliminate_dom_walker ()
+ {
+   BITMAP_FREE (need_eh_cleanup);
+   BITMAP_FREE (need_ab_cleanup);
+ }
+ /* Return a leader for OP that is available at the current point of the
+    eliminate domwalk.  */
+ tree
+ eliminate_dom_walker::eliminate_avail (tree op)
+ {
+   tree valnum = VN_INFO (op)->valnum;
+   if (TREE_CODE (valnum) == SSA_NAME)
+     {
+       if (SSA_NAME_IS_DEFAULT_DEF (valnum))
+       return valnum;
+       if (avail.length () > SSA_NAME_VERSION (valnum))
+       return avail[SSA_NAME_VERSION (valnum)];
+     }
+   else if (is_gimple_min_invariant (valnum))
+     return valnum;
+   return NULL_TREE;
+ }
+ /* At the current point of the eliminate domwalk make OP available.  */
+ void
+ eliminate_dom_walker::eliminate_push_avail (tree op)
+ {
+   tree valnum = VN_INFO (op)->valnum;
+   if (TREE_CODE (valnum) == SSA_NAME)
+     {
+       if (avail.length () <= SSA_NAME_VERSION (valnum))
+       avail.safe_grow_cleared (SSA_NAME_VERSION (valnum) + 1);
+       tree pushop = op;
+       if (avail[SSA_NAME_VERSION (valnum)])
+       pushop = avail[SSA_NAME_VERSION (valnum)];
+       avail_stack.safe_push (pushop);
+       avail[SSA_NAME_VERSION (valnum)] = op;
+     }
+ }
+ /* Insert the expression recorded by SCCVN for VAL at *GSI.  Returns
+    the leader for the expression if insertion was successful.  */
+ tree
+ eliminate_dom_walker::eliminate_insert (gimple_stmt_iterator *gsi, tree val)
+ {
+   /* We can insert a sequence with a single assignment only.  */
+   gimple_seq stmts = VN_INFO (val)->expr;
+   if (!gimple_seq_singleton_p (stmts))
+     return NULL_TREE;
+   gassign *stmt = dyn_cast <gassign *> (gimple_seq_first_stmt (stmts));
+   if (!stmt
+       || (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt))
+         && gimple_assign_rhs_code (stmt) != VIEW_CONVERT_EXPR
+         && gimple_assign_rhs_code (stmt) != BIT_FIELD_REF
+         && (gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
+             || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)))
+     return NULL_TREE;
+   tree op = gimple_assign_rhs1 (stmt);
+   if (gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
+       || gimple_assign_rhs_code (stmt) == BIT_FIELD_REF)
+     op = TREE_OPERAND (op, 0);
+   tree leader = TREE_CODE (op) == SSA_NAME ? eliminate_avail (op) : op;
+   if (!leader)
+     return NULL_TREE;
+   tree res;
+   stmts = NULL;
+   if (gimple_assign_rhs_code (stmt) == BIT_FIELD_REF)
+     res = gimple_build (&stmts, BIT_FIELD_REF,
+                       TREE_TYPE (val), leader,
+                       TREE_OPERAND (gimple_assign_rhs1 (stmt), 1),
+                       TREE_OPERAND (gimple_assign_rhs1 (stmt), 2));
+   else if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR)
+     res = gimple_build (&stmts, BIT_AND_EXPR,
+                       TREE_TYPE (val), leader, gimple_assign_rhs2 (stmt));
+   else
+     res = gimple_build (&stmts, gimple_assign_rhs_code (stmt),
+                       TREE_TYPE (val), leader);
+   if (TREE_CODE (res) != SSA_NAME
+       || SSA_NAME_IS_DEFAULT_DEF (res)
+       || gimple_bb (SSA_NAME_DEF_STMT (res)))
+     {
+       gimple_seq_discard (stmts);
+       /* During propagation we have to treat SSA info conservatively
+          and thus we can end up simplifying the inserted expression
+        at elimination time to sth not defined in stmts.  */
+       /* But then this is a redundancy we failed to detect.  Which means
+          res now has two values.  That doesn't play well with how
+        we track availability here, so give up.  */
+       if (dump_file && (dump_flags & TDF_DETAILS))
+       {
+         if (TREE_CODE (res) == SSA_NAME)
+           res = eliminate_avail (res);
+         if (res)
+           {
+             fprintf (dump_file, "Failed to insert expression for value ");
+             print_generic_expr (dump_file, val);
+             fprintf (dump_file, " which is really fully redundant to ");
+             print_generic_expr (dump_file, res);
+             fprintf (dump_file, "\n");
+           }
+       }
+       return NULL_TREE;
+     }
+   else
+     {
+       gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+       VN_INFO_GET (res)->valnum = val;
+     }
+   insertions++;
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "Inserted ");
+       print_gimple_stmt (dump_file, SSA_NAME_DEF_STMT (res), 0);
+     }
+   return res;
+ }
+ /* Perform elimination for the basic-block B during the domwalk.  */
+ edge
+ eliminate_dom_walker::before_dom_children (basic_block b)
+ {
+   /* Mark new bb.  */
+   avail_stack.safe_push (NULL_TREE);
+   /* Skip unreachable blocks marked unreachable during the SCCVN domwalk.  */
+   edge_iterator ei;
+   edge e;
+   FOR_EACH_EDGE (e, ei, b->preds)
+     if (e->flags & EDGE_EXECUTABLE)
+       break;
+   if (! e)
+     return NULL;
+   for (gphi_iterator gsi = gsi_start_phis (b); !gsi_end_p (gsi);)
+     {
+       gphi *phi = gsi.phi ();
+       tree res = PHI_RESULT (phi);
+       if (virtual_operand_p (res))
+       {
+         gsi_next (&gsi);
+         continue;
+       }
+       tree sprime = eliminate_avail (res);
+       if (sprime
+         && sprime != res)
+       {
+         if (dump_file && (dump_flags & TDF_DETAILS))
+           {
+             fprintf (dump_file, "Replaced redundant PHI node defining ");
+             print_generic_expr (dump_file, res);
+             fprintf (dump_file, " with ");
+             print_generic_expr (dump_file, sprime);
+             fprintf (dump_file, "\n");
+           }
+         /* If we inserted this PHI node ourself, it's not an elimination.  */
+         if (! inserted_exprs
+             || ! bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (res)))
+           eliminations++;
+         /* If we will propagate into all uses don't bother to do
+            anything.  */
+         if (may_propagate_copy (res, sprime))
+           {
+             /* Mark the PHI for removal.  */
+             to_remove.safe_push (phi);
+             gsi_next (&gsi);
+             continue;
+           }
+         remove_phi_node (&gsi, false);
+         if (!useless_type_conversion_p (TREE_TYPE (res), TREE_TYPE (sprime)))
+           sprime = fold_convert (TREE_TYPE (res), sprime);
+         gimple *stmt = gimple_build_assign (res, sprime);
+         gimple_stmt_iterator gsi2 = gsi_after_labels (b);
+         gsi_insert_before (&gsi2, stmt, GSI_NEW_STMT);
+         continue;
+       }
+       eliminate_push_avail (res);
+       gsi_next (&gsi);
+     }
+   for (gimple_stmt_iterator gsi = gsi_start_bb (b);
+        !gsi_end_p (gsi);
+        gsi_next (&gsi))
+     {
+       tree sprime = NULL_TREE;
+       gimple *stmt = gsi_stmt (gsi);
+       tree lhs = gimple_get_lhs (stmt);
+       if (lhs && TREE_CODE (lhs) == SSA_NAME
+         && !gimple_has_volatile_ops (stmt)
+         /* See PR43491.  Do not replace a global register variable when
+            it is a the RHS of an assignment.  Do replace local register
+            variables since gcc does not guarantee a local variable will
+            be allocated in register.
+            ???  The fix isn't effective here.  This should instead
+            be ensured by not value-numbering them the same but treating
+            them like volatiles?  */
+         && !(gimple_assign_single_p (stmt)
+              && (TREE_CODE (gimple_assign_rhs1 (stmt)) == VAR_DECL
+                  && DECL_HARD_REGISTER (gimple_assign_rhs1 (stmt))
+                  && is_global_var (gimple_assign_rhs1 (stmt)))))
+       {
+         sprime = eliminate_avail (lhs);
+         if (!sprime)
+           {
+             /* If there is no existing usable leader but SCCVN thinks
+                it has an expression it wants to use as replacement,
+                insert that.  */
+             tree val = VN_INFO (lhs)->valnum;
+             if (val != VN_TOP
+                 && TREE_CODE (val) == SSA_NAME
+                 && VN_INFO (val)->needs_insertion
+                 && VN_INFO (val)->expr != NULL
+                 && (sprime = eliminate_insert (&gsi, val)) != NULL_TREE)
+               eliminate_push_avail (sprime);
+           }
+         /* If this now constitutes a copy duplicate points-to
+            and range info appropriately.  This is especially
+            important for inserted code.  See tree-ssa-copy.c
+            for similar code.  */
+         if (sprime
+             && TREE_CODE (sprime) == SSA_NAME)
+           {
+             basic_block sprime_b = gimple_bb (SSA_NAME_DEF_STMT (sprime));
+             if (POINTER_TYPE_P (TREE_TYPE (lhs))
+                 && VN_INFO_PTR_INFO (lhs)
+                 && ! VN_INFO_PTR_INFO (sprime))
+               {
+                 duplicate_ssa_name_ptr_info (sprime,
+                                              VN_INFO_PTR_INFO (lhs));
+                 if (b != sprime_b)
+                   mark_ptr_info_alignment_unknown
+                       (SSA_NAME_PTR_INFO (sprime));
+               }
+             else if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+                      && VN_INFO_RANGE_INFO (lhs)
+                      && ! VN_INFO_RANGE_INFO (sprime)
+                      && b == sprime_b)
+               duplicate_ssa_name_range_info (sprime,
++                                             VN_INFO_RANGE_INFO (lhs),
++                                             TREE_TYPE (lhs));
+           }
+         /* Inhibit the use of an inserted PHI on a loop header when
+            the address of the memory reference is a simple induction
+            variable.  In other cases the vectorizer won't do anything
+            anyway (either it's loop invariant or a complicated
+            expression).  */
+         if (sprime
+             && TREE_CODE (sprime) == SSA_NAME
+             && do_pre
+             && (flag_tree_loop_vectorize || flag_tree_parallelize_loops > 1)
+             && loop_outer (b->loop_father)
+             && has_zero_uses (sprime)
+             && bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (sprime))
+             && gimple_assign_load_p (stmt))
+           {
+             gimple *def_stmt = SSA_NAME_DEF_STMT (sprime);
+             basic_block def_bb = gimple_bb (def_stmt);
+             if (gimple_code (def_stmt) == GIMPLE_PHI
+                 && def_bb->loop_father->header == def_bb)
+               {
+                 loop_p loop = def_bb->loop_father;
+                 ssa_op_iter iter;
+                 tree op;
+                 bool found = false;
+                 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
+                   {
+                     affine_iv iv;
+                     def_bb = gimple_bb (SSA_NAME_DEF_STMT (op));
+                     if (def_bb
+                         && flow_bb_inside_loop_p (loop, def_bb)
+                         && simple_iv (loop, loop, op, &iv, true))
+                       {
+                         found = true;
+                         break;
+                       }
+                   }
+                 if (found)
+                   {
+                     if (dump_file && (dump_flags & TDF_DETAILS))
+                       {
+                         fprintf (dump_file, "Not replacing ");
+                         print_gimple_expr (dump_file, stmt, 0);
+                         fprintf (dump_file, " with ");
+                         print_generic_expr (dump_file, sprime);
+                         fprintf (dump_file, " which would add a loop"
+                                  " carried dependence to loop %d\n",
+                                  loop->num);
+                       }
+                     /* Don't keep sprime available.  */
+                     sprime = NULL_TREE;
+                   }
+               }
+           }
+         if (sprime)
+           {
+             /* If we can propagate the value computed for LHS into
+                all uses don't bother doing anything with this stmt.  */
+             if (may_propagate_copy (lhs, sprime))
+               {
+                 /* Mark it for removal.  */
+                 to_remove.safe_push (stmt);
+                 /* ???  Don't count copy/constant propagations.  */
+                 if (gimple_assign_single_p (stmt)
+                     && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
+                         || gimple_assign_rhs1 (stmt) == sprime))
+                   continue;
+                 if (dump_file && (dump_flags & TDF_DETAILS))
+                   {
+                     fprintf (dump_file, "Replaced ");
+                     print_gimple_expr (dump_file, stmt, 0);
+                     fprintf (dump_file, " with ");
+                     print_generic_expr (dump_file, sprime);
+                     fprintf (dump_file, " in all uses of ");
+                     print_gimple_stmt (dump_file, stmt, 0);
+                   }
+                 eliminations++;
+                 continue;
+               }
+             /* If this is an assignment from our leader (which
+                happens in the case the value-number is a constant)
+                then there is nothing to do.  */
+             if (gimple_assign_single_p (stmt)
+                 && sprime == gimple_assign_rhs1 (stmt))
+               continue;
+             /* Else replace its RHS.  */
+             bool can_make_abnormal_goto
+                 = is_gimple_call (stmt)
+                 && stmt_can_make_abnormal_goto (stmt);
+             if (dump_file && (dump_flags & TDF_DETAILS))
+               {
+                 fprintf (dump_file, "Replaced ");
+                 print_gimple_expr (dump_file, stmt, 0);
+                 fprintf (dump_file, " with ");
+                 print_generic_expr (dump_file, sprime);
+                 fprintf (dump_file, " in ");
+                 print_gimple_stmt (dump_file, stmt, 0);
+               }
+             eliminations++;
+             gimple *orig_stmt = stmt;
+             if (!useless_type_conversion_p (TREE_TYPE (lhs),
+                                             TREE_TYPE (sprime)))
+               sprime = fold_convert (TREE_TYPE (lhs), sprime);
+             tree vdef = gimple_vdef (stmt);
+             tree vuse = gimple_vuse (stmt);
+             propagate_tree_value_into_stmt (&gsi, sprime);
+             stmt = gsi_stmt (gsi);
+             update_stmt (stmt);
+             if (vdef != gimple_vdef (stmt))
+               VN_INFO (vdef)->valnum = vuse;
+             /* If we removed EH side-effects from the statement, clean
+                its EH information.  */
+             if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
+               {
+                 bitmap_set_bit (need_eh_cleanup,
+                                 gimple_bb (stmt)->index);
+                 if (dump_file && (dump_flags & TDF_DETAILS))
+                   fprintf (dump_file, "  Removed EH side-effects.\n");
+               }
+             /* Likewise for AB side-effects.  */
+             if (can_make_abnormal_goto
+                 && !stmt_can_make_abnormal_goto (stmt))
+               {
+                 bitmap_set_bit (need_ab_cleanup,
+                                 gimple_bb (stmt)->index);
+                 if (dump_file && (dump_flags & TDF_DETAILS))
+                   fprintf (dump_file, "  Removed AB side-effects.\n");
+               }
+             continue;
+           }
+       }
+       /* If the statement is a scalar store, see if the expression
+          has the same value number as its rhs.  If so, the store is
+          dead.  */
+       if (gimple_assign_single_p (stmt)
+         && !gimple_has_volatile_ops (stmt)
+         && !is_gimple_reg (gimple_assign_lhs (stmt))
+         && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
+             || is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
+       {
+         tree val;
+         tree rhs = gimple_assign_rhs1 (stmt);
+         vn_reference_t vnresult;
+         val = vn_reference_lookup (lhs, gimple_vuse (stmt), VN_WALKREWRITE,
+                                    &vnresult, false);
+         if (TREE_CODE (rhs) == SSA_NAME)
+           rhs = VN_INFO (rhs)->valnum;
+         if (val
+             && operand_equal_p (val, rhs, 0))
+           {
+             /* We can only remove the later store if the former aliases
+                at least all accesses the later one does or if the store
+                was to readonly memory storing the same value.  */
+             alias_set_type set = get_alias_set (lhs);
+             if (! vnresult
+                 || vnresult->set == set
+                 || alias_set_subset_of (set, vnresult->set))
+               {
+                 if (dump_file && (dump_flags & TDF_DETAILS))
+                   {
+                     fprintf (dump_file, "Deleted redundant store ");
+                     print_gimple_stmt (dump_file, stmt, 0);
+                   }
+                 /* Queue stmt for removal.  */
+                 to_remove.safe_push (stmt);
+                 continue;
+               }
+           }
+       }
+       /* If this is a control statement value numbering left edges
+        unexecuted on force the condition in a way consistent with
+        that.  */
+       if (gcond *cond = dyn_cast <gcond *> (stmt))
+       {
+         if ((EDGE_SUCC (b, 0)->flags & EDGE_EXECUTABLE)
+             ^ (EDGE_SUCC (b, 1)->flags & EDGE_EXECUTABLE))
+           {
+               if (dump_file && (dump_flags & TDF_DETAILS))
+                 {
+                   fprintf (dump_file, "Removing unexecutable edge from ");
+                 print_gimple_stmt (dump_file, stmt, 0);
+                 }
+             if (((EDGE_SUCC (b, 0)->flags & EDGE_TRUE_VALUE) != 0)
+                 == ((EDGE_SUCC (b, 0)->flags & EDGE_EXECUTABLE) != 0))
+               gimple_cond_make_true (cond);
+             else
+               gimple_cond_make_false (cond);
+             update_stmt (cond);
+             el_todo |= TODO_cleanup_cfg;
+             continue;
+           }
+       }
+       bool can_make_abnormal_goto = stmt_can_make_abnormal_goto (stmt);
+       bool was_noreturn = (is_gimple_call (stmt)
+                          && gimple_call_noreturn_p (stmt));
+       tree vdef = gimple_vdef (stmt);
+       tree vuse = gimple_vuse (stmt);
+       /* If we didn't replace the whole stmt (or propagate the result
+          into all uses), replace all uses on this stmt with their
+        leaders.  */
+       bool modified = false;
+       use_operand_p use_p;
+       ssa_op_iter iter;
+       FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
+       {
+         tree use = USE_FROM_PTR (use_p);
+         /* ???  The call code above leaves stmt operands un-updated.  */
+         if (TREE_CODE (use) != SSA_NAME)
+           continue;
+         tree sprime = eliminate_avail (use);
+         if (sprime && sprime != use
+             && may_propagate_copy (use, sprime)
+             /* We substitute into debug stmts to avoid excessive
+                debug temporaries created by removed stmts, but we need
+                to avoid doing so for inserted sprimes as we never want
+                to create debug temporaries for them.  */
+             && (!inserted_exprs
+                 || TREE_CODE (sprime) != SSA_NAME
+                 || !is_gimple_debug (stmt)
+                 || !bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (sprime))))
+           {
+             propagate_value (use_p, sprime);
+             modified = true;
+           }
+       }
+       /* Fold the stmt if modified, this canonicalizes MEM_REFs we propagated
+          into which is a requirement for the IPA devirt machinery.  */
+       gimple *old_stmt = stmt;
+       if (modified)
+       {
+         /* If a formerly non-invariant ADDR_EXPR is turned into an
+            invariant one it was on a separate stmt.  */
+         if (gimple_assign_single_p (stmt)
+             && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR)
+           recompute_tree_invariant_for_addr_expr (gimple_assign_rhs1 (stmt));
+         gimple_stmt_iterator prev = gsi;
+         gsi_prev (&prev);
+         if (fold_stmt (&gsi))
+           {
+             /* fold_stmt may have created new stmts inbetween
+                the previous stmt and the folded stmt.  Mark
+                all defs created there as varying to not confuse
+                the SCCVN machinery as we're using that even during
+                elimination.  */
+             if (gsi_end_p (prev))
+               prev = gsi_start_bb (b);
+             else
+               gsi_next (&prev);
+             if (gsi_stmt (prev) != gsi_stmt (gsi))
+               do
+                 {
+                   tree def;
+                   ssa_op_iter dit;
+                   FOR_EACH_SSA_TREE_OPERAND (def, gsi_stmt (prev),
+                                              dit, SSA_OP_ALL_DEFS)
+                     /* As existing DEFs may move between stmts
+                        we have to guard VN_INFO_GET.  */
+                     if (! has_VN_INFO (def))
+                       VN_INFO_GET (def)->valnum = def;
+                   if (gsi_stmt (prev) == gsi_stmt (gsi))
+                     break;
+                   gsi_next (&prev);
+                 }
+               while (1);
+           }
+         stmt = gsi_stmt (gsi);
+         /* In case we folded the stmt away schedule the NOP for removal.  */
+         if (gimple_nop_p (stmt))
+           to_remove.safe_push (stmt);
+       }
+       /* Visit indirect calls and turn them into direct calls if
+        possible using the devirtualization machinery.  Do this before
+        checking for required EH/abnormal/noreturn cleanup as devird
+        may expose more of those.  */
+       if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
+       {
+         tree fn = gimple_call_fn (call_stmt);
+         if (fn
+             && flag_devirtualize
+             && virtual_method_call_p (fn))
+           {
+             tree otr_type = obj_type_ref_class (fn);
+             unsigned HOST_WIDE_INT otr_tok
+               = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (fn));
+             tree instance;
+             ipa_polymorphic_call_context context (current_function_decl,
+                                                   fn, stmt, &instance);
+             context.get_dynamic_type (instance, OBJ_TYPE_REF_OBJECT (fn),
+                                       otr_type, stmt);
+             bool final;
+             vec <cgraph_node *> targets
+               = possible_polymorphic_call_targets (obj_type_ref_class (fn),
+                                                    otr_tok, context, &final);
+             if (dump_file)
+               dump_possible_polymorphic_call_targets (dump_file, 
+                                                       obj_type_ref_class (fn),
+                                                       otr_tok, context);
+             if (final && targets.length () <= 1 && dbg_cnt (devirt))
+               {
+                 tree fn;
+                 if (targets.length () == 1)
+                   fn = targets[0]->decl;
+                 else
+                   fn = builtin_decl_implicit (BUILT_IN_UNREACHABLE);
+                 if (dump_enabled_p ())
+                   {
+                     location_t loc = gimple_location (stmt);
+                     dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc,
+                                      "converting indirect call to "
+                                      "function %s\n",
+                                      lang_hooks.decl_printable_name (fn, 2));
+                   }
+                 gimple_call_set_fndecl (call_stmt, fn);
+                 /* If changing the call to __builtin_unreachable
+                    or similar noreturn function, adjust gimple_call_fntype
+                    too.  */
+                 if (gimple_call_noreturn_p (call_stmt)
+                     && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fn)))
+                     && TYPE_ARG_TYPES (TREE_TYPE (fn))
+                     && (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fn)))
+                         == void_type_node))
+                   gimple_call_set_fntype (call_stmt, TREE_TYPE (fn));
+                 maybe_remove_unused_call_args (cfun, call_stmt);
+                 modified = true;
+               }
+           }
+       }
+       if (modified)
+       {
+         /* When changing a call into a noreturn call, cfg cleanup
+            is needed to fix up the noreturn call.  */
+         if (!was_noreturn
+             && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
+           to_fixup.safe_push  (stmt);
+         /* When changing a condition or switch into one we know what
+            edge will be executed, schedule a cfg cleanup.  */
+         if ((gimple_code (stmt) == GIMPLE_COND
+              && (gimple_cond_true_p (as_a <gcond *> (stmt))
+                  || gimple_cond_false_p (as_a <gcond *> (stmt))))
+             || (gimple_code (stmt) == GIMPLE_SWITCH
+                 && TREE_CODE (gimple_switch_index
+                                 (as_a <gswitch *> (stmt))) == INTEGER_CST))
+           el_todo |= TODO_cleanup_cfg;
+         /* If we removed EH side-effects from the statement, clean
+            its EH information.  */
+         if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
+           {
+             bitmap_set_bit (need_eh_cleanup,
+                             gimple_bb (stmt)->index);
+             if (dump_file && (dump_flags & TDF_DETAILS))
+               fprintf (dump_file, "  Removed EH side-effects.\n");
+           }
+         /* Likewise for AB side-effects.  */
+         if (can_make_abnormal_goto
+             && !stmt_can_make_abnormal_goto (stmt))
+           {
+             bitmap_set_bit (need_ab_cleanup,
+                             gimple_bb (stmt)->index);
+             if (dump_file && (dump_flags & TDF_DETAILS))
+               fprintf (dump_file, "  Removed AB side-effects.\n");
+           }
+         update_stmt (stmt);
+         if (vdef != gimple_vdef (stmt))
+           VN_INFO (vdef)->valnum = vuse;
+       }
+       /* Make new values available - for fully redundant LHS we
+          continue with the next stmt above and skip this.  */
+       def_operand_p defp;
+       FOR_EACH_SSA_DEF_OPERAND (defp, stmt, iter, SSA_OP_DEF)
+       eliminate_push_avail (DEF_FROM_PTR (defp));
+     }
+   /* Replace destination PHI arguments.  */
+   FOR_EACH_EDGE (e, ei, b->succs)
+     if (e->flags & EDGE_EXECUTABLE)
+       for (gphi_iterator gsi = gsi_start_phis (e->dest);
+          !gsi_end_p (gsi);
+          gsi_next (&gsi))
+       {
+         gphi *phi = gsi.phi ();
+         use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
+         tree arg = USE_FROM_PTR (use_p);
+         if (TREE_CODE (arg) != SSA_NAME
+             || virtual_operand_p (arg))
+           continue;
+         tree sprime = eliminate_avail (arg);
+         if (sprime && may_propagate_copy (arg, sprime))
+           propagate_value (use_p, sprime);
+       }
+   return NULL;
+ }
+ /* Make no longer available leaders no longer available.  */
+ void
+ eliminate_dom_walker::after_dom_children (basic_block)
+ {
+   tree entry;
+   while ((entry = avail_stack.pop ()) != NULL_TREE)
+     {
+       tree valnum = VN_INFO (entry)->valnum;
+       tree old = avail[SSA_NAME_VERSION (valnum)];
+       if (old == entry)
+       avail[SSA_NAME_VERSION (valnum)] = NULL_TREE;
+       else
+       avail[SSA_NAME_VERSION (valnum)] = entry;
+     }
+ }
+ /* Eliminate fully redundant computations.  */
+ unsigned int
+ vn_eliminate (bitmap inserted_exprs)
+ {
+   eliminate_dom_walker el (CDI_DOMINATORS, inserted_exprs);
+   el.avail.reserve (num_ssa_names);
+   el.walk (cfun->cfg->x_entry_block_ptr);
+   /* We cannot remove stmts during BB walk, especially not release SSA
+      names there as this confuses the VN machinery.  The stmts ending
+      up in to_remove are either stores or simple copies.
+      Remove stmts in reverse order to make debug stmt creation possible.  */
+   while (!el.to_remove.is_empty ())
+     {
+       gimple *stmt = el.to_remove.pop ();
+       if (dump_file && (dump_flags & TDF_DETAILS))
+       {
+         fprintf (dump_file, "Removing dead stmt ");
+         print_gimple_stmt (dump_file, stmt, 0, 0);
+       }
+       gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+       if (gimple_code (stmt) == GIMPLE_PHI)
+       remove_phi_node (&gsi, true);
+       else
+       {
+         basic_block bb = gimple_bb (stmt);
+         unlink_stmt_vdef (stmt);
+         if (gsi_remove (&gsi, true))
+           bitmap_set_bit (el.need_eh_cleanup, bb->index);
+         if (is_gimple_call (stmt) && stmt_can_make_abnormal_goto (stmt))
+           bitmap_set_bit (el.need_ab_cleanup, bb->index);
+         release_defs (stmt);
+       }
+       /* Removing a stmt may expose a forwarder block.  */
+       el.el_todo |= TODO_cleanup_cfg;
+     }
+   /* Fixup stmts that became noreturn calls.  This may require splitting
+      blocks and thus isn't possible during the dominator walk.  Do this
+      in reverse order so we don't inadvertedly remove a stmt we want to
+      fixup by visiting a dominating now noreturn call first.  */
+   while (!el.to_fixup.is_empty ())
+     {
+       gimple *stmt = el.to_fixup.pop ();
+       if (dump_file && (dump_flags & TDF_DETAILS))
+       {
+         fprintf (dump_file, "Fixing up noreturn call ");
+         print_gimple_stmt (dump_file, stmt, 0);
+       }
+       if (fixup_noreturn_call (stmt))
+       el.el_todo |= TODO_cleanup_cfg;
+     }
+   bool do_eh_cleanup = !bitmap_empty_p (el.need_eh_cleanup);
+   bool do_ab_cleanup = !bitmap_empty_p (el.need_ab_cleanup);
+   if (do_eh_cleanup)
+     gimple_purge_all_dead_eh_edges (el.need_eh_cleanup);
+   if (do_ab_cleanup)
+     gimple_purge_all_dead_abnormal_call_edges (el.need_ab_cleanup);
+   if (do_eh_cleanup || do_ab_cleanup)
+     el.el_todo |= TODO_cleanup_cfg;
+   statistics_counter_event (cfun, "Eliminated", el.eliminations);
+   statistics_counter_event (cfun, "Insertions", el.insertions);
+   return el.el_todo;
+ }
+ namespace {
+ const pass_data pass_data_fre =
+ {
+   GIMPLE_PASS, /* type */
+   "fre", /* name */
+   OPTGROUP_NONE, /* optinfo_flags */
+   TV_TREE_FRE, /* tv_id */
+   ( PROP_cfg | PROP_ssa ), /* properties_required */
+   0, /* properties_provided */
+   0, /* properties_destroyed */
+   0, /* todo_flags_start */
+   0, /* todo_flags_finish */
+ };
+ class pass_fre : public gimple_opt_pass
+ {
+ public:
+   pass_fre (gcc::context *ctxt)
+     : gimple_opt_pass (pass_data_fre, ctxt)
+   {}
+   /* opt_pass methods: */
+   opt_pass * clone () { return new pass_fre (m_ctxt); }
+   virtual bool gate (function *) { return flag_tree_fre != 0; }
+   virtual unsigned int execute (function *);
+ }; // class pass_fre
+ unsigned int
+ pass_fre::execute (function *)
+ {
+   unsigned int todo = 0;
+   run_scc_vn (VN_WALKREWRITE);
+   /* Remove all the redundant expressions.  */
+   todo |= vn_eliminate (NULL);
+   scc_vn_restore_ssa_info ();
+   free_scc_vn ();
+   return todo;
+ }
+ } // anon namespace
+ gimple_opt_pass *
+ make_pass_fre (gcc::context *ctxt)
+ {
+   return new pass_fre (ctxt);
+ }
Simple merge
index 4ec0dacf38aa07175c5517ec6323646d9e4ee1ec,33004b6870d29594985b47ca40e3fcb21bdb3050..d0d4db4c7855ad2043f127151c955b23da8b27be
@@@ -1605,6 -1712,456 +1712,440 @@@ handle_builtin_strcpy (enum built_in_fu
      }
    else if (dump_file && (dump_flags & TDF_DETAILS) != 0)
      fprintf (dump_file, "not possible.\n");
 -      enum value_range_type rng = get_range_info (cnt, cntrange, cntrange + 1);
 -      if (rng == VR_RANGE)
+   if (set_no_warning)
+     gimple_set_no_warning (stmt, true);
+ }
+ /* Check the size argument to the built-in forms of stpncpy and strncpy
+    for out-of-bounds offsets or overlapping access, and to see if the
+    size argument is derived from a call to strlen() on the source argument,
+    and if so, issue an appropriate warning.  */
+ static void
+ handle_builtin_strncat (built_in_function bcode, gimple_stmt_iterator *gsi)
+ {
+   /* Same as stxncpy().  */
+   handle_builtin_stxncpy (bcode, gsi);
+ }
+ /* Return true if LEN depends on a call to strlen(SRC) in an interesting
+    way.  LEN can either be an integer expression, or a pointer (to char).
+    When it is the latter (such as in recursive calls to self) is is
+    assumed to be the argument in some call to strlen() whose relationship
+    to SRC is being ascertained.  */
+ static bool
+ is_strlen_related_p (tree src, tree len)
+ {
+   if (TREE_CODE (TREE_TYPE (len)) == POINTER_TYPE
+       && operand_equal_p (src, len, 0))
+     return true;
+   if (TREE_CODE (len) != SSA_NAME)
+     return false;
+   gimple *def_stmt = SSA_NAME_DEF_STMT (len);
+   if (!def_stmt)
+     return false;
+   if (is_gimple_call (def_stmt))
+     {
+       tree func = gimple_call_fndecl (def_stmt);
+       if (!valid_builtin_call (def_stmt)
+         || DECL_FUNCTION_CODE (func) != BUILT_IN_STRLEN)
+       return false;
+       tree arg = gimple_call_arg (def_stmt, 0);
+       return is_strlen_related_p (src, arg);
+     }
+   if (!is_gimple_assign (def_stmt))
+     return false;
+   tree_code code = gimple_assign_rhs_code (def_stmt);
+   tree rhs1 = gimple_assign_rhs1 (def_stmt);
+   tree rhstype = TREE_TYPE (rhs1);
+   if ((TREE_CODE (rhstype) == POINTER_TYPE && code == POINTER_PLUS_EXPR)
+       || (INTEGRAL_TYPE_P (rhstype)
+         && (code == BIT_AND_EXPR
+             || code == NOP_EXPR)))
+     {
+       /* Pointer plus (an integer) and integer cast or truncation are
+        considered among the (potentially) related expressions to strlen.
+        Others are not.  */
+       return is_strlen_related_p (src, rhs1);
+     }
+   return false;
+ }
+ /* Called by handle_builtin_stxncpy and by gimple_fold_builtin_strncpy
+    in gimple-fold.c.
+    Check to see if the specified bound is a) equal to the size of
+    the destination DST and if so, b) if it's immediately followed by
+    DST[CNT - 1] = '\0'.  If a) holds and b) does not, warn.  Otherwise,
+    do nothing.  Return true if diagnostic has been issued.
+    The purpose is to diagnose calls to strncpy and stpncpy that do
+    not nul-terminate the copy while allowing for the idiom where
+    such a call is immediately followed by setting the last element
+    to nul, as in:
+      char a[32];
+      strncpy (a, s, sizeof a);
+      a[sizeof a - 1] = '\0';
+ */
+ bool
+ maybe_diag_stxncpy_trunc (gimple_stmt_iterator gsi, tree src, tree cnt)
+ {
+   gimple *stmt = gsi_stmt (gsi);
+   if (gimple_no_warning_p (stmt))
+     return false;
+   wide_int cntrange[2];
+   if (TREE_CODE (cnt) == INTEGER_CST)
+     cntrange[0] = cntrange[1] = wi::to_wide (cnt);
+   else if (TREE_CODE (cnt) == SSA_NAME)
+     {
 -      else if (rng == VR_ANTI_RANGE)
 -      {
 -        wide_int maxobjsize = wi::to_wide (TYPE_MAX_VALUE (ptrdiff_type_node));
 -
 -        if (wi::ltu_p (cntrange[1], maxobjsize))
 -          {
 -            cntrange[0] = cntrange[1] + 1;
 -            cntrange[1] = maxobjsize;
 -          }
 -        else
 -          {
 -            cntrange[1] = cntrange[0] - 1;
 -            cntrange[0] = wi::zero (TYPE_PRECISION (TREE_TYPE (cnt)));
 -          }
 -      }
++      if (get_range_info (cnt, cntrange, cntrange + 1))
+       ;
+       else
+       return false;
+     }
+   else
+     return false;
+   /* Negative value is the constant string length.  If it's less than
+      the lower bound there is no truncation.  Avoid calling get_stridx()
+      when ssa_ver_to_stridx is empty.  That implies the caller isn't
+      running under the control of this pass and ssa_ver_to_stridx hasn't
+      been created yet.  */
+   int sidx = ssa_ver_to_stridx.length () ? get_stridx (src) : 0;
+   if (sidx < 0 && wi::gtu_p (cntrange[0], ~sidx))
+     return false;
+   tree dst = gimple_call_arg (stmt, 0);
+   tree dstdecl = dst;
+   if (TREE_CODE (dstdecl) == ADDR_EXPR)
+     dstdecl = TREE_OPERAND (dstdecl, 0);
+   /* If the destination refers to a an array/pointer declared nonstring
+      return early.  */
+   tree ref = NULL_TREE;
+   if (get_attr_nonstring_decl (dstdecl, &ref))
+     return false;
+   /* Look for dst[i] = '\0'; after the stxncpy() call and if found
+      avoid the truncation warning.  */
+   gsi_next_nondebug (&gsi);
+   gimple *next_stmt = gsi_stmt (gsi);
+   if (!next_stmt)
+     {
+       /* When there is no statement in the same basic block check
+        the immediate successor block.  */
+       if (basic_block bb = gimple_bb (stmt))
+       {
+         if (single_succ_p (bb))
+           {
+             /* For simplicity, ignore blocks with multiple outgoing
+                edges for now and only consider successor blocks along
+                normal edges.  */
+             edge e = EDGE_SUCC (bb, 0);
+             if (!(e->flags & EDGE_ABNORMAL))
+               {
+                 gsi = gsi_start_bb (e->dest);
+                 next_stmt = gsi_stmt (gsi);
+                 if (next_stmt && is_gimple_debug (next_stmt))
+                   {
+                     gsi_next_nondebug (&gsi);
+                     next_stmt = gsi_stmt (gsi);
+                   }
+               }
+           }
+       }
+     }
+   if (next_stmt && is_gimple_assign (next_stmt))
+     {
+       tree lhs = gimple_assign_lhs (next_stmt);
+       tree_code code = TREE_CODE (lhs);
+       if (code == ARRAY_REF || code == MEM_REF)
+       lhs = TREE_OPERAND (lhs, 0);
+       tree func = gimple_call_fndecl (stmt);
+       if (DECL_FUNCTION_CODE (func) == BUILT_IN_STPNCPY)
+       {
+         tree ret = gimple_call_lhs (stmt);
+         if (ret && operand_equal_p (ret, lhs, 0))
+           return false;
+       }
+       /* Determine the base address and offset of the reference,
+        ignoring the innermost array index.  */
+       if (TREE_CODE (ref) == ARRAY_REF)
+       ref = TREE_OPERAND (ref, 0);
+       poly_int64 dstoff;
+       tree dstbase = get_addr_base_and_unit_offset (ref, &dstoff);
+       poly_int64 lhsoff;
+       tree lhsbase = get_addr_base_and_unit_offset (lhs, &lhsoff);
+       if (lhsbase
+         && dstbase
+         && known_eq (dstoff, lhsoff)
+         && operand_equal_p (dstbase, lhsbase, 0))
+       return false;
+     }
+   int prec = TYPE_PRECISION (TREE_TYPE (cnt));
+   wide_int lenrange[2];
+   if (strinfo *sisrc = sidx > 0 ? get_strinfo (sidx) : NULL)
+     {
+       lenrange[0] = (sisrc->nonzero_chars
+                    && TREE_CODE (sisrc->nonzero_chars) == INTEGER_CST
+                    ? wi::to_wide (sisrc->nonzero_chars)
+                    : wi::zero (prec));
+       lenrange[1] = lenrange[0];
+     }
+   else if (sidx < 0)
+     lenrange[0] = lenrange[1] = wi::shwi (~sidx, prec);
+   else
+     {
+       tree range[2];
+       get_range_strlen (src, range);
+       if (range[0] != NULL_TREE
+         && TREE_CODE (range[0]) == INTEGER_CST
+         && range[1] != NULL_TREE
+         && TREE_CODE (range[1]) == INTEGER_CST)
+       {
+         lenrange[0] = wi::to_wide (range[0], prec);
+         lenrange[1] = wi::to_wide (range[1], prec);
+       }
+       else
+       {
+         lenrange[0] = wi::shwi (0, prec);
+         lenrange[1] = wi::shwi (-1, prec);
+       }
+     }
+   location_t callloc = gimple_location (stmt);
+   tree func = gimple_call_fndecl (stmt);
+   if (lenrange[0] != 0 || !wi::neg_p (lenrange[1]))
+     {
+       /* If the longest source string is shorter than the lower bound
+        of the specified count the copy is definitely nul-terminated.  */
+       if (wi::ltu_p (lenrange[1], cntrange[0]))
+       return false;
+       if (wi::neg_p (lenrange[1]))
+       {
+         /* The length of one of the strings is unknown but at least
+            one has non-zero length and that length is stored in
+            LENRANGE[1].  Swap the bounds to force a "may be truncated"
+            warning below.  */
+         lenrange[1] = lenrange[0];
+         lenrange[0] = wi::shwi (0, prec);
+       }
+       gcall *call = as_a <gcall *> (stmt);
+       if (lenrange[0] == cntrange[1] && cntrange[0] == cntrange[1])
+       return warning_n (callloc, OPT_Wstringop_truncation,
+                         cntrange[0].to_uhwi (),
+                         "%G%qD output truncated before terminating "
+                         "nul copying %E byte from a string of the "
+                         "same length",
+                         "%G%qD output truncated before terminating nul "
+                         "copying %E bytes from a string of the same "
+                         "length",
+                         call, func, cnt);
+       else if (wi::geu_p (lenrange[0], cntrange[1]))
+       {
+         /* The shortest string is longer than the upper bound of
+            the count so the truncation is certain.  */
+         if (cntrange[0] == cntrange[1])
+           return warning_n (callloc, OPT_Wstringop_truncation,
+                             cntrange[0].to_uhwi (),
+                             "%G%qD output truncated copying %E byte "
+                             "from a string of length %wu",
+                             "%G%qD output truncated copying %E bytes "
+                             "from a string of length %wu",
+                             call, func, cnt, lenrange[0].to_uhwi ());
+         return warning_at (callloc, OPT_Wstringop_truncation,
+                            "%G%qD output truncated copying between %wu "
+                            "and %wu bytes from a string of length %wu",
+                            call, func, cntrange[0].to_uhwi (),
+                            cntrange[1].to_uhwi (), lenrange[0].to_uhwi ());
+       }
+       else if (wi::geu_p (lenrange[1], cntrange[1]))
+       {
+         /* The longest string is longer than the upper bound of
+            the count so the truncation is possible.  */
+         if (cntrange[0] == cntrange[1])
+           return warning_n (callloc, OPT_Wstringop_truncation,
+                             cntrange[0].to_uhwi (),
+                             "%G%qD output may be truncated copying %E "
+                             "byte from a string of length %wu",
+                             "%G%qD output may be truncated copying %E "
+                             "bytes from a string of length %wu",
+                             call, func, cnt, lenrange[1].to_uhwi ());
+         return warning_at (callloc, OPT_Wstringop_truncation,
+                            "%G%qD output may be truncated copying between %wu "
+                            "and %wu bytes from a string of length %wu",
+                            call, func, cntrange[0].to_uhwi (),
+                            cntrange[1].to_uhwi (), lenrange[1].to_uhwi ());
+       }
+       if (cntrange[0] != cntrange[1]
+         && wi::leu_p (cntrange[0], lenrange[0])
+         && wi::leu_p (cntrange[1], lenrange[0] + 1))
+       {
+         /* If the source (including the terminating nul) is longer than
+            the lower bound of the specified count but shorter than the
+            upper bound the copy may (but need not) be truncated.  */
+         return warning_at (callloc, OPT_Wstringop_truncation,
+                            "%G%qD output may be truncated copying between "
+                            "%wu and %wu bytes from a string of length %wu",
+                            call, func, cntrange[0].to_uhwi (),
+                            cntrange[1].to_uhwi (), lenrange[0].to_uhwi ());
+       }
+     }
+   if (tree dstsize = compute_objsize (dst, 1))
+     {
+       /* The source length is uknown.  Try to determine the destination
+        size and see if it matches the specified bound.  If not, bail.
+        Otherwise go on to see if it should be diagnosed for possible
+        truncation.  */
+       if (!dstsize)
+       return false;
+       if (wi::to_wide (dstsize) != cntrange[1])
+       return false;
+       if (cntrange[0] == cntrange[1])
+       return warning_at (callloc, OPT_Wstringop_truncation,
+                          "%G%qD specified bound %E equals destination size",
+                          as_a <gcall *> (stmt), func, cnt);
+     }
+   return false;
+ }
+ /* Check the arguments to the built-in forms of stpncpy and strncpy for
+    out-of-bounds offsets or overlapping access, and to see if the size
+    is derived from calling strlen() on the source argument, and if so,
+    issue the appropriate warning.  */
+ static void
+ handle_builtin_stxncpy (built_in_function, gimple_stmt_iterator *gsi)
+ {
+   if (!strlen_to_stridx)
+     return;
+   gimple *stmt = gsi_stmt (*gsi);
+   bool with_bounds = gimple_call_with_bounds_p (stmt);
+   tree dst = gimple_call_arg (stmt, with_bounds ? 1 : 0);
+   tree src = gimple_call_arg (stmt, with_bounds ? 2 : 1);
+   tree len = gimple_call_arg (stmt, with_bounds ? 3 : 2);
+   tree dstsize = NULL_TREE, srcsize = NULL_TREE;
+   int didx = get_stridx (dst);
+   if (strinfo *sidst = didx > 0 ? get_strinfo (didx) : NULL)
+     {
+       /* Compute the size of the destination string including the NUL.  */
+       if (sidst->nonzero_chars)
+       {
+         tree type = TREE_TYPE (sidst->nonzero_chars);
+         dstsize = fold_build2 (PLUS_EXPR, type, sidst->nonzero_chars,
+                                build_int_cst (type, 1));
+       }
+       dst = sidst->ptr;
+     }
+   int sidx = get_stridx (src);
+   strinfo *sisrc = sidx > 0 ? get_strinfo (sidx) : NULL;
+   if (sisrc)
+     {
+       /* strncat() and strncpy() can modify the source string by writing
+        over the terminating nul so SISRC->DONT_INVALIDATE must be left
+        clear.  */
+       /* Compute the size of the source string including the NUL.  */
+       if (sisrc->nonzero_chars)
+       {
+         tree type = TREE_TYPE (sisrc->nonzero_chars);
+         srcsize = fold_build2 (PLUS_EXPR, type, sisrc->nonzero_chars,
+                                build_int_cst (type, 1));
+       }
+       src = sisrc->ptr;
+     }
+   else
+     srcsize = NULL_TREE;
+   if (!check_bounds_or_overlap (as_a <gcall *>(stmt), dst, src,
+                               dstsize, srcsize))
+     {
+       gimple_set_no_warning (stmt, true);
+       return;
+     }
+   /* If the length argument was computed from strlen(S) for some string
+      S retrieve the strinfo index for the string (PSS->FIRST) alonng with
+      the location of the strlen() call (PSS->SECOND).  */
+   stridx_strlenloc *pss = strlen_to_stridx->get (len);
+   if (!pss || pss->first <= 0)
+     {
+       if (maybe_diag_stxncpy_trunc (*gsi, src, len))
+       gimple_set_no_warning (stmt, true);
+       return;
+     }
+   /* Retrieve the strinfo data for the string S that LEN was computed
+      from as some function F of strlen (S) (i.e., LEN need not be equal
+      to strlen(S)).  */
+   strinfo *silen = get_strinfo (pss->first);
+   location_t callloc = gimple_location (stmt);
+   tree func = gimple_call_fndecl (stmt);
+   bool warned = false;
+   /* When -Wstringop-truncation is set, try to determine truncation
+      before diagnosing possible overflow.  Truncation is implied by
+      the LEN argument being equal to strlen(SRC), regardless of
+      whether its value is known.  Otherwise, issue the more generic
+      -Wstringop-overflow which triggers for LEN arguments that in
+      any meaningful way depend on strlen(SRC).  */
+   if (sisrc == silen
+       && is_strlen_related_p (src, len)
+       && warning_at (callloc, OPT_Wstringop_truncation,
+                    "%G%qD output truncated before terminating nul "
+                    "copying as many bytes from a string as its length",
+                    as_a <gcall *>(stmt), func))
+     warned = true;
+   else if (silen && is_strlen_related_p (src, silen->ptr))
+     warned = warning_at (callloc, OPT_Wstringop_overflow_,
+                        "%G%qD specified bound depends on the length "
+                        "of the source argument",
+                        as_a <gcall *>(stmt), func);
+   if (warned)
+     {
+       location_t strlenloc = pss->second;
+       if (strlenloc != UNKNOWN_LOCATION && strlenloc != callloc)
+       inform (strlenloc, "length computed here");
+     }
  }
  
  /* Handle a memcpy-like ({mem{,p}cpy,__mem{,p}cpy_chk}) call.
@@@ -2576,6 -3251,100 +3235,93 @@@ strlen_check_and_optimize_stmt (gimple_
        else if (code == EQ_EXPR || code == NE_EXPR)
          fold_strstr_to_strncmp (gimple_assign_rhs1 (stmt),
                                  gimple_assign_rhs2 (stmt), stmt);
 -                      wide_int min, max;
+       else if (gimple_assign_load_p (stmt)
+                && TREE_CODE (TREE_TYPE (lhs)) == INTEGER_TYPE
+                && TYPE_MODE (TREE_TYPE (lhs)) == TYPE_MODE (char_type_node)
+                && (TYPE_PRECISION (TREE_TYPE (lhs))
+                    == TYPE_PRECISION (char_type_node))
+                && !gimple_has_volatile_ops (stmt))
+         {
+           tree off = integer_zero_node;
+           unsigned HOST_WIDE_INT coff = 0;
+           int idx = 0;
+           tree rhs1 = gimple_assign_rhs1 (stmt);
+           if (code == MEM_REF)
+             {
+               idx = get_stridx (TREE_OPERAND (rhs1, 0));
+               if (idx > 0)
+                 {
+                   strinfo *si = get_strinfo (idx);
+                   if (si
+                       && si->nonzero_chars
+                       && TREE_CODE (si->nonzero_chars) == INTEGER_CST
+                       && (wi::to_widest (si->nonzero_chars)
+                           >= wi::to_widest (off)))
+                     off = TREE_OPERAND (rhs1, 1);
+                   else
+                     /* This case is not useful.  See if get_addr_stridx
+                        returns something usable.  */
+                     idx = 0;
+                 }
+             }
+           if (idx <= 0)
+             idx = get_addr_stridx (rhs1, NULL_TREE, &coff);
+           if (idx > 0)
+             {
+               strinfo *si = get_strinfo (idx);
+               if (si
+                   && si->nonzero_chars
+                   && TREE_CODE (si->nonzero_chars) == INTEGER_CST)
+                 {
+                   widest_int w1 = wi::to_widest (si->nonzero_chars);
+                   widest_int w2 = wi::to_widest (off) + coff;
+                   if (w1 == w2
+                       && si->full_string_p)
+                     {
+                       if (dump_file && (dump_flags & TDF_DETAILS) != 0)
+                         {
+                           fprintf (dump_file, "Optimizing: ");
+                           print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
+                         }
+                       /* Reading the final '\0' character.  */
+                       tree zero = build_int_cst (TREE_TYPE (lhs), 0);
+                       gimple_set_vuse (stmt, NULL_TREE);
+                       gimple_assign_set_rhs_from_tree (gsi, zero);
+                       *cleanup_eh
+                         |= maybe_clean_or_replace_eh_stmt (stmt,
+                                                            gsi_stmt (*gsi));
+                       stmt = gsi_stmt (*gsi);
+                       update_stmt (stmt);
+                       if (dump_file && (dump_flags & TDF_DETAILS) != 0)
+                         {
+                           fprintf (dump_file, "into: ");
+                           print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
+                         }
+                     }
+                   else if (w1 > w2)
+                     {
+                       /* Reading a character before the final '\0'
+                          character.  Just set the value range to ~[0, 0]
+                          if we don't have anything better.  */
 -                      enum value_range_type vr
 -                        = get_range_info (lhs, &min, &max);
 -                      if (vr == VR_VARYING
 -                          || (vr == VR_RANGE
 -                              && min == wi::min_value (TYPE_PRECISION (type),
 -                                                       TYPE_SIGN (type))
 -                              && max == wi::max_value (TYPE_PRECISION (type),
 -                                                       TYPE_SIGN (type))))
+                       tree type = TREE_TYPE (lhs);
++                      irange ir (lhs);
++                      if (ir.range_for_type_p ())
+                         set_range_info (lhs, VR_ANTI_RANGE,
+                                         wi::zero (TYPE_PRECISION (type)),
+                                         wi::zero (TYPE_PRECISION (type)));
+                     }
+                 }
+             }
+         }
+       if (strlen_to_stridx)
+         {
+           tree rhs1 = gimple_assign_rhs1 (stmt);
+           if (stridx_strlenloc *ps = strlen_to_stridx->get (rhs1))
+             strlen_to_stridx->put (lhs, stridx_strlenloc (*ps));
+         }
        }
      else if (TREE_CODE (lhs) != SSA_NAME && !TREE_SIDE_EFFECTS (lhs))
        {
index 1a1134eecfb1a5260655dc7862bf066f9379e8ab,6cce43be593fcb3b64a98a357eca36e0fbd2a614..938fdc21ad6221e2b4f4eac8e819d3c3375464c1
@@@ -452,10 -459,10 +457,10 @@@ set_nonzero_bits (tree name, const wide
        if (mask == -1)
        return;
        set_range_info_raw (name, VR_RANGE,
-                         TYPE_MIN_VALUE (TREE_TYPE (name)),
-                         TYPE_MAX_VALUE (TREE_TYPE (name)));
+                         wi::to_wide (TYPE_MIN_VALUE (TREE_TYPE (name))),
+                         wi::to_wide (TYPE_MAX_VALUE (TREE_TYPE (name))));
      }
 -  range_info_def *ri = SSA_NAME_RANGE_INFO (name);
 +  irange_storage *ri = SSA_NAME_RANGE_INFO (name);
    ri->set_nonzero_bits (mask);
  }
  
@@@ -487,6 -494,6 +492,23 @@@ get_nonzero_bits (const_tree name
    return ri->get_nonzero_bits ();
  }
  
++/* Similar to above, but return the non-zero bits as an irange in IR.  */
++/* FIXME: This possibly deprecates intersect_range_with_nonzero_bits.  */
++
++void
++get_nonzero_bits_as_range (irange &ir, const_tree name)
++{
++  wide_int nzb = get_nonzero_bits (name);
++  if (nzb == 0)
++    {
++      ir.set_range_for_type (TREE_TYPE (name));
++      ir.clear ();
++      return;
++    }
++  // FIXME: errr, this needs testing.
++  ir = irange (TREE_TYPE (name), wi::clz (nzb), wi::ctz (nzb));
++}
++
  /* Return TRUE is OP, an SSA_NAME has a range of values [0..1], false
     otherwise.
  
index 55a96255977fd974c44ac85ac4c0bda37a1e2300,d39f39694366bf569224f473e9920ab39a2ac5d4..0a499fe066a8fbb0f83bc26bc67b396ea4822b96
@@@ -71,9 -73,10 +71,10 @@@ extern void set_range_info_raw (tree, e
                                const wide_int_ref &,
                                const wide_int_ref &);
  /* Gets the value range from SSA.  */
 -extern enum value_range_type get_range_info (const_tree, wide_int *,
 -                                           wide_int *);
 -extern void set_nonzero_bits (tree, const wide_int_ref &);
 +extern bool get_range_info (const_tree, wide_int *, wide_int *);
 +extern void set_nonzero_bits (tree, const wide_int &);
  extern wide_int get_nonzero_bits (const_tree);
++extern void get_nonzero_bits_as_range (irange &, const_tree);
  extern bool ssa_name_has_boolean_range (tree);
  extern void init_ssanames (struct function *, int);
  extern void fini_ssanames (struct function *);
Simple merge
diff --cc gcc/tree-vrp.c
index 71d19d73baccef21b45e65a2a8d5cf8070ff6159,aa53db655762ce33fd3afe423b0bc6e09a180f5b..bde6cceb73583ed9bd17d71a767787f63a187b20
@@@ -500,168 -513,9 +513,74 @@@ abs_extent_range (value_range *vr, tre
    set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
  }
  
-    converted.  For instnace, ipa-prop.c.  */
 +/* Return TRUE if an irange is an ANTI_RANGE.  This is a temporary
 +   measure offering backward compatibility with range_info_def, and
 +   will go away.  */
 +
 +static bool
 +irange_is_anti_range (irange r)
 +{
 +  const_tree type = r.get_type ();
 +  // Remember: VR_ANTI_RANGE([3,10]) ==> [-MIN,2][11,MAX]
 +  unsigned int precision = TYPE_PRECISION (type);
 +  wide_int min = wi::min_value (precision, TYPE_SIGN (type));
 +  wide_int max = wi::max_value (precision, TYPE_SIGN (type));
 +  return (r.num_pairs () == 2
 +          && r.lower_bound () == min
 +          && r.upper_bound () == max);
 +}
 +
 +/* Convert the range info of an SSA name into VRP's internal
 +   value_range representation.  Return VR_RANGE/VR_ANTI_RANGE if range
 +   info is available for the SSA name, otherwise VR_VARYING is
 +   returned.  MIN/MAX is set if range info is available.
 +
 +   FIXME: Any use of this function outside of tree-vrp must be
-       || (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (ssa)))
++   converted.  For instance, ipa-prop.c.  */
 +
 +enum value_range_type
 +get_range_info_as_value_range (const_tree ssa, wide_int *min, wide_int *max)
 +{
 +  if (!SSA_NAME_RANGE_INFO (ssa)
- /* Return value range information for VAR.
-    If we have no values ranges recorded (ie, VRP is not running), then
-    return NULL.  Otherwise create an empty range if none existed for VAR.  */
- static value_range *
- get_value_range (const_tree var)
- {
-   static const value_range vr_const_varying
-     = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
-   value_range *vr;
-   tree sym;
-   unsigned ver = SSA_NAME_VERSION (var);
-   /* If we have no recorded ranges, then return NULL.  */
-   if (! vr_value)
-     return NULL;
-   /* If we query the range for a new SSA name return an unmodifiable VARYING.
-      We should get here at most from the substitute-and-fold stage which
-      will never try to change values.  */
-   if (ver >= num_vr_values)
-     return CONST_CAST (value_range *, &vr_const_varying);
-   vr = vr_value[ver];
-   if (vr)
-     return vr;
-   /* After propagation finished do not allocate new value-ranges.  */
-   if (values_propagated)
-     return CONST_CAST (value_range *, &vr_const_varying);
-   /* Create a default value range.  */
-   vr_value[ver] = vr = vrp_value_range_pool.allocate ();
-   memset (vr, 0, sizeof (*vr));
-   /* Defer allocating the equivalence set.  */
-   vr->equiv = NULL;
-   /* If VAR is a default definition of a parameter, the variable can
-      take any value in VAR's type.  */
-   if (SSA_NAME_IS_DEFAULT_DEF (var))
-     {
-       sym = SSA_NAME_VAR (var);
-       if (TREE_CODE (sym) == PARM_DECL)
-       {
-         /* Try to use the "nonnull" attribute to create ~[0, 0]
-            anti-ranges for pointers.  Note that this is only valid with
-            default definitions of PARM_DECLs.  */
-         if (POINTER_TYPE_P (TREE_TYPE (sym))
-             && (nonnull_arg_p (sym)
-                 || get_ptr_nonnull (var)))
-           set_value_range_to_nonnull (vr, TREE_TYPE (sym));
-         else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
-           {
-             wide_int min, max;
-             value_range_type rtype
-               = get_range_info_as_value_range (var, &min, &max);
-             if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
-               set_value_range (vr, rtype,
-                                wide_int_to_tree (TREE_TYPE (var), min),
-                                wide_int_to_tree (TREE_TYPE (var), max),
-                                NULL);
-             else
-               set_value_range_to_varying (vr);
-           }
-         else
-           set_value_range_to_varying (vr);
-       }
-       else if (TREE_CODE (sym) == RESULT_DECL
-              && DECL_BY_REFERENCE (sym))
-       set_value_range_to_nonnull (vr, TREE_TYPE (sym));
-     }
-   return vr;
- }
- /* Set value-ranges of all SSA names defined by STMT to varying.  */
- static void
- set_defs_to_varying (gimple *stmt)
- {
-   ssa_op_iter i;
-   tree def;
-   FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
-     {
-       value_range *vr = get_value_range (def);
-       /* Avoid writing to vr_const_varying get_value_range may return.  */
-       if (vr->type != VR_VARYING)
-       set_value_range_to_varying (vr);
-     }
- }
++      || (GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (TREE_TYPE (ssa)))
 +        > 2 * HOST_BITS_PER_WIDE_INT))
 +    return VR_VARYING;
 +
 +  irange ri (ssa);
 +  if (irange_is_anti_range (ri))
 +    {
 +      irange tmp (ri);
 +      tmp.invert ();
 +      gcc_assert (!tmp.overflow_p ());
 +      if (tmp.num_pairs () != 1)
 +       {
 +         fprintf (stderr, "Inverse of anti range does not have 2 elements.\n");
 +         fprintf (stderr, "Type: ");
 +         debug_generic_stmt (const_cast<tree> (ri.get_type ()));
 +         fprintf (stderr, "Original anti range:\n");
 +         ri.dump ();
 +         fprintf (stderr, "\n");
 +         fprintf (stderr, "Supposed inverse of anti range:\n");
 +         tmp.dump ();
 +         fprintf (stderr, "\n");
 +         gcc_unreachable ();
 +       }
 +      *min = tmp.lower_bound ();
 +      *max = tmp.upper_bound ();
 +      return VR_ANTI_RANGE;
 +    }
 +
 +  /* We chop off any middle ranges, because range_info_def has no use
 +     for such granularity.  */
 +  *min = ri.lower_bound ();
 +  *max = ri.upper_bound ();
 +  return VR_RANGE;
 +}
 +
 +
  /* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */
  
static inline bool
+ bool
  vrp_operand_equal_p (const_tree val1, const_tree val2)
  {
    if (val1 == val2)
@@@ -6289,4213 -4991,1508 +5056,1509 @@@ vrp_prop::check_array_ref (location_t l
      }
  }
  
- /* Do an RPO walk over the function computing SSA name liveness
-    on-the-fly and deciding on assert expressions to insert.  */
+ /* Searches if the expr T, located at LOCATION computes
+    address of an ARRAY_REF, and call check_array_ref on it.  */
  
static void
find_assert_locations (void)
+ void
vrp_prop::search_for_addr_array (tree t, location_t location)
  {
-   int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
-   int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
-   int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
-   int rpo_cnt, i;
-   live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
-   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
-   for (i = 0; i < rpo_cnt; ++i)
-     bb_rpo[rpo[i]] = i;
-   /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
-      the order we compute liveness and insert asserts we otherwise
-      fail to insert asserts into the loop latch.  */
-   loop_p loop;
-   FOR_EACH_LOOP (loop, 0)
+   /* Check each ARRAY_REFs in the reference chain. */
+   do
      {
-       i = loop->latch->index;
-       unsigned int j = single_succ_edge (loop->latch)->dest_idx;
-       for (gphi_iterator gsi = gsi_start_phis (loop->header);
-          !gsi_end_p (gsi); gsi_next (&gsi))
-       {
-         gphi *phi = gsi.phi ();
-         if (virtual_operand_p (gimple_phi_result (phi)))
-           continue;
-         tree arg = gimple_phi_arg_def (phi, j);
-         if (TREE_CODE (arg) == SSA_NAME)
-           {
-             if (live[i] == NULL)
-               {
-                 live[i] = sbitmap_alloc (num_ssa_names);
-                 bitmap_clear (live[i]);
-               }
-             bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
-           }
-       }
+       if (TREE_CODE (t) == ARRAY_REF)
+       check_array_ref (location, t, true /*ignore_off_by_one*/);
+       t = TREE_OPERAND (t, 0);
      }
+   while (handled_component_p (t));
  
-   for (i = rpo_cnt - 1; i >= 0; --i)
+   if (TREE_CODE (t) == MEM_REF
+       && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
+       && !TREE_NO_WARNING (t))
      {
-       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
-       edge e;
-       edge_iterator ei;
+       tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
+       tree low_bound, up_bound, el_sz;
+       offset_int idx;
+       if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
+         || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
+         || !TYPE_DOMAIN (TREE_TYPE (tem)))
+       return;
  
-       if (!live[rpo[i]])
-       {
-         live[rpo[i]] = sbitmap_alloc (num_ssa_names);
-         bitmap_clear (live[rpo[i]]);
-       }
+       low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
+       up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
+       el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
+       if (!low_bound
+         || TREE_CODE (low_bound) != INTEGER_CST
+         || !up_bound
+         || TREE_CODE (up_bound) != INTEGER_CST
+         || !el_sz
+         || TREE_CODE (el_sz) != INTEGER_CST)
+       return;
  
-       /* Process BB and update the live information with uses in
-          this block.  */
-       find_assert_locations_1 (bb, live[rpo[i]]);
+       if (!mem_ref_offset (t).is_constant (&idx))
+       return;
  
-       /* Merge liveness into the predecessor blocks and free it.  */
-       if (!bitmap_empty_p (live[rpo[i]]))
+       idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
+       if (idx < 0)
        {
-         int pred_rpo = i;
-         FOR_EACH_EDGE (e, ei, bb->preds)
+         if (dump_file && (dump_flags & TDF_DETAILS))
            {
-             int pred = e->src->index;
-             if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
-               continue;
-             if (!live[pred])
-               {
-                 live[pred] = sbitmap_alloc (num_ssa_names);
-                 bitmap_clear (live[pred]);
-               }
-             bitmap_ior (live[pred], live[pred], live[rpo[i]]);
-             if (bb_rpo[pred] < pred_rpo)
-               pred_rpo = bb_rpo[pred];
+             fprintf (dump_file, "Array bound warning for ");
+             dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
+             fprintf (dump_file, "\n");
            }
-         /* Record the RPO number of the last visited block that needs
-            live information from this block.  */
-         last_rpo[rpo[i]] = pred_rpo;
+         warning_at (location, OPT_Warray_bounds,
+                     "array subscript %wi is below array bounds of %qT",
+                     idx.to_shwi (), TREE_TYPE (tem));
+         TREE_NO_WARNING (t) = 1;
        }
-       else
+       else if (idx > (wi::to_offset (up_bound)
+                     - wi::to_offset (low_bound) + 1))
        {
-         sbitmap_free (live[rpo[i]]);
-         live[rpo[i]] = NULL;
+         if (dump_file && (dump_flags & TDF_DETAILS))
+           {
+             fprintf (dump_file, "Array bound warning for ");
+             dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
+             fprintf (dump_file, "\n");
+           }
+         warning_at (location, OPT_Warray_bounds,
+                     "array subscript %wu is above array bounds of %qT",
+                     idx.to_uhwi (), TREE_TYPE (tem));
+         TREE_NO_WARNING (t) = 1;
        }
-       /* We can free all successors live bitmaps if all their
-          predecessors have been visited already.  */
-       FOR_EACH_EDGE (e, ei, bb->succs)
-       if (last_rpo[e->dest->index] == i
-           && live[e->dest->index])
-         {
-           sbitmap_free (live[e->dest->index]);
-           live[e->dest->index] = NULL;
-         }
      }
-   XDELETEVEC (rpo);
-   XDELETEVEC (bb_rpo);
-   XDELETEVEC (last_rpo);
-   for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
-     if (live[i])
-       sbitmap_free (live[i]);
-   XDELETEVEC (live);
  }
  
- /* Create an ASSERT_EXPR for NAME and insert it in the location
-    indicated by LOC.  Return true if we made any edge insertions.  */
+ /* walk_tree() callback that checks if *TP is
+    an ARRAY_REF inside an ADDR_EXPR (in which an array
+    subscript one outside the valid range is allowed). Call
+    check_array_ref for each ARRAY_REF found. The location is
+    passed in DATA.  */
  
- static bool
process_assert_insertions_for (tree name, assert_locus *loc)
+ static tree
check_array_bounds (tree *tp, int *walk_subtree, void *data)
  {
-   /* Build the comparison expression NAME_i COMP_CODE VAL.  */
-   gimple *stmt;
-   tree cond;
-   gimple *assert_stmt;
-   edge_iterator ei;
-   edge e;
-   /* If we have X <=> X do not insert an assert expr for that.  */
-   if (loc->expr == loc->val)
-     return false;
+   tree t = *tp;
+   struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
+   location_t location;
  
-   cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
-   assert_stmt = build_assert_expr_for (cond, name);
-   if (loc->e)
-     {
-       /* We have been asked to insert the assertion on an edge.  This
-        is used only by COND_EXPR and SWITCH_EXPR assertions.  */
-       gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
-                          || (gimple_code (gsi_stmt (loc->si))
-                              == GIMPLE_SWITCH));
+   if (EXPR_HAS_LOCATION (t))
+     location = EXPR_LOCATION (t);
+   else
+     location = gimple_location (wi->stmt);
  
-       gsi_insert_on_edge (loc->e, assert_stmt);
-       return true;
-     }
+   *walk_subtree = TRUE;
  
-   /* If the stmt iterator points at the end then this is an insertion
-      at the beginning of a block.  */
-   if (gsi_end_p (loc->si))
-     {
-       gimple_stmt_iterator si = gsi_after_labels (loc->bb);
-       gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
-       return false;
+   vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
+   if (TREE_CODE (t) == ARRAY_REF)
+     vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
  
-     }
-   /* Otherwise, we can insert right after LOC->SI iff the
-      statement must not be the last statement in the block.  */
-   stmt = gsi_stmt (loc->si);
-   if (!stmt_ends_bb_p (stmt))
+   else if (TREE_CODE (t) == ADDR_EXPR)
      {
-       gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
-       return false;
+       vrp_prop->search_for_addr_array (t, location);
+       *walk_subtree = FALSE;
      }
  
-   /* If STMT must be the last statement in BB, we can only insert new
-      assertions on the non-abnormal edge out of BB.  Note that since
-      STMT is not control flow, there may only be one non-abnormal/eh edge
-      out of BB.  */
-   FOR_EACH_EDGE (e, ei, loc->bb->succs)
-     if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
-       {
-       gsi_insert_on_edge (e, assert_stmt);
-       return true;
-       }
-   gcc_unreachable ();
+   return NULL_TREE;
  }
  
- /* Qsort helper for sorting assert locations.  If stable is true, don't
-    use iterative_hash_expr because it can be unstable for -fcompare-debug,
-    on the other side some pointers might be NULL.  */
+ /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
+    to walk over all statements of all reachable BBs and call
+    check_array_bounds on them.  */
  
- template <bool stable>
- static int
- compare_assert_loc (const void *pa, const void *pb)
+ class check_array_bounds_dom_walker : public dom_walker
  {
-   assert_locus * const a = *(assert_locus * const *)pa;
-   assert_locus * const b = *(assert_locus * const *)pb;
+  public:
+   check_array_bounds_dom_walker (vrp_prop *prop)
+     : dom_walker (CDI_DOMINATORS,
+                 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
+                    flags, so that we can merge in information on
+                    non-executable edges from vrp_folder .  */
+                 REACHABLE_BLOCKS_PRESERVING_FLAGS),
+       m_prop (prop) {}
+   ~check_array_bounds_dom_walker () {}
  
-   /* If stable, some asserts might be optimized away already, sort
-      them last.  */
-   if (stable)
-     {
-       if (a == NULL)
-       return b != NULL;
-       else if (b == NULL)
-       return -1;
-     }
+   edge before_dom_children (basic_block) FINAL OVERRIDE;
  
-   if (a->e == NULL && b->e != NULL)
-     return 1;
-   else if (a->e != NULL && b->e == NULL)
-     return -1;
+  private:
+   vrp_prop *m_prop;
+ };
  
-   /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
-      no need to test both a->e and b->e.  */
+ /* Implementation of dom_walker::before_dom_children.
  
-   /* Sort after destination index.  */
-   if (a->e == NULL)
-     ;
-   else if (a->e->dest->index > b->e->dest->index)
-     return 1;
-   else if (a->e->dest->index < b->e->dest->index)
-     return -1;
+    Walk over all statements of BB and call check_array_bounds on them,
+    and determine if there's a unique successor edge.  */
  
-   /* Sort after comp_code.  */
-   if (a->comp_code > b->comp_code)
-     return 1;
-   else if (a->comp_code < b->comp_code)
-     return -1;
+ edge
+ check_array_bounds_dom_walker::before_dom_children (basic_block bb)
+ {
+   gimple_stmt_iterator si;
+   for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
+     {
+       gimple *stmt = gsi_stmt (si);
+       struct walk_stmt_info wi;
+       if (!gimple_has_location (stmt)
+         || is_gimple_debug (stmt))
+       continue;
  
-   hashval_t ha, hb;
+       memset (&wi, 0, sizeof (wi));
  
-   /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
-      uses DECL_UID of the VAR_DECL, so sorting might differ between
-      -g and -g0.  When doing the removal of redundant assert exprs
-      and commonization to successors, this does not matter, but for
-      the final sort needs to be stable.  */
-   if (stable)
-     {
-       ha = 0;
-       hb = 0;
-     }
-   else
-     {
-       ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
-       hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
+       wi.info = m_prop;
+       walk_gimple_op (stmt, check_array_bounds, &wi);
      }
  
-   /* Break the tie using hashing and source/bb index.  */
-   if (ha == hb)
-     return (a->e != NULL
-           ? a->e->src->index - b->e->src->index
-           : a->bb->index - b->bb->index);
-   return ha > hb ? 1 : -1;
+   /* Determine if there's a unique successor edge, and if so, return
+      that back to dom_walker, ensuring that we don't visit blocks that
+      became unreachable during the VRP propagation
+      (PR tree-optimization/83312).  */
+   return find_taken_edge (bb, NULL_TREE);
  }
  
- /* Process all the insertions registered for every name N_i registered
-    in NEED_ASSERT_FOR.  The list of assertions to be inserted are
-    found in ASSERTS_FOR[i].  */
+ /* Walk over all statements of all reachable BBs and call check_array_bounds
+    on them.  */
  
static void
process_assert_insertions (void)
+ void
vrp_prop::check_all_array_refs ()
  {
-   unsigned i;
-   bitmap_iterator bi;
-   bool update_edges_p = false;
-   int num_asserts = 0;
-   if (dump_file && (dump_flags & TDF_DETAILS))
-     dump_all_asserts (dump_file);
+   check_array_bounds_dom_walker w (this);
+   w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ }
  
-   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
-     {
-       assert_locus *loc = asserts_for[i];
-       gcc_assert (loc);
+ /* Return true if all imm uses of VAR are either in STMT, or
+    feed (optionally through a chain of single imm uses) GIMPLE_COND
+    in basic block COND_BB.  */
  
-       auto_vec<assert_locus *, 16> asserts;
-       for (; loc; loc = loc->next)
-       asserts.safe_push (loc);
-       asserts.qsort (compare_assert_loc<false>);
+ static bool
+ all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
+ {
+   use_operand_p use_p, use2_p;
+   imm_use_iterator iter;
  
-       /* Push down common asserts to successors and remove redundant ones.  */
-       unsigned ecnt = 0;
-       assert_locus *common = NULL;
-       unsigned commonj = 0;
-       for (unsigned j = 0; j < asserts.length (); ++j)
-       {
-         loc = asserts[j];
-         if (! loc->e)
-           common = NULL;
-         else if (! common
-                  || loc->e->dest != common->e->dest
-                  || loc->comp_code != common->comp_code
-                  || ! operand_equal_p (loc->val, common->val, 0)
-                  || ! operand_equal_p (loc->expr, common->expr, 0))
-           {
-             commonj = j;
-             common = loc;
-             ecnt = 1;
-           }
-         else if (loc->e == asserts[j-1]->e)
-           {
-             /* Remove duplicate asserts.  */
-             if (commonj == j - 1)
-               {
-                 commonj = j;
-                 common = loc;
-               }
-             free (asserts[j-1]);
-             asserts[j-1] = NULL;
-           }
-         else
-           {
-             ecnt++;
-             if (EDGE_COUNT (common->e->dest->preds) == ecnt)
-               {
-                 /* We have the same assertion on all incoming edges of a BB.
-                    Insert it at the beginning of that block.  */
-                 loc->bb = loc->e->dest;
-                 loc->e = NULL;
-                 loc->si = gsi_none ();
-                 common = NULL;
-                 /* Clear asserts commoned.  */
-                 for (; commonj != j; ++commonj)
-                   if (asserts[commonj])
-                     {
-                       free (asserts[commonj]);
-                       asserts[commonj] = NULL;
-                     }
-               }
-           }
-       }
-       /* The asserts vector sorting above might be unstable for
-        -fcompare-debug, sort again to ensure a stable sort.  */
-       asserts.qsort (compare_assert_loc<true>);
-       for (unsigned j = 0; j < asserts.length (); ++j)
-       {
-         loc = asserts[j];
-         if (! loc)
-           break;
-         update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
-         num_asserts++;
-         free (loc);
-       }
-     }
-   if (update_edges_p)
-     gsi_commit_edge_inserts ();
-   statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
-                           num_asserts);
+   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
+     if (USE_STMT (use_p) != stmt)
+       {
+       gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
+       if (is_gimple_debug (use_stmt))
+         continue;
+       while (is_gimple_assign (use_stmt)
+              && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
+              && single_imm_use (gimple_assign_lhs (use_stmt),
+                                 &use2_p, &use_stmt2))
+         use_stmt = use_stmt2;
+       if (gimple_code (use_stmt) != GIMPLE_COND
+           || gimple_bb (use_stmt) != cond_bb)
+         return false;
+       }
+   return true;
  }
  
- /* Traverse the flowgraph looking for conditional jumps to insert range
-    expressions.  These range expressions are meant to provide information
-    to optimizations that need to reason in terms of value ranges.  They
-    will not be expanded into RTL.  For instance, given:
-    x = ...
-    y = ...
-    if (x < y)
-      y = x - 2;
-    else
-      x = y + 3;
-    this pass will transform the code into:
-    x = ...
-    y = ...
-    if (x < y)
-     {
-       x = ASSERT_EXPR <x, x < y>
-       y = x - 2
-     }
+ /* Handle
+    _4 = x_3 & 31;
+    if (_4 != 0)
+      goto <bb 6>;
     else
-     {
-       y = ASSERT_EXPR <y, x >= y>
-       x = y + 3
-     }
-    The idea is that once copy and constant propagation have run, other
-    optimizations will be able to determine what ranges of values can 'x'
-    take in different paths of the code, simply by checking the reaching
-    definition of 'x'.  */
+      goto <bb 7>;
+    <bb 6>:
+    __builtin_unreachable ();
+    <bb 7>:
+    x_5 = ASSERT_EXPR <x_3, ...>;
+    If x_3 has no other immediate uses (checked by caller),
+    var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
+    from the non-zero bitmask.  */
  
static void
insert_range_assertions (void)
+ void
maybe_set_nonzero_bits (edge e, tree var)
  {
-   need_assert_for = BITMAP_ALLOC (NULL);
-   asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
+   basic_block cond_bb = e->src;
+   gimple *stmt = last_stmt (cond_bb);
+   tree cst;
  
-   calculate_dominance_info (CDI_DOMINATORS);
+   if (stmt == NULL
+       || gimple_code (stmt) != GIMPLE_COND
+       || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
+                                    ? EQ_EXPR : NE_EXPR)
+       || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
+       || !integer_zerop (gimple_cond_rhs (stmt)))
+     return;
  
-   find_assert_locations ();
-   if (!bitmap_empty_p (need_assert_for))
+   stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
+   if (!is_gimple_assign (stmt)
+       || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
+       || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
+     return;
+   if (gimple_assign_rhs1 (stmt) != var)
      {
-       process_assert_insertions ();
-       update_ssa (TODO_update_ssa_no_phi);
-     }
+       gimple *stmt2;
  
-   if (dump_file && (dump_flags & TDF_DETAILS))
-     {
-       fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
-       dump_function_to_file (current_function_decl, dump_file, dump_flags);
+       if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
+       return;
+       stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
+       if (!gimple_assign_cast_p (stmt2)
+         || gimple_assign_rhs1 (stmt2) != var
+         || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
+         || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
+                             != TYPE_PRECISION (TREE_TYPE (var))))
+       return;
      }
-   free (asserts_for);
-   BITMAP_FREE (need_assert_for);
+   cst = gimple_assign_rhs2 (stmt);
+   set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
+                                         wi::to_wide (cst)));
  }
  
- /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
-    and "struct" hacks. If VRP can determine that the
-    array subscript is a constant, check if it is outside valid
-    range. If the array subscript is a RANGE, warn if it is
-    non-overlapping with valid range.
-    IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR.  */
+ /* Convert range assertion expressions into the implied copies and
+    copy propagate away the copies.  Doing the trivial copy propagation
+    here avoids the need to run the full copy propagation pass after
+    VRP.
  
- static void
- check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
- {
-   value_range *vr = NULL;
-   tree low_sub, up_sub;
-   tree low_bound, up_bound, up_bound_p1;
+    FIXME, this will eventually lead to copy propagation removing the
+    names that had useful range information attached to them.  For
+    instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
+    then N_i will have the range [3, +INF].
  
-   if (TREE_NO_WARNING (ref))
-     return;
+    However, by converting the assertion into the implied copy
+    operation N_i = N_j, we will then copy-propagate N_j into the uses
+    of N_i and lose the range information.  We may want to hold on to
+    ASSERT_EXPRs a little while longer as the ranges could be used in
+    things like jump threading.
  
-   low_sub = up_sub = TREE_OPERAND (ref, 1);
-   up_bound = array_ref_up_bound (ref);
+    The problem with keeping ASSERT_EXPRs around is that passes after
+    VRP need to handle them appropriately.
  
-   /* Can not check flexible arrays.  */
-   if (!up_bound
-       || TREE_CODE (up_bound) != INTEGER_CST)
-     return;
+    Another approach would be to make the range information a first
+    class property of the SSA_NAME so that it can be queried from
+    any pass.  This is made somewhat more complex by the need for
+    multiple ranges to be associated with one SSA_NAME.  */
  
-   /* Accesses to trailing arrays via pointers may access storage
-      beyond the types array bounds.  */
-   if (warn_array_bounds < 2
-       && array_at_struct_end_p (ref))
-     return;
+ static void
+ remove_range_assertions (void)
+ {
+   basic_block bb;
+   gimple_stmt_iterator si;
+   /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
+      a basic block preceeded by GIMPLE_COND branching to it and
+      __builtin_trap, -1 if not yet checked, 0 otherwise.  */
+   int is_unreachable;
  
-   low_bound = array_ref_low_bound (ref);
-   up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
-                                build_int_cst (TREE_TYPE (up_bound), 1));
+   /* Note that the BSI iterator bump happens at the bottom of the
+      loop and no bump is necessary if we're removing the statement
+      referenced by the current BSI.  */
+   FOR_EACH_BB_FN (bb, cfun)
+     for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
+       {
+       gimple *stmt = gsi_stmt (si);
  
-   /* Empty array.  */
-   if (tree_int_cst_equal (low_bound, up_bound_p1))
-     {
-       warning_at (location, OPT_Warray_bounds,
-                 "array subscript is above array bounds");
-       TREE_NO_WARNING (ref) = 1;
-     }
+       if (is_gimple_assign (stmt)
+           && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
+         {
+           tree lhs = gimple_assign_lhs (stmt);
+           tree rhs = gimple_assign_rhs1 (stmt);
+           tree var;
  
-   if (TREE_CODE (low_sub) == SSA_NAME)
-     {
-       vr = get_value_range (low_sub);
-       if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
-         {
-           low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
-           up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
-         }
-     }
+           var = ASSERT_EXPR_VAR (rhs);
  
-   if (vr && vr->type == VR_ANTI_RANGE)
-     {
-       if (TREE_CODE (up_sub) == INTEGER_CST
-           && (ignore_off_by_one
-             ? tree_int_cst_lt (up_bound, up_sub)
-             : tree_int_cst_le (up_bound, up_sub))
-           && TREE_CODE (low_sub) == INTEGER_CST
-           && tree_int_cst_le (low_sub, low_bound))
-         {
-           warning_at (location, OPT_Warray_bounds,
-                     "array subscript is outside array bounds");
-           TREE_NO_WARNING (ref) = 1;
-         }
-     }
-   else if (TREE_CODE (up_sub) == INTEGER_CST
-          && (ignore_off_by_one
-              ? !tree_int_cst_le (up_sub, up_bound_p1)
-              : !tree_int_cst_le (up_sub, up_bound)))
-     {
-       if (dump_file && (dump_flags & TDF_DETAILS))
-       {
-         fprintf (dump_file, "Array bound warning for ");
-         dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
-         fprintf (dump_file, "\n");
-       }
-       warning_at (location, OPT_Warray_bounds,
-                 "array subscript is above array bounds");
-       TREE_NO_WARNING (ref) = 1;
-     }
-   else if (TREE_CODE (low_sub) == INTEGER_CST
-            && tree_int_cst_lt (low_sub, low_bound))
-     {
-       if (dump_file && (dump_flags & TDF_DETAILS))
-       {
-         fprintf (dump_file, "Array bound warning for ");
-         dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
-         fprintf (dump_file, "\n");
-       }
-       warning_at (location, OPT_Warray_bounds,
-                 "array subscript is below array bounds");
-       TREE_NO_WARNING (ref) = 1;
-     }
- }
- /* Searches if the expr T, located at LOCATION computes
-    address of an ARRAY_REF, and call check_array_ref on it.  */
- static void
- search_for_addr_array (tree t, location_t location)
- {
-   /* Check each ARRAY_REFs in the reference chain. */
-   do
-     {
-       if (TREE_CODE (t) == ARRAY_REF)
-       check_array_ref (location, t, true /*ignore_off_by_one*/);
-       t = TREE_OPERAND (t, 0);
-     }
-   while (handled_component_p (t));
-   if (TREE_CODE (t) == MEM_REF
-       && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
-       && !TREE_NO_WARNING (t))
-     {
-       tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
-       tree low_bound, up_bound, el_sz;
-       offset_int idx;
-       if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
-         || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
-         || !TYPE_DOMAIN (TREE_TYPE (tem)))
-       return;
-       low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
-       up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
-       el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
-       if (!low_bound
-         || TREE_CODE (low_bound) != INTEGER_CST
-         || !up_bound
-         || TREE_CODE (up_bound) != INTEGER_CST
-         || !el_sz
-         || TREE_CODE (el_sz) != INTEGER_CST)
-       return;
-       idx = mem_ref_offset (t);
-       idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
-       if (idx < 0)
-       {
-         if (dump_file && (dump_flags & TDF_DETAILS))
-           {
-             fprintf (dump_file, "Array bound warning for ");
-             dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
-             fprintf (dump_file, "\n");
-           }
-         warning_at (location, OPT_Warray_bounds,
-                     "array subscript is below array bounds");
-         TREE_NO_WARNING (t) = 1;
-       }
-       else if (idx > (wi::to_offset (up_bound)
-                     - wi::to_offset (low_bound) + 1))
-       {
-         if (dump_file && (dump_flags & TDF_DETAILS))
-           {
-             fprintf (dump_file, "Array bound warning for ");
-             dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
-             fprintf (dump_file, "\n");
-           }
-         warning_at (location, OPT_Warray_bounds,
-                     "array subscript is above array bounds");
-         TREE_NO_WARNING (t) = 1;
-       }
-     }
- }
- /* walk_tree() callback that checks if *TP is
-    an ARRAY_REF inside an ADDR_EXPR (in which an array
-    subscript one outside the valid range is allowed). Call
-    check_array_ref for each ARRAY_REF found. The location is
-    passed in DATA.  */
- static tree
- check_array_bounds (tree *tp, int *walk_subtree, void *data)
- {
-   tree t = *tp;
-   struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
-   location_t location;
-   if (EXPR_HAS_LOCATION (t))
-     location = EXPR_LOCATION (t);
-   else
-     {
-       location_t *locp = (location_t *) wi->info;
-       location = *locp;
-     }
-   *walk_subtree = TRUE;
-   if (TREE_CODE (t) == ARRAY_REF)
-     check_array_ref (location, t, false /*ignore_off_by_one*/);
-   else if (TREE_CODE (t) == ADDR_EXPR)
-     {
-       search_for_addr_array (t, location);
-       *walk_subtree = FALSE;
-     }
-   return NULL_TREE;
- }
- /* Walk over all statements of all reachable BBs and call check_array_bounds
-    on them.  */
- static void
- check_all_array_refs (void)
- {
-   basic_block bb;
-   gimple_stmt_iterator si;
-   FOR_EACH_BB_FN (bb, cfun)
-     {
-       edge_iterator ei;
-       edge e;
-       bool executable = false;
-       /* Skip blocks that were found to be unreachable.  */
-       FOR_EACH_EDGE (e, ei, bb->preds)
-       executable |= !!(e->flags & EDGE_EXECUTABLE);
-       if (!executable)
-       continue;
-       for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
-       {
-         gimple *stmt = gsi_stmt (si);
-         struct walk_stmt_info wi;
-         if (!gimple_has_location (stmt)
-             || is_gimple_debug (stmt))
-           continue;
-         memset (&wi, 0, sizeof (wi));
-         location_t loc = gimple_location (stmt);
-         wi.info = &loc;
-         walk_gimple_op (gsi_stmt (si),
-                         check_array_bounds,
-                         &wi);
-       }
-     }
- }
- /* Return true if all imm uses of VAR are either in STMT, or
-    feed (optionally through a chain of single imm uses) GIMPLE_COND
-    in basic block COND_BB.  */
+           if (TREE_CODE (var) == SSA_NAME
+               && !POINTER_TYPE_P (TREE_TYPE (lhs))
+               && SSA_NAME_RANGE_INFO (lhs))
+             {
+               if (is_unreachable == -1)
+                 {
+                   is_unreachable = 0;
+                   if (single_pred_p (bb)
+                       && assert_unreachable_fallthru_edge_p
+                                                   (single_pred_edge (bb)))
+                     is_unreachable = 1;
+                 }
+               /* Handle
+                  if (x_7 >= 10 && x_7 < 20)
+                    __builtin_unreachable ();
+                  x_8 = ASSERT_EXPR <x_7, ...>;
+                  if the only uses of x_7 are in the ASSERT_EXPR and
+                  in the condition.  In that case, we can copy the
+                  range info from x_8 computed in this pass also
+                  for x_7.  */
+               if (is_unreachable
+                   && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
+                                                         single_pred (bb)))
+                 {
 -                  set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
 -                                  SSA_NAME_RANGE_INFO (lhs)->get_min (),
 -                                  SSA_NAME_RANGE_INFO (lhs)->get_max ());
++                  wide_int min, max;
++                  enum value_range_type rtype
++                    = get_range_info_as_value_range (lhs, &min, &max);
++                  set_range_info (var, rtype, min, max);
+                   maybe_set_nonzero_bits (single_pred_edge (bb), var);
+                 }
+             }
  
- static bool
- all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
- {
-   use_operand_p use_p, use2_p;
-   imm_use_iterator iter;
+           /* Propagate the RHS into every use of the LHS.  For SSA names
+              also propagate abnormals as it merely restores the original
+              IL in this case (an replace_uses_by would assert).  */
+           if (TREE_CODE (var) == SSA_NAME)
+             {
+               imm_use_iterator iter;
+               use_operand_p use_p;
+               gimple *use_stmt;
+               FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
+                 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+                   SET_USE (use_p, var);
+             }
+           else
+             replace_uses_by (lhs, var);
  
-   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
-     if (USE_STMT (use_p) != stmt)
-       {
-       gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
-       if (is_gimple_debug (use_stmt))
-         continue;
-       while (is_gimple_assign (use_stmt)
-              && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
-              && single_imm_use (gimple_assign_lhs (use_stmt),
-                                 &use2_p, &use_stmt2))
-         use_stmt = use_stmt2;
-       if (gimple_code (use_stmt) != GIMPLE_COND
-           || gimple_bb (use_stmt) != cond_bb)
-         return false;
+           /* And finally, remove the copy, it is not needed.  */
+           gsi_remove (&si, true);
+           release_defs (stmt);
+         }
+       else
+         {
+           if (!is_gimple_debug (gsi_stmt (si)))
+             is_unreachable = 0;
+           gsi_next (&si);
+         }
        }
-   return true;
  }
- /* Handle
-    _4 = x_3 & 31;
-    if (_4 != 0)
-      goto <bb 6>;
-    else
-      goto <bb 7>;
-    <bb 6>:
-    __builtin_unreachable ();
-    <bb 7>:
-    x_5 = ASSERT_EXPR <x_3, ...>;
-    If x_3 has no other immediate uses (checked by caller),
-    var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
-    from the non-zero bitmask.  */
- static void
- maybe_set_nonzero_bits (basic_block bb, tree var)
- {
-   edge e = single_pred_edge (bb);
-   basic_block cond_bb = e->src;
-   gimple *stmt = last_stmt (cond_bb);
-   tree cst;
-   if (stmt == NULL
-       || gimple_code (stmt) != GIMPLE_COND
-       || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
-                                    ? EQ_EXPR : NE_EXPR)
-       || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
-       || !integer_zerop (gimple_cond_rhs (stmt)))
-     return;
-   stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
-   if (!is_gimple_assign (stmt)
-       || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
-       || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
-     return;
-   if (gimple_assign_rhs1 (stmt) != var)
-     {
-       gimple *stmt2;
-       if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
-       return;
-       stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
-       if (!gimple_assign_cast_p (stmt2)
-         || gimple_assign_rhs1 (stmt2) != var
-         || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
-         || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
-                             != TYPE_PRECISION (TREE_TYPE (var))))
-       return;
-     }
-   cst = gimple_assign_rhs2 (stmt);
-   set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
- }
- /* Convert range assertion expressions into the implied copies and
-    copy propagate away the copies.  Doing the trivial copy propagation
-    here avoids the need to run the full copy propagation pass after
-    VRP.
-    FIXME, this will eventually lead to copy propagation removing the
-    names that had useful range information attached to them.  For
-    instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
-    then N_i will have the range [3, +INF].
-    However, by converting the assertion into the implied copy
-    operation N_i = N_j, we will then copy-propagate N_j into the uses
-    of N_i and lose the range information.  We may want to hold on to
-    ASSERT_EXPRs a little while longer as the ranges could be used in
-    things like jump threading.
-    The problem with keeping ASSERT_EXPRs around is that passes after
-    VRP need to handle them appropriately.
-    Another approach would be to make the range information a first
-    class property of the SSA_NAME so that it can be queried from
-    any pass.  This is made somewhat more complex by the need for
-    multiple ranges to be associated with one SSA_NAME.  */
- static void
- remove_range_assertions (void)
- {
-   basic_block bb;
-   gimple_stmt_iterator si;
-   /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
-      a basic block preceeded by GIMPLE_COND branching to it and
-      __builtin_trap, -1 if not yet checked, 0 otherwise.  */
-   int is_unreachable;
-   /* Note that the BSI iterator bump happens at the bottom of the
-      loop and no bump is necessary if we're removing the statement
-      referenced by the current BSI.  */
-   FOR_EACH_BB_FN (bb, cfun)
-     for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
-       {
-       gimple *stmt = gsi_stmt (si);
-       if (is_gimple_assign (stmt)
-           && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
-         {
-           tree lhs = gimple_assign_lhs (stmt);
-           tree rhs = gimple_assign_rhs1 (stmt);
-           tree var;
-           var = ASSERT_EXPR_VAR (rhs);
-           if (TREE_CODE (var) == SSA_NAME
-               && !POINTER_TYPE_P (TREE_TYPE (lhs))
-               && SSA_NAME_RANGE_INFO (lhs))
-             {
-               if (is_unreachable == -1)
-                 {
-                   is_unreachable = 0;
-                   if (single_pred_p (bb)
-                       && assert_unreachable_fallthru_edge_p
-                                                   (single_pred_edge (bb)))
-                     is_unreachable = 1;
-                 }
-               /* Handle
-                  if (x_7 >= 10 && x_7 < 20)
-                    __builtin_unreachable ();
-                  x_8 = ASSERT_EXPR <x_7, ...>;
-                  if the only uses of x_7 are in the ASSERT_EXPR and
-                  in the condition.  In that case, we can copy the
-                  range info from x_8 computed in this pass also
-                  for x_7.  */
-               if (is_unreachable
-                   && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
-                                                         single_pred (bb)))
-                 {
-                   wide_int min, max;
-                   enum value_range_type rtype
-                     = get_range_info_as_value_range (lhs, &min, &max);
-                   set_range_info (var, rtype, min, max);
-                   maybe_set_nonzero_bits (bb, var);
-                 }
-             }
-           /* Propagate the RHS into every use of the LHS.  For SSA names
-              also propagate abnormals as it merely restores the original
-              IL in this case (an replace_uses_by would assert).  */
-           if (TREE_CODE (var) == SSA_NAME)
-             {
-               imm_use_iterator iter;
-               use_operand_p use_p;
-               gimple *use_stmt;
-               FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
-                 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
-                   SET_USE (use_p, var);
-             }
-           else
-             replace_uses_by (lhs, var);
-           /* And finally, remove the copy, it is not needed.  */
-           gsi_remove (&si, true);
-           release_defs (stmt);
-         }
-       else
-         {
-           if (!is_gimple_debug (gsi_stmt (si)))
-             is_unreachable = 0;
-           gsi_next (&si);
-         }
-       }
- }
- /* Return true if STMT is interesting for VRP.  */
- static bool
- stmt_interesting_for_vrp (gimple *stmt)
- {
-   if (gimple_code (stmt) == GIMPLE_PHI)
-     {
-       tree res = gimple_phi_result (stmt);
-       return (!virtual_operand_p (res)
-             && (INTEGRAL_TYPE_P (TREE_TYPE (res))
-                 || POINTER_TYPE_P (TREE_TYPE (res))));
-     }
-   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
-     {
-       tree lhs = gimple_get_lhs (stmt);
-       /* In general, assignments with virtual operands are not useful
-        for deriving ranges, with the obvious exception of calls to
-        builtin functions.  */
-       if (lhs && TREE_CODE (lhs) == SSA_NAME
-         && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
-             || POINTER_TYPE_P (TREE_TYPE (lhs)))
-         && (is_gimple_call (stmt)
-             || !gimple_vuse (stmt)))
-       return true;
-       else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
-       switch (gimple_call_internal_fn (stmt))
-         {
-         case IFN_ADD_OVERFLOW:
-         case IFN_SUB_OVERFLOW:
-         case IFN_MUL_OVERFLOW:
-         case IFN_ATOMIC_COMPARE_EXCHANGE:
-           /* These internal calls return _Complex integer type,
-              but are interesting to VRP nevertheless.  */
-           if (lhs && TREE_CODE (lhs) == SSA_NAME)
-             return true;
-           break;
-         default:
-           break;
-         }
-     }
-   else if (gimple_code (stmt) == GIMPLE_COND
-          || gimple_code (stmt) == GIMPLE_SWITCH)
-     return true;
-   return false;
- }
- /* Initialize VRP lattice.  */
- static void
- vrp_initialize_lattice ()
- {
-   values_propagated = false;
-   num_vr_values = num_ssa_names;
-   vr_value = XCNEWVEC (value_range *, num_vr_values);
-   vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
-   bitmap_obstack_initialize (&vrp_equiv_obstack);
- }
- /* Initialization required by ssa_propagate engine.  */
- static void
- vrp_initialize ()
- {
-   basic_block bb;
-   FOR_EACH_BB_FN (bb, cfun)
-     {
-       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
-          gsi_next (&si))
-       {
-         gphi *phi = si.phi ();
-         if (!stmt_interesting_for_vrp (phi))
-           {
-             tree lhs = PHI_RESULT (phi);
-             set_value_range_to_varying (get_value_range (lhs));
-             prop_set_simulate_again (phi, false);
-           }
-         else
-           prop_set_simulate_again (phi, true);
-       }
-       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
-          gsi_next (&si))
-         {
-         gimple *stmt = gsi_stmt (si);
-         /* If the statement is a control insn, then we do not
-            want to avoid simulating the statement once.  Failure
-            to do so means that those edges will never get added.  */
-         if (stmt_ends_bb_p (stmt))
-           prop_set_simulate_again (stmt, true);
-         else if (!stmt_interesting_for_vrp (stmt))
-           {
-             set_defs_to_varying (stmt);
-             prop_set_simulate_again (stmt, false);
-           }
-         else
-           prop_set_simulate_again (stmt, true);
-       }
-     }
- }
- /* Return the singleton value-range for NAME or NAME.  */
- static inline tree
- vrp_valueize (tree name)
- {
-   if (TREE_CODE (name) == SSA_NAME)
-     {
-       value_range *vr = get_value_range (name);
-       if (vr->type == VR_RANGE
-         && (TREE_CODE (vr->min) == SSA_NAME
-             || is_gimple_min_invariant (vr->min))
-         && vrp_operand_equal_p (vr->min, vr->max))
-       return vr->min;
-     }
-   return name;
- }
- /* Return the singleton value-range for NAME if that is a constant
-    but signal to not follow SSA edges.  */
- static inline tree
- vrp_valueize_1 (tree name)
- {
-   if (TREE_CODE (name) == SSA_NAME)
-     {
-       /* If the definition may be simulated again we cannot follow
-          this SSA edge as the SSA propagator does not necessarily
-        re-visit the use.  */
-       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
-       if (!gimple_nop_p (def_stmt)
-         && prop_simulate_again_p (def_stmt))
-       return NULL_TREE;
-       value_range *vr = get_value_range (name);
-       if (range_int_cst_singleton_p (vr))
-       return vr->min;
-     }
-   return name;
- }
- /* Visit assignment STMT.  If it produces an interesting range, record
-    the range in VR and set LHS to OUTPUT_P.  */
- static void
- vrp_visit_assignment_or_call (gimple *stmt, tree *output_p, value_range *vr)
- {
-   tree lhs;
-   enum gimple_code code = gimple_code (stmt);
-   lhs = gimple_get_lhs (stmt);
-   *output_p = NULL_TREE;
-   /* We only keep track of ranges in integral and pointer types.  */
-   if (TREE_CODE (lhs) == SSA_NAME
-       && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
-          /* It is valid to have NULL MIN/MAX values on a type.  See
-             build_range_type.  */
-          && TYPE_MIN_VALUE (TREE_TYPE (lhs))
-          && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
-         || POINTER_TYPE_P (TREE_TYPE (lhs))))
-     {
-       *output_p = lhs;
-       /* Try folding the statement to a constant first.  */
-       tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
-                                                vrp_valueize_1);
-       if (tem)
-       {
-         if (TREE_CODE (tem) == SSA_NAME
-             && (SSA_NAME_IS_DEFAULT_DEF (tem)
-                 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
-           {
-             extract_range_from_ssa_name (vr, tem);
-             return;
-           }
-         else if (is_gimple_min_invariant (tem))
-           {
-             set_value_range_to_value (vr, tem, NULL);
-             return;
-           }
-       }
-       /* Then dispatch to value-range extracting functions.  */
-       if (code == GIMPLE_CALL)
-       extract_range_basic (vr, stmt);
-       else
-       extract_range_from_assignment (vr, as_a <gassign *> (stmt));
-     }
- }
- /* Helper that gets the value range of the SSA_NAME with version I
-    or a symbolic range containing the SSA_NAME only if the value range
-    is varying or undefined.  */
- static inline value_range
- get_vr_for_comparison (int i)
- {
-   value_range vr = *get_value_range (ssa_name (i));
-   /* If name N_i does not have a valid range, use N_i as its own
-      range.  This allows us to compare against names that may
-      have N_i in their ranges.  */
-   if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
-     {
-       vr.type = VR_RANGE;
-       vr.min = ssa_name (i);
-       vr.max = ssa_name (i);
-     }
-   return vr;
- }
- /* Compare all the value ranges for names equivalent to VAR with VAL
-    using comparison code COMP.  Return the same value returned by
-    compare_range_with_value, including the setting of
-    *STRICT_OVERFLOW_P.  */
- static tree
- compare_name_with_value (enum tree_code comp, tree var, tree val,
-                        bool *strict_overflow_p, bool use_equiv_p)
- {
-   bitmap_iterator bi;
-   unsigned i;
-   bitmap e;
-   tree retval, t;
-   int used_strict_overflow;
-   bool sop;
-   value_range equiv_vr;
-   /* Get the set of equivalences for VAR.  */
-   e = get_value_range (var)->equiv;
-   /* Start at -1.  Set it to 0 if we do a comparison without relying
-      on overflow, or 1 if all comparisons rely on overflow.  */
-   used_strict_overflow = -1;
-   /* Compare vars' value range with val.  */
-   equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
-   sop = false;
-   retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
-   if (retval)
-     used_strict_overflow = sop ? 1 : 0;
-   /* If the equiv set is empty we have done all work we need to do.  */
-   if (e == NULL)
-     {
-       if (retval
-         && used_strict_overflow > 0)
-       *strict_overflow_p = true;
-       return retval;
-     }
-   EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
-     {
-       tree name = ssa_name (i);
-       if (! name)
-       continue;
-       if (! use_equiv_p
-         && ! SSA_NAME_IS_DEFAULT_DEF (name)
-         && prop_simulate_again_p (SSA_NAME_DEF_STMT (name)))
-       continue;
-       equiv_vr = get_vr_for_comparison (i);
-       sop = false;
-       t = compare_range_with_value (comp, &equiv_vr, val, &sop);
-       if (t)
-       {
-         /* If we get different answers from different members
-            of the equivalence set this check must be in a dead
-            code region.  Folding it to a trap representation
-            would be correct here.  For now just return don't-know.  */
-         if (retval != NULL
-             && t != retval)
-           {
-             retval = NULL_TREE;
-             break;
-           }
-         retval = t;
-         if (!sop)
-           used_strict_overflow = 0;
-         else if (used_strict_overflow < 0)
-           used_strict_overflow = 1;
-       }
-     }
-   if (retval
-       && used_strict_overflow > 0)
-     *strict_overflow_p = true;
-   return retval;
- }
- /* Given a comparison code COMP and names N1 and N2, compare all the
-    ranges equivalent to N1 against all the ranges equivalent to N2
-    to determine the value of N1 COMP N2.  Return the same value
-    returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
-    whether we relied on undefined signed overflow in the comparison.  */
- static tree
- compare_names (enum tree_code comp, tree n1, tree n2,
-              bool *strict_overflow_p)
- {
-   tree t, retval;
-   bitmap e1, e2;
-   bitmap_iterator bi1, bi2;
-   unsigned i1, i2;
-   int used_strict_overflow;
-   static bitmap_obstack *s_obstack = NULL;
-   static bitmap s_e1 = NULL, s_e2 = NULL;
-   /* Compare the ranges of every name equivalent to N1 against the
-      ranges of every name equivalent to N2.  */
-   e1 = get_value_range (n1)->equiv;
-   e2 = get_value_range (n2)->equiv;
-   /* Use the fake bitmaps if e1 or e2 are not available.  */
-   if (s_obstack == NULL)
-     {
-       s_obstack = XNEW (bitmap_obstack);
-       bitmap_obstack_initialize (s_obstack);
-       s_e1 = BITMAP_ALLOC (s_obstack);
-       s_e2 = BITMAP_ALLOC (s_obstack);
-     }
-   if (e1 == NULL)
-     e1 = s_e1;
-   if (e2 == NULL)
-     e2 = s_e2;
-   /* Add N1 and N2 to their own set of equivalences to avoid
-      duplicating the body of the loop just to check N1 and N2
-      ranges.  */
-   bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
-   bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
-   /* If the equivalence sets have a common intersection, then the two
-      names can be compared without checking their ranges.  */
-   if (bitmap_intersect_p (e1, e2))
-     {
-       bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
-       bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
-       return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
-            ? boolean_true_node
-            : boolean_false_node;
-     }
-   /* Start at -1.  Set it to 0 if we do a comparison without relying
-      on overflow, or 1 if all comparisons rely on overflow.  */
-   used_strict_overflow = -1;
-   /* Otherwise, compare all the equivalent ranges.  First, add N1 and
-      N2 to their own set of equivalences to avoid duplicating the body
-      of the loop just to check N1 and N2 ranges.  */
-   EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
-     {
-       if (! ssa_name (i1))
-       continue;
-       value_range vr1 = get_vr_for_comparison (i1);
-       t = retval = NULL_TREE;
-       EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
-       {
-         if (! ssa_name (i2))
-           continue;
-         bool sop = false;
-         value_range vr2 = get_vr_for_comparison (i2);
-         t = compare_ranges (comp, &vr1, &vr2, &sop);
-         if (t)
-           {
-             /* If we get different answers from different members
-                of the equivalence set this check must be in a dead
-                code region.  Folding it to a trap representation
-                would be correct here.  For now just return don't-know.  */
-             if (retval != NULL
-                 && t != retval)
-               {
-                 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
-                 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
-                 return NULL_TREE;
-               }
-             retval = t;
-             if (!sop)
-               used_strict_overflow = 0;
-             else if (used_strict_overflow < 0)
-               used_strict_overflow = 1;
-           }
-       }
-       if (retval)
-       {
-         bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
-         bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
-         if (used_strict_overflow > 0)
-           *strict_overflow_p = true;
-         return retval;
-       }
-     }
-   /* None of the equivalent ranges are useful in computing this
-      comparison.  */
-   bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
-   bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
-   return NULL_TREE;
- }
- /* Helper function for vrp_evaluate_conditional_warnv & other
-    optimizers.  */
- static tree
- vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
-                                                     tree op0, tree op1,
-                                                     bool * strict_overflow_p)
- {
-   value_range *vr0, *vr1;
-   vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
-   vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
-   tree res = NULL_TREE;
-   if (vr0 && vr1)
-     res = compare_ranges (code, vr0, vr1, strict_overflow_p);
-   if (!res && vr0)
-     res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
-   if (!res && vr1)
-     res = (compare_range_with_value
-           (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
-   return res;
- }
- /* Helper function for vrp_evaluate_conditional_warnv. */
- static tree
- vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
-                                        tree op1, bool use_equiv_p,
-                                        bool *strict_overflow_p, bool *only_ranges)
- {
-   tree ret;
-   if (only_ranges)
-     *only_ranges = true;
-   /* We only deal with integral and pointer types.  */
-   if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
-       && !POINTER_TYPE_P (TREE_TYPE (op0)))
-     return NULL_TREE;
-   /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
-      as a simple equality test, then prefer that over its current form
-      for evaluation.
-      An overflow test which collapses to an equality test can always be
-      expressed as a comparison of one argument against zero.  Overflow
-      occurs when the chosen argument is zero and does not occur if the
-      chosen argument is not zero.  */
-   tree x;
-   if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
-     {
-       wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
-       /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
-          B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
-          B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
-          B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */
-       if (integer_zerop (x))
-       {
-         op1 = x;
-         code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
-       }
-       /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
-          B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
-          B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
-          B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
-       else if (wi::eq_p (x, max - 1))
-       {
-         op0 = op1;
-         op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
-         code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
-       }
-     }
-   if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
-              (code, op0, op1, strict_overflow_p)))
-     return ret;
-   if (only_ranges)
-     *only_ranges = false;
-   /* Do not use compare_names during propagation, it's quadratic.  */
-   if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
-       && use_equiv_p)
-     return compare_names (code, op0, op1, strict_overflow_p);
-   else if (TREE_CODE (op0) == SSA_NAME)
-     return compare_name_with_value (code, op0, op1,
-                                   strict_overflow_p, use_equiv_p);
-   else if (TREE_CODE (op1) == SSA_NAME)
-     return compare_name_with_value (swap_tree_comparison (code), op1, op0,
-                                   strict_overflow_p, use_equiv_p);
-   return NULL_TREE;
- }
- /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
-    information.  Return NULL if the conditional can not be evaluated.
-    The ranges of all the names equivalent with the operands in COND
-    will be used when trying to compute the value.  If the result is
-    based on undefined signed overflow, issue a warning if
-    appropriate.  */
- static tree
- vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt)
- {
-   bool sop;
-   tree ret;
-   bool only_ranges;
-   /* Some passes and foldings leak constants with overflow flag set
-      into the IL.  Avoid doing wrong things with these and bail out.  */
-   if ((TREE_CODE (op0) == INTEGER_CST
-        && TREE_OVERFLOW (op0))
-       || (TREE_CODE (op1) == INTEGER_CST
-         && TREE_OVERFLOW (op1)))
-     return NULL_TREE;
-   sop = false;
-   ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
-                                                &only_ranges);
-   if (ret && sop)
-     {
-       enum warn_strict_overflow_code wc;
-       const char* warnmsg;
-       if (is_gimple_min_invariant (ret))
-       {
-         wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
-         warnmsg = G_("assuming signed overflow does not occur when "
-                      "simplifying conditional to constant");
-       }
-       else
-       {
-         wc = WARN_STRICT_OVERFLOW_COMPARISON;
-         warnmsg = G_("assuming signed overflow does not occur when "
-                      "simplifying conditional");
-       }
-       if (issue_strict_overflow_warning (wc))
-       {
-         location_t location;
-         if (!gimple_has_location (stmt))
-           location = input_location;
-         else
-           location = gimple_location (stmt);
-         warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
-       }
-     }
-   if (warn_type_limits
-       && ret && only_ranges
-       && TREE_CODE_CLASS (code) == tcc_comparison
-       && TREE_CODE (op0) == SSA_NAME)
-     {
-       /* If the comparison is being folded and the operand on the LHS
-        is being compared against a constant value that is outside of
-        the natural range of OP0's type, then the predicate will
-        always fold regardless of the value of OP0.  If -Wtype-limits
-        was specified, emit a warning.  */
-       tree type = TREE_TYPE (op0);
-       value_range *vr0 = get_value_range (op0);
-       if (vr0->type == VR_RANGE
-         && INTEGRAL_TYPE_P (type)
-         && vrp_val_is_min (vr0->min)
-         && vrp_val_is_max (vr0->max)
-         && is_gimple_min_invariant (op1))
-       {
-         location_t location;
-         if (!gimple_has_location (stmt))
-           location = input_location;
-         else
-           location = gimple_location (stmt);
-         warning_at (location, OPT_Wtype_limits,
-                     integer_zerop (ret)
-                     ? G_("comparison always false "
-                            "due to limited range of data type")
-                     : G_("comparison always true "
-                            "due to limited range of data type"));
-       }
-     }
-   return ret;
- }
- /* Visit conditional statement STMT.  If we can determine which edge
-    will be taken out of STMT's basic block, record it in
-    *TAKEN_EDGE_P.  Otherwise, set *TAKEN_EDGE_P to NULL.  */
- static void
- vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
- {
-   tree val;
-   *taken_edge_p = NULL;
-   if (dump_file && (dump_flags & TDF_DETAILS))
-     {
-       tree use;
-       ssa_op_iter i;
-       fprintf (dump_file, "\nVisiting conditional with predicate: ");
-       print_gimple_stmt (dump_file, stmt, 0);
-       fprintf (dump_file, "\nWith known ranges\n");
-       FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
-       {
-         fprintf (dump_file, "\t");
-         print_generic_expr (dump_file, use);
-         fprintf (dump_file, ": ");
-         dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
-       }
-       fprintf (dump_file, "\n");
-     }
-   /* Compute the value of the predicate COND by checking the known
-      ranges of each of its operands.
-      Note that we cannot evaluate all the equivalent ranges here
-      because those ranges may not yet be final and with the current
-      propagation strategy, we cannot determine when the value ranges
-      of the names in the equivalence set have changed.
-      For instance, given the following code fragment
-         i_5 = PHI <8, i_13>
-       ...
-       i_14 = ASSERT_EXPR <i_5, i_5 != 0>
-       if (i_14 == 1)
-         ...
-      Assume that on the first visit to i_14, i_5 has the temporary
-      range [8, 8] because the second argument to the PHI function is
-      not yet executable.  We derive the range ~[0, 0] for i_14 and the
-      equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
-      the first time, since i_14 is equivalent to the range [8, 8], we
-      determine that the predicate is always false.
-      On the next round of propagation, i_13 is determined to be
-      VARYING, which causes i_5 to drop down to VARYING.  So, another
-      visit to i_14 is scheduled.  In this second visit, we compute the
-      exact same range and equivalence set for i_14, namely ~[0, 0] and
-      { i_5 }.  But we did not have the previous range for i_5
-      registered, so vrp_visit_assignment thinks that the range for
-      i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
-      is not visited again, which stops propagation from visiting
-      statements in the THEN clause of that if().
-      To properly fix this we would need to keep the previous range
-      value for the names in the equivalence set.  This way we would've
-      discovered that from one visit to the other i_5 changed from
-      range [8, 8] to VR_VARYING.
-      However, fixing this apparent limitation may not be worth the
-      additional checking.  Testing on several code bases (GCC, DLV,
-      MICO, TRAMP3D and SPEC2000) showed that doing this results in
-      4 more predicates folded in SPEC.  */
-   bool sop;
-   val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
-                                                gimple_cond_lhs (stmt),
-                                                gimple_cond_rhs (stmt),
-                                                false, &sop, NULL);
-   if (val)
-     *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
-   if (dump_file && (dump_flags & TDF_DETAILS))
-     {
-       fprintf (dump_file, "\nPredicate evaluates to: ");
-       if (val == NULL_TREE)
-       fprintf (dump_file, "DON'T KNOW\n");
-       else
-       print_generic_stmt (dump_file, val);
-     }
- }
- /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
-    that includes the value VAL.  The search is restricted to the range
-    [START_IDX, n - 1] where n is the size of VEC.
-    If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
-    returned.
-    If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
-    it is placed in IDX and false is returned.
-    If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
-    returned. */
- static bool
- find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
- {
-   size_t n = gimple_switch_num_labels (stmt);
-   size_t low, high;
-   /* Find case label for minimum of the value range or the next one.
-      At each iteration we are searching in [low, high - 1]. */
-   for (low = start_idx, high = n; high != low; )
-     {
-       tree t;
-       int cmp;
-       /* Note that i != high, so we never ask for n. */
-       size_t i = (high + low) / 2;
-       t = gimple_switch_label (stmt, i);
-       /* Cache the result of comparing CASE_LOW and val.  */
-       cmp = tree_int_cst_compare (CASE_LOW (t), val);
-       if (cmp == 0)
-       {
-         /* Ranges cannot be empty. */
-         *idx = i;
-         return true;
-       }
-       else if (cmp > 0)
-         high = i;
-       else
-       {
-         low = i + 1;
-         if (CASE_HIGH (t) != NULL
-             && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
-           {
-             *idx = i;
-             return true;
-           }
-         }
-     }
-   *idx = high;
-   return false;
- }
- /* Searches the case label vector VEC for the range of CASE_LABELs that is used
-    for values between MIN and MAX. The first index is placed in MIN_IDX. The
-    last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
-    then MAX_IDX < MIN_IDX.
-    Returns true if the default label is not needed. */
- static bool
- find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
-                      size_t *max_idx)
- {
-   size_t i, j;
-   bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
-   bool max_take_default = !find_case_label_index (stmt, i, max, &j);
-   if (i == j
-       && min_take_default
-       && max_take_default)
-     {
-       /* Only the default case label reached.
-          Return an empty range. */
-       *min_idx = 1;
-       *max_idx = 0;
-       return false;
-     }
-   else
-     {
-       bool take_default = min_take_default || max_take_default;
-       tree low, high;
-       size_t k;
-       if (max_take_default)
-       j--;
-       /* If the case label range is continuous, we do not need
-        the default case label.  Verify that.  */
-       high = CASE_LOW (gimple_switch_label (stmt, i));
-       if (CASE_HIGH (gimple_switch_label (stmt, i)))
-       high = CASE_HIGH (gimple_switch_label (stmt, i));
-       for (k = i + 1; k <= j; ++k)
-       {
-         low = CASE_LOW (gimple_switch_label (stmt, k));
-         if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
-           {
-             take_default = true;
-             break;
-           }
-         high = low;
-         if (CASE_HIGH (gimple_switch_label (stmt, k)))
-           high = CASE_HIGH (gimple_switch_label (stmt, k));
-       }
-       *min_idx = i;
-       *max_idx = j;
-       return !take_default;
-     }
- }
- /* Searches the case label vector VEC for the ranges of CASE_LABELs that are
-    used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
-    MAX_IDX2.  If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
-    Returns true if the default label is not needed.  */
- static bool
- find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
-                       size_t *max_idx1, size_t *min_idx2,
-                       size_t *max_idx2)
- {
-   size_t i, j, k, l;
-   unsigned int n = gimple_switch_num_labels (stmt);
-   bool take_default;
-   tree case_low, case_high;
-   tree min = vr->min, max = vr->max;
-   gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
-   take_default = !find_case_label_range (stmt, min, max, &i, &j);
-   /* Set second range to emtpy.  */
-   *min_idx2 = 1;
-   *max_idx2 = 0;
-   if (vr->type == VR_RANGE)
-     {
-       *min_idx1 = i;
-       *max_idx1 = j;
-       return !take_default;
-     }
-   /* Set first range to all case labels.  */
-   *min_idx1 = 1;
-   *max_idx1 = n - 1;
-   if (i > j)
-     return false;
-   /* Make sure all the values of case labels [i , j] are contained in
-      range [MIN, MAX].  */
-   case_low = CASE_LOW (gimple_switch_label (stmt, i));
-   case_high = CASE_HIGH (gimple_switch_label (stmt, j));
-   if (tree_int_cst_compare (case_low, min) < 0)
-     i += 1;
-   if (case_high != NULL_TREE
-       && tree_int_cst_compare (max, case_high) < 0)
-     j -= 1;
-   if (i > j)
-     return false;
-   /* If the range spans case labels [i, j], the corresponding anti-range spans
-      the labels [1, i - 1] and [j + 1, n -  1].  */
-   k = j + 1;
-   l = n - 1;
-   if (k > l)
-     {
-       k = 1;
-       l = 0;
-     }
-   j = i - 1;
-   i = 1;
-   if (i > j)
-     {
-       i = k;
-       j = l;
-       k = 1;
-       l = 0;
-     }
-   *min_idx1 = i;
-   *max_idx1 = j;
-   *min_idx2 = k;
-   *max_idx2 = l;
-   return false;
- }
- /* Visit switch statement STMT.  If we can determine which edge
-    will be taken out of STMT's basic block, record it in
-    *TAKEN_EDGE_P.  Otherwise, *TAKEN_EDGE_P set to NULL.  */
- static void
- vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
- {
-   tree op, val;
-   value_range *vr;
-   size_t i = 0, j = 0, k, l;
-   bool take_default;
-   *taken_edge_p = NULL;
-   op = gimple_switch_index (stmt);
-   if (TREE_CODE (op) != SSA_NAME)
-     return;
-   vr = get_value_range (op);
-   if (dump_file && (dump_flags & TDF_DETAILS))
-     {
-       fprintf (dump_file, "\nVisiting switch expression with operand ");
-       print_generic_expr (dump_file, op);
-       fprintf (dump_file, " with known range ");
-       dump_value_range (dump_file, vr);
-       fprintf (dump_file, "\n");
-     }
-   if ((vr->type != VR_RANGE
-        && vr->type != VR_ANTI_RANGE)
-       || symbolic_range_p (vr))
-     return;
-   /* Find the single edge that is taken from the switch expression.  */
-   take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
-   /* Check if the range spans no CASE_LABEL. If so, we only reach the default
-      label */
-   if (j < i)
-     {
-       gcc_assert (take_default);
-       val = gimple_switch_default_label (stmt);
-     }
-   else
-     {
-       /* Check if labels with index i to j and maybe the default label
-        are all reaching the same label.  */
-       val = gimple_switch_label (stmt, i);
-       if (take_default
-         && CASE_LABEL (gimple_switch_default_label (stmt))
-         != CASE_LABEL (val))
-       {
-         if (dump_file && (dump_flags & TDF_DETAILS))
-           fprintf (dump_file, "  not a single destination for this "
-                    "range\n");
-         return;
-       }
-       for (++i; i <= j; ++i)
-         {
-           if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
-           {
-             if (dump_file && (dump_flags & TDF_DETAILS))
-               fprintf (dump_file, "  not a single destination for this "
-                        "range\n");
-             return;
-           }
-         }
-       for (; k <= l; ++k)
-         {
-           if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
-           {
-             if (dump_file && (dump_flags & TDF_DETAILS))
-               fprintf (dump_file, "  not a single destination for this "
-                        "range\n");
-             return;
-           }
-         }
-     }
-   *taken_edge_p = find_edge (gimple_bb (stmt),
-                            label_to_block (CASE_LABEL (val)));
-   if (dump_file && (dump_flags & TDF_DETAILS))
-     {
-       fprintf (dump_file, "  will take edge to ");
-       print_generic_stmt (dump_file, CASE_LABEL (val));
-     }
- }
- /* Evaluate statement STMT.  If the statement produces a useful range,
-    set VR and corepsponding OUTPUT_P.
-    If STMT is a conditional branch and we can determine its truth
-    value, the taken edge is recorded in *TAKEN_EDGE_P.  */
- static void
- extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
-                        tree *output_p, value_range *vr)
- {
-   if (dump_file && (dump_flags & TDF_DETAILS))
-     {
-       fprintf (dump_file, "\nVisiting statement:\n");
-       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
-     }
-   if (!stmt_interesting_for_vrp (stmt))
-     gcc_assert (stmt_ends_bb_p (stmt));
-   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
-     vrp_visit_assignment_or_call (stmt, output_p, vr);
-   else if (gimple_code (stmt) == GIMPLE_COND)
-     vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
-   else if (gimple_code (stmt) == GIMPLE_SWITCH)
-     vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
- }
- /* Evaluate statement STMT.  If the statement produces a useful range,
-    return SSA_PROP_INTERESTING and record the SSA name with the
-    interesting range into *OUTPUT_P.
-    If STMT is a conditional branch and we can determine its truth
-    value, the taken edge is recorded in *TAKEN_EDGE_P.
-    If STMT produces a varying value, return SSA_PROP_VARYING.  */
- static enum ssa_prop_result
- vrp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
- {
-   value_range vr = VR_INITIALIZER;
-   tree lhs = gimple_get_lhs (stmt);
-   extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
-   if (*output_p)
-     {
-       if (update_value_range (*output_p, &vr))
-       {
-         if (dump_file && (dump_flags & TDF_DETAILS))
-           {
-             fprintf (dump_file, "Found new range for ");
-             print_generic_expr (dump_file, *output_p);
-             fprintf (dump_file, ": ");
-             dump_value_range (dump_file, &vr);
-             fprintf (dump_file, "\n");
-           }
-         if (vr.type == VR_VARYING)
-           return SSA_PROP_VARYING;
-         return SSA_PROP_INTERESTING;
-       }
-       return SSA_PROP_NOT_INTERESTING;
-     }
-   if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
-     switch (gimple_call_internal_fn (stmt))
-       {
-       case IFN_ADD_OVERFLOW:
-       case IFN_SUB_OVERFLOW:
-       case IFN_MUL_OVERFLOW:
-       case IFN_ATOMIC_COMPARE_EXCHANGE:
-       /* These internal calls return _Complex integer type,
-          which VRP does not track, but the immediate uses
-          thereof might be interesting.  */
-       if (lhs && TREE_CODE (lhs) == SSA_NAME)
-         {
-           imm_use_iterator iter;
-           use_operand_p use_p;
-           enum ssa_prop_result res = SSA_PROP_VARYING;
-           set_value_range_to_varying (get_value_range (lhs));
-           FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
-             {
-               gimple *use_stmt = USE_STMT (use_p);
-               if (!is_gimple_assign (use_stmt))
-                 continue;
-               enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
-               if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
-                 continue;
-               tree rhs1 = gimple_assign_rhs1 (use_stmt);
-               tree use_lhs = gimple_assign_lhs (use_stmt);
-               if (TREE_CODE (rhs1) != rhs_code
-                   || TREE_OPERAND (rhs1, 0) != lhs
-                   || TREE_CODE (use_lhs) != SSA_NAME
-                   || !stmt_interesting_for_vrp (use_stmt)
-                   || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
-                       || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
-                       || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
-                 continue;
-               /* If there is a change in the value range for any of the
-                  REALPART_EXPR/IMAGPART_EXPR immediate uses, return
-                  SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
-                  or IMAGPART_EXPR immediate uses, but none of them have
-                  a change in their value ranges, return
-                  SSA_PROP_NOT_INTERESTING.  If there are no
-                  {REAL,IMAG}PART_EXPR uses at all,
-                  return SSA_PROP_VARYING.  */
-               value_range new_vr = VR_INITIALIZER;
-               extract_range_basic (&new_vr, use_stmt);
-               value_range *old_vr = get_value_range (use_lhs);
-               if (old_vr->type != new_vr.type
-                   || !vrp_operand_equal_p (old_vr->min, new_vr.min)
-                   || !vrp_operand_equal_p (old_vr->max, new_vr.max)
-                   || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
-                 res = SSA_PROP_INTERESTING;
-               else
-                 res = SSA_PROP_NOT_INTERESTING;
-               BITMAP_FREE (new_vr.equiv);
-               if (res == SSA_PROP_INTERESTING)
-                 {
-                   *output_p = lhs;
-                   return res;
-                 }
-             }
-           return res;
-         }
-       break;
-       default:
-       break;
-       }
-   /* All other statements produce nothing of interest for VRP, so mark
-      their outputs varying and prevent further simulation.  */
-   set_defs_to_varying (stmt);
-   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
- }
- /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
-    { VR1TYPE, VR0MIN, VR0MAX } and store the result
-    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
-    possible such range.  The resulting range is not canonicalized.  */
- static void
- union_ranges (enum value_range_type *vr0type,
-             tree *vr0min, tree *vr0max,
-             enum value_range_type vr1type,
-             tree vr1min, tree vr1max)
- {
-   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
-   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
-   /* [] is vr0, () is vr1 in the following classification comments.  */
-   if (mineq && maxeq)
-     {
-       /* [(  )] */
-       if (*vr0type == vr1type)
-       /* Nothing to do for equal ranges.  */
-       ;
-       else if ((*vr0type == VR_RANGE
-               && vr1type == VR_ANTI_RANGE)
-              || (*vr0type == VR_ANTI_RANGE
-                  && vr1type == VR_RANGE))
-       {
-         /* For anti-range with range union the result is varying.  */
-         goto give_up;
-       }
-       else
-       gcc_unreachable ();
-     }
-   else if (operand_less_p (*vr0max, vr1min) == 1
-          || operand_less_p (vr1max, *vr0min) == 1)
-     {
-       /* [ ] ( ) or ( ) [ ]
-        If the ranges have an empty intersection, result of the union
-        operation is the anti-range or if both are anti-ranges
-        it covers all.  */
-       if (*vr0type == VR_ANTI_RANGE
-         && vr1type == VR_ANTI_RANGE)
-       goto give_up;
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_RANGE)
-       ;
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       {
-         *vr0type = vr1type;
-         *vr0min = vr1min;
-         *vr0max = vr1max;
-       }
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_RANGE)
-       {
-         /* The result is the convex hull of both ranges.  */
-         if (operand_less_p (*vr0max, vr1min) == 1)
-           {
-             /* If the result can be an anti-range, create one.  */
-             if (TREE_CODE (*vr0max) == INTEGER_CST
-                 && TREE_CODE (vr1min) == INTEGER_CST
-                 && vrp_val_is_min (*vr0min)
-                 && vrp_val_is_max (vr1max))
-               {
-                 tree min = int_const_binop (PLUS_EXPR,
-                                             *vr0max,
-                                             build_int_cst (TREE_TYPE (*vr0max), 1));
-                 tree max = int_const_binop (MINUS_EXPR,
-                                             vr1min,
-                                             build_int_cst (TREE_TYPE (vr1min), 1));
-                 if (!operand_less_p (max, min))
-                   {
-                     *vr0type = VR_ANTI_RANGE;
-                     *vr0min = min;
-                     *vr0max = max;
-                   }
-                 else
-                   *vr0max = vr1max;
-               }
-             else
-               *vr0max = vr1max;
-           }
-         else
-           {
-             /* If the result can be an anti-range, create one.  */
-             if (TREE_CODE (vr1max) == INTEGER_CST
-                 && TREE_CODE (*vr0min) == INTEGER_CST
-                 && vrp_val_is_min (vr1min)
-                 && vrp_val_is_max (*vr0max))
-               {
-                 tree min = int_const_binop (PLUS_EXPR,
-                                             vr1max,
-                                             build_int_cst (TREE_TYPE (vr1max), 1));
-                 tree max = int_const_binop (MINUS_EXPR,
-                                             *vr0min,
-                                             build_int_cst (TREE_TYPE (*vr0min), 1));
-                 if (!operand_less_p (max, min))
-                   {
-                     *vr0type = VR_ANTI_RANGE;
-                     *vr0min = min;
-                     *vr0max = max;
-                   }
-                 else
-                   *vr0min = vr1min;
-               }
-             else
-               *vr0min = vr1min;
-           }
-       }
-       else
-       gcc_unreachable ();
-     }
-   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
-          && (mineq || operand_less_p (*vr0min, vr1min) == 1))
-     {
-       /* [ (  ) ] or [(  ) ] or [ (  )] */
-       if (*vr0type == VR_RANGE
-         && vr1type == VR_RANGE)
-       ;
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       {
-         *vr0type = vr1type;
-         *vr0min = vr1min;
-         *vr0max = vr1max;
-       }
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_RANGE)
-       {
-         /* Arbitrarily choose the right or left gap.  */
-         if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
-           *vr0max = int_const_binop (MINUS_EXPR, vr1min,
-                                      build_int_cst (TREE_TYPE (vr1min), 1));
-         else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
-           *vr0min = int_const_binop (PLUS_EXPR, vr1max,
-                                      build_int_cst (TREE_TYPE (vr1max), 1));
-         else
-           goto give_up;
-       }
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       /* The result covers everything.  */
-       goto give_up;
-       else
-       gcc_unreachable ();
-     }
-   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
-          && (mineq || operand_less_p (vr1min, *vr0min) == 1))
-     {
-       /* ( [  ] ) or ([  ] ) or ( [  ]) */
-       if (*vr0type == VR_RANGE
-         && vr1type == VR_RANGE)
-       {
-         *vr0type = vr1type;
-         *vr0min = vr1min;
-         *vr0max = vr1max;
-       }
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       ;
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       {
-         *vr0type = VR_ANTI_RANGE;
-         if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
-           {
-             *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
-                                        build_int_cst (TREE_TYPE (*vr0min), 1));
-             *vr0min = vr1min;
-           }
-         else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
-           {
-             *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
-                                        build_int_cst (TREE_TYPE (*vr0max), 1));
-             *vr0max = vr1max;
-           }
-         else
-           goto give_up;
-       }
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_RANGE)
-       /* The result covers everything.  */
-       goto give_up;
-       else
-       gcc_unreachable ();
-     }
-   else if ((operand_less_p (vr1min, *vr0max) == 1
-           || operand_equal_p (vr1min, *vr0max, 0))
-          && operand_less_p (*vr0min, vr1min) == 1
-          && operand_less_p (*vr0max, vr1max) == 1)
-     {
-       /* [  (  ]  ) or [   ](   ) */
-       if (*vr0type == VR_RANGE
-         && vr1type == VR_RANGE)
-       *vr0max = vr1max;
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       *vr0min = vr1min;
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_RANGE)
-       {
-         if (TREE_CODE (vr1min) == INTEGER_CST)
-           *vr0max = int_const_binop (MINUS_EXPR, vr1min,
-                                      build_int_cst (TREE_TYPE (vr1min), 1));
-         else
-           goto give_up;
-       }
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       {
-         if (TREE_CODE (*vr0max) == INTEGER_CST)
-           {
-             *vr0type = vr1type;
-             *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
-                                        build_int_cst (TREE_TYPE (*vr0max), 1));
-             *vr0max = vr1max;
-           }
-         else
-           goto give_up;
-       }
-       else
-       gcc_unreachable ();
-     }
-   else if ((operand_less_p (*vr0min, vr1max) == 1
-           || operand_equal_p (*vr0min, vr1max, 0))
-          && operand_less_p (vr1min, *vr0min) == 1
-          && operand_less_p (vr1max, *vr0max) == 1)
-     {
-       /* (  [  )  ] or (   )[   ] */
-       if (*vr0type == VR_RANGE
-         && vr1type == VR_RANGE)
-       *vr0min = vr1min;
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       *vr0max = vr1max;
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_RANGE)
-       {
-         if (TREE_CODE (vr1max) == INTEGER_CST)
-           *vr0min = int_const_binop (PLUS_EXPR, vr1max,
-                                      build_int_cst (TREE_TYPE (vr1max), 1));
-         else
-           goto give_up;
-       }
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       {
-         if (TREE_CODE (*vr0min) == INTEGER_CST)
-           {
-             *vr0type = vr1type;
-             *vr0min = vr1min;
-             *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
-                                        build_int_cst (TREE_TYPE (*vr0min), 1));
-           }
-         else
-           goto give_up;
-       }
-       else
-       gcc_unreachable ();
-     }
-   else
-     goto give_up;
-   return;
- give_up:
-   *vr0type = VR_VARYING;
-   *vr0min = NULL_TREE;
-   *vr0max = NULL_TREE;
- }
- /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
-    { VR1TYPE, VR0MIN, VR0MAX } and store the result
-    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
-    possible such range.  The resulting range is not canonicalized.  */
- static void
- intersect_ranges (enum value_range_type *vr0type,
-                 tree *vr0min, tree *vr0max,
-                 enum value_range_type vr1type,
-                 tree vr1min, tree vr1max)
- {
-   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
-   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
-   /* [] is vr0, () is vr1 in the following classification comments.  */
-   if (mineq && maxeq)
-     {
-       /* [(  )] */
-       if (*vr0type == vr1type)
-       /* Nothing to do for equal ranges.  */
-       ;
-       else if ((*vr0type == VR_RANGE
-               && vr1type == VR_ANTI_RANGE)
-              || (*vr0type == VR_ANTI_RANGE
-                  && vr1type == VR_RANGE))
-       {
-         /* For anti-range with range intersection the result is empty.  */
-         *vr0type = VR_UNDEFINED;
-         *vr0min = NULL_TREE;
-         *vr0max = NULL_TREE;
-       }
-       else
-       gcc_unreachable ();
-     }
-   else if (operand_less_p (*vr0max, vr1min) == 1
-          || operand_less_p (vr1max, *vr0min) == 1)
-     {
-       /* [ ] ( ) or ( ) [ ]
-        If the ranges have an empty intersection, the result of the
-        intersect operation is the range for intersecting an
-        anti-range with a range or empty when intersecting two ranges.  */
-       if (*vr0type == VR_RANGE
-         && vr1type == VR_ANTI_RANGE)
-       ;
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_RANGE)
-       {
-         *vr0type = vr1type;
-         *vr0min = vr1min;
-         *vr0max = vr1max;
-       }
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_RANGE)
-       {
-         *vr0type = VR_UNDEFINED;
-         *vr0min = NULL_TREE;
-         *vr0max = NULL_TREE;
-       }
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       {
-         /* If the anti-ranges are adjacent to each other merge them.  */
-         if (TREE_CODE (*vr0max) == INTEGER_CST
-             && TREE_CODE (vr1min) == INTEGER_CST
-             && operand_less_p (*vr0max, vr1min) == 1
-             && integer_onep (int_const_binop (MINUS_EXPR,
-                                               vr1min, *vr0max)))
-           *vr0max = vr1max;
-         else if (TREE_CODE (vr1max) == INTEGER_CST
-                  && TREE_CODE (*vr0min) == INTEGER_CST
-                  && operand_less_p (vr1max, *vr0min) == 1
-                  && integer_onep (int_const_binop (MINUS_EXPR,
-                                                    *vr0min, vr1max)))
-           *vr0min = vr1min;
-         /* Else arbitrarily take VR0.  */
-       }
-     }
-   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
-          && (mineq || operand_less_p (*vr0min, vr1min) == 1))
-     {
-       /* [ (  ) ] or [(  ) ] or [ (  )] */
-       if (*vr0type == VR_RANGE
-         && vr1type == VR_RANGE)
-       {
-         /* If both are ranges the result is the inner one.  */
-         *vr0type = vr1type;
-         *vr0min = vr1min;
-         *vr0max = vr1max;
-       }
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       {
-         /* Choose the right gap if the left one is empty.  */
-         if (mineq)
-           {
-             if (TREE_CODE (vr1max) != INTEGER_CST)
-               *vr0min = vr1max;
-             else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
-                      && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
-               *vr0min
-                 = int_const_binop (MINUS_EXPR, vr1max,
-                                    build_int_cst (TREE_TYPE (vr1max), -1));
-             else
-               *vr0min
-                 = int_const_binop (PLUS_EXPR, vr1max,
-                                    build_int_cst (TREE_TYPE (vr1max), 1));
-           }
-         /* Choose the left gap if the right one is empty.  */
-         else if (maxeq)
-           {
-             if (TREE_CODE (vr1min) != INTEGER_CST)
-               *vr0max = vr1min;
-             else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
-                      && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
-               *vr0max
-                 = int_const_binop (PLUS_EXPR, vr1min,
-                                    build_int_cst (TREE_TYPE (vr1min), -1));
-             else
-               *vr0max
-                 = int_const_binop (MINUS_EXPR, vr1min,
-                                    build_int_cst (TREE_TYPE (vr1min), 1));
-           }
-         /* Choose the anti-range if the range is effectively varying.  */
-         else if (vrp_val_is_min (*vr0min)
-                  && vrp_val_is_max (*vr0max))
-           {
-             *vr0type = vr1type;
-             *vr0min = vr1min;
-             *vr0max = vr1max;
-           }
-         /* Else choose the range.  */
-       }
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       /* If both are anti-ranges the result is the outer one.  */
-       ;
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_RANGE)
-       {
-         /* The intersection is empty.  */
-         *vr0type = VR_UNDEFINED;
-         *vr0min = NULL_TREE;
-         *vr0max = NULL_TREE;
-       }
-       else
-       gcc_unreachable ();
-     }
-   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
-          && (mineq || operand_less_p (vr1min, *vr0min) == 1))
-     {
-       /* ( [  ] ) or ([  ] ) or ( [  ]) */
-       if (*vr0type == VR_RANGE
-         && vr1type == VR_RANGE)
-       /* Choose the inner range.  */
-       ;
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_RANGE)
-       {
-         /* Choose the right gap if the left is empty.  */
-         if (mineq)
-           {
-             *vr0type = VR_RANGE;
-             if (TREE_CODE (*vr0max) != INTEGER_CST)
-               *vr0min = *vr0max;
-             else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
-                      && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
-               *vr0min
-                 = int_const_binop (MINUS_EXPR, *vr0max,
-                                    build_int_cst (TREE_TYPE (*vr0max), -1));
-             else
-               *vr0min
-                 = int_const_binop (PLUS_EXPR, *vr0max,
-                                    build_int_cst (TREE_TYPE (*vr0max), 1));
-             *vr0max = vr1max;
-           }
-         /* Choose the left gap if the right is empty.  */
-         else if (maxeq)
-           {
-             *vr0type = VR_RANGE;
-             if (TREE_CODE (*vr0min) != INTEGER_CST)
-               *vr0max = *vr0min;
-             else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
-                      && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
-               *vr0max
-                 = int_const_binop (PLUS_EXPR, *vr0min,
-                                    build_int_cst (TREE_TYPE (*vr0min), -1));
-             else
-               *vr0max
-                 = int_const_binop (MINUS_EXPR, *vr0min,
-                                    build_int_cst (TREE_TYPE (*vr0min), 1));
-             *vr0min = vr1min;
-           }
-         /* Choose the anti-range if the range is effectively varying.  */
-         else if (vrp_val_is_min (vr1min)
-                  && vrp_val_is_max (vr1max))
-           ;
-         /* Choose the anti-range if it is ~[0,0], that range is special
-            enough to special case when vr1's range is relatively wide.  */
-         else if (*vr0min == *vr0max
-                  && integer_zerop (*vr0min)
-                  && (TYPE_PRECISION (TREE_TYPE (*vr0min))
-                      == TYPE_PRECISION (ptr_type_node))
-                  && TREE_CODE (vr1max) == INTEGER_CST
-                  && TREE_CODE (vr1min) == INTEGER_CST
-                  && (wi::clz (wi::sub (vr1max, vr1min))
-                      < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
-           ;
-         /* Else choose the range.  */
-         else
-           {
-             *vr0type = vr1type;
-             *vr0min = vr1min;
-             *vr0max = vr1max;
-           }
-       }
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       {
-         /* If both are anti-ranges the result is the outer one.  */
-         *vr0type = vr1type;
-         *vr0min = vr1min;
-         *vr0max = vr1max;
-       }
-       else if (vr1type == VR_ANTI_RANGE
-              && *vr0type == VR_RANGE)
-       {
-         /* The intersection is empty.  */
-         *vr0type = VR_UNDEFINED;
-         *vr0min = NULL_TREE;
-         *vr0max = NULL_TREE;
-       }
-       else
-       gcc_unreachable ();
+ /* Return true if STMT is interesting for VRP.  */
+ bool
+ stmt_interesting_for_vrp (gimple *stmt)
+ {
+   if (gimple_code (stmt) == GIMPLE_PHI)
+     {
+       tree res = gimple_phi_result (stmt);
+       return (!virtual_operand_p (res)
+             && (INTEGRAL_TYPE_P (TREE_TYPE (res))
+                 || POINTER_TYPE_P (TREE_TYPE (res))));
      }
-   else if ((operand_less_p (vr1min, *vr0max) == 1
-           || operand_equal_p (vr1min, *vr0max, 0))
-          && operand_less_p (*vr0min, vr1min) == 1)
+   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
      {
-       /* [  (  ]  ) or [  ](  ) */
-       if (*vr0type == VR_ANTI_RANGE
-         && vr1type == VR_ANTI_RANGE)
-       *vr0max = vr1max;
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_RANGE)
-       *vr0min = vr1min;
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_ANTI_RANGE)
-       {
-         if (TREE_CODE (vr1min) == INTEGER_CST)
-           *vr0max = int_const_binop (MINUS_EXPR, vr1min,
-                                      build_int_cst (TREE_TYPE (vr1min), 1));
-         else
-           *vr0max = vr1min;
-       }
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_RANGE)
-       {
-         *vr0type = VR_RANGE;
-         if (TREE_CODE (*vr0max) == INTEGER_CST)
-           *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
-                                      build_int_cst (TREE_TYPE (*vr0max), 1));
-         else
-           *vr0min = *vr0max;
-         *vr0max = vr1max;
-       }
-       else
-       gcc_unreachable ();
+       tree lhs = gimple_get_lhs (stmt);
+       /* In general, assignments with virtual operands are not useful
+        for deriving ranges, with the obvious exception of calls to
+        builtin functions.  */
+       if (lhs && TREE_CODE (lhs) == SSA_NAME
+         && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+             || POINTER_TYPE_P (TREE_TYPE (lhs)))
+         && (is_gimple_call (stmt)
+             || !gimple_vuse (stmt)))
+       return true;
+       else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
+       switch (gimple_call_internal_fn (stmt))
+         {
+         case IFN_ADD_OVERFLOW:
+         case IFN_SUB_OVERFLOW:
+         case IFN_MUL_OVERFLOW:
+         case IFN_ATOMIC_COMPARE_EXCHANGE:
+           /* These internal calls return _Complex integer type,
+              but are interesting to VRP nevertheless.  */
+           if (lhs && TREE_CODE (lhs) == SSA_NAME)
+             return true;
+           break;
+         default:
+           break;
+         }
      }
-   else if ((operand_less_p (*vr0min, vr1max) == 1
-           || operand_equal_p (*vr0min, vr1max, 0))
-          && operand_less_p (vr1min, *vr0min) == 1)
+   else if (gimple_code (stmt) == GIMPLE_COND
+          || gimple_code (stmt) == GIMPLE_SWITCH)
+     return true;
+   return false;
+ }
+ /* Initialization required by ssa_propagate engine.  */
+ void
+ vrp_prop::vrp_initialize ()
+ {
+   basic_block bb;
+   FOR_EACH_BB_FN (bb, cfun)
      {
-       /* (  [  )  ] or (  )[  ] */
-       if (*vr0type == VR_ANTI_RANGE
-         && vr1type == VR_ANTI_RANGE)
-       *vr0min = vr1min;
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_RANGE)
-       *vr0max = vr1max;
-       else if (*vr0type == VR_RANGE
-              && vr1type == VR_ANTI_RANGE)
+       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
+          gsi_next (&si))
        {
-         if (TREE_CODE (vr1max) == INTEGER_CST)
-           *vr0min = int_const_binop (PLUS_EXPR, vr1max,
-                                      build_int_cst (TREE_TYPE (vr1max), 1));
+         gphi *phi = si.phi ();
+         if (!stmt_interesting_for_vrp (phi))
+           {
+             tree lhs = PHI_RESULT (phi);
+             set_value_range_to_varying (get_value_range (lhs));
+             prop_set_simulate_again (phi, false);
+           }
          else
-           *vr0min = vr1max;
+           prop_set_simulate_again (phi, true);
        }
-       else if (*vr0type == VR_ANTI_RANGE
-              && vr1type == VR_RANGE)
-       {
-         *vr0type = VR_RANGE;
-         if (TREE_CODE (*vr0min) == INTEGER_CST)
-           *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
-                                      build_int_cst (TREE_TYPE (*vr0min), 1));
+       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
+          gsi_next (&si))
+         {
+         gimple *stmt = gsi_stmt (si);
+         /* If the statement is a control insn, then we do not
+            want to avoid simulating the statement once.  Failure
+            to do so means that those edges will never get added.  */
+         if (stmt_ends_bb_p (stmt))
+           prop_set_simulate_again (stmt, true);
+         else if (!stmt_interesting_for_vrp (stmt))
+           {
+             set_defs_to_varying (stmt);
+             prop_set_simulate_again (stmt, false);
+           }
          else
-           *vr0max = *vr0min;
-         *vr0min = vr1min;
+           prop_set_simulate_again (stmt, true);
        }
-       else
-       gcc_unreachable ();
      }
+ }
  
-   /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
-      result for the intersection.  That's always a conservative
-      correct estimate unless VR1 is a constant singleton range
-      in which case we choose that.  */
-   if (vr1type == VR_RANGE
-       && is_gimple_min_invariant (vr1min)
-       && vrp_operand_equal_p (vr1min, vr1max))
-     {
-       *vr0type = vr1type;
-       *vr0min = vr1min;
-       *vr0max = vr1max;
-     }
+ /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
+    that includes the value VAL.  The search is restricted to the range
+    [START_IDX, n - 1] where n is the size of VEC.
  
-   return;
- }
+    If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
+    returned.
  
+    If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
+    it is placed in IDX and false is returned.
  
- /* Intersect the two value-ranges *VR0 and *VR1 and store the result
-    in *VR0.  This may not be the smallest possible such range.  */
+    If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
+    returned. */
  
- static void
vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
+ bool
find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
  {
-   value_range saved;
-   /* If either range is VR_VARYING the other one wins.  */
-   if (vr1->type == VR_VARYING)
-     return;
-   if (vr0->type == VR_VARYING)
-     {
-       copy_value_range (vr0, vr1);
-       return;
-     }
+   size_t n = gimple_switch_num_labels (stmt);
+   size_t low, high;
  
-   /* When either range is VR_UNDEFINED the resulting range is
-      VR_UNDEFINED, too.  */
-   if (vr0->type == VR_UNDEFINED)
-     return;
-   if (vr1->type == VR_UNDEFINED)
-     {
-       set_value_range_to_undefined (vr0);
-       return;
-     }
+   /* Find case label for minimum of the value range or the next one.
+      At each iteration we are searching in [low, high - 1]. */
  
-   /* Save the original vr0 so we can return it as conservative intersection
-      result when our worker turns things to varying.  */
-   saved = *vr0;
-   intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
-                   vr1->type, vr1->min, vr1->max);
-   /* Make sure to canonicalize the result though as the inversion of a
-      VR_RANGE can still be a VR_RANGE.  */
-   set_and_canonicalize_value_range (vr0, vr0->type,
-                                   vr0->min, vr0->max, vr0->equiv);
-   /* If that failed, use the saved original VR0.  */
-   if (vr0->type == VR_VARYING)
+   for (low = start_idx, high = n; high != low; )
      {
-       *vr0 = saved;
-       return;
-     }
-   /* If the result is VR_UNDEFINED there is no need to mess with
-      the equivalencies.  */
-   if (vr0->type == VR_UNDEFINED)
-     return;
+       tree t;
+       int cmp;
+       /* Note that i != high, so we never ask for n. */
+       size_t i = (high + low) / 2;
+       t = gimple_switch_label (stmt, i);
  
-   /* The resulting set of equivalences for range intersection is the union of
-      the two sets.  */
-   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
-     bitmap_ior_into (vr0->equiv, vr1->equiv);
-   else if (vr1->equiv && !vr0->equiv)
-     {
-       vr0->equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
-       bitmap_copy (vr0->equiv, vr1->equiv);
-     }
- }
+       /* Cache the result of comparing CASE_LOW and val.  */
+       cmp = tree_int_cst_compare (CASE_LOW (t), val);
  
- void
- vrp_intersect_ranges (value_range *vr0, value_range *vr1)
- {
-   if (dump_file && (dump_flags & TDF_DETAILS))
-     {
-       fprintf (dump_file, "Intersecting\n  ");
-       dump_value_range (dump_file, vr0);
-       fprintf (dump_file, "\nand\n  ");
-       dump_value_range (dump_file, vr1);
-       fprintf (dump_file, "\n");
-     }
-   vrp_intersect_ranges_1 (vr0, vr1);
-   if (dump_file && (dump_flags & TDF_DETAILS))
-     {
-       fprintf (dump_file, "to\n  ");
-       dump_value_range (dump_file, vr0);
-       fprintf (dump_file, "\n");
+       if (cmp == 0)
+       {
+         /* Ranges cannot be empty. */
+         *idx = i;
+         return true;
+       }
+       else if (cmp > 0)
+         high = i;
+       else
+       {
+         low = i + 1;
+         if (CASE_HIGH (t) != NULL
+             && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
+           {
+             *idx = i;
+             return true;
+           }
+         }
      }
+   *idx = high;
+   return false;
  }
  
- /* Meet operation for value ranges.  Given two value ranges VR0 and
-    VR1, store in VR0 a range that contains both VR0 and VR1.  This
-    may not be the smallest possible such range.  */
+ /* Searches the case label vector VEC for the range of CASE_LABELs that is used
+    for values between MIN and MAX. The first index is placed in MIN_IDX. The
+    last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
+    then MAX_IDX < MIN_IDX.
+    Returns true if the default label is not needed. */
  
- static void
- vrp_meet_1 (value_range *vr0, const value_range *vr1)
+ bool
+ find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
+                      size_t *max_idx)
  {
-   value_range saved;
-   if (vr0->type == VR_UNDEFINED)
-     {
-       set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
-       return;
-     }
+   size_t i, j;
+   bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
+   bool max_take_default = !find_case_label_index (stmt, i, max, &j);
  
-   if (vr1->type == VR_UNDEFINED)
+   if (i == j
+       && min_take_default
+       && max_take_default)
      {
-       /* VR0 already has the resulting range.  */
-       return;
+       /* Only the default case label reached.
+          Return an empty range. */
+       *min_idx = 1;
+       *max_idx = 0;
+       return false;
      }
-   if (vr0->type == VR_VARYING)
+   else
      {
-       /* Nothing to do.  VR0 already has the resulting range.  */
-       return;
-     }
+       bool take_default = min_take_default || max_take_default;
+       tree low, high;
+       size_t k;
  
-   if (vr1->type == VR_VARYING)
-     {
-       set_value_range_to_varying (vr0);
-       return;
-     }
+       if (max_take_default)
+       j--;
  
-   saved = *vr0;
-   union_ranges (&vr0->type, &vr0->min, &vr0->max,
-               vr1->type, vr1->min, vr1->max);
-   if (vr0->type == VR_VARYING)
-     {
-       /* Failed to find an efficient meet.  Before giving up and setting
-        the result to VARYING, see if we can at least derive a useful
-        anti-range.  FIXME, all this nonsense about distinguishing
-        anti-ranges from ranges is necessary because of the odd
-        semantics of range_includes_zero_p and friends.  */
-       if (((saved.type == VR_RANGE
-           && range_includes_zero_p (saved.min, saved.max) == 0)
-          || (saved.type == VR_ANTI_RANGE
-              && range_includes_zero_p (saved.min, saved.max) == 1))
-         && ((vr1->type == VR_RANGE
-              && range_includes_zero_p (vr1->min, vr1->max) == 0)
-             || (vr1->type == VR_ANTI_RANGE
-                 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
+       /* If the case label range is continuous, we do not need
+        the default case label.  Verify that.  */
+       high = CASE_LOW (gimple_switch_label (stmt, i));
+       if (CASE_HIGH (gimple_switch_label (stmt, i)))
+       high = CASE_HIGH (gimple_switch_label (stmt, i));
+       for (k = i + 1; k <= j; ++k)
        {
-         set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
-         /* Since this meet operation did not result from the meeting of
-            two equivalent names, VR0 cannot have any equivalences.  */
-         if (vr0->equiv)
-           bitmap_clear (vr0->equiv);
-         return;
+         low = CASE_LOW (gimple_switch_label (stmt, k));
+         if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
+           {
+             take_default = true;
+             break;
+           }
+         high = low;
+         if (CASE_HIGH (gimple_switch_label (stmt, k)))
+           high = CASE_HIGH (gimple_switch_label (stmt, k));
        }
  
-       set_value_range_to_varying (vr0);
-       return;
+       *min_idx = i;
+       *max_idx = j;
+       return !take_default;
      }
-   set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
-                                   vr0->equiv);
-   if (vr0->type == VR_VARYING)
-     return;
-   /* The resulting set of equivalences is always the intersection of
-      the two sets.  */
-   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
-     bitmap_and_into (vr0->equiv, vr1->equiv);
-   else if (vr0->equiv && !vr1->equiv)
-     bitmap_clear (vr0->equiv);
  }
  
- void
- vrp_meet (value_range *vr0, const value_range *vr1)
+ /* Evaluate statement STMT.  If the statement produces a useful range,
+    return SSA_PROP_INTERESTING and record the SSA name with the
+    interesting range into *OUTPUT_P.
+    If STMT is a conditional branch and we can determine its truth
+    value, the taken edge is recorded in *TAKEN_EDGE_P.
+    If STMT produces a varying value, return SSA_PROP_VARYING.  */
+ enum ssa_prop_result
+ vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
  {
-   if (dump_file && (dump_flags & TDF_DETAILS))
-     {
-       fprintf (dump_file, "Meeting\n  ");
-       dump_value_range (dump_file, vr0);
-       fprintf (dump_file, "\nand\n  ");
-       dump_value_range (dump_file, vr1);
-       fprintf (dump_file, "\n");
-     }
-   vrp_meet_1 (vr0, vr1);
-   if (dump_file && (dump_flags & TDF_DETAILS))
+   value_range vr = VR_INITIALIZER;
+   tree lhs = gimple_get_lhs (stmt);
+   extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
+   if (*output_p)
      {
-       fprintf (dump_file, "to\n  ");
-       dump_value_range (dump_file, vr0);
-       fprintf (dump_file, "\n");
+       if (update_value_range (*output_p, &vr))
+       {
+         if (dump_file && (dump_flags & TDF_DETAILS))
+           {
+             fprintf (dump_file, "Found new range for ");
+             print_generic_expr (dump_file, *output_p);
+             fprintf (dump_file, ": ");
+             dump_value_range (dump_file, &vr);
+             fprintf (dump_file, "\n");
+           }
+         if (vr.type == VR_VARYING)
+           return SSA_PROP_VARYING;
+         return SSA_PROP_INTERESTING;
+       }
+       return SSA_PROP_NOT_INTERESTING;
      }
- }
  
+   if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
+     switch (gimple_call_internal_fn (stmt))
+       {
+       case IFN_ADD_OVERFLOW:
+       case IFN_SUB_OVERFLOW:
+       case IFN_MUL_OVERFLOW:
+       case IFN_ATOMIC_COMPARE_EXCHANGE:
+       /* These internal calls return _Complex integer type,
+          which VRP does not track, but the immediate uses
+          thereof might be interesting.  */
+       if (lhs && TREE_CODE (lhs) == SSA_NAME)
+         {
+           imm_use_iterator iter;
+           use_operand_p use_p;
+           enum ssa_prop_result res = SSA_PROP_VARYING;
+           set_value_range_to_varying (get_value_range (lhs));
  
- /* Visit all arguments for PHI node PHI that flow through executable
-    edges.  If a valid value range can be derived from all the incoming
-    value ranges, set a new range in VR_RESULT.  */
+           FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
+             {
+               gimple *use_stmt = USE_STMT (use_p);
+               if (!is_gimple_assign (use_stmt))
+                 continue;
+               enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
+               if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
+                 continue;
+               tree rhs1 = gimple_assign_rhs1 (use_stmt);
+               tree use_lhs = gimple_assign_lhs (use_stmt);
+               if (TREE_CODE (rhs1) != rhs_code
+                   || TREE_OPERAND (rhs1, 0) != lhs
+                   || TREE_CODE (use_lhs) != SSA_NAME
+                   || !stmt_interesting_for_vrp (use_stmt)
+                   || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
+                       || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
+                       || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
+                 continue;
+               /* If there is a change in the value range for any of the
+                  REALPART_EXPR/IMAGPART_EXPR immediate uses, return
+                  SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
+                  or IMAGPART_EXPR immediate uses, but none of them have
+                  a change in their value ranges, return
+                  SSA_PROP_NOT_INTERESTING.  If there are no
+                  {REAL,IMAG}PART_EXPR uses at all,
+                  return SSA_PROP_VARYING.  */
+               value_range new_vr = VR_INITIALIZER;
+               extract_range_basic (&new_vr, use_stmt);
+               value_range *old_vr = get_value_range (use_lhs);
+               if (old_vr->type != new_vr.type
+                   || !vrp_operand_equal_p (old_vr->min, new_vr.min)
+                   || !vrp_operand_equal_p (old_vr->max, new_vr.max)
+                   || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
+                 res = SSA_PROP_INTERESTING;
+               else
+                 res = SSA_PROP_NOT_INTERESTING;
+               BITMAP_FREE (new_vr.equiv);
+               if (res == SSA_PROP_INTERESTING)
+                 {
+                   *output_p = lhs;
+                   return res;
+                 }
+             }
+           return res;
+         }
+       break;
+       default:
+       break;
+       }
+   /* All other statements produce nothing of interest for VRP, so mark
+      their outputs varying and prevent further simulation.  */
+   set_defs_to_varying (stmt);
+   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
+ }
+ /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
+    { VR1TYPE, VR0MIN, VR0MAX } and store the result
+    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
+    possible such range.  The resulting range is not canonicalized.  */
  
  static void
- extract_range_from_phi_node (gphi *phi, value_range *vr_result)
+ union_ranges (enum value_range_type *vr0type,
+             tree *vr0min, tree *vr0max,
+             enum value_range_type vr1type,
+             tree vr1min, tree vr1max)
  {
-   size_t i;
-   tree lhs = PHI_RESULT (phi);
-   value_range *lhs_vr = get_value_range (lhs);
-   bool first = true;
-   int edges, old_edges;
-   struct loop *l;
+   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
+   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
  
-   if (dump_file && (dump_flags & TDF_DETAILS))
+   /* [] is vr0, () is vr1 in the following classification comments.  */
+   if (mineq && maxeq)
      {
-       fprintf (dump_file, "\nVisiting PHI node: ");
-       print_gimple_stmt (dump_file, phi, 0, dump_flags);
+       /* [(  )] */
+       if (*vr0type == vr1type)
+       /* Nothing to do for equal ranges.  */
+       ;
+       else if ((*vr0type == VR_RANGE
+               && vr1type == VR_ANTI_RANGE)
+              || (*vr0type == VR_ANTI_RANGE
+                  && vr1type == VR_RANGE))
+       {
+         /* For anti-range with range union the result is varying.  */
+         goto give_up;
+       }
+       else
+       gcc_unreachable ();
      }
-   bool may_simulate_backedge_again = false;
-   edges = 0;
-   for (i = 0; i < gimple_phi_num_args (phi); i++)
+   else if (operand_less_p (*vr0max, vr1min) == 1
+          || operand_less_p (vr1max, *vr0min) == 1)
      {
-       edge e = gimple_phi_arg_edge (phi, i);
-       if (dump_file && (dump_flags & TDF_DETAILS))
+       /* [ ] ( ) or ( ) [ ]
+        If the ranges have an empty intersection, result of the union
+        operation is the anti-range or if both are anti-ranges
+        it covers all.  */
+       if (*vr0type == VR_ANTI_RANGE
+         && vr1type == VR_ANTI_RANGE)
+       goto give_up;
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_RANGE)
+       ;
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_ANTI_RANGE)
        {
-         fprintf (dump_file,
-             "    Argument #%d (%d -> %d %sexecutable)\n",
-             (int) i, e->src->index, e->dest->index,
-             (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
+         *vr0type = vr1type;
+         *vr0min = vr1min;
+         *vr0max = vr1max;
        }
-       if (e->flags & EDGE_EXECUTABLE)
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_RANGE)
        {
-         tree arg = PHI_ARG_DEF (phi, i);
-         value_range vr_arg;
-         ++edges;
-         if (TREE_CODE (arg) == SSA_NAME)
+         /* The result is the convex hull of both ranges.  */
+         if (operand_less_p (*vr0max, vr1min) == 1)
            {
-             /* See if we are eventually going to change one of the args.  */
-             gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
-             if (! gimple_nop_p (def_stmt)
-                 && prop_simulate_again_p (def_stmt)
-                 && e->flags & EDGE_DFS_BACK)
-               may_simulate_backedge_again = true;
-             vr_arg = *(get_value_range (arg));
-             /* Do not allow equivalences or symbolic ranges to leak in from
-                backedges.  That creates invalid equivalencies.
-                See PR53465 and PR54767.  */
-             if (e->flags & EDGE_DFS_BACK)
+             /* If the result can be an anti-range, create one.  */
+             if (TREE_CODE (*vr0max) == INTEGER_CST
+                 && TREE_CODE (vr1min) == INTEGER_CST
+                 && vrp_val_is_min (*vr0min)
+                 && vrp_val_is_max (vr1max))
                {
-                 if (vr_arg.type == VR_RANGE
-                     || vr_arg.type == VR_ANTI_RANGE)
+                 tree min = int_const_binop (PLUS_EXPR,
+                                             *vr0max,
+                                             build_int_cst (TREE_TYPE (*vr0max), 1));
+                 tree max = int_const_binop (MINUS_EXPR,
+                                             vr1min,
+                                             build_int_cst (TREE_TYPE (vr1min), 1));
+                 if (!operand_less_p (max, min))
                    {
-                     vr_arg.equiv = NULL;
-                     if (symbolic_range_p (&vr_arg))
-                       {
-                         vr_arg.type = VR_VARYING;
-                         vr_arg.min = NULL_TREE;
-                         vr_arg.max = NULL_TREE;
-                       }
+                     *vr0type = VR_ANTI_RANGE;
+                     *vr0min = min;
+                     *vr0max = max;
                    }
+                 else
+                   *vr0max = vr1max;
                }
              else
+               *vr0max = vr1max;
+           }
+         else
+           {
+             /* If the result can be an anti-range, create one.  */
+             if (TREE_CODE (vr1max) == INTEGER_CST
+                 && TREE_CODE (*vr0min) == INTEGER_CST
+                 && vrp_val_is_min (vr1min)
+                 && vrp_val_is_max (*vr0max))
                {
-                 /* If the non-backedge arguments range is VR_VARYING then
-                    we can still try recording a simple equivalence.  */
-                 if (vr_arg.type == VR_VARYING)
+                 tree min = int_const_binop (PLUS_EXPR,
+                                             vr1max,
+                                             build_int_cst (TREE_TYPE (vr1max), 1));
+                 tree max = int_const_binop (MINUS_EXPR,
+                                             *vr0min,
+                                             build_int_cst (TREE_TYPE (*vr0min), 1));
+                 if (!operand_less_p (max, min))
                    {
-                     vr_arg.type = VR_RANGE;
-                     vr_arg.min = arg;
-                     vr_arg.max = arg;
-                     vr_arg.equiv = NULL;
+                     *vr0type = VR_ANTI_RANGE;
+                     *vr0min = min;
+                     *vr0max = max;
                    }
+                 else
+                   *vr0min = vr1min;
                }
+             else
+               *vr0min = vr1min;
            }
-         else
-           {
-             if (TREE_OVERFLOW_P (arg))
-               arg = drop_tree_overflow (arg);
-             vr_arg.type = VR_RANGE;
-             vr_arg.min = arg;
-             vr_arg.max = arg;
-             vr_arg.equiv = NULL;
-           }
-         if (dump_file && (dump_flags & TDF_DETAILS))
-           {
-             fprintf (dump_file, "\t");
-             print_generic_expr (dump_file, arg, dump_flags);
-             fprintf (dump_file, ": ");
-             dump_value_range (dump_file, &vr_arg);
-             fprintf (dump_file, "\n");
-           }
-         if (first)
-           copy_value_range (vr_result, &vr_arg);
-         else
-           vrp_meet (vr_result, &vr_arg);
-         first = false;
-         if (vr_result->type == VR_VARYING)
-           break;
-       }
-     }
-   if (vr_result->type == VR_VARYING)
-     goto varying;
-   else if (vr_result->type == VR_UNDEFINED)
-     goto update_range;
-   old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
-   vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
-   /* To prevent infinite iterations in the algorithm, derive ranges
-      when the new value is slightly bigger or smaller than the
-      previous one.  We don't do this if we have seen a new executable
-      edge; this helps us avoid an infinity for conditionals
-      which are not in a loop.  If the old value-range was VR_UNDEFINED
-      use the updated range and iterate one more time.  If we will not
-      simulate this PHI again via the backedge allow us to iterate.  */
-   if (edges > 0
-       && gimple_phi_num_args (phi) > 1
-       && edges == old_edges
-       && lhs_vr->type != VR_UNDEFINED
-       && may_simulate_backedge_again)
-     {
-       /* Compare old and new ranges, fall back to varying if the
-          values are not comparable.  */
-       int cmp_min = compare_values (lhs_vr->min, vr_result->min);
-       if (cmp_min == -2)
-       goto varying;
-       int cmp_max = compare_values (lhs_vr->max, vr_result->max);
-       if (cmp_max == -2)
-       goto varying;
-       /* For non VR_RANGE or for pointers fall back to varying if
-        the range changed.  */
-       if ((lhs_vr->type != VR_RANGE || vr_result->type != VR_RANGE
-          || POINTER_TYPE_P (TREE_TYPE (lhs)))
-         && (cmp_min != 0 || cmp_max != 0))
-       goto varying;
-       /* If the new minimum is larger than the previous one
-        retain the old value.  If the new minimum value is smaller
-        than the previous one and not -INF go all the way to -INF + 1.
-        In the first case, to avoid infinite bouncing between different
-        minimums, and in the other case to avoid iterating millions of
-        times to reach -INF.  Going to -INF + 1 also lets the following
-        iteration compute whether there will be any overflow, at the
-        expense of one additional iteration.  */
-       if (cmp_min < 0)
-       vr_result->min = lhs_vr->min;
-       else if (cmp_min > 0
-              && !vrp_val_is_min (vr_result->min))
-       vr_result->min
-         = int_const_binop (PLUS_EXPR,
-                            vrp_val_min (TREE_TYPE (vr_result->min)),
-                            build_int_cst (TREE_TYPE (vr_result->min), 1));
-       /* Similarly for the maximum value.  */
-       if (cmp_max > 0)
-       vr_result->max = lhs_vr->max;
-       else if (cmp_max < 0
-              && !vrp_val_is_max (vr_result->max))
-       vr_result->max
-         = int_const_binop (MINUS_EXPR,
-                            vrp_val_max (TREE_TYPE (vr_result->min)),
-                            build_int_cst (TREE_TYPE (vr_result->min), 1));
-       /* If we dropped either bound to +-INF then if this is a loop
-        PHI node SCEV may known more about its value-range.  */
-       if (cmp_min > 0 || cmp_min < 0
-          || cmp_max < 0 || cmp_max > 0)
-       goto scev_check;
-       goto infinite_check;
-     }
-   goto update_range;
- varying:
-   set_value_range_to_varying (vr_result);
- scev_check:
-   /* If this is a loop PHI node SCEV may known more about its value-range.
-      scev_check can be reached from two paths, one is a fall through from above
-      "varying" label, the other is direct goto from code block which tries to
-      avoid infinite simulation.  */
-   if ((l = loop_containing_stmt (phi))
-       && l->header == gimple_bb (phi))
-     adjust_range_with_scev (vr_result, l, phi, lhs);
- infinite_check:
-   /* If we will end up with a (-INF, +INF) range, set it to
-      VARYING.  Same if the previous max value was invalid for
-      the type and we end up with vr_result.min > vr_result.max.  */
-   if ((vr_result->type == VR_RANGE || vr_result->type == VR_ANTI_RANGE)
-       && !((vrp_val_is_max (vr_result->max) && vrp_val_is_min (vr_result->min))
-          || compare_values (vr_result->min, vr_result->max) > 0))
-     ;
-   else
-     set_value_range_to_varying (vr_result);
-   /* If the new range is different than the previous value, keep
-      iterating.  */
- update_range:
-   return;
- }
- /* Visit all arguments for PHI node PHI that flow through executable
-    edges.  If a valid value range can be derived from all the incoming
-    value ranges, set a new range for the LHS of PHI.  */
- static enum ssa_prop_result
- vrp_visit_phi_node (gphi *phi)
- {
-   tree lhs = PHI_RESULT (phi);
-   value_range vr_result = VR_INITIALIZER;
-   extract_range_from_phi_node (phi, &vr_result);
-   if (update_value_range (lhs, &vr_result))
-     {
-       if (dump_file && (dump_flags & TDF_DETAILS))
-       {
-         fprintf (dump_file, "Found new range for ");
-         print_generic_expr (dump_file, lhs);
-         fprintf (dump_file, ": ");
-         dump_value_range (dump_file, &vr_result);
-         fprintf (dump_file, "\n");
        }
-       if (vr_result.type == VR_VARYING)
-       return SSA_PROP_VARYING;
-       return SSA_PROP_INTERESTING;
-     }
-   /* Nothing changed, don't add outgoing edges.  */
-   return SSA_PROP_NOT_INTERESTING;
- }
- /* Simplify boolean operations if the source is known
-    to be already a boolean.  */
- static bool
- simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
- {
-   enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
-   tree lhs, op0, op1;
-   bool need_conversion;
-   /* We handle only !=/== case here.  */
-   gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
-   op0 = gimple_assign_rhs1 (stmt);
-   if (!op_with_boolean_value_range_p (op0))
-     return false;
-   op1 = gimple_assign_rhs2 (stmt);
-   if (!op_with_boolean_value_range_p (op1))
-     return false;
-   /* Reduce number of cases to handle to NE_EXPR.  As there is no
-      BIT_XNOR_EXPR we cannot replace A == B with a single statement.  */
-   if (rhs_code == EQ_EXPR)
-     {
-       if (TREE_CODE (op1) == INTEGER_CST)
-       op1 = int_const_binop (BIT_XOR_EXPR, op1,
-                              build_int_cst (TREE_TYPE (op1), 1));
        else
-       return false;
-     }
-   lhs = gimple_assign_lhs (stmt);
-   need_conversion
-     = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
-   /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
-   if (need_conversion
-       && !TYPE_UNSIGNED (TREE_TYPE (op0))
-       && TYPE_PRECISION (TREE_TYPE (op0)) == 1
-       && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
-     return false;
-   /* For A != 0 we can substitute A itself.  */
-   if (integer_zerop (op1))
-     gimple_assign_set_rhs_with_ops (gsi,
-                                   need_conversion
-                                   ? NOP_EXPR : TREE_CODE (op0), op0);
-   /* For A != B we substitute A ^ B.  Either with conversion.  */
-   else if (need_conversion)
-     {
-       tree tem = make_ssa_name (TREE_TYPE (op0));
-       gassign *newop
-       = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
-       gsi_insert_before (gsi, newop, GSI_SAME_STMT);
-       if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
-         && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
-       set_range_info (tem, VR_RANGE,
-                       wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
-                       wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
-       gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
-     }
-   /* Or without.  */
-   else
-     gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
-   update_stmt (gsi_stmt (*gsi));
-   fold_stmt (gsi, follow_single_use_edges);
-   return true;
- }
- /* Simplify a division or modulo operator to a right shift or bitwise and
-    if the first operand is unsigned or is greater than zero and the second
-    operand is an exact power of two.  For TRUNC_MOD_EXPR op0 % op1 with
-    constant op1 (op1min = op1) or with op1 in [op1min, op1max] range,
-    optimize it into just op0 if op0's range is known to be a subset of
-    [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned
-    modulo.  */
- static bool
- simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
- {
-   enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
-   tree val = NULL;
-   tree op0 = gimple_assign_rhs1 (stmt);
-   tree op1 = gimple_assign_rhs2 (stmt);
-   tree op0min = NULL_TREE, op0max = NULL_TREE;
-   tree op1min = op1;
-   value_range *vr = NULL;
-   if (TREE_CODE (op0) == INTEGER_CST)
-     {
-       op0min = op0;
-       op0max = op0;
-     }
-   else
-     {
-       vr = get_value_range (op0);
-       if (range_int_cst_p (vr))
-       {
-         op0min = vr->min;
-         op0max = vr->max;
-       }
-     }
-   if (rhs_code == TRUNC_MOD_EXPR
-       && TREE_CODE (op1) == SSA_NAME)
-     {
-       value_range *vr1 = get_value_range (op1);
-       if (range_int_cst_p (vr1))
-       op1min = vr1->min;
+       gcc_unreachable ();
      }
-   if (rhs_code == TRUNC_MOD_EXPR
-       && TREE_CODE (op1min) == INTEGER_CST
-       && tree_int_cst_sgn (op1min) == 1
-       && op0max
-       && tree_int_cst_lt (op0max, op1min))
+   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
+          && (mineq || operand_less_p (*vr0min, vr1min) == 1))
      {
-       if (TYPE_UNSIGNED (TREE_TYPE (op0))
-         || tree_int_cst_sgn (op0min) >= 0
-         || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min),
-                             op0min))
+       /* [ (  ) ] or [(  ) ] or [ (  )] */
+       if (*vr0type == VR_RANGE
+         && vr1type == VR_RANGE)
+       ;
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_ANTI_RANGE)
        {
-         /* If op0 already has the range op0 % op1 has,
-            then TRUNC_MOD_EXPR won't change anything.  */
-         gimple_assign_set_rhs_from_tree (gsi, op0);
-         return true;
+         *vr0type = vr1type;
+         *vr0min = vr1min;
+         *vr0max = vr1max;
        }
-     }
-   if (TREE_CODE (op0) != SSA_NAME)
-     return false;
-   if (!integer_pow2p (op1))
-     {
-       /* X % -Y can be only optimized into X % Y either if
-        X is not INT_MIN, or Y is not -1.  Fold it now, as after
-        remove_range_assertions the range info might be not available
-        anymore.  */
-       if (rhs_code == TRUNC_MOD_EXPR
-         && fold_stmt (gsi, follow_single_use_edges))
-       return true;
-       return false;
-     }
-   if (TYPE_UNSIGNED (TREE_TYPE (op0)))
-     val = integer_one_node;
-   else
-     {
-       bool sop = false;
-       val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
-       if (val
-         && sop
-         && integer_onep (val)
-         && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_RANGE)
        {
-         location_t location;
-         if (!gimple_has_location (stmt))
-           location = input_location;
+         /* Arbitrarily choose the right or left gap.  */
+         if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
+           *vr0max = int_const_binop (MINUS_EXPR, vr1min,
+                                      build_int_cst (TREE_TYPE (vr1min), 1));
+         else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
+           *vr0min = int_const_binop (PLUS_EXPR, vr1max,
+                                      build_int_cst (TREE_TYPE (vr1max), 1));
          else
-           location = gimple_location (stmt);
-         warning_at (location, OPT_Wstrict_overflow,
-                     "assuming signed overflow does not occur when "
-                     "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
+           goto give_up;
        }
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_ANTI_RANGE)
+       /* The result covers everything.  */
+       goto give_up;
+       else
+       gcc_unreachable ();
      }
-   if (val && integer_onep (val))
+   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
+          && (mineq || operand_less_p (vr1min, *vr0min) == 1))
      {
-       tree t;
-       if (rhs_code == TRUNC_DIV_EXPR)
+       /* ( [  ] ) or ([  ] ) or ( [  ]) */
+       if (*vr0type == VR_RANGE
+         && vr1type == VR_RANGE)
        {
-         t = build_int_cst (integer_type_node, tree_log2 (op1));
-         gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
-         gimple_assign_set_rhs1 (stmt, op0);
-         gimple_assign_set_rhs2 (stmt, t);
+         *vr0type = vr1type;
+         *vr0min = vr1min;
+         *vr0max = vr1max;
        }
-       else
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_ANTI_RANGE)
+       ;
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_ANTI_RANGE)
        {
-         t = build_int_cst (TREE_TYPE (op1), 1);
-         t = int_const_binop (MINUS_EXPR, op1, t);
-         t = fold_convert (TREE_TYPE (op0), t);
-         gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
-         gimple_assign_set_rhs1 (stmt, op0);
-         gimple_assign_set_rhs2 (stmt, t);
+         *vr0type = VR_ANTI_RANGE;
+         if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
+           {
+             *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
+                                        build_int_cst (TREE_TYPE (*vr0min), 1));
+             *vr0min = vr1min;
+           }
+         else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
+           {
+             *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
+                                        build_int_cst (TREE_TYPE (*vr0max), 1));
+             *vr0max = vr1max;
+           }
+         else
+           goto give_up;
        }
-       update_stmt (stmt);
-       fold_stmt (gsi, follow_single_use_edges);
-       return true;
-     }
-   return false;
- }
- /* Simplify a min or max if the ranges of the two operands are
-    disjoint.   Return true if we do simplify.  */
- static bool
- simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
- {
-   tree op0 = gimple_assign_rhs1 (stmt);
-   tree op1 = gimple_assign_rhs2 (stmt);
-   bool sop = false;
-   tree val;
-   val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
-        (LE_EXPR, op0, op1, &sop));
-   if (!val)
-     {
-       sop = false;
-       val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
-            (LT_EXPR, op0, op1, &sop));
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_RANGE)
+       /* The result covers everything.  */
+       goto give_up;
+       else
+       gcc_unreachable ();
      }
-   if (val)
+   else if ((operand_less_p (vr1min, *vr0max) == 1
+           || operand_equal_p (vr1min, *vr0max, 0))
+          && operand_less_p (*vr0min, vr1min) == 1
+          && operand_less_p (*vr0max, vr1max) == 1)
      {
-       if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+       /* [  (  ]  ) or [   ](   ) */
+       if (*vr0type == VR_RANGE
+         && vr1type == VR_RANGE)
+       *vr0max = vr1max;
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_ANTI_RANGE)
+       *vr0min = vr1min;
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_RANGE)
        {
-         location_t location;
-         if (!gimple_has_location (stmt))
-           location = input_location;
+         if (TREE_CODE (vr1min) == INTEGER_CST)
+           *vr0max = int_const_binop (MINUS_EXPR, vr1min,
+                                      build_int_cst (TREE_TYPE (vr1min), 1));
          else
-           location = gimple_location (stmt);
-         warning_at (location, OPT_Wstrict_overflow,
-                     "assuming signed overflow does not occur when "
-                     "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
+           goto give_up;
        }
-       /* VAL == TRUE -> OP0 < or <= op1
-        VAL == FALSE -> OP0 > or >= op1.  */
-       tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
-                 == integer_zerop (val)) ? op0 : op1;
-       gimple_assign_set_rhs_from_tree (gsi, res);
-       return true;
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_ANTI_RANGE)
+       {
+         if (TREE_CODE (*vr0max) == INTEGER_CST)
+           {
+             *vr0type = vr1type;
+             *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
+                                        build_int_cst (TREE_TYPE (*vr0max), 1));
+             *vr0max = vr1max;
+           }
+         else
+           goto give_up;
+       }
+       else
+       gcc_unreachable ();
      }
-   return false;
- }
- /* If the operand to an ABS_EXPR is >= 0, then eliminate the
-    ABS_EXPR.  If the operand is <= 0, then simplify the
-    ABS_EXPR into a NEGATE_EXPR.  */
- static bool
- simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
- {
-   tree op = gimple_assign_rhs1 (stmt);
-   value_range *vr = get_value_range (op);
-   if (vr)
+   else if ((operand_less_p (*vr0min, vr1max) == 1
+           || operand_equal_p (*vr0min, vr1max, 0))
+          && operand_less_p (vr1min, *vr0min) == 1
+          && operand_less_p (vr1max, *vr0max) == 1)
      {
-       tree val = NULL;
-       bool sop = false;
-       val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
-       if (!val)
+       /* (  [  )  ] or (   )[   ] */
+       if (*vr0type == VR_RANGE
+         && vr1type == VR_RANGE)
+       *vr0min = vr1min;
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_ANTI_RANGE)
+       *vr0max = vr1max;
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_RANGE)
        {
-         /* The range is neither <= 0 nor > 0.  Now see if it is
-            either < 0 or >= 0.  */
-         sop = false;
-         val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
-                                         &sop);
+         if (TREE_CODE (vr1max) == INTEGER_CST)
+           *vr0min = int_const_binop (PLUS_EXPR, vr1max,
+                                      build_int_cst (TREE_TYPE (vr1max), 1));
+         else
+           goto give_up;
        }
-       if (val)
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_ANTI_RANGE)
        {
-         if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+         if (TREE_CODE (*vr0min) == INTEGER_CST)
            {
-             location_t location;
-             if (!gimple_has_location (stmt))
-               location = input_location;
-             else
-               location = gimple_location (stmt);
-             warning_at (location, OPT_Wstrict_overflow,
-                         "assuming signed overflow does not occur when "
-                         "simplifying %<abs (X)%> to %<X%> or %<-X%>");
+             *vr0type = vr1type;
+             *vr0min = vr1min;
+             *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
+                                        build_int_cst (TREE_TYPE (*vr0min), 1));
            }
-         gimple_assign_set_rhs1 (stmt, op);
-         if (integer_zerop (val))
-           gimple_assign_set_rhs_code (stmt, SSA_NAME);
          else
-           gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
-         update_stmt (stmt);
-         fold_stmt (gsi, follow_single_use_edges);
-         return true;
+           goto give_up;
        }
+       else
+       gcc_unreachable ();
      }
+   else
+     goto give_up;
  
-   return false;
+   return;
+ give_up:
+   *vr0type = VR_VARYING;
+   *vr0min = NULL_TREE;
+   *vr0max = NULL_TREE;
  }
  
- /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
-    If all the bits that are being cleared by & are already
-    known to be zero from VR, or all the bits that are being
-    set by | are already known to be one from VR, the bit
-    operation is redundant.  */
+ /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
+    { VR1TYPE, VR0MIN, VR0MAX } and store the result
+    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
+    possible such range.  The resulting range is not canonicalized.  */
  
- static bool
- simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
+ static void
+ intersect_ranges (enum value_range_type *vr0type,
+                 tree *vr0min, tree *vr0max,
+                 enum value_range_type vr1type,
+                 tree vr1min, tree vr1max)
  {
-   tree op0 = gimple_assign_rhs1 (stmt);
-   tree op1 = gimple_assign_rhs2 (stmt);
-   tree op = NULL_TREE;
-   value_range vr0 = VR_INITIALIZER;
-   value_range vr1 = VR_INITIALIZER;
-   wide_int may_be_nonzero0, may_be_nonzero1;
-   wide_int must_be_nonzero0, must_be_nonzero1;
-   wide_int mask;
-   if (TREE_CODE (op0) == SSA_NAME)
-     vr0 = *(get_value_range (op0));
-   else if (is_gimple_min_invariant (op0))
-     set_value_range_to_value (&vr0, op0, NULL);
-   else
-     return false;
-   if (TREE_CODE (op1) == SSA_NAME)
-     vr1 = *(get_value_range (op1));
-   else if (is_gimple_min_invariant (op1))
-     set_value_range_to_value (&vr1, op1, NULL);
-   else
-     return false;
-   if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
-                                 &must_be_nonzero0))
-     return false;
-   if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
-                                 &must_be_nonzero1))
-     return false;
+   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
+   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
  
-   switch (gimple_assign_rhs_code (stmt))
+   /* [] is vr0, () is vr1 in the following classification comments.  */
+   if (mineq && maxeq)
      {
-     case BIT_AND_EXPR:
-       mask = may_be_nonzero0.and_not (must_be_nonzero1);
-       if (mask == 0)
-       {
-         op = op0;
-         break;
-       }
-       mask = may_be_nonzero1.and_not (must_be_nonzero0);
-       if (mask == 0)
+       /* [(  )] */
+       if (*vr0type == vr1type)
+       /* Nothing to do for equal ranges.  */
+       ;
+       else if ((*vr0type == VR_RANGE
+               && vr1type == VR_ANTI_RANGE)
+              || (*vr0type == VR_ANTI_RANGE
+                  && vr1type == VR_RANGE))
        {
-         op = op1;
-         break;
+         /* For anti-range with range intersection the result is empty.  */
+         *vr0type = VR_UNDEFINED;
+         *vr0min = NULL_TREE;
+         *vr0max = NULL_TREE;
        }
-       break;
-     case BIT_IOR_EXPR:
-       mask = may_be_nonzero0.and_not (must_be_nonzero1);
-       if (mask == 0)
+       else
+       gcc_unreachable ();
+     }
+   else if (operand_less_p (*vr0max, vr1min) == 1
+          || operand_less_p (vr1max, *vr0min) == 1)
+     {
+       /* [ ] ( ) or ( ) [ ]
+        If the ranges have an empty intersection, the result of the
+        intersect operation is the range for intersecting an
+        anti-range with a range or empty when intersecting two ranges.  */
+       if (*vr0type == VR_RANGE
+         && vr1type == VR_ANTI_RANGE)
+       ;
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_RANGE)
        {
-         op = op1;
-         break;
+         *vr0type = vr1type;
+         *vr0min = vr1min;
+         *vr0max = vr1max;
        }
-       mask = may_be_nonzero1.and_not (must_be_nonzero0);
-       if (mask == 0)
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_RANGE)
        {
-         op = op0;
-         break;
+         *vr0type = VR_UNDEFINED;
+         *vr0min = NULL_TREE;
+         *vr0max = NULL_TREE;
        }
-       break;
-     default:
-       gcc_unreachable ();
-     }
-   if (op == NULL_TREE)
-     return false;
-   gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
-   update_stmt (gsi_stmt (*gsi));
-   return true;
- }
- /* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
-    a known value range VR.
-    If there is one and only one value which will satisfy the
-    conditional, then return that value.  Else return NULL.
-    If signed overflow must be undefined for the value to satisfy
-    the conditional, then set *STRICT_OVERFLOW_P to true.  */
- static tree
- test_for_singularity (enum tree_code cond_code, tree op0,
-                     tree op1, value_range *vr)
- {
-   tree min = NULL;
-   tree max = NULL;
-   /* Extract minimum/maximum values which satisfy the conditional as it was
-      written.  */
-   if (cond_code == LE_EXPR || cond_code == LT_EXPR)
-     {
-       min = TYPE_MIN_VALUE (TREE_TYPE (op0));
-       max = op1;
-       if (cond_code == LT_EXPR)
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_ANTI_RANGE)
        {
-         tree one = build_int_cst (TREE_TYPE (op0), 1);
-         max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
-         /* Signal to compare_values_warnv this expr doesn't overflow.  */
-         if (EXPR_P (max))
-           TREE_NO_WARNING (max) = 1;
+         /* If the anti-ranges are adjacent to each other merge them.  */
+         if (TREE_CODE (*vr0max) == INTEGER_CST
+             && TREE_CODE (vr1min) == INTEGER_CST
+             && operand_less_p (*vr0max, vr1min) == 1
+             && integer_onep (int_const_binop (MINUS_EXPR,
+                                               vr1min, *vr0max)))
+           *vr0max = vr1max;
+         else if (TREE_CODE (vr1max) == INTEGER_CST
+                  && TREE_CODE (*vr0min) == INTEGER_CST
+                  && operand_less_p (vr1max, *vr0min) == 1
+                  && integer_onep (int_const_binop (MINUS_EXPR,
+                                                    *vr0min, vr1max)))
+           *vr0min = vr1min;
+         /* Else arbitrarily take VR0.  */
        }
      }
-   else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
+   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
+          && (mineq || operand_less_p (*vr0min, vr1min) == 1))
      {
-       max = TYPE_MAX_VALUE (TREE_TYPE (op0));
-       min = op1;
-       if (cond_code == GT_EXPR)
+       /* [ (  ) ] or [(  ) ] or [ (  )] */
+       if (*vr0type == VR_RANGE
+         && vr1type == VR_RANGE)
        {
-         tree one = build_int_cst (TREE_TYPE (op0), 1);
-         min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
-         /* Signal to compare_values_warnv this expr doesn't overflow.  */
-         if (EXPR_P (min))
-           TREE_NO_WARNING (min) = 1;
+         /* If both are ranges the result is the inner one.  */
+         *vr0type = vr1type;
+         *vr0min = vr1min;
+         *vr0max = vr1max;
        }
-     }
-   /* Now refine the minimum and maximum values using any
-      value range information we have for op0.  */
-   if (min && max)
-     {
-       if (compare_values (vr->min, min) == 1)
-       min = vr->min;
-       if (compare_values (vr->max, max) == -1)
-       max = vr->max;
-       /* If the new min/max values have converged to a single value,
-        then there is only one value which can satisfy the condition,
-        return that value.  */
-       if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
-       return min;
-     }
-   return NULL;
- }
- /* Return whether the value range *VR fits in an integer type specified
-    by PRECISION and UNSIGNED_P.  */
- static bool
- range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
- {
-   tree src_type;
-   unsigned src_precision;
-   widest_int tem;
-   signop src_sgn;
-   /* We can only handle integral and pointer types.  */
-   src_type = TREE_TYPE (vr->min);
-   if (!INTEGRAL_TYPE_P (src_type)
-       && !POINTER_TYPE_P (src_type))
-     return false;
-   /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
-      and so is an identity transform.  */
-   src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
-   src_sgn = TYPE_SIGN (src_type);
-   if ((src_precision < dest_precision
-        && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
-       || (src_precision == dest_precision && src_sgn == dest_sgn))
-     return true;
-   /* Now we can only handle ranges with constant bounds.  */
-   if (vr->type != VR_RANGE
-       || TREE_CODE (vr->min) != INTEGER_CST
-       || TREE_CODE (vr->max) != INTEGER_CST)
-     return false;
-   /* For sign changes, the MSB of the wide_int has to be clear.
-      An unsigned value with its MSB set cannot be represented by
-      a signed wide_int, while a negative value cannot be represented
-      by an unsigned wide_int.  */
-   if (src_sgn != dest_sgn
-       && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
-     return false;
-   /* Then we can perform the conversion on both ends and compare
-      the result for equality.  */
-   tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
-   if (tem != wi::to_widest (vr->min))
-     return false;
-   tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
-   if (tem != wi::to_widest (vr->max))
-     return false;
-   return true;
- }
- /* Simplify a conditional using a relational operator to an equality
-    test if the range information indicates only one value can satisfy
-    the original conditional.  */
- static bool
- simplify_cond_using_ranges_1 (gcond *stmt)
- {
-   tree op0 = gimple_cond_lhs (stmt);
-   tree op1 = gimple_cond_rhs (stmt);
-   enum tree_code cond_code = gimple_cond_code (stmt);
-   if (cond_code != NE_EXPR
-       && cond_code != EQ_EXPR
-       && TREE_CODE (op0) == SSA_NAME
-       && INTEGRAL_TYPE_P (TREE_TYPE (op0))
-       && is_gimple_min_invariant (op1))
-     {
-       value_range *vr = get_value_range (op0);
-       /* If we have range information for OP0, then we might be
-        able to simplify this conditional. */
-       if (vr->type == VR_RANGE)
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_ANTI_RANGE)
        {
-         tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
-         if (new_tree)
+         /* Choose the right gap if the left one is empty.  */
+         if (mineq)
            {
-             if (dump_file)
-               {
-                 fprintf (dump_file, "Simplified relational ");
-                 print_gimple_stmt (dump_file, stmt, 0);
-                 fprintf (dump_file, " into ");
-               }
-             gimple_cond_set_code (stmt, EQ_EXPR);
-             gimple_cond_set_lhs (stmt, op0);
-             gimple_cond_set_rhs (stmt, new_tree);
-             update_stmt (stmt);
-             if (dump_file)
-               {
-                 print_gimple_stmt (dump_file, stmt, 0);
-                 fprintf (dump_file, "\n");
-               }
-             return true;
+             if (TREE_CODE (vr1max) != INTEGER_CST)
+               *vr0min = vr1max;
+             else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
+                      && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
+               *vr0min
+                 = int_const_binop (MINUS_EXPR, vr1max,
+                                    build_int_cst (TREE_TYPE (vr1max), -1));
+             else
+               *vr0min
+                 = int_const_binop (PLUS_EXPR, vr1max,
+                                    build_int_cst (TREE_TYPE (vr1max), 1));
            }
-         /* Try again after inverting the condition.  We only deal
-            with integral types here, so no need to worry about
-            issues with inverting FP comparisons.  */
-         new_tree = test_for_singularity
-                      (invert_tree_comparison (cond_code, false),
-                       op0, op1, vr);
-         if (new_tree)
+         /* Choose the left gap if the right one is empty.  */
+         else if (maxeq)
            {
-             if (dump_file)
-               {
-                 fprintf (dump_file, "Simplified relational ");
-                 print_gimple_stmt (dump_file, stmt, 0);
-                 fprintf (dump_file, " into ");
-               }
-             gimple_cond_set_code (stmt, NE_EXPR);
-             gimple_cond_set_lhs (stmt, op0);
-             gimple_cond_set_rhs (stmt, new_tree);
-             update_stmt (stmt);
-             if (dump_file)
-               {
-                 print_gimple_stmt (dump_file, stmt, 0);
-                 fprintf (dump_file, "\n");
-               }
-             return true;
+             if (TREE_CODE (vr1min) != INTEGER_CST)
+               *vr0max = vr1min;
+             else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
+                      && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
+               *vr0max
+                 = int_const_binop (PLUS_EXPR, vr1min,
+                                    build_int_cst (TREE_TYPE (vr1min), -1));
+             else
+               *vr0max
+                 = int_const_binop (MINUS_EXPR, vr1min,
+                                    build_int_cst (TREE_TYPE (vr1min), 1));
+           }
+         /* Choose the anti-range if the range is effectively varying.  */
+         else if (vrp_val_is_min (*vr0min)
+                  && vrp_val_is_max (*vr0max))
+           {
+             *vr0type = vr1type;
+             *vr0min = vr1min;
+             *vr0max = vr1max;
            }
+         /* Else choose the range.  */
+       }
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_ANTI_RANGE)
+       /* If both are anti-ranges the result is the outer one.  */
+       ;
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_RANGE)
+       {
+         /* The intersection is empty.  */
+         *vr0type = VR_UNDEFINED;
+         *vr0min = NULL_TREE;
+         *vr0max = NULL_TREE;
        }
+       else
+       gcc_unreachable ();
      }
-   return false;
- }
- /* STMT is a conditional at the end of a basic block.
-    If the conditional is of the form SSA_NAME op constant and the SSA_NAME
-    was set via a type conversion, try to replace the SSA_NAME with the RHS
-    of the type conversion.  Doing so makes the conversion dead which helps
-    subsequent passes.  */
- static void
- simplify_cond_using_ranges_2 (gcond *stmt)
- {
-   tree op0 = gimple_cond_lhs (stmt);
-   tree op1 = gimple_cond_rhs (stmt);
-   /* If we have a comparison of an SSA_NAME (OP0) against a constant,
-      see if OP0 was set by a type conversion where the source of
-      the conversion is another SSA_NAME with a range that fits
-      into the range of OP0's type.
-      If so, the conversion is redundant as the earlier SSA_NAME can be
-      used for the comparison directly if we just massage the constant in the
-      comparison.  */
-   if (TREE_CODE (op0) == SSA_NAME
-       && TREE_CODE (op1) == INTEGER_CST)
+   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
+          && (mineq || operand_less_p (vr1min, *vr0min) == 1))
      {
-       gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
-       tree innerop;
-       if (!is_gimple_assign (def_stmt)
-         || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
-       return;
-       innerop = gimple_assign_rhs1 (def_stmt);
-       if (TREE_CODE (innerop) == SSA_NAME
-         && !POINTER_TYPE_P (TREE_TYPE (innerop))
-         && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
-         && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
+       /* ( [  ] ) or ([  ] ) or ( [  ]) */
+       if (*vr0type == VR_RANGE
+         && vr1type == VR_RANGE)
+       /* Choose the inner range.  */
+       ;
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_RANGE)
        {
-         value_range *vr = get_value_range (innerop);
-         if (range_int_cst_p (vr)
-             && range_fits_type_p (vr,
-                                   TYPE_PRECISION (TREE_TYPE (op0)),
-                                   TYPE_SIGN (TREE_TYPE (op0)))
-             && int_fits_type_p (op1, TREE_TYPE (innerop)))
+         /* Choose the right gap if the left is empty.  */
+         if (mineq)
            {
-             tree newconst = fold_convert (TREE_TYPE (innerop), op1);
-             gimple_cond_set_lhs (stmt, innerop);
-             gimple_cond_set_rhs (stmt, newconst);
-             update_stmt (stmt);
-             if (dump_file && (dump_flags & TDF_DETAILS))
-               {
-                 fprintf (dump_file, "Folded into: ");
-                 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
-                 fprintf (dump_file, "\n");
-               }
+             *vr0type = VR_RANGE;
+             if (TREE_CODE (*vr0max) != INTEGER_CST)
+               *vr0min = *vr0max;
+             else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
+                      && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
+               *vr0min
+                 = int_const_binop (MINUS_EXPR, *vr0max,
+                                    build_int_cst (TREE_TYPE (*vr0max), -1));
+             else
+               *vr0min
+                 = int_const_binop (PLUS_EXPR, *vr0max,
+                                    build_int_cst (TREE_TYPE (*vr0max), 1));
+             *vr0max = vr1max;
+           }
+         /* Choose the left gap if the right is empty.  */
+         else if (maxeq)
+           {
+             *vr0type = VR_RANGE;
+             if (TREE_CODE (*vr0min) != INTEGER_CST)
+               *vr0max = *vr0min;
+             else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
+                      && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
+               *vr0max
+                 = int_const_binop (PLUS_EXPR, *vr0min,
+                                    build_int_cst (TREE_TYPE (*vr0min), -1));
+             else
+               *vr0max
+                 = int_const_binop (MINUS_EXPR, *vr0min,
+                                    build_int_cst (TREE_TYPE (*vr0min), 1));
+             *vr0min = vr1min;
+           }
+         /* Choose the anti-range if the range is effectively varying.  */
+         else if (vrp_val_is_min (vr1min)
+                  && vrp_val_is_max (vr1max))
+           ;
+         /* Choose the anti-range if it is ~[0,0], that range is special
+            enough to special case when vr1's range is relatively wide.
+            At least for types bigger than int - this covers pointers
+            and arguments to functions like ctz.  */
+         else if (*vr0min == *vr0max
+                  && integer_zerop (*vr0min)
+                  && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
+                       >= TYPE_PRECISION (integer_type_node))
+                      || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
+                  && TREE_CODE (vr1max) == INTEGER_CST
+                  && TREE_CODE (vr1min) == INTEGER_CST
+                  && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
+                      < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
+           ;
+         /* Else choose the range.  */
+         else
+           {
+             *vr0type = vr1type;
+             *vr0min = vr1min;
+             *vr0max = vr1max;
            }
        }
-     }
- }
- /* Simplify a switch statement using the value range of the switch
-    argument.  */
- static bool
- simplify_switch_using_ranges (gswitch *stmt)
- {
-   tree op = gimple_switch_index (stmt);
-   value_range *vr = NULL;
-   bool take_default;
-   edge e;
-   edge_iterator ei;
-   size_t i = 0, j = 0, n, n2;
-   tree vec2;
-   switch_update su;
-   size_t k = 1, l = 0;
-   if (TREE_CODE (op) == SSA_NAME)
-     {
-       vr = get_value_range (op);
-       /* We can only handle integer ranges.  */
-       if ((vr->type != VR_RANGE
-          && vr->type != VR_ANTI_RANGE)
-         || symbolic_range_p (vr))
-       return false;
-       /* Find case label for min/max of the value range.  */
-       take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
-     }
-   else if (TREE_CODE (op) == INTEGER_CST)
-     {
-       take_default = !find_case_label_index (stmt, 1, op, &i);
-       if (take_default)
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_ANTI_RANGE)
        {
-         i = 1;
-         j = 0;
+         /* If both are anti-ranges the result is the outer one.  */
+         *vr0type = vr1type;
+         *vr0min = vr1min;
+         *vr0max = vr1max;
        }
-       else
+       else if (vr1type == VR_ANTI_RANGE
+              && *vr0type == VR_RANGE)
        {
-         j = i;
+         /* The intersection is empty.  */
+         *vr0type = VR_UNDEFINED;
+         *vr0min = NULL_TREE;
+         *vr0max = NULL_TREE;
        }
+       else
+       gcc_unreachable ();
      }
-   else
-     return false;
-   n = gimple_switch_num_labels (stmt);
-   /* We can truncate the case label ranges that partially overlap with OP's
-      value range.  */
-   size_t min_idx = 1, max_idx = 0;
-   if (vr != NULL)
-     find_case_label_range (stmt, vr->min, vr->max, &min_idx, &max_idx);
-   if (min_idx <= max_idx)
+   else if ((operand_less_p (vr1min, *vr0max) == 1
+           || operand_equal_p (vr1min, *vr0max, 0))
+          && operand_less_p (*vr0min, vr1min) == 1)
      {
-       tree min_label = gimple_switch_label (stmt, min_idx);
-       tree max_label = gimple_switch_label (stmt, max_idx);
-       /* Avoid changing the type of the case labels when truncating.  */
-       tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
-       tree vr_min = fold_convert (case_label_type, vr->min);
-       tree vr_max = fold_convert (case_label_type, vr->max);
-       if (vr->type == VR_RANGE)
+       /* [  (  ]  ) or [  ](  ) */
+       if (*vr0type == VR_ANTI_RANGE
+         && vr1type == VR_ANTI_RANGE)
+       *vr0max = vr1max;
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_RANGE)
+       *vr0min = vr1min;
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_ANTI_RANGE)
        {
-         /* If OP's value range is [2,8] and the low label range is
-            0 ... 3, truncate the label's range to 2 .. 3.  */
-         if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
-             && CASE_HIGH (min_label) != NULL_TREE
-             && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
-           CASE_LOW (min_label) = vr_min;
-         /* If OP's value range is [2,8] and the high label range is
-            7 ... 10, truncate the label's range to 7 .. 8.  */
-         if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
-             && CASE_HIGH (max_label) != NULL_TREE
-             && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
-           CASE_HIGH (max_label) = vr_max;
+         if (TREE_CODE (vr1min) == INTEGER_CST)
+           *vr0max = int_const_binop (MINUS_EXPR, vr1min,
+                                      build_int_cst (TREE_TYPE (vr1min), 1));
+         else
+           *vr0max = vr1min;
        }
-       else if (vr->type == VR_ANTI_RANGE)
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_RANGE)
        {
-         tree one_cst = build_one_cst (case_label_type);
-         if (min_label == max_label)
-           {
-             /* If OP's value range is ~[7,8] and the label's range is
-                7 ... 10, truncate the label's range to 9 ... 10.  */
-             if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
-                 && CASE_HIGH (min_label) != NULL_TREE
-                 && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
-               CASE_LOW (min_label)
-                 = int_const_binop (PLUS_EXPR, vr_max, one_cst);
-             /* If OP's value range is ~[7,8] and the label's range is
-                5 ... 8, truncate the label's range to 5 ... 6.  */
-             if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
-                 && CASE_HIGH (min_label) != NULL_TREE
-                 && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
-               CASE_HIGH (min_label)
-                 = int_const_binop (MINUS_EXPR, vr_min, one_cst);
-           }
+         *vr0type = VR_RANGE;
+         if (TREE_CODE (*vr0max) == INTEGER_CST)
+           *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
+                                      build_int_cst (TREE_TYPE (*vr0max), 1));
          else
-           {
-             /* If OP's value range is ~[2,8] and the low label range is
-                0 ... 3, truncate the label's range to 0 ... 1.  */
-             if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
-                 && CASE_HIGH (min_label) != NULL_TREE
-                 && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
-               CASE_HIGH (min_label)
-                 = int_const_binop (MINUS_EXPR, vr_min, one_cst);
-             /* If OP's value range is ~[2,8] and the high label range is
-                7 ... 10, truncate the label's range to 9 ... 10.  */
-             if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
-                 && CASE_HIGH (max_label) != NULL_TREE
-                 && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
-               CASE_LOW (max_label)
-                 = int_const_binop (PLUS_EXPR, vr_max, one_cst);
-           }
+           *vr0min = *vr0max;
+         *vr0max = vr1max;
        }
-       /* Canonicalize singleton case ranges.  */
-       if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
-       CASE_HIGH (min_label) = NULL_TREE;
-       if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
-       CASE_HIGH (max_label) = NULL_TREE;
-     }
-   /* We can also eliminate case labels that lie completely outside OP's value
-      range.  */
-   /* Bail out if this is just all edges taken.  */
-   if (i == 1
-       && j == n - 1
-       && take_default)
-     return false;
-   /* Build a new vector of taken case labels.  */
-   vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
-   n2 = 0;
-   /* Add the default edge, if necessary.  */
-   if (take_default)
-     TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
-   for (; i <= j; ++i, ++n2)
-     TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
-   for (; k <= l; ++k, ++n2)
-     TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
-   /* Mark needed edges.  */
-   for (i = 0; i < n2; ++i)
-     {
-       e = find_edge (gimple_bb (stmt),
-                    label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
-       e->aux = (void *)-1;
+       else
+       gcc_unreachable ();
      }
-   /* Queue not needed edges for later removal.  */
-   FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
+   else if ((operand_less_p (*vr0min, vr1max) == 1
+           || operand_equal_p (*vr0min, vr1max, 0))
+          && operand_less_p (vr1min, *vr0min) == 1)
      {
-       if (e->aux == (void *)-1)
+       /* (  [  )  ] or (  )[  ] */
+       if (*vr0type == VR_ANTI_RANGE
+         && vr1type == VR_ANTI_RANGE)
+       *vr0min = vr1min;
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_RANGE)
+       *vr0max = vr1max;
+       else if (*vr0type == VR_RANGE
+              && vr1type == VR_ANTI_RANGE)
        {
-         e->aux = NULL;
-         continue;
+         if (TREE_CODE (vr1max) == INTEGER_CST)
+           *vr0min = int_const_binop (PLUS_EXPR, vr1max,
+                                      build_int_cst (TREE_TYPE (vr1max), 1));
+         else
+           *vr0min = vr1max;
        }
-       if (dump_file && (dump_flags & TDF_DETAILS))
+       else if (*vr0type == VR_ANTI_RANGE
+              && vr1type == VR_RANGE)
        {
-         fprintf (dump_file, "removing unreachable case label\n");
+         *vr0type = VR_RANGE;
+         if (TREE_CODE (*vr0min) == INTEGER_CST)
+           *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
+                                      build_int_cst (TREE_TYPE (*vr0min), 1));
+         else
+           *vr0max = *vr0min;
+         *vr0min = vr1min;
        }
-       to_remove_edges.safe_push (e);
-       e->flags &= ~EDGE_EXECUTABLE;
+       else
+       gcc_unreachable ();
+     }
+   /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
+      result for the intersection.  That's always a conservative
+      correct estimate unless VR1 is a constant singleton range
+      in which case we choose that.  */
+   if (vr1type == VR_RANGE
+       && is_gimple_min_invariant (vr1min)
+       && vrp_operand_equal_p (vr1min, vr1max))
+     {
+       *vr0type = vr1type;
+       *vr0min = vr1min;
+       *vr0max = vr1max;
      }
  
-   /* And queue an update for the stmt.  */
-   su.stmt = stmt;
-   su.vec = vec2;
-   to_update_switch_stmts.safe_push (su);
-   return false;
+   return;
  }
  
- /* Simplify an integral conversion from an SSA name in STMT.  */
- static bool
- simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
- {
-   tree innerop, middleop, finaltype;
-   gimple *def_stmt;
-   signop inner_sgn, middle_sgn, final_sgn;
-   unsigned inner_prec, middle_prec, final_prec;
-   widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
-   finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
-   if (!INTEGRAL_TYPE_P (finaltype))
-     return false;
-   middleop = gimple_assign_rhs1 (stmt);
-   def_stmt = SSA_NAME_DEF_STMT (middleop);
-   if (!is_gimple_assign (def_stmt)
-       || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
-     return false;
-   innerop = gimple_assign_rhs1 (def_stmt);
-   if (TREE_CODE (innerop) != SSA_NAME
-       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
-     return false;
-   /* Get the value-range of the inner operand.  Use get_range_info in
-      case innerop was created during substitute-and-fold.  */
-   wide_int imin, imax;
-   if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
-       || !get_range_info (innerop, &imin, &imax))
-     return false;
-   innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
-   innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));
-   /* Simulate the conversion chain to check if the result is equal if
-      the middle conversion is removed.  */
-   inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
-   middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
-   final_prec = TYPE_PRECISION (finaltype);
-   /* If the first conversion is not injective, the second must not
-      be widening.  */
-   if (wi::gtu_p (innermax - innermin,
-                wi::mask <widest_int> (middle_prec, false))
-       && middle_prec < final_prec)
-     return false;
-   /* We also want a medium value so that we can track the effect that
-      narrowing conversions with sign change have.  */
-   inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
-   if (inner_sgn == UNSIGNED)
-     innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
-   else
-     innermed = 0;
-   if (wi::cmp (innermin, innermed, inner_sgn) >= 0
-       || wi::cmp (innermed, innermax, inner_sgn) >= 0)
-     innermed = innermin;
-   middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
-   middlemin = wi::ext (innermin, middle_prec, middle_sgn);
-   middlemed = wi::ext (innermed, middle_prec, middle_sgn);
-   middlemax = wi::ext (innermax, middle_prec, middle_sgn);
-   /* Require that the final conversion applied to both the original
-      and the intermediate range produces the same result.  */
-   final_sgn = TYPE_SIGN (finaltype);
-   if (wi::ext (middlemin, final_prec, final_sgn)
-        != wi::ext (innermin, final_prec, final_sgn)
-       || wi::ext (middlemed, final_prec, final_sgn)
-        != wi::ext (innermed, final_prec, final_sgn)
-       || wi::ext (middlemax, final_prec, final_sgn)
-        != wi::ext (innermax, final_prec, final_sgn))
-     return false;
-   gimple_assign_set_rhs1 (stmt, innerop);
-   fold_stmt (gsi, follow_single_use_edges);
-   return true;
- }
  
- /* Simplify a conversion from integral SSA name to float in STMT.  */
+ /* Intersect the two value-ranges *VR0 and *VR1 and store the result
+    in *VR0.  This may not be the smallest possible such range.  */
  
- static bool
- simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
-                                       gimple *stmt)
+ static void
+ vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
  {
-   tree rhs1 = gimple_assign_rhs1 (stmt);
-   value_range *vr = get_value_range (rhs1);
-   scalar_float_mode fltmode
-     = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
-   scalar_int_mode mode;
-   tree tem;
-   gassign *conv;
-   /* We can only handle constant ranges.  */
-   if (vr->type != VR_RANGE
-       || TREE_CODE (vr->min) != INTEGER_CST
-       || TREE_CODE (vr->max) != INTEGER_CST)
-     return false;
+   value_range saved;
  
-   /* First check if we can use a signed type in place of an unsigned.  */
-   scalar_int_mode rhs_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (rhs1));
-   if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
-       && can_float_p (fltmode, rhs_mode, 0) != CODE_FOR_nothing
-       && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
-     mode = rhs_mode;
-   /* If we can do the conversion in the current input mode do nothing.  */
-   else if (can_float_p (fltmode, rhs_mode,
-                       TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
-     return false;
-   /* Otherwise search for a mode we can use, starting from the narrowest
-      integer mode available.  */
-   else
+   /* If either range is VR_VARYING the other one wins.  */
+   if (vr1->type == VR_VARYING)
+     return;
+   if (vr0->type == VR_VARYING)
      {
-       mode = NARROWEST_INT_MODE;
-       for (;;)
-       {
-         /* If we cannot do a signed conversion to float from mode
-            or if the value-range does not fit in the signed type
-            try with a wider mode.  */
-         if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
-             && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
-           break;
+       copy_value_range (vr0, vr1);
+       return;
+     }
  
-         /* But do not widen the input.  Instead leave that to the
-            optabs expansion code.  */
-         if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
-             || GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
-           return false;
-       }
+   /* When either range is VR_UNDEFINED the resulting range is
+      VR_UNDEFINED, too.  */
+   if (vr0->type == VR_UNDEFINED)
+     return;
+   if (vr1->type == VR_UNDEFINED)
+     {
+       set_value_range_to_undefined (vr0);
+       return;
      }
  
-   /* It works, insert a truncation or sign-change before the
-      float conversion.  */
-   tem = make_ssa_name (build_nonstandard_integer_type
-                         (GET_MODE_PRECISION (mode), 0));
-   conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
-   gsi_insert_before (gsi, conv, GSI_SAME_STMT);
-   gimple_assign_set_rhs1 (stmt, tem);
-   fold_stmt (gsi, follow_single_use_edges);
+   /* Save the original vr0 so we can return it as conservative intersection
+      result when our worker turns things to varying.  */
+   saved = *vr0;
+   intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
+                   vr1->type, vr1->min, vr1->max);
+   /* Make sure to canonicalize the result though as the inversion of a
+      VR_RANGE can still be a VR_RANGE.  */
+   set_and_canonicalize_value_range (vr0, vr0->type,
+                                   vr0->min, vr0->max, vr0->equiv);
+   /* If that failed, use the saved original VR0.  */
+   if (vr0->type == VR_VARYING)
+     {
+       *vr0 = saved;
+       return;
+     }
+   /* If the result is VR_UNDEFINED there is no need to mess with
+      the equivalencies.  */
+   if (vr0->type == VR_UNDEFINED)
+     return;
  
-   return true;
+   /* The resulting set of equivalences for range intersection is the union of
+      the two sets.  */
+   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
+     bitmap_ior_into (vr0->equiv, vr1->equiv);
+   else if (vr1->equiv && !vr0->equiv)
+     {
+       /* All equivalence bitmaps are allocated from the same obstack.  So
+        we can use the obstack associated with VR to allocate vr0->equiv.  */
+       vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
+       bitmap_copy (vr0->equiv, vr1->equiv);
+     }
  }
  
- /* Simplify an internal fn call using ranges if possible.  */
- static bool
- simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
+ void
+ vrp_intersect_ranges (value_range *vr0, value_range *vr1)
  {
-   enum tree_code subcode;
-   bool is_ubsan = false;
-   bool ovf = false;
-   switch (gimple_call_internal_fn (stmt))
-     {
-     case IFN_UBSAN_CHECK_ADD:
-       subcode = PLUS_EXPR;
-       is_ubsan = true;
-       break;
-     case IFN_UBSAN_CHECK_SUB:
-       subcode = MINUS_EXPR;
-       is_ubsan = true;
-       break;
-     case IFN_UBSAN_CHECK_MUL:
-       subcode = MULT_EXPR;
-       is_ubsan = true;
-       break;
-     case IFN_ADD_OVERFLOW:
-       subcode = PLUS_EXPR;
-       break;
-     case IFN_SUB_OVERFLOW:
-       subcode = MINUS_EXPR;
-       break;
-     case IFN_MUL_OVERFLOW:
-       subcode = MULT_EXPR;
-       break;
-     default:
-       return false;
-     }
-   tree op0 = gimple_call_arg (stmt, 0);
-   tree op1 = gimple_call_arg (stmt, 1);
-   tree type;
-   if (is_ubsan)
+   if (dump_file && (dump_flags & TDF_DETAILS))
      {
-       type = TREE_TYPE (op0);
-       if (VECTOR_TYPE_P (type))
-       return false;
+       fprintf (dump_file, "Intersecting\n  ");
+       dump_value_range (dump_file, vr0);
+       fprintf (dump_file, "\nand\n  ");
+       dump_value_range (dump_file, vr1);
+       fprintf (dump_file, "\n");
      }
-   else if (gimple_call_lhs (stmt) == NULL_TREE)
-     return false;
-   else
-     type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
-   if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
-       || (is_ubsan && ovf))
-     return false;
-   gimple *g;
-   location_t loc = gimple_location (stmt);
-   if (is_ubsan)
-     g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
-   else
+   vrp_intersect_ranges_1 (vr0, vr1);
+   if (dump_file && (dump_flags & TDF_DETAILS))
      {
-       int prec = TYPE_PRECISION (type);
-       tree utype = type;
-       if (ovf
-         || !useless_type_conversion_p (type, TREE_TYPE (op0))
-         || !useless_type_conversion_p (type, TREE_TYPE (op1)))
-       utype = build_nonstandard_integer_type (prec, 1);
-       if (TREE_CODE (op0) == INTEGER_CST)
-       op0 = fold_convert (utype, op0);
-       else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
-       {
-         g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
-         gimple_set_location (g, loc);
-         gsi_insert_before (gsi, g, GSI_SAME_STMT);
-         op0 = gimple_assign_lhs (g);
-       }
-       if (TREE_CODE (op1) == INTEGER_CST)
-       op1 = fold_convert (utype, op1);
-       else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
-       {
-         g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
-         gimple_set_location (g, loc);
-         gsi_insert_before (gsi, g, GSI_SAME_STMT);
-         op1 = gimple_assign_lhs (g);
-       }
-       g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
-       gimple_set_location (g, loc);
-       gsi_insert_before (gsi, g, GSI_SAME_STMT);
-       if (utype != type)
-       {
-         g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
-                                  gimple_assign_lhs (g));
-         gimple_set_location (g, loc);
-         gsi_insert_before (gsi, g, GSI_SAME_STMT);
-       }
-       g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
-                              gimple_assign_lhs (g),
-                              build_int_cst (type, ovf));
-     }
-   gimple_set_location (g, loc);
-   gsi_replace (gsi, g, false);
-   return true;
+       fprintf (dump_file, "to\n  ");
+       dump_value_range (dump_file, vr0);
+       fprintf (dump_file, "\n");
+     }
  }
  
- /* Return true if VAR is a two-valued variable.  Set a and b with the
-    two-values when it is true.  Return false otherwise.  */
+ /* Meet operation for value ranges.  Given two value ranges VR0 and
+    VR1, store in VR0 a range that contains both VR0 and VR1.  This
+    may not be the smallest possible such range.  */
  
- static bool
two_valued_val_range_p (tree var, tree *a, tree *b)
+ static void
vrp_meet_1 (value_range *vr0, const value_range *vr1)
  {
-   value_range *vr = get_value_range (var);
-   if ((vr->type != VR_RANGE
-        && vr->type != VR_ANTI_RANGE)
-       || TREE_CODE (vr->min) != INTEGER_CST
-       || TREE_CODE (vr->max) != INTEGER_CST)
-     return false;
+   value_range saved;
  
-   if (vr->type == VR_RANGE
-       && wi::sub (vr->max, vr->min) == 1)
+   if (vr0->type == VR_UNDEFINED)
      {
-       *a = vr->min;
-       *b = vr->max;
-       return true;
+       set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
+       return;
      }
  
-   /* ~[TYPE_MIN + 1, TYPE_MAX - 1] */
-   if (vr->type == VR_ANTI_RANGE
-       && wi::sub (vr->min, vrp_val_min (TREE_TYPE (var))) == 1
-       && wi::sub (vrp_val_max (TREE_TYPE (var)), vr->max) == 1)
+   if (vr1->type == VR_UNDEFINED)
      {
-       *a = vrp_val_min (TREE_TYPE (var));
-       *b = vrp_val_max (TREE_TYPE (var));
-       return true;
+       /* VR0 already has the resulting range.  */
+       return;
      }
  
-   return false;
- }
- /* Simplify STMT using ranges if possible.  */
+   if (vr0->type == VR_VARYING)
+     {
+       /* Nothing to do.  VR0 already has the resulting range.  */
+       return;
+     }
  
- static bool
- simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
- {
-   gimple *stmt = gsi_stmt (*gsi);
-   if (is_gimple_assign (stmt))
-     {
-       enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
-       tree rhs1 = gimple_assign_rhs1 (stmt);
-       tree rhs2 = gimple_assign_rhs2 (stmt);
-       tree lhs = gimple_assign_lhs (stmt);
-       tree val1 = NULL_TREE, val2 = NULL_TREE;
-       use_operand_p use_p;
-       gimple *use_stmt;
+   if (vr1->type == VR_VARYING)
+     {
+       set_value_range_to_varying (vr0);
+       return;
+     }
  
-       /* Convert:
-        LHS = CST BINOP VAR
-        Where VAR is two-valued and LHS is used in GIMPLE_COND only
-        To:
-        LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)
-        Also handles:
-        LHS = VAR BINOP CST
-        Where VAR is two-valued and LHS is used in GIMPLE_COND only
-        To:
-        LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */
-       if (TREE_CODE_CLASS (rhs_code) == tcc_binary
-         && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
-         && ((TREE_CODE (rhs1) == INTEGER_CST
-              && TREE_CODE (rhs2) == SSA_NAME)
-             || (TREE_CODE (rhs2) == INTEGER_CST
-                 && TREE_CODE (rhs1) == SSA_NAME))
-         && single_imm_use (lhs, &use_p, &use_stmt)
-         && gimple_code (use_stmt) == GIMPLE_COND)
-       {
-         tree new_rhs1 = NULL_TREE;
-         tree new_rhs2 = NULL_TREE;
-         tree cmp_var = NULL_TREE;
-         if (TREE_CODE (rhs2) == SSA_NAME
-             && two_valued_val_range_p (rhs2, &val1, &val2))
-           {
-             /* Optimize RHS1 OP [VAL1, VAL2].  */
-             new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
-             new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
-             cmp_var = rhs2;
-           }
-         else if (TREE_CODE (rhs1) == SSA_NAME
-                  && two_valued_val_range_p (rhs1, &val1, &val2))
-           {
-             /* Optimize [VAL1, VAL2] OP RHS2.  */
-             new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
-             new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
-             cmp_var = rhs1;
-           }
+   saved = *vr0;
+   union_ranges (&vr0->type, &vr0->min, &vr0->max,
+               vr1->type, vr1->min, vr1->max);
+   if (vr0->type == VR_VARYING)
+     {
+       /* Failed to find an efficient meet.  Before giving up and setting
+        the result to VARYING, see if we can at least derive a useful
+        anti-range.  FIXME, all this nonsense about distinguishing
+        anti-ranges from ranges is necessary because of the odd
+        semantics of range_includes_zero_p and friends.  */
+       if (((saved.type == VR_RANGE
+           && range_includes_zero_p (saved.min, saved.max) == 0)
+          || (saved.type == VR_ANTI_RANGE
+              && range_includes_zero_p (saved.min, saved.max) == 1))
+         && ((vr1->type == VR_RANGE
+              && range_includes_zero_p (vr1->min, vr1->max) == 0)
+             || (vr1->type == VR_ANTI_RANGE
+                 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
+       {
+         set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
  
-         /* If we could not find two-vals or the optimzation is invalid as
-            in divide by zero, new_rhs1 / new_rhs will be NULL_TREE.  */
-         if (new_rhs1 && new_rhs2)
-           {
-             tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
-             gimple_assign_set_rhs_with_ops (gsi,
-                                             COND_EXPR, cond,
-                                             new_rhs1,
-                                             new_rhs2);
-             update_stmt (gsi_stmt (*gsi));
-             fold_stmt (gsi, follow_single_use_edges);
-             return true;
-           }
+         /* Since this meet operation did not result from the meeting of
+            two equivalent names, VR0 cannot have any equivalences.  */
+         if (vr0->equiv)
+           bitmap_clear (vr0->equiv);
+         return;
        }
  
-       switch (rhs_code)
-       {
-       case EQ_EXPR:
-       case NE_EXPR:
-           /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
-            if the RHS is zero or one, and the LHS are known to be boolean
-            values.  */
-         if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
-           return simplify_truth_ops_using_ranges (gsi, stmt);
-         break;
+       set_value_range_to_varying (vr0);
+       return;
+     }
+   set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
+                                   vr0->equiv);
+   if (vr0->type == VR_VARYING)
+     return;
  
-       /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
-        and BIT_AND_EXPR respectively if the first operand is greater
-        than zero and the second operand is an exact power of two.
-        Also optimize TRUNC_MOD_EXPR away if the second operand is
-        constant and the first operand already has the right value
-        range.  */
-       case TRUNC_DIV_EXPR:
-       case TRUNC_MOD_EXPR:
-         if ((TREE_CODE (rhs1) == SSA_NAME
-              || TREE_CODE (rhs1) == INTEGER_CST)
-             && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
-           return simplify_div_or_mod_using_ranges (gsi, stmt);
-         break;
+   /* The resulting set of equivalences is always the intersection of
+      the two sets.  */
+   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
+     bitmap_and_into (vr0->equiv, vr1->equiv);
+   else if (vr0->equiv && !vr1->equiv)
+     bitmap_clear (vr0->equiv);
+ }
  
-       /* Transform ABS (X) into X or -X as appropriate.  */
-       case ABS_EXPR:
-         if (TREE_CODE (rhs1) == SSA_NAME
-             && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
-           return simplify_abs_using_ranges (gsi, stmt);
-         break;
+ void
+ vrp_meet (value_range *vr0, const value_range *vr1)
+ {
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "Meeting\n  ");
+       dump_value_range (dump_file, vr0);
+       fprintf (dump_file, "\nand\n  ");
+       dump_value_range (dump_file, vr1);
+       fprintf (dump_file, "\n");
+     }
+   vrp_meet_1 (vr0, vr1);
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "to\n  ");
+       dump_value_range (dump_file, vr0);
+       fprintf (dump_file, "\n");
+     }
+ }
  
-       case BIT_AND_EXPR:
-       case BIT_IOR_EXPR:
-         /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
-            if all the bits being cleared are already cleared or
-            all the bits being set are already set.  */
-         if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
-           return simplify_bit_ops_using_ranges (gsi, stmt);
-         break;
  
-       CASE_CONVERT:
-         if (TREE_CODE (rhs1) == SSA_NAME
-             && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
-           return simplify_conversion_using_ranges (gsi, stmt);
-         break;
+ /* Visit all arguments for PHI node PHI that flow through executable
+    edges.  If a valid value range can be derived from all the incoming
+    value ranges, set a new range for the LHS of PHI.  */
  
-       case FLOAT_EXPR:
-         if (TREE_CODE (rhs1) == SSA_NAME
-             && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
-           return simplify_float_conversion_using_ranges (gsi, stmt);
-         break;
+ enum ssa_prop_result
+ vrp_prop::visit_phi (gphi *phi)
+ {
+   tree lhs = PHI_RESULT (phi);
+   value_range vr_result = VR_INITIALIZER;
+   extract_range_from_phi_node (phi, &vr_result);
+   if (update_value_range (lhs, &vr_result))
+     {
+       if (dump_file && (dump_flags & TDF_DETAILS))
+       {
+         fprintf (dump_file, "Found new range for ");
+         print_generic_expr (dump_file, lhs);
+         fprintf (dump_file, ": ");
+         dump_value_range (dump_file, &vr_result);
+         fprintf (dump_file, "\n");
+       }
  
-       case MIN_EXPR:
-       case MAX_EXPR:
-         return simplify_min_or_max_using_ranges (gsi, stmt);
+       if (vr_result.type == VR_VARYING)
+       return SSA_PROP_VARYING;
  
-       default:
-         break;
-       }
+       return SSA_PROP_INTERESTING;
      }
-   else if (gimple_code (stmt) == GIMPLE_COND)
-     return simplify_cond_using_ranges_1 (as_a <gcond *> (stmt));
-   else if (gimple_code (stmt) == GIMPLE_SWITCH)
-     return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
-   else if (is_gimple_call (stmt)
-          && gimple_call_internal_p (stmt))
-     return simplify_internal_call_using_ranges (gsi, stmt);
  
-   return false;
+   /* Nothing changed, don't add outgoing edges.  */
+   return SSA_PROP_NOT_INTERESTING;
  }
  
+ class vrp_folder : public substitute_and_fold_engine
+ {
+  public:
+   tree get_value (tree) FINAL OVERRIDE;
+   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
+   bool fold_predicate_in (gimple_stmt_iterator *);
+   class vr_values *vr_values;
+   /* Delegators.  */
+   tree vrp_evaluate_conditional (tree_code code, tree op0,
+                                tree op1, gimple *stmt)
+     { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
+   bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
+     { return vr_values->simplify_stmt_using_ranges (gsi); }
+  tree op_with_constant_singleton_value_range (tree op)
+     { return vr_values->op_with_constant_singleton_value_range (op); }
+ };
  /* If the statement pointed by SI has a predicate whose value can be
     computed using the value range information computed by VRP, compute
     its value and return true.  Otherwise, return false.  */
diff --cc gcc/tree-vrp.h
index 8673e33567e17bee063e0c5eae69ec3c1c06e2ec,d8f60be231122cb2f4b80ed4224f1903e6b405c4..d295f1d508cd977fc34260c8ace628269fe2b4e4
@@@ -60,6 -60,73 +60,77 @@@ extern void extract_range_from_unary_ex
                                           value_range *vr0_,
                                           tree op0_type);
  
-                                                    wide_int *min, wide_int *max);
 +enum value_range_type get_range_info_as_value_range (const_tree ssa,
++                                                   wide_int *min,
++                                                   wide_int *max);
++
+ extern bool vrp_operand_equal_p (const_tree, const_tree);
+ extern enum value_range_type intersect_range_with_nonzero_bits
+   (enum value_range_type, wide_int *, wide_int *, const wide_int &, signop);
+ struct assert_info
+ {
+   /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
+   enum tree_code comp_code;
+   /* Name to register the assert for.  */
+   tree name;
+   /* Value being compared against.  */
+   tree val;
+   /* Expression to compare.  */
+   tree expr;
+ };
+ extern void register_edge_assert_for (tree, edge, enum tree_code,
+                                     tree, tree, vec<assert_info> &);
+ extern bool stmt_interesting_for_vrp (gimple *);
+ extern void set_value_range_to_varying (value_range *);
+ extern int range_includes_zero_p (tree, tree);
+ extern bool infer_value_range (gimple *, tree, tree_code *, tree *);
+ extern void set_value_range_to_nonnull (value_range *, tree);
+ extern void set_value_range (value_range *, enum value_range_type, tree,
+                            tree, bitmap);
+ extern void set_and_canonicalize_value_range (value_range *,
+                                             enum value_range_type,
+                                             tree, tree, bitmap);
+ extern bool vrp_bitmap_equal_p (const_bitmap, const_bitmap);
+ extern bool range_is_nonnull (value_range *);
+ extern tree value_range_constant_singleton (value_range *);
+ extern bool symbolic_range_p (value_range *);
+ extern int compare_values (tree, tree);
+ extern int compare_values_warnv (tree, tree, bool *);
+ extern bool vrp_val_is_min (const_tree);
+ extern bool vrp_val_is_max (const_tree);
+ extern void copy_value_range (value_range *, value_range *);
+ extern void set_value_range_to_value (value_range *, tree, bitmap);
+ extern void extract_range_from_binary_expr_1 (value_range *, enum tree_code,
+                                             tree, value_range *,
+                                             value_range *);
+ extern tree vrp_val_min (const_tree);
+ extern tree vrp_val_max (const_tree);
+ extern void set_value_range_to_null (value_range *, tree);
+ extern bool range_int_cst_p (value_range *);
+ extern int operand_less_p (tree, tree);
+ extern bool find_case_label_range (gswitch *, tree, tree, size_t *, size_t *);
+ extern bool find_case_label_index (gswitch *, size_t, tree, size_t *);
+ extern bool zero_nonzero_bits_from_vr (const tree, value_range *,
+                                      wide_int *, wide_int *);
+ extern bool overflow_comparison_p (tree_code, tree, tree, bool, tree *);
+ extern bool range_int_cst_singleton_p (value_range *);
+ extern int value_inside_range (tree, tree, tree);
+ extern tree get_single_symbol (tree, bool *, tree *);
+ extern void maybe_set_nonzero_bits (edge, tree);
+ struct switch_update {
+   gswitch *stmt;
+   tree vec;
+ };
+ extern vec<edge> to_remove_edges;
+ extern vec<switch_update> to_update_switch_stmts;
  #endif /* GCC_TREE_VRP_H */
diff --cc gcc/tree.c
Simple merge
diff --cc gcc/tree.h
Simple merge
diff --cc gcc/vr-values.c
index 0000000000000000000000000000000000000000,b1f587d2b924f955c98f7b9d515450d7c0e04923..34d3353792281ed565c22b3858242d7c661997b3
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,4220 +1,4221 @@@
 -            value_range_type rtype = get_range_info (var, &min, &max);
+ /* Support routines for Value Range Propagation (VRP).
+    Copyright (C) 2005-2018 Free Software Foundation, Inc.
+ This file is part of GCC.
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3.  If not see
+ <http://www.gnu.org/licenses/>.  */
+ #include "config.h"
+ #include "system.h"
+ #include "coretypes.h"
+ #include "backend.h"
+ #include "insn-codes.h"
+ #include "tree.h"
+ #include "gimple.h"
+ #include "ssa.h"
+ #include "optabs-tree.h"
+ #include "gimple-pretty-print.h"
+ #include "diagnostic-core.h"
+ #include "flags.h"
+ #include "fold-const.h"
+ #include "calls.h"
+ #include "cfganal.h"
+ #include "gimple-fold.h"
+ #include "gimple-iterator.h"
+ #include "tree-cfg.h"
+ #include "tree-ssa-loop-niter.h"
+ #include "tree-ssa-loop.h"
+ #include "intl.h"
+ #include "cfgloop.h"
+ #include "tree-scalar-evolution.h"
+ #include "tree-ssa-propagate.h"
+ #include "tree-chrec.h"
+ #include "omp-general.h"
+ #include "case-cfn-macros.h"
+ #include "alloc-pool.h"
+ #include "attribs.h"
+ #include "vr-values.h"
+ /* Set value range VR to a non-negative range of type TYPE.  */
+ static inline void
+ set_value_range_to_nonnegative (value_range *vr, tree type)
+ {
+   tree zero = build_int_cst (type, 0);
+   set_value_range (vr, VR_RANGE, zero, vrp_val_max (type), vr->equiv);
+ }
+ /* Set value range VR to a range of a truthvalue of type TYPE.  */
+ static inline void
+ set_value_range_to_truthvalue (value_range *vr, tree type)
+ {
+   if (TYPE_PRECISION (type) == 1)
+     set_value_range_to_varying (vr);
+   else
+     set_value_range (vr, VR_RANGE,
+                    build_int_cst (type, 0), build_int_cst (type, 1),
+                    vr->equiv);
+ }
+ /* Return value range information for VAR.
+    If we have no values ranges recorded (ie, VRP is not running), then
+    return NULL.  Otherwise create an empty range if none existed for VAR.  */
+ value_range *
+ vr_values::get_value_range (const_tree var)
+ {
+   static const value_range vr_const_varying
+     = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
+   value_range *vr;
+   tree sym;
+   unsigned ver = SSA_NAME_VERSION (var);
+   /* If we have no recorded ranges, then return NULL.  */
+   if (! vr_value)
+     return NULL;
+   /* If we query the range for a new SSA name return an unmodifiable VARYING.
+      We should get here at most from the substitute-and-fold stage which
+      will never try to change values.  */
+   if (ver >= num_vr_values)
+     return CONST_CAST (value_range *, &vr_const_varying);
+   vr = vr_value[ver];
+   if (vr)
+     return vr;
+   /* After propagation finished do not allocate new value-ranges.  */
+   if (values_propagated)
+     return CONST_CAST (value_range *, &vr_const_varying);
+   /* Create a default value range.  */
+   vr_value[ver] = vr = vrp_value_range_pool.allocate ();
+   memset (vr, 0, sizeof (*vr));
+   /* Defer allocating the equivalence set.  */
+   vr->equiv = NULL;
+   /* If VAR is a default definition of a parameter, the variable can
+      take any value in VAR's type.  */
+   if (SSA_NAME_IS_DEFAULT_DEF (var))
+     {
+       sym = SSA_NAME_VAR (var);
+       if (TREE_CODE (sym) == PARM_DECL)
+       {
+         /* Try to use the "nonnull" attribute to create ~[0, 0]
+            anti-ranges for pointers.  Note that this is only valid with
+            default definitions of PARM_DECLs.  */
+         if (POINTER_TYPE_P (TREE_TYPE (sym))
+             && (nonnull_arg_p (sym)
+                 || get_ptr_nonnull (var)))
+           set_value_range_to_nonnull (vr, TREE_TYPE (sym));
+         else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
+           {
+             wide_int min, max;
 -      value_range_type rtype = get_range_info (var, &min, &max);
++            value_range_type rtype
++              = get_range_info_as_value_range (var, &min, &max);
+             if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
+               set_value_range (vr, rtype,
+                                wide_int_to_tree (TREE_TYPE (var), min),
+                                wide_int_to_tree (TREE_TYPE (var), max),
+                                NULL);
+             else
+               set_value_range_to_varying (vr);
+           }
+         else
+           set_value_range_to_varying (vr);
+       }
+       else if (TREE_CODE (sym) == RESULT_DECL
+              && DECL_BY_REFERENCE (sym))
+       set_value_range_to_nonnull (vr, TREE_TYPE (sym));
+     }
+   return vr;
+ }
+ /* Set value-ranges of all SSA names defined by STMT to varying.  */
+ void
+ vr_values::set_defs_to_varying (gimple *stmt)
+ {
+   ssa_op_iter i;
+   tree def;
+   FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
+     {
+       value_range *vr = get_value_range (def);
+       /* Avoid writing to vr_const_varying get_value_range may return.  */
+       if (vr->type != VR_VARYING)
+       set_value_range_to_varying (vr);
+     }
+ }
+ /* Update the value range and equivalence set for variable VAR to
+    NEW_VR.  Return true if NEW_VR is different from VAR's previous
+    value.
+    NOTE: This function assumes that NEW_VR is a temporary value range
+    object created for the sole purpose of updating VAR's range.  The
+    storage used by the equivalence set from NEW_VR will be freed by
+    this function.  Do not call update_value_range when NEW_VR
+    is the range object associated with another SSA name.  */
+ bool
+ vr_values::update_value_range (const_tree var, value_range *new_vr)
+ {
+   value_range *old_vr;
+   bool is_new;
+   /* If there is a value-range on the SSA name from earlier analysis
+      factor that in.  */
+   if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
+     {
+       wide_int min, max;
 -      || get_range_info (innerop, &imin, &imax) != VR_RANGE)
++      value_range_type rtype = get_range_info_as_value_range (var, &min, &max);
+       if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
+       {
+         tree nr_min, nr_max;
+         nr_min = wide_int_to_tree (TREE_TYPE (var), min);
+         nr_max = wide_int_to_tree (TREE_TYPE (var), max);
+         value_range nr = VR_INITIALIZER;
+         set_and_canonicalize_value_range (&nr, rtype, nr_min, nr_max, NULL);
+         vrp_intersect_ranges (new_vr, &nr);
+       }
+     }
+   /* Update the value range, if necessary.  */
+   old_vr = get_value_range (var);
+   is_new = old_vr->type != new_vr->type
+          || !vrp_operand_equal_p (old_vr->min, new_vr->min)
+          || !vrp_operand_equal_p (old_vr->max, new_vr->max)
+          || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
+   if (is_new)
+     {
+       /* Do not allow transitions up the lattice.  The following
+        is slightly more awkward than just new_vr->type < old_vr->type
+        because VR_RANGE and VR_ANTI_RANGE need to be considered
+        the same.  We may not have is_new when transitioning to
+        UNDEFINED.  If old_vr->type is VARYING, we shouldn't be
+        called.  */
+       if (new_vr->type == VR_UNDEFINED)
+       {
+         BITMAP_FREE (new_vr->equiv);
+         set_value_range_to_varying (old_vr);
+         set_value_range_to_varying (new_vr);
+         return true;
+       }
+       else
+       set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
+                        new_vr->equiv);
+     }
+   BITMAP_FREE (new_vr->equiv);
+   return is_new;
+ }
+ /* Add VAR and VAR's equivalence set to EQUIV.  This is the central
+    point where equivalence processing can be turned on/off.  */
+ void
+ vr_values::add_equivalence (bitmap *equiv, const_tree var)
+ {
+   unsigned ver = SSA_NAME_VERSION (var);
+   value_range *vr = get_value_range (var);
+   if (*equiv == NULL)
+     *equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
+   bitmap_set_bit (*equiv, ver);
+   if (vr && vr->equiv)
+     bitmap_ior_into (*equiv, vr->equiv);
+ }
+ /* Return true if value range VR involves exactly one symbol SYM.  */
+ static bool
+ symbolic_range_based_on_p (value_range *vr, const_tree sym)
+ {
+   bool neg, min_has_symbol, max_has_symbol;
+   tree inv;
+   if (is_gimple_min_invariant (vr->min))
+     min_has_symbol = false;
+   else if (get_single_symbol (vr->min, &neg, &inv) == sym)
+     min_has_symbol = true;
+   else
+     return false;
+   if (is_gimple_min_invariant (vr->max))
+     max_has_symbol = false;
+   else if (get_single_symbol (vr->max, &neg, &inv) == sym)
+     max_has_symbol = true;
+   else
+     return false;
+   return (min_has_symbol || max_has_symbol);
+ }
+ /* Return true if the result of assignment STMT is know to be non-zero.  */
+ static bool
+ gimple_assign_nonzero_p (gimple *stmt)
+ {
+   enum tree_code code = gimple_assign_rhs_code (stmt);
+   bool strict_overflow_p;
+   switch (get_gimple_rhs_class (code))
+     {
+     case GIMPLE_UNARY_RHS:
+       return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
+                                        gimple_expr_type (stmt),
+                                        gimple_assign_rhs1 (stmt),
+                                        &strict_overflow_p);
+     case GIMPLE_BINARY_RHS:
+       return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
+                                         gimple_expr_type (stmt),
+                                         gimple_assign_rhs1 (stmt),
+                                         gimple_assign_rhs2 (stmt),
+                                         &strict_overflow_p);
+     case GIMPLE_TERNARY_RHS:
+       return false;
+     case GIMPLE_SINGLE_RHS:
+       return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
+                                         &strict_overflow_p);
+     case GIMPLE_INVALID_RHS:
+       gcc_unreachable ();
+     default:
+       gcc_unreachable ();
+     }
+ }
+ /* Return true if STMT is known to compute a non-zero value.  */
+ static bool
+ gimple_stmt_nonzero_p (gimple *stmt)
+ {
+   switch (gimple_code (stmt))
+     {
+     case GIMPLE_ASSIGN:
+       return gimple_assign_nonzero_p (stmt);
+     case GIMPLE_CALL:
+       {
+       tree fndecl = gimple_call_fndecl (stmt);
+       if (!fndecl) return false;
+       if (flag_delete_null_pointer_checks && !flag_check_new
+           && DECL_IS_OPERATOR_NEW (fndecl)
+           && !TREE_NOTHROW (fndecl))
+         return true;
+       /* References are always non-NULL.  */
+       if (flag_delete_null_pointer_checks
+           && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
+         return true;
+       if (flag_delete_null_pointer_checks && 
+           lookup_attribute ("returns_nonnull",
+                             TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
+         return true;
+       gcall *call_stmt = as_a<gcall *> (stmt);
+       unsigned rf = gimple_call_return_flags (call_stmt);
+       if (rf & ERF_RETURNS_ARG)
+         {
+           unsigned argnum = rf & ERF_RETURN_ARG_MASK;
+           if (argnum < gimple_call_num_args (call_stmt))
+             {
+               tree arg = gimple_call_arg (call_stmt, argnum);
+               if (SSA_VAR_P (arg)
+                   && infer_nonnull_range_by_attribute (stmt, arg))
+                 return true;
+             }
+         }
+       return gimple_alloca_call_p (stmt);
+       }
+     default:
+       gcc_unreachable ();
+     }
+ }
+ /* Like tree_expr_nonzero_p, but this function uses value ranges
+    obtained so far.  */
+ bool
+ vr_values::vrp_stmt_computes_nonzero (gimple *stmt)
+ {
+   if (gimple_stmt_nonzero_p (stmt))
+     return true;
+   /* If we have an expression of the form &X->a, then the expression
+      is nonnull if X is nonnull.  */
+   if (is_gimple_assign (stmt)
+       && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
+     {
+       tree expr = gimple_assign_rhs1 (stmt);
+       tree base = get_base_address (TREE_OPERAND (expr, 0));
+       if (base != NULL_TREE
+         && TREE_CODE (base) == MEM_REF
+         && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
+       {
+         value_range *vr = get_value_range (TREE_OPERAND (base, 0));
+         if (range_is_nonnull (vr))
+           return true;
+       }
+     }
+   return false;
+ }
+ /* Returns true if EXPR is a valid value (as expected by compare_values) --
+    a gimple invariant, or SSA_NAME +- CST.  */
+ static bool
+ valid_value_p (tree expr)
+ {
+   if (TREE_CODE (expr) == SSA_NAME)
+     return true;
+   if (TREE_CODE (expr) == PLUS_EXPR
+       || TREE_CODE (expr) == MINUS_EXPR)
+     return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
+           && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
+   return is_gimple_min_invariant (expr);
+ }
+ /* If OP has a value range with a single constant value return that,
+    otherwise return NULL_TREE.  This returns OP itself if OP is a
+    constant.  */
+ tree
+ vr_values::op_with_constant_singleton_value_range (tree op)
+ {
+   if (is_gimple_min_invariant (op))
+     return op;
+   if (TREE_CODE (op) != SSA_NAME)
+     return NULL_TREE;
+   return value_range_constant_singleton (get_value_range (op));
+ }
+ /* Return true if op is in a boolean [0, 1] value-range.  */
+ bool
+ vr_values::op_with_boolean_value_range_p (tree op)
+ {
+   value_range *vr;
+   if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
+     return true;
+   if (integer_zerop (op)
+       || integer_onep (op))
+     return true;
+   if (TREE_CODE (op) != SSA_NAME)
+     return false;
+   vr = get_value_range (op);
+   return (vr->type == VR_RANGE
+         && integer_zerop (vr->min)
+         && integer_onep (vr->max));
+ }
+ /* Extract value range information for VAR when (OP COND_CODE LIMIT) is
+    true and store it in *VR_P.  */
+ void
+ vr_values::extract_range_for_var_from_comparison_expr (tree var,
+                                                      enum tree_code cond_code,
+                                                      tree op, tree limit,
+                                                      value_range *vr_p)
+ {
+   tree  min, max, type;
+   value_range *limit_vr;
+   type = TREE_TYPE (var);
+   /* For pointer arithmetic, we only keep track of pointer equality
+      and inequality.  If we arrive here with unfolded conditions like
+      _1 > _1 do not derive anything.  */
+   if ((POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
+       || limit == var)
+     {
+       set_value_range_to_varying (vr_p);
+       return;
+     }
+   /* If LIMIT is another SSA name and LIMIT has a range of its own,
+      try to use LIMIT's range to avoid creating symbolic ranges
+      unnecessarily. */
+   limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
+   /* LIMIT's range is only interesting if it has any useful information.  */
+   if (! limit_vr
+       || limit_vr->type == VR_UNDEFINED
+       || limit_vr->type == VR_VARYING
+       || (symbolic_range_p (limit_vr)
+         && ! (limit_vr->type == VR_RANGE
+               && (limit_vr->min == limit_vr->max
+                   || operand_equal_p (limit_vr->min, limit_vr->max, 0)))))
+     limit_vr = NULL;
+   /* Initially, the new range has the same set of equivalences of
+      VAR's range.  This will be revised before returning the final
+      value.  Since assertions may be chained via mutually exclusive
+      predicates, we will need to trim the set of equivalences before
+      we are done.  */
+   gcc_assert (vr_p->equiv == NULL);
+   add_equivalence (&vr_p->equiv, var);
+   /* Extract a new range based on the asserted comparison for VAR and
+      LIMIT's value range.  Notice that if LIMIT has an anti-range, we
+      will only use it for equality comparisons (EQ_EXPR).  For any
+      other kind of assertion, we cannot derive a range from LIMIT's
+      anti-range that can be used to describe the new range.  For
+      instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
+      then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
+      no single range for x_2 that could describe LE_EXPR, so we might
+      as well build the range [b_4, +INF] for it.
+      One special case we handle is extracting a range from a
+      range test encoded as (unsigned)var + CST <= limit.  */
+   if (TREE_CODE (op) == NOP_EXPR
+       || TREE_CODE (op) == PLUS_EXPR)
+     {
+       if (TREE_CODE (op) == PLUS_EXPR)
+         {
+         min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)),
+                            TREE_OPERAND (op, 1));
+           max = int_const_binop (PLUS_EXPR, limit, min);
+         op = TREE_OPERAND (op, 0);
+       }
+       else
+       {
+         min = build_int_cst (TREE_TYPE (var), 0);
+         max = limit;
+       }
+       /* Make sure to not set TREE_OVERFLOW on the final type
+        conversion.  We are willingly interpreting large positive
+        unsigned values as negative signed values here.  */
+       min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
+       max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
+       /* We can transform a max, min range to an anti-range or
+          vice-versa.  Use set_and_canonicalize_value_range which does
+        this for us.  */
+       if (cond_code == LE_EXPR)
+         set_and_canonicalize_value_range (vr_p, VR_RANGE,
+                                         min, max, vr_p->equiv);
+       else if (cond_code == GT_EXPR)
+         set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
+                                         min, max, vr_p->equiv);
+       else
+       gcc_unreachable ();
+     }
+   else if (cond_code == EQ_EXPR)
+     {
+       enum value_range_type range_type;
+       if (limit_vr)
+       {
+         range_type = limit_vr->type;
+         min = limit_vr->min;
+         max = limit_vr->max;
+       }
+       else
+       {
+         range_type = VR_RANGE;
+         min = limit;
+         max = limit;
+       }
+       set_value_range (vr_p, range_type, min, max, vr_p->equiv);
+       /* When asserting the equality VAR == LIMIT and LIMIT is another
+        SSA name, the new range will also inherit the equivalence set
+        from LIMIT.  */
+       if (TREE_CODE (limit) == SSA_NAME)
+       add_equivalence (&vr_p->equiv, limit);
+     }
+   else if (cond_code == NE_EXPR)
+     {
+       /* As described above, when LIMIT's range is an anti-range and
+        this assertion is an inequality (NE_EXPR), then we cannot
+        derive anything from the anti-range.  For instance, if
+        LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
+        not imply that VAR's range is [0, 0].  So, in the case of
+        anti-ranges, we just assert the inequality using LIMIT and
+        not its anti-range.
+        If LIMIT_VR is a range, we can only use it to build a new
+        anti-range if LIMIT_VR is a single-valued range.  For
+        instance, if LIMIT_VR is [0, 1], the predicate
+        VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
+        Rather, it means that for value 0 VAR should be ~[0, 0]
+        and for value 1, VAR should be ~[1, 1].  We cannot
+        represent these ranges.
+        The only situation in which we can build a valid
+        anti-range is when LIMIT_VR is a single-valued range
+        (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
+        build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].  */
+       if (limit_vr
+         && limit_vr->type == VR_RANGE
+         && compare_values (limit_vr->min, limit_vr->max) == 0)
+       {
+         min = limit_vr->min;
+         max = limit_vr->max;
+       }
+       else
+       {
+         /* In any other case, we cannot use LIMIT's range to build a
+            valid anti-range.  */
+         min = max = limit;
+       }
+       /* If MIN and MAX cover the whole range for their type, then
+        just use the original LIMIT.  */
+       if (INTEGRAL_TYPE_P (type)
+         && vrp_val_is_min (min)
+         && vrp_val_is_max (max))
+       min = max = limit;
+       set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
+                                       min, max, vr_p->equiv);
+     }
+   else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
+     {
+       min = TYPE_MIN_VALUE (type);
+       if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
+       max = limit;
+       else
+       {
+         /* If LIMIT_VR is of the form [N1, N2], we need to build the
+            range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
+            LT_EXPR.  */
+         max = limit_vr->max;
+       }
+       /* If the maximum value forces us to be out of bounds, simply punt.
+        It would be pointless to try and do anything more since this
+        all should be optimized away above us.  */
+       if (cond_code == LT_EXPR
+         && compare_values (max, min) == 0)
+       set_value_range_to_varying (vr_p);
+       else
+       {
+         /* For LT_EXPR, we create the range [MIN, MAX - 1].  */
+         if (cond_code == LT_EXPR)
+           {
+             if (TYPE_PRECISION (TREE_TYPE (max)) == 1
+                 && !TYPE_UNSIGNED (TREE_TYPE (max)))
+               max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
+                                  build_int_cst (TREE_TYPE (max), -1));
+             else
+               max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
+                                  build_int_cst (TREE_TYPE (max), 1));
+             /* Signal to compare_values_warnv this expr doesn't overflow.  */
+             if (EXPR_P (max))
+               TREE_NO_WARNING (max) = 1;
+           }
+         set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
+       }
+     }
+   else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
+     {
+       max = TYPE_MAX_VALUE (type);
+       if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
+       min = limit;
+       else
+       {
+         /* If LIMIT_VR is of the form [N1, N2], we need to build the
+            range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
+            GT_EXPR.  */
+         min = limit_vr->min;
+       }
+       /* If the minimum value forces us to be out of bounds, simply punt.
+        It would be pointless to try and do anything more since this
+        all should be optimized away above us.  */
+       if (cond_code == GT_EXPR
+         && compare_values (min, max) == 0)
+       set_value_range_to_varying (vr_p);
+       else
+       {
+         /* For GT_EXPR, we create the range [MIN + 1, MAX].  */
+         if (cond_code == GT_EXPR)
+           {
+             if (TYPE_PRECISION (TREE_TYPE (min)) == 1
+                 && !TYPE_UNSIGNED (TREE_TYPE (min)))
+               min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
+                                  build_int_cst (TREE_TYPE (min), -1));
+             else
+               min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
+                                  build_int_cst (TREE_TYPE (min), 1));
+             /* Signal to compare_values_warnv this expr doesn't overflow.  */
+             if (EXPR_P (min))
+               TREE_NO_WARNING (min) = 1;
+           }
+         set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
+       }
+     }
+   else
+     gcc_unreachable ();
+   /* Finally intersect the new range with what we already know about var.  */
+   vrp_intersect_ranges (vr_p, get_value_range (var));
+ }
+ /* Extract value range information from an ASSERT_EXPR EXPR and store
+    it in *VR_P.  */
+ void
+ vr_values::extract_range_from_assert (value_range *vr_p, tree expr)
+ {
+   tree var = ASSERT_EXPR_VAR (expr);
+   tree cond = ASSERT_EXPR_COND (expr);
+   tree limit, op;
+   enum tree_code cond_code;
+   gcc_assert (COMPARISON_CLASS_P (cond));
+   /* Find VAR in the ASSERT_EXPR conditional.  */
+   if (var == TREE_OPERAND (cond, 0)
+       || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
+       || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
+     {
+       /* If the predicate is of the form VAR COMP LIMIT, then we just
+        take LIMIT from the RHS and use the same comparison code.  */
+       cond_code = TREE_CODE (cond);
+       limit = TREE_OPERAND (cond, 1);
+       op = TREE_OPERAND (cond, 0);
+     }
+   else
+     {
+       /* If the predicate is of the form LIMIT COMP VAR, then we need
+        to flip around the comparison code to create the proper range
+        for VAR.  */
+       cond_code = swap_tree_comparison (TREE_CODE (cond));
+       limit = TREE_OPERAND (cond, 0);
+       op = TREE_OPERAND (cond, 1);
+     }
+   extract_range_for_var_from_comparison_expr (var, cond_code, op,
+                                             limit, vr_p);
+ }
+ /* Extract range information from SSA name VAR and store it in VR.  If
+    VAR has an interesting range, use it.  Otherwise, create the
+    range [VAR, VAR] and return it.  This is useful in situations where
+    we may have conditionals testing values of VARYING names.  For
+    instance,
+       x_3 = y_5;
+       if (x_3 > y_5)
+         ...
+     Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
+     always false.  */
+ void
+ vr_values::extract_range_from_ssa_name (value_range *vr, tree var)
+ {
+   value_range *var_vr = get_value_range (var);
+   if (var_vr->type != VR_VARYING)
+     copy_value_range (vr, var_vr);
+   else
+     set_value_range (vr, VR_RANGE, var, var, NULL);
+   add_equivalence (&vr->equiv, var);
+ }
+ /* Extract range information from a binary expression OP0 CODE OP1 based on
+    the ranges of each of its operands with resulting type EXPR_TYPE.
+    The resulting range is stored in *VR.  */
+ void
+ vr_values::extract_range_from_binary_expr (value_range *vr,
+                                          enum tree_code code,
+                                          tree expr_type, tree op0, tree op1)
+ {
+   value_range vr0 = VR_INITIALIZER;
+   value_range vr1 = VR_INITIALIZER;
+   /* Get value ranges for each operand.  For constant operands, create
+      a new value range with the operand to simplify processing.  */
+   if (TREE_CODE (op0) == SSA_NAME)
+     vr0 = *(get_value_range (op0));
+   else if (is_gimple_min_invariant (op0))
+     set_value_range_to_value (&vr0, op0, NULL);
+   else
+     set_value_range_to_varying (&vr0);
+   if (TREE_CODE (op1) == SSA_NAME)
+     vr1 = *(get_value_range (op1));
+   else if (is_gimple_min_invariant (op1))
+     set_value_range_to_value (&vr1, op1, NULL);
+   else
+     set_value_range_to_varying (&vr1);
+   /* If one argument is varying, we can sometimes still deduce a
+      range for the output: any + [3, +INF] is in [MIN+3, +INF].  */
+   if (INTEGRAL_TYPE_P (TREE_TYPE (op0))
+       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))
+     {
+       if (vr0.type == VR_VARYING && vr1.type != VR_VARYING)
+       {
+         vr0.type = VR_RANGE;
+         vr0.min = vrp_val_min (expr_type);
+         vr0.max = vrp_val_max (expr_type);
+       }
+       else if (vr1.type == VR_VARYING && vr0.type != VR_VARYING)
+       {
+         vr1.type = VR_RANGE;
+         vr1.min = vrp_val_min (expr_type);
+         vr1.max = vrp_val_max (expr_type);
+       }
+     }
+   extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
+   /* Try harder for PLUS and MINUS if the range of one operand is symbolic
+      and based on the other operand, for example if it was deduced from a
+      symbolic comparison.  When a bound of the range of the first operand
+      is invariant, we set the corresponding bound of the new range to INF
+      in order to avoid recursing on the range of the second operand.  */
+   if (vr->type == VR_VARYING
+       && (code == PLUS_EXPR || code == MINUS_EXPR)
+       && TREE_CODE (op1) == SSA_NAME
+       && vr0.type == VR_RANGE
+       && symbolic_range_based_on_p (&vr0, op1))
+     {
+       const bool minus_p = (code == MINUS_EXPR);
+       value_range n_vr1 = VR_INITIALIZER;
+       /* Try with VR0 and [-INF, OP1].  */
+       if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
+       set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
+       /* Try with VR0 and [OP1, +INF].  */
+       else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
+       set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
+       /* Try with VR0 and [OP1, OP1].  */
+       else
+       set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
+       extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
+     }
+   if (vr->type == VR_VARYING
+       && (code == PLUS_EXPR || code == MINUS_EXPR)
+       && TREE_CODE (op0) == SSA_NAME
+       && vr1.type == VR_RANGE
+       && symbolic_range_based_on_p (&vr1, op0))
+     {
+       const bool minus_p = (code == MINUS_EXPR);
+       value_range n_vr0 = VR_INITIALIZER;
+       /* Try with [-INF, OP0] and VR1.  */
+       if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
+       set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
+       /* Try with [OP0, +INF] and VR1.  */
+       else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
+       set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
+       /* Try with [OP0, OP0] and VR1.  */
+       else
+       set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
+       extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
+     }
+   /* If we didn't derive a range for MINUS_EXPR, and
+      op1's range is ~[op0,op0] or vice-versa, then we
+      can derive a non-null range.  This happens often for
+      pointer subtraction.  */
+   if (vr->type == VR_VARYING
+       && (code == MINUS_EXPR || code == POINTER_DIFF_EXPR)
+       && TREE_CODE (op0) == SSA_NAME
+       && ((vr0.type == VR_ANTI_RANGE
+          && vr0.min == op1
+          && vr0.min == vr0.max)
+         || (vr1.type == VR_ANTI_RANGE
+             && vr1.min == op0
+             && vr1.min == vr1.max)))
+       set_value_range_to_nonnull (vr, expr_type);
+ }
+ /* Extract range information from a unary expression CODE OP0 based on
+    the range of its operand with resulting type TYPE.
+    The resulting range is stored in *VR.  */
+ void
+ vr_values::extract_range_from_unary_expr (value_range *vr, enum tree_code code,
+                                         tree type, tree op0)
+ {
+   value_range vr0 = VR_INITIALIZER;
+   /* Get value ranges for the operand.  For constant operands, create
+      a new value range with the operand to simplify processing.  */
+   if (TREE_CODE (op0) == SSA_NAME)
+     vr0 = *(get_value_range (op0));
+   else if (is_gimple_min_invariant (op0))
+     set_value_range_to_value (&vr0, op0, NULL);
+   else
+     set_value_range_to_varying (&vr0);
+   ::extract_range_from_unary_expr (vr, code, type, &vr0, TREE_TYPE (op0));
+ }
+ /* Extract range information from a conditional expression STMT based on
+    the ranges of each of its operands and the expression code.  */
+ void
+ vr_values::extract_range_from_cond_expr (value_range *vr, gassign *stmt)
+ {
+   tree op0, op1;
+   value_range vr0 = VR_INITIALIZER;
+   value_range vr1 = VR_INITIALIZER;
+   /* Get value ranges for each operand.  For constant operands, create
+      a new value range with the operand to simplify processing.  */
+   op0 = gimple_assign_rhs2 (stmt);
+   if (TREE_CODE (op0) == SSA_NAME)
+     vr0 = *(get_value_range (op0));
+   else if (is_gimple_min_invariant (op0))
+     set_value_range_to_value (&vr0, op0, NULL);
+   else
+     set_value_range_to_varying (&vr0);
+   op1 = gimple_assign_rhs3 (stmt);
+   if (TREE_CODE (op1) == SSA_NAME)
+     vr1 = *(get_value_range (op1));
+   else if (is_gimple_min_invariant (op1))
+     set_value_range_to_value (&vr1, op1, NULL);
+   else
+     set_value_range_to_varying (&vr1);
+   /* The resulting value range is the union of the operand ranges */
+   copy_value_range (vr, &vr0);
+   vrp_meet (vr, &vr1);
+ }
+ /* Extract range information from a comparison expression EXPR based
+    on the range of its operand and the expression code.  */
+ void
+ vr_values::extract_range_from_comparison (value_range *vr, enum tree_code code,
+                                         tree type, tree op0, tree op1)
+ {
+   bool sop;
+   tree val;
+   val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
+                                                NULL);
+   if (val)
+     {
+       /* Since this expression was found on the RHS of an assignment,
+        its type may be different from _Bool.  Convert VAL to EXPR's
+        type.  */
+       val = fold_convert (type, val);
+       if (is_gimple_min_invariant (val))
+       set_value_range_to_value (vr, val, vr->equiv);
+       else
+       set_value_range (vr, VR_RANGE, val, val, vr->equiv);
+     }
+   else
+     /* The result of a comparison is always true or false.  */
+     set_value_range_to_truthvalue (vr, type);
+ }
+ /* Helper function for simplify_internal_call_using_ranges and
+    extract_range_basic.  Return true if OP0 SUBCODE OP1 for
+    SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
+    always overflow.  Set *OVF to true if it is known to always
+    overflow.  */
+ bool
+ vr_values::check_for_binary_op_overflow (enum tree_code subcode, tree type,
+                                        tree op0, tree op1, bool *ovf)
+ {
+   value_range vr0 = VR_INITIALIZER;
+   value_range vr1 = VR_INITIALIZER;
+   if (TREE_CODE (op0) == SSA_NAME)
+     vr0 = *get_value_range (op0);
+   else if (TREE_CODE (op0) == INTEGER_CST)
+     set_value_range_to_value (&vr0, op0, NULL);
+   else
+     set_value_range_to_varying (&vr0);
+   if (TREE_CODE (op1) == SSA_NAME)
+     vr1 = *get_value_range (op1);
+   else if (TREE_CODE (op1) == INTEGER_CST)
+     set_value_range_to_value (&vr1, op1, NULL);
+   else
+     set_value_range_to_varying (&vr1);
+   if (!range_int_cst_p (&vr0)
+       || TREE_OVERFLOW (vr0.min)
+       || TREE_OVERFLOW (vr0.max))
+     {
+       vr0.min = vrp_val_min (TREE_TYPE (op0));
+       vr0.max = vrp_val_max (TREE_TYPE (op0));
+     }
+   if (!range_int_cst_p (&vr1)
+       || TREE_OVERFLOW (vr1.min)
+       || TREE_OVERFLOW (vr1.max))
+     {
+       vr1.min = vrp_val_min (TREE_TYPE (op1));
+       vr1.max = vrp_val_max (TREE_TYPE (op1));
+     }
+   *ovf = arith_overflowed_p (subcode, type, vr0.min,
+                            subcode == MINUS_EXPR ? vr1.max : vr1.min);
+   if (arith_overflowed_p (subcode, type, vr0.max,
+                         subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
+     return false;
+   if (subcode == MULT_EXPR)
+     {
+       if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
+         || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
+       return false;
+     }
+   if (*ovf)
+     {
+       /* So far we found that there is an overflow on the boundaries.
+        That doesn't prove that there is an overflow even for all values
+        in between the boundaries.  For that compute widest_int range
+        of the result and see if it doesn't overlap the range of
+        type.  */
+       widest_int wmin, wmax;
+       widest_int w[4];
+       int i;
+       w[0] = wi::to_widest (vr0.min);
+       w[1] = wi::to_widest (vr0.max);
+       w[2] = wi::to_widest (vr1.min);
+       w[3] = wi::to_widest (vr1.max);
+       for (i = 0; i < 4; i++)
+       {
+         widest_int wt;
+         switch (subcode)
+           {
+           case PLUS_EXPR:
+             wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
+             break;
+           case MINUS_EXPR:
+             wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
+             break;
+           case MULT_EXPR:
+             wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
+             break;
+           default:
+             gcc_unreachable ();
+           }
+         if (i == 0)
+           {
+             wmin = wt;
+             wmax = wt;
+           }
+         else
+           {
+             wmin = wi::smin (wmin, wt);
+             wmax = wi::smax (wmax, wt);
+           }
+       }
+       /* The result of op0 CODE op1 is known to be in range
+        [wmin, wmax].  */
+       widest_int wtmin = wi::to_widest (vrp_val_min (type));
+       widest_int wtmax = wi::to_widest (vrp_val_max (type));
+       /* If all values in [wmin, wmax] are smaller than
+        [wtmin, wtmax] or all are larger than [wtmin, wtmax],
+        the arithmetic operation will always overflow.  */
+       if (wmax < wtmin || wmin > wtmax)
+       return true;
+       return false;
+     }
+   return true;
+ }
+ /* Try to derive a nonnegative or nonzero range out of STMT relying
+    primarily on generic routines in fold in conjunction with range data.
+    Store the result in *VR */
+ void
+ vr_values::extract_range_basic (value_range *vr, gimple *stmt)
+ {
+   bool sop;
+   tree type = gimple_expr_type (stmt);
+   if (is_gimple_call (stmt))
+     {
+       tree arg;
+       int mini, maxi, zerov = 0, prec;
+       enum tree_code subcode = ERROR_MARK;
+       combined_fn cfn = gimple_call_combined_fn (stmt);
+       scalar_int_mode mode;
+       switch (cfn)
+       {
+       case CFN_BUILT_IN_CONSTANT_P:
+         /* If the call is __builtin_constant_p and the argument is a
+            function parameter resolve it to false.  This avoids bogus
+            array bound warnings.
+            ???  We could do this as early as inlining is finished.  */
+         arg = gimple_call_arg (stmt, 0);
+         if (TREE_CODE (arg) == SSA_NAME
+             && SSA_NAME_IS_DEFAULT_DEF (arg)
+             && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL
+             && cfun->after_inlining)
+           {
+             set_value_range_to_null (vr, type);
+             return;
+           }
+         break;
+         /* Both __builtin_ffs* and __builtin_popcount return
+            [0, prec].  */
+       CASE_CFN_FFS:
+       CASE_CFN_POPCOUNT:
+         arg = gimple_call_arg (stmt, 0);
+         prec = TYPE_PRECISION (TREE_TYPE (arg));
+         mini = 0;
+         maxi = prec;
+         if (TREE_CODE (arg) == SSA_NAME)
+           {
+             value_range *vr0 = get_value_range (arg);
+             /* If arg is non-zero, then ffs or popcount
+                are non-zero.  */
+             if ((vr0->type == VR_RANGE
+                  && range_includes_zero_p (vr0->min, vr0->max) == 0)
+                 || (vr0->type == VR_ANTI_RANGE
+                     && range_includes_zero_p (vr0->min, vr0->max) == 1))
+               mini = 1;
+             /* If some high bits are known to be zero,
+                we can decrease the maximum.  */
+             if (vr0->type == VR_RANGE
+                 && TREE_CODE (vr0->max) == INTEGER_CST
+                 && !operand_less_p (vr0->min,
+                                     build_zero_cst (TREE_TYPE (vr0->min))))
+               maxi = tree_floor_log2 (vr0->max) + 1;
+           }
+         goto bitop_builtin;
+         /* __builtin_parity* returns [0, 1].  */
+       CASE_CFN_PARITY:
+         mini = 0;
+         maxi = 1;
+         goto bitop_builtin;
+         /* __builtin_c[lt]z* return [0, prec-1], except for
+            when the argument is 0, but that is undefined behavior.
+            On many targets where the CLZ RTL or optab value is defined
+            for 0 the value is prec, so include that in the range
+            by default.  */
+       CASE_CFN_CLZ:
+         arg = gimple_call_arg (stmt, 0);
+         prec = TYPE_PRECISION (TREE_TYPE (arg));
+         mini = 0;
+         maxi = prec;
+         mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
+         if (optab_handler (clz_optab, mode) != CODE_FOR_nothing
+             && CLZ_DEFINED_VALUE_AT_ZERO (mode, zerov)
+             /* Handle only the single common value.  */
+             && zerov != prec)
+           /* Magic value to give up, unless vr0 proves
+              arg is non-zero.  */
+           mini = -2;
+         if (TREE_CODE (arg) == SSA_NAME)
+           {
+             value_range *vr0 = get_value_range (arg);
+             /* From clz of VR_RANGE minimum we can compute
+                result maximum.  */
+             if (vr0->type == VR_RANGE
+                 && TREE_CODE (vr0->min) == INTEGER_CST)
+               {
+                 maxi = prec - 1 - tree_floor_log2 (vr0->min);
+                 if (maxi != prec)
+                   mini = 0;
+               }
+             else if (vr0->type == VR_ANTI_RANGE
+                      && integer_zerop (vr0->min))
+               {
+                 maxi = prec - 1;
+                 mini = 0;
+               }
+             if (mini == -2)
+               break;
+             /* From clz of VR_RANGE maximum we can compute
+                result minimum.  */
+             if (vr0->type == VR_RANGE
+                 && TREE_CODE (vr0->max) == INTEGER_CST)
+               {
+                 mini = prec - 1 - tree_floor_log2 (vr0->max);
+                 if (mini == prec)
+                   break;
+               }
+           }
+         if (mini == -2)
+           break;
+         goto bitop_builtin;
+         /* __builtin_ctz* return [0, prec-1], except for
+            when the argument is 0, but that is undefined behavior.
+            If there is a ctz optab for this mode and
+            CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
+            otherwise just assume 0 won't be seen.  */
+       CASE_CFN_CTZ:
+         arg = gimple_call_arg (stmt, 0);
+         prec = TYPE_PRECISION (TREE_TYPE (arg));
+         mini = 0;
+         maxi = prec - 1;
+         mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
+         if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing
+             && CTZ_DEFINED_VALUE_AT_ZERO (mode, zerov))
+           {
+             /* Handle only the two common values.  */
+             if (zerov == -1)
+               mini = -1;
+             else if (zerov == prec)
+               maxi = prec;
+             else
+               /* Magic value to give up, unless vr0 proves
+                  arg is non-zero.  */
+               mini = -2;
+           }
+         if (TREE_CODE (arg) == SSA_NAME)
+           {
+             value_range *vr0 = get_value_range (arg);
+             /* If arg is non-zero, then use [0, prec - 1].  */
+             if ((vr0->type == VR_RANGE
+                  && integer_nonzerop (vr0->min))
+                 || (vr0->type == VR_ANTI_RANGE
+                     && integer_zerop (vr0->min)))
+               {
+                 mini = 0;
+                 maxi = prec - 1;
+               }
+             /* If some high bits are known to be zero,
+                we can decrease the result maximum.  */
+             if (vr0->type == VR_RANGE
+                 && TREE_CODE (vr0->max) == INTEGER_CST)
+               {
+                 maxi = tree_floor_log2 (vr0->max);
+                 /* For vr0 [0, 0] give up.  */
+                 if (maxi == -1)
+                   break;
+               }
+           }
+         if (mini == -2)
+           break;
+         goto bitop_builtin;
+         /* __builtin_clrsb* returns [0, prec-1].  */
+       CASE_CFN_CLRSB:
+         arg = gimple_call_arg (stmt, 0);
+         prec = TYPE_PRECISION (TREE_TYPE (arg));
+         mini = 0;
+         maxi = prec - 1;
+         goto bitop_builtin;
+       bitop_builtin:
+         set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
+                          build_int_cst (type, maxi), NULL);
+         return;
+       case CFN_UBSAN_CHECK_ADD:
+         subcode = PLUS_EXPR;
+         break;
+       case CFN_UBSAN_CHECK_SUB:
+         subcode = MINUS_EXPR;
+         break;
+       case CFN_UBSAN_CHECK_MUL:
+         subcode = MULT_EXPR;
+         break;
+       case CFN_GOACC_DIM_SIZE:
+       case CFN_GOACC_DIM_POS:
+         /* Optimizing these two internal functions helps the loop
+            optimizer eliminate outer comparisons.  Size is [1,N]
+            and pos is [0,N-1].  */
+         {
+           bool is_pos = cfn == CFN_GOACC_DIM_POS;
+           int axis = oacc_get_ifn_dim_arg (stmt);
+           int size = oacc_get_fn_dim_size (current_function_decl, axis);
+           if (!size)
+             /* If it's dynamic, the backend might know a hardware
+                limitation.  */
+             size = targetm.goacc.dim_limit (axis);
+           tree type = TREE_TYPE (gimple_call_lhs (stmt));
+           set_value_range (vr, VR_RANGE,
+                            build_int_cst (type, is_pos ? 0 : 1),
+                            size ? build_int_cst (type, size - is_pos)
+                                 : vrp_val_max (type), NULL);
+         }
+         return;
+       case CFN_BUILT_IN_STRLEN:
+         if (tree lhs = gimple_call_lhs (stmt))
+           if (ptrdiff_type_node
+               && (TYPE_PRECISION (ptrdiff_type_node)
+                   == TYPE_PRECISION (TREE_TYPE (lhs))))
+             {
+               tree type = TREE_TYPE (lhs);
+               tree max = vrp_val_max (ptrdiff_type_node);
+               wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
+               tree range_min = build_zero_cst (type);
+               tree range_max = wide_int_to_tree (type, wmax - 1);
+               set_value_range (vr, VR_RANGE, range_min, range_max, NULL);
+               return;
+             }
+         break;
+       default:
+         break;
+       }
+       if (subcode != ERROR_MARK)
+       {
+         bool saved_flag_wrapv = flag_wrapv;
+         /* Pretend the arithmetics is wrapping.  If there is
+            any overflow, we'll complain, but will actually do
+            wrapping operation.  */
+         flag_wrapv = 1;
+         extract_range_from_binary_expr (vr, subcode, type,
+                                         gimple_call_arg (stmt, 0),
+                                         gimple_call_arg (stmt, 1));
+         flag_wrapv = saved_flag_wrapv;
+         /* If for both arguments vrp_valueize returned non-NULL,
+            this should have been already folded and if not, it
+            wasn't folded because of overflow.  Avoid removing the
+            UBSAN_CHECK_* calls in that case.  */
+         if (vr->type == VR_RANGE
+             && (vr->min == vr->max
+                 || operand_equal_p (vr->min, vr->max, 0)))
+           set_value_range_to_varying (vr);
+         return;
+       }
+     }
+   /* Handle extraction of the two results (result of arithmetics and
+      a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
+      internal function.  Similarly from ATOMIC_COMPARE_EXCHANGE.  */
+   else if (is_gimple_assign (stmt)
+          && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
+              || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
+          && INTEGRAL_TYPE_P (type))
+     {
+       enum tree_code code = gimple_assign_rhs_code (stmt);
+       tree op = gimple_assign_rhs1 (stmt);
+       if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
+       {
+         gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
+         if (is_gimple_call (g) && gimple_call_internal_p (g))
+           {
+             enum tree_code subcode = ERROR_MARK;
+             switch (gimple_call_internal_fn (g))
+               {
+               case IFN_ADD_OVERFLOW:
+                 subcode = PLUS_EXPR;
+                 break;
+               case IFN_SUB_OVERFLOW:
+                 subcode = MINUS_EXPR;
+                 break;
+               case IFN_MUL_OVERFLOW:
+                 subcode = MULT_EXPR;
+                 break;
+               case IFN_ATOMIC_COMPARE_EXCHANGE:
+                 if (code == IMAGPART_EXPR)
+                   {
+                     /* This is the boolean return value whether compare and
+                        exchange changed anything or not.  */
+                     set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
+                                      build_int_cst (type, 1), NULL);
+                     return;
+                   }
+                 break;
+               default:
+                 break;
+               }
+             if (subcode != ERROR_MARK)
+               {
+                 tree op0 = gimple_call_arg (g, 0);
+                 tree op1 = gimple_call_arg (g, 1);
+                 if (code == IMAGPART_EXPR)
+                   {
+                     bool ovf = false;
+                     if (check_for_binary_op_overflow (subcode, type,
+                                                       op0, op1, &ovf))
+                       set_value_range_to_value (vr,
+                                                 build_int_cst (type, ovf),
+                                                 NULL);
+                     else if (TYPE_PRECISION (type) == 1
+                              && !TYPE_UNSIGNED (type))
+                       set_value_range_to_varying (vr);
+                     else
+                       set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
+                                        build_int_cst (type, 1), NULL);
+                   }
+                 else if (types_compatible_p (type, TREE_TYPE (op0))
+                          && types_compatible_p (type, TREE_TYPE (op1)))
+                   {
+                     bool saved_flag_wrapv = flag_wrapv;
+                     /* Pretend the arithmetics is wrapping.  If there is
+                        any overflow, IMAGPART_EXPR will be set.  */
+                     flag_wrapv = 1;
+                     extract_range_from_binary_expr (vr, subcode, type,
+                                                     op0, op1);
+                     flag_wrapv = saved_flag_wrapv;
+                   }
+                 else
+                   {
+                     value_range vr0 = VR_INITIALIZER;
+                     value_range vr1 = VR_INITIALIZER;
+                     bool saved_flag_wrapv = flag_wrapv;
+                     /* Pretend the arithmetics is wrapping.  If there is
+                        any overflow, IMAGPART_EXPR will be set.  */
+                     flag_wrapv = 1;
+                     extract_range_from_unary_expr (&vr0, NOP_EXPR,
+                                                    type, op0);
+                     extract_range_from_unary_expr (&vr1, NOP_EXPR,
+                                                    type, op1);
+                     extract_range_from_binary_expr_1 (vr, subcode, type,
+                                                       &vr0, &vr1);
+                     flag_wrapv = saved_flag_wrapv;
+                   }
+                 return;
+               }
+           }
+       }
+     }
+   if (INTEGRAL_TYPE_P (type)
+       && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
+     set_value_range_to_nonnegative (vr, type);
+   else if (vrp_stmt_computes_nonzero (stmt))
+     set_value_range_to_nonnull (vr, type);
+   else
+     set_value_range_to_varying (vr);
+ }
+ /* Try to compute a useful range out of assignment STMT and store it
+    in *VR.  */
+ void
+ vr_values::extract_range_from_assignment (value_range *vr, gassign *stmt)
+ {
+   enum tree_code code = gimple_assign_rhs_code (stmt);
+   if (code == ASSERT_EXPR)
+     extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
+   else if (code == SSA_NAME)
+     extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
+   else if (TREE_CODE_CLASS (code) == tcc_binary)
+     extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
+                                   gimple_expr_type (stmt),
+                                   gimple_assign_rhs1 (stmt),
+                                   gimple_assign_rhs2 (stmt));
+   else if (TREE_CODE_CLASS (code) == tcc_unary)
+     extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
+                                  gimple_expr_type (stmt),
+                                  gimple_assign_rhs1 (stmt));
+   else if (code == COND_EXPR)
+     extract_range_from_cond_expr (vr, stmt);
+   else if (TREE_CODE_CLASS (code) == tcc_comparison)
+     extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
+                                  gimple_expr_type (stmt),
+                                  gimple_assign_rhs1 (stmt),
+                                  gimple_assign_rhs2 (stmt));
+   else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
+          && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
+     set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
+   else
+     set_value_range_to_varying (vr);
+   if (vr->type == VR_VARYING)
+     extract_range_basic (vr, stmt);
+ }
/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:

   - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
     all the values in the ranges.

   - Return BOOLEAN_FALSE_NODE if the comparison always returns false.

   - Return NULL_TREE if it is not always possible to determine the
     value of the comparison.

   Also set *STRICT_OVERFLOW_P to indicate whether comparision evaluation
   assumed signed overflow is undefined.

   NOTE(review): the tests below treat compare_values_warnv as returning
   -1/0/+1 for less/equal/greater, -2 when the values cannot be compared
   at compile time, and 2 for "known unequal but order unknown" --
   confirm against that function's definition.  */

static tree
compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
		bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges cannot be compared.  */
  if (vr0->type == VR_VARYING
      || vr0->type == VR_UNDEFINED
      || vr1->type == VR_VARYING
      || vr1->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* If both are anti-ranges, then we cannot compute any
	 comparison.  */
      if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
	return NULL_TREE;

      /* These comparisons are never statically computable.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* Equality can be computed only between a range and an
	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
      if (vr0->type == VR_RANGE)
	{
	  /* To simplify processing, make VR0 the anti-range.  */
	  value_range *tmp = vr0;
	  vr0 = vr1;
	  vr1 = tmp;
	}

      gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);

      /* ~[A, B] vs [A, B]: equal endpoints means the sets are disjoint,
	 so NE is always true and EQ always false.  */
      if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
     operands around and change the comparison code.  */
  if (comp == GT_EXPR || comp == GE_EXPR)
    {
      comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
      std::swap (vr0, vr1);
    }

  if (comp == EQ_EXPR)
    {
      /* Equality may only be computed if both ranges represent
	 exactly one value.  */
      if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
	{
	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
					      strict_overflow_p);
	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
					      strict_overflow_p);
	  if (cmp_min == 0 && cmp_max == 0)
	    return boolean_true_node;
	  /* -2 means the endpoints were not comparable, in which case
	     we cannot conclude anything.  */
	  else if (cmp_min != -2 && cmp_max != -2)
	    return boolean_false_node;
	}
      /* If [V0_MIN, V0_MAX] < [V1_MIN, V1_MAX] then V0 != V1.  */
      else if (compare_values_warnv (vr0->min, vr1->max,
				     strict_overflow_p) == 1
	       || compare_values_warnv (vr1->min, vr0->max,
					strict_overflow_p) == 1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      int cmp1, cmp2;

      /* If VR0 is completely to the left or completely to the right
	 of VR1, they are always different.  Notice that we need to
	 make sure that both comparisons yield similar results to
	 avoid comparing values that cannot be compared at
	 compile-time.  */
      cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
	return boolean_true_node;

      /* If VR0 and VR1 represent a single value and are identical,
	 return false.  */
      else if (compare_values_warnv (vr0->min, vr0->max,
				     strict_overflow_p) == 0
	       && compare_values_warnv (vr1->min, vr1->max,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->min, vr1->min,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->max, vr1->max,
					strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      else
	return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR0 is to the left of VR1, return true.  */
      tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	return boolean_true_node;

      /* If VR0 is to the right of VR1, return false.  */
      tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	return boolean_false_node;

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* All comparison codes were normalized to EQ/NE/LT/LE above.  */
  gcc_unreachable ();
}
/* Given a value range VR, a value VAL and a comparison code COMP, return
   BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
   values in VR.  Return BOOLEAN_FALSE_NODE if the comparison
   always returns false.  Return NULL_TREE if it is not always
   possible to determine the value of the comparison.  Also set
   *STRICT_OVERFLOW_P to indicate whether comparision evaluation
   assumed signed overflow is undefined.  */

static tree
compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
			  bool *strict_overflow_p)
{
  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr->type == VR_ANTI_RANGE)
    {
      /* For anti-ranges, the only predicates that we can compute at
	 compile time are equality and inequality.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2.  */
      if (value_inside_range (val, vr->min, vr->max) == 1)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  if (comp == EQ_EXPR)
    {
      /* EQ_EXPR may only be computed if VR represents exactly
	 one value.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
	{
	  int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
	  if (cmp == 0)
	    return boolean_true_node;
	  /* NOTE(review): 2 appears to mean "known unequal but order
	     unknown" -- confirm against compare_values_warnv.  */
	  else if (cmp == -1 || cmp == 1 || cmp == 2)
	    return boolean_false_node;
	}
      /* If VAL lies strictly outside [VR->MIN, VR->MAX], equality is
	 always false.  */
      else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
	       || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      /* If VAL is not inside VR, then they are always different.  */
      if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
	  || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
	return boolean_true_node;

      /* If VR represents exactly one value equal to VAL, then return
	 false.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR is to the left of VAL, return true.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	return boolean_true_node;

      /* If VR is to the right of VAL, return false.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	return boolean_false_node;

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }
  else if (comp == GT_EXPR || comp == GE_EXPR)
    {
      int tst;

      /* If VR is to the right of VAL, return true.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == GT_EXPR && tst == 1)
	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
	return boolean_true_node;

      /* If VR is to the left of VAL, return false.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == GT_EXPR && (tst == -1 || tst == 0))
	  || (comp == GE_EXPR && tst == -1))
	return boolean_false_node;

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* Every tree comparison code was handled above.  */
  gcc_unreachable ();
}
/* Given a range VR, a LOOP and a variable VAR, determine whether it
   would be profitable to adjust VR using scalar evolution information
   for VAR.  If so, update VR with the new limits.  */

void
vr_values::adjust_range_with_scev (value_range *vr, struct loop *loop,
				   gimple *stmt, tree var)
{
  tree init, step, chrec, tmin, tmax, min, max, type, tem;
  enum ev_direction dir;

  /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
     better opportunities than a regular range, but I'm not sure.  */
  if (vr->type == VR_ANTI_RANGE)
    return;

  chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));

  /* Like in PR19590, scev can return a constant function.  */
  if (is_gimple_min_invariant (chrec))
    {
      set_value_range_to_value (vr, chrec, vr->equiv);
      return;
    }

  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    return;

  init = initial_condition_in_loop_num (chrec, loop->num);
  /* If the initial value reduces to a known singleton range, use that
     constant instead of the symbolic expression.  */
  tem = op_with_constant_singleton_value_range (init);
  if (tem)
    init = tem;
  step = evolution_part_in_loop_num (chrec, loop->num);
  /* Likewise for the step.  */
  tem = op_with_constant_singleton_value_range (step);
  if (tem)
    step = tem;

  /* If STEP is symbolic, we can't know whether INIT will be the
     minimum or maximum value in the range.  Also, unless INIT is
     a simple expression, compare_values and possibly other functions
     in tree-vrp won't be able to handle it.  */
  if (step == NULL_TREE
      || !is_gimple_min_invariant (step)
      || !valid_value_p (init))
    return;

  dir = scev_direction (chrec);
  if (/* Do not adjust ranges if we do not know whether the iv increases
	 or decreases,  ... */
      dir == EV_DIR_UNKNOWN
      /* ... or if it may wrap.  */
      || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
				get_chrec_loop (chrec), true))
    return;

  type = TREE_TYPE (var);
  /* Compute the type's extreme bounds; pointer types and types without
     explicit MIN/MAX values need the generic bound helpers.  */
  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
    tmin = lower_bound_in_type (type, type);
  else
    tmin = TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
    tmax = upper_bound_in_type (type, type);
  else
    tmax = TYPE_MAX_VALUE (type);

  /* Try to use estimated number of iterations for the loop to constrain the
     final value in the evolution.  */
  if (TREE_CODE (step) == INTEGER_CST
      && is_gimple_val (init)
      && (TREE_CODE (init) != SSA_NAME
	  || get_value_range (init)->type == VR_RANGE))
    {
      widest_int nit;

      /* We are only entering here for loop header PHI nodes, so using
	 the number of latch executions is the correct thing to use.  */
      if (max_loop_iterations (loop, &nit))
	{
	  value_range maxvr = VR_INITIALIZER;
	  signop sgn = TYPE_SIGN (TREE_TYPE (step));
	  bool overflow;

	  /* Total displacement over the loop: STEP * NIT.  */
	  widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
				     &overflow);
	  /* If the multiplication overflowed we can't do a meaningful
	     adjustment.  Likewise if the result doesn't fit in the type
	     of the induction variable.  For a signed type we have to
	     check whether the result has the expected signedness which
	     is that of the step as number of iterations is unsigned.  */
	  if (!overflow
	      && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
	      && (sgn == UNSIGNED
		  || wi::gts_p (wtmp, 0) == wi::gts_p (wi::to_wide (step), 0)))
	    {
	      tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
	      extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
					      TREE_TYPE (init), init, tem);
	      /* Likewise if the addition did.  */
	      if (maxvr.type == VR_RANGE)
		{
		  value_range initvr = VR_INITIALIZER;

		  if (TREE_CODE (init) == SSA_NAME)
		    initvr = *(get_value_range (init));
		  else if (is_gimple_min_invariant (init))
		    set_value_range_to_value (&initvr, init, NULL);
		  else
		    return;

		  /* Check if init + nit * step overflows.  Though we checked
		     scev {init, step}_loop doesn't wrap, it is not enough
		     because the loop may exit immediately.  Overflow could
		     happen in the plus expression in this case.  */
		  if ((dir == EV_DIR_DECREASES
		       && compare_values (maxvr.min, initvr.min) != -1)
		      || (dir == EV_DIR_GROWS
			  && compare_values (maxvr.max, initvr.max) != 1))
		    return;

		  tmin = maxvr.min;
		  tmax = maxvr.max;
		}
	    }
	}
    }

  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    {
      min = tmin;
      max = tmax;

      /* For VARYING or UNDEFINED ranges, just about anything we get
	 from scalar evolutions should be better.  */
      if (dir == EV_DIR_DECREASES)
	max = init;
      else
	min = init;
    }
  else if (vr->type == VR_RANGE)
    {
      min = vr->min;
      max = vr->max;

      if (dir == EV_DIR_DECREASES)
	{
	  /* INIT is the maximum value.  If INIT is lower than VR->MAX
	     but no smaller than VR->MIN, set VR->MAX to INIT.  */
	  if (compare_values (init, max) == -1)
	    max = init;

	  /* According to the loop information, the variable does not
	     overflow.  */
	  if (compare_values (min, tmin) == -1)
	    min = tmin;
	}
      else
	{
	  /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
	  if (compare_values (init, min) == 1)
	    min = init;

	  if (compare_values (tmax, max) == -1)
	    max = tmax;
	}
    }
  else
    return;

  /* If we just created an invalid range with the minimum
     greater than the maximum, we fail conservatively.
     This should happen only in unreachable
     parts of code, or for invalid programs.  */
  if (compare_values (min, max) == 1)
    return;

  /* Even for valid range info, sometimes overflow flag will leak in.
     As GIMPLE IL should have no constants with TREE_OVERFLOW set, we
     drop them.  */
  if (TREE_OVERFLOW_P (min))
    min = drop_tree_overflow (min);
  if (TREE_OVERFLOW_P (max))
    max = drop_tree_overflow (max);

  set_value_range (vr, VR_RANGE, min, max, vr->equiv);
}
+ /* Dump value ranges of all SSA_NAMEs to FILE.  */
+ void
+ vr_values::dump_all_value_ranges (FILE *file)
+ {
+   size_t i;
+   for (i = 0; i < num_vr_values; i++)
+     {
+       if (vr_value[i])
+       {
+         print_generic_expr (file, ssa_name (i));
+         fprintf (file, ": ");
+         dump_value_range (file, vr_value[i]);
+         fprintf (file, "\n");
+       }
+     }
+   fprintf (file, "\n");
+ }
+ /* Initialize VRP lattice.  */
+ vr_values::vr_values () : vrp_value_range_pool ("Tree VRP value ranges")
+ {
+   values_propagated = false;
+   num_vr_values = num_ssa_names;
+   vr_value = XCNEWVEC (value_range *, num_vr_values);
+   vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
+   bitmap_obstack_initialize (&vrp_equiv_obstack);
+ }
/* Free VRP lattice.  Releases the per-SSA-name arrays, the equivalence
   bitmap obstack and the value-range allocation pool.  */

vr_values::~vr_values ()
{
  /* Free allocated memory.  */
  free (vr_value);
  free (vr_phi_edge_counts);
  bitmap_obstack_release (&vrp_equiv_obstack);
  vrp_value_range_pool.release ();

  /* So that we can distinguish between VRP data being available
     and not available.  */
  vr_value = NULL;
  vr_phi_edge_counts = NULL;
}
/* A hack: file-scope pointer to the currently active vr_values
   instance.  The valueization callbacks below (vrp_valueize,
   vrp_valueize_1) must match a plain function signature and therefore
   cannot carry the lattice as an argument; callers set this pointer
   around the call and reset it to NULL afterwards.  */
static class vr_values *x_vr_values;
+ /* Return the singleton value-range for NAME or NAME.  */
+ static inline tree
+ vrp_valueize (tree name)
+ {
+   if (TREE_CODE (name) == SSA_NAME)
+     {
+       value_range *vr = x_vr_values->get_value_range (name);
+       if (vr->type == VR_RANGE
+         && (TREE_CODE (vr->min) == SSA_NAME
+             || is_gimple_min_invariant (vr->min))
+         && vrp_operand_equal_p (vr->min, vr->max))
+       return vr->min;
+     }
+   return name;
+ }
+ /* Return the singleton value-range for NAME if that is a constant
+    but signal to not follow SSA edges.  */
+ static inline tree
+ vrp_valueize_1 (tree name)
+ {
+   if (TREE_CODE (name) == SSA_NAME)
+     {
+       /* If the definition may be simulated again we cannot follow
+          this SSA edge as the SSA propagator does not necessarily
+        re-visit the use.  */
+       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
+       if (!gimple_nop_p (def_stmt)
+         && prop_simulate_again_p (def_stmt))
+       return NULL_TREE;
+       value_range *vr = x_vr_values->get_value_range (name);
+       if (range_int_cst_singleton_p (vr))
+       return vr->min;
+     }
+   return name;
+ }
+ /* Given STMT, an assignment or call, return its LHS if the type
+    of the LHS is suitable for VRP analysis, else return NULL_TREE.  */
+ tree
+ get_output_for_vrp (gimple *stmt)
+ {
+   if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
+     return NULL_TREE;
+   /* We only keep track of ranges in integral and pointer types.  */
+   tree lhs = gimple_get_lhs (stmt);
+   if (TREE_CODE (lhs) == SSA_NAME
+       && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+          /* It is valid to have NULL MIN/MAX values on a type.  See
+             build_range_type.  */
+          && TYPE_MIN_VALUE (TREE_TYPE (lhs))
+          && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
+         || POINTER_TYPE_P (TREE_TYPE (lhs))))
+     return lhs;
+   return NULL_TREE;
+ }
/* Visit assignment STMT.  If it produces an interesting range, record
   the range in VR and set LHS to OUTPUT_P.  */

void
vr_values::vrp_visit_assignment_or_call (gimple *stmt, tree *output_p,
					 value_range *vr)
{
  tree lhs = get_output_for_vrp (stmt);
  *output_p = lhs;

  /* We only keep track of ranges in integral and pointer types.  */
  if (lhs)
    {
      enum gimple_code code = gimple_code (stmt);

      /* Try folding the statement to a constant first.  The valueize
	 callbacks reach this lattice through the x_vr_values global,
	 which must be set for the duration of the call and cleared
	 immediately after.  */
      x_vr_values = this;
      tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
						 vrp_valueize_1);
      x_vr_values = NULL;
      if (tem)
	{
	  /* Folding may yield an SSA name; only adopt its range when its
	     defining statement will not be simulated again, otherwise
	     the copied range could go stale.  */
	  if (TREE_CODE (tem) == SSA_NAME
	      && (SSA_NAME_IS_DEFAULT_DEF (tem)
		  || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
	    {
	      extract_range_from_ssa_name (vr, tem);
	      return;
	    }
	  else if (is_gimple_min_invariant (tem))
	    {
	      set_value_range_to_value (vr, tem, NULL);
	      return;
	    }
	}
      /* Then dispatch to value-range extracting functions.  */
      if (code == GIMPLE_CALL)
	extract_range_basic (vr, stmt);
      else
	extract_range_from_assignment (vr, as_a <gassign *> (stmt));
    }
}
+ /* Helper that gets the value range of the SSA_NAME with version I
+    or a symbolic range containing the SSA_NAME only if the value range
+    is varying or undefined.  */
+ value_range
+ vr_values::get_vr_for_comparison (int i)
+ {
+   value_range vr = *get_value_range (ssa_name (i));
+   /* If name N_i does not have a valid range, use N_i as its own
+      range.  This allows us to compare against names that may
+      have N_i in their ranges.  */
+   if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
+     {
+       vr.type = VR_RANGE;
+       vr.min = ssa_name (i);
+       vr.max = ssa_name (i);
+     }
+   return vr;
+ }
/* Compare all the value ranges for names equivalent to VAR with VAL
   using comparison code COMP.  Return the same value returned by
   compare_range_with_value, including the setting of
   *STRICT_OVERFLOW_P.  */

tree
vr_values::compare_name_with_value (enum tree_code comp, tree var, tree val,
				    bool *strict_overflow_p, bool use_equiv_p)
{
  bitmap_iterator bi;
  unsigned i;
  bitmap e;
  tree retval, t;
  int used_strict_overflow;
  bool sop;
  value_range equiv_vr;

  /* Get the set of equivalences for VAR.  */
  e = get_value_range (var)->equiv;

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Compare vars' value range with val.  */
  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
  sop = false;
  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
  if (retval)
    used_strict_overflow = sop ? 1 : 0;

  /* If the equiv set is empty we have done all work we need to do.  */
  if (e == NULL)
    {
      if (retval
	  && used_strict_overflow > 0)
	*strict_overflow_p = true;
      return retval;
    }

  /* Otherwise repeat the comparison for every member of VAR's
     equivalence set, requiring all members to agree on the answer.  */
  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
    {
      tree name = ssa_name (i);
      /* The SSA version may have been released since the bit was set.  */
      if (! name)
	continue;

      /* When not allowed to use equivalences, skip names whose defining
	 statement may still be simulated again.  */
      if (! use_equiv_p
	  && ! SSA_NAME_IS_DEFAULT_DEF (name)
	  && prop_simulate_again_p (SSA_NAME_DEF_STMT (name)))
	continue;

      equiv_vr = get_vr_for_comparison (i);
      sop = false;
      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
      if (t)
	{
	  /* If we get different answers from different members
	     of the equivalence set this check must be in a dead
	     code region.  Folding it to a trap representation
	     would be correct here.  For now just return don't-know.  */
	  if (retval != NULL
	      && t != retval)
	    {
	      retval = NULL_TREE;
	      break;
	    }
	  retval = t;

	  if (!sop)
	    used_strict_overflow = 0;
	  else if (used_strict_overflow < 0)
	    used_strict_overflow = 1;
	}
    }

  if (retval
      && used_strict_overflow > 0)
    *strict_overflow_p = true;

  return retval;
}
+ /* Given a comparison code COMP and names N1 and N2, compare all the
+    ranges equivalent to N1 against all the ranges equivalent to N2
+    to determine the value of N1 COMP N2.  Return the same value
+    returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
+    whether we relied on undefined signed overflow in the comparison.  */
+ tree
+ vr_values::compare_names (enum tree_code comp, tree n1, tree n2,
+                         bool *strict_overflow_p)
+ {
+   tree t, retval;
+   bitmap e1, e2;
+   bitmap_iterator bi1, bi2;
+   unsigned i1, i2;
+   int used_strict_overflow;
+   /* Lazily-allocated empty scratch bitmaps, reused across calls,
+      that stand in when N1 or N2 has no equivalence set of its own.  */
+   static bitmap_obstack *s_obstack = NULL;
+   static bitmap s_e1 = NULL, s_e2 = NULL;
+   /* Compare the ranges of every name equivalent to N1 against the
+      ranges of every name equivalent to N2.  */
+   e1 = get_value_range (n1)->equiv;
+   e2 = get_value_range (n2)->equiv;
+   /* Use the fake bitmaps if e1 or e2 are not available.  */
+   if (s_obstack == NULL)
+     {
+       s_obstack = XNEW (bitmap_obstack);
+       bitmap_obstack_initialize (s_obstack);
+       s_e1 = BITMAP_ALLOC (s_obstack);
+       s_e2 = BITMAP_ALLOC (s_obstack);
+     }
+   if (e1 == NULL)
+     e1 = s_e1;
+   if (e2 == NULL)
+     e2 = s_e2;
+   /* Add N1 and N2 to their own set of equivalences to avoid
+      duplicating the body of the loop just to check N1 and N2
+      ranges.  */
+   bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
+   bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
+   /* If the equivalence sets have a common intersection, then the two
+      names can be compared without checking their ranges.  */
+   if (bitmap_intersect_p (e1, e2))
+     {
+       bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+       bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+       return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
+            ? boolean_true_node
+            : boolean_false_node;
+     }
+   /* Start at -1.  Set it to 0 if we do a comparison without relying
+      on overflow, or 1 if all comparisons rely on overflow.  */
+   used_strict_overflow = -1;
+   /* Otherwise, compare all the equivalent ranges.  First, add N1 and
+      N2 to their own set of equivalences to avoid duplicating the body
+      of the loop just to check N1 and N2 ranges.  */
+   EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
+     {
+       if (! ssa_name (i1))
+       continue;
+       value_range vr1 = get_vr_for_comparison (i1);
+       t = retval = NULL_TREE;
+       EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
+       {
+         if (! ssa_name (i2))
+           continue;
+         bool sop = false;
+         value_range vr2 = get_vr_for_comparison (i2);
+         t = compare_ranges (comp, &vr1, &vr2, &sop);
+         if (t)
+           {
+             /* If we get different answers from different members
+                of the equivalence set this check must be in a dead
+                code region.  Folding it to a trap representation
+                would be correct here.  For now just return don't-know.  */
+             if (retval != NULL
+                 && t != retval)
+               {
+                 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+                 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+                 return NULL_TREE;
+               }
+             retval = t;
+             if (!sop)
+               used_strict_overflow = 0;
+             else if (used_strict_overflow < 0)
+               used_strict_overflow = 1;
+           }
+       }
+       if (retval)
+       {
+         bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+         bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+         if (used_strict_overflow > 0)
+           *strict_overflow_p = true;
+         return retval;
+       }
+     }
+   /* None of the equivalent ranges are useful in computing this
+      comparison.  Undo the temporary N1/N2 membership added above.  */
+   bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+   bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+   return NULL_TREE;
+ }
+ /* Helper function for vrp_evaluate_conditional_warnv & other
+    optimizers.  Try to fold OP0 CODE OP1 using only the recorded
+    value ranges of the operands, trying range-vs-range first and
+    then each range against the other operand as a plain value.
+    Return NULL_TREE if the ranges are inconclusive.  */
+ tree
+ vr_values::vrp_evaluate_conditional_warnv_with_ops_using_ranges
+     (enum tree_code code, tree op0, tree op1, bool * strict_overflow_p)
+ {
+   value_range *vr0, *vr1;
+   /* Only SSA names have recorded ranges; other operands get NULL.  */
+   vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
+   vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
+   tree res = NULL_TREE;
+   if (vr0 && vr1)
+     res = compare_ranges (code, vr0, vr1, strict_overflow_p);
+   if (!res && vr0)
+     res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
+   /* Swap the comparison when VR1 is on the left-hand side.  */
+   if (!res && vr1)
+     res = (compare_range_with_value
+           (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
+   return res;
+ }
+ /* Helper function for vrp_evaluate_conditional_warnv.  Try to fold
+    the predicate OP0 CODE OP1.  If USE_EQUIV_P is true, equivalence
+    sets may be consulted as well (quadratic, so avoided during
+    propagation).  If ONLY_RANGES is non-NULL, *ONLY_RANGES is set to
+    false when more than the operands' own ranges was consulted.  */
+ tree
+ vr_values::vrp_evaluate_conditional_warnv_with_ops (enum tree_code code,
+                                                   tree op0, tree op1,
+                                                   bool use_equiv_p,
+                                                   bool *strict_overflow_p,
+                                                   bool *only_ranges)
+ {
+   tree ret;
+   if (only_ranges)
+     *only_ranges = true;
+   /* We only deal with integral and pointer types.  */
+   if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
+       && !POINTER_TYPE_P (TREE_TYPE (op0)))
+     return NULL_TREE;
+   /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
+      as a simple equality test, then prefer that over its current form
+      for evaluation.
+      An overflow test which collapses to an equality test can always be
+      expressed as a comparison of one argument against zero.  Overflow
+      occurs when the chosen argument is zero and does not occur if the
+      chosen argument is not zero.  */
+   tree x;
+   if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
+     {
+       wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
+       /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
+          B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
+          B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
+          B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */
+       if (integer_zerop (x))
+       {
+         op1 = x;
+         code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
+       }
+       /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
+          B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
+          B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
+          B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
+       else if (wi::to_wide (x) == max - 1)
+       {
+         op0 = op1;
+         op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
+         code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
+       }
+     }
+   if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
+              (code, op0, op1, strict_overflow_p)))
+     return ret;
+   /* Anything below needs more than just the operands' ranges.  */
+   if (only_ranges)
+     *only_ranges = false;
+   /* Do not use compare_names during propagation, it's quadratic.  */
+   if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
+       && use_equiv_p)
+     return compare_names (code, op0, op1, strict_overflow_p);
+   else if (TREE_CODE (op0) == SSA_NAME)
+     return compare_name_with_value (code, op0, op1,
+                                   strict_overflow_p, use_equiv_p);
+   else if (TREE_CODE (op1) == SSA_NAME)
+     return compare_name_with_value (swap_tree_comparison (code), op1, op0,
+                                   strict_overflow_p, use_equiv_p);
+   return NULL_TREE;
+ }
+ /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
+    information.  Return NULL if the conditional can not be evaluated.
+    The ranges of all the names equivalent with the operands in COND
+    will be used when trying to compute the value.  If the result is
+    based on undefined signed overflow, issue a warning if
+    appropriate.  */
+ tree
+ vr_values::vrp_evaluate_conditional (tree_code code, tree op0,
+                                    tree op1, gimple *stmt)
+ {
+   /* Whether the folding below relied on undefined signed overflow.  */
+   bool sop;
+   tree ret;
+   bool only_ranges;
+   /* Some passes and foldings leak constants with overflow flag set
+      into the IL.  Avoid doing wrong things with these and bail out.  */
+   if ((TREE_CODE (op0) == INTEGER_CST
+        && TREE_OVERFLOW (op0))
+       || (TREE_CODE (op1) == INTEGER_CST
+         && TREE_OVERFLOW (op1)))
+     return NULL_TREE;
+   sop = false;
+   ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
+                                                &only_ranges);
+   if (ret && sop)
+     {
+       enum warn_strict_overflow_code wc;
+       const char* warnmsg;
+       if (is_gimple_min_invariant (ret))
+       {
+         wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
+         warnmsg = G_("assuming signed overflow does not occur when "
+                      "simplifying conditional to constant");
+       }
+       else
+       {
+         wc = WARN_STRICT_OVERFLOW_COMPARISON;
+         warnmsg = G_("assuming signed overflow does not occur when "
+                      "simplifying conditional");
+       }
+       if (issue_strict_overflow_warning (wc))
+       {
+         location_t location;
+         if (!gimple_has_location (stmt))
+           location = input_location;
+         else
+           location = gimple_location (stmt);
+         warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
+       }
+     }
+   if (warn_type_limits
+       && ret && only_ranges
+       && TREE_CODE_CLASS (code) == tcc_comparison
+       && TREE_CODE (op0) == SSA_NAME)
+     {
+       /* If the comparison is being folded and the operand on the LHS
+        is being compared against a constant value that is outside of
+        the natural range of OP0's type, then the predicate will
+        always fold regardless of the value of OP0.  If -Wtype-limits
+        was specified, emit a warning.  */
+       tree type = TREE_TYPE (op0);
+       value_range *vr0 = get_value_range (op0);
+       if (vr0->type == VR_RANGE
+         && INTEGRAL_TYPE_P (type)
+         && vrp_val_is_min (vr0->min)
+         && vrp_val_is_max (vr0->max)
+         && is_gimple_min_invariant (op1))
+       {
+         location_t location;
+         if (!gimple_has_location (stmt))
+           location = input_location;
+         else
+           location = gimple_location (stmt);
+         warning_at (location, OPT_Wtype_limits,
+                     integer_zerop (ret)
+                     ? G_("comparison always false "
+                            "due to limited range of data type")
+                     : G_("comparison always true "
+                            "due to limited range of data type"));
+       }
+     }
+   return ret;
+ }
+ /* Visit conditional statement STMT.  If we can determine which edge
+    will be taken out of STMT's basic block, record it in
+    *TAKEN_EDGE_P.  Otherwise, set *TAKEN_EDGE_P to NULL.  */
+ void
+ vr_values::vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
+ {
+   tree val;
+   *taken_edge_p = NULL;
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       tree use;
+       ssa_op_iter i;
+       fprintf (dump_file, "\nVisiting conditional with predicate: ");
+       print_gimple_stmt (dump_file, stmt, 0);
+       fprintf (dump_file, "\nWith known ranges\n");
+       FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
+       {
+         fprintf (dump_file, "\t");
+         print_generic_expr (dump_file, use);
+         fprintf (dump_file, ": ");
+         dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
+       }
+       fprintf (dump_file, "\n");
+     }
+   /* Compute the value of the predicate COND by checking the known
+      ranges of each of its operands.
+      Note that we cannot evaluate all the equivalent ranges here
+      because those ranges may not yet be final and with the current
+      propagation strategy, we cannot determine when the value ranges
+      of the names in the equivalence set have changed.
+      For instance, given the following code fragment
+         i_5 = PHI <8, i_13>
+       ...
+       i_14 = ASSERT_EXPR <i_5, i_5 != 0>
+       if (i_14 == 1)
+         ...
+      Assume that on the first visit to i_14, i_5 has the temporary
+      range [8, 8] because the second argument to the PHI function is
+      not yet executable.  We derive the range ~[0, 0] for i_14 and the
+      equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
+      the first time, since i_14 is equivalent to the range [8, 8], we
+      determine that the predicate is always false.
+      On the next round of propagation, i_13 is determined to be
+      VARYING, which causes i_5 to drop down to VARYING.  So, another
+      visit to i_14 is scheduled.  In this second visit, we compute the
+      exact same range and equivalence set for i_14, namely ~[0, 0] and
+      { i_5 }.  But we did not have the previous range for i_5
+      registered, so vrp_visit_assignment thinks that the range for
+      i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
+      is not visited again, which stops propagation from visiting
+      statements in the THEN clause of that if().
+      To properly fix this we would need to keep the previous range
+      value for the names in the equivalence set.  This way we would've
+      discovered that from one visit to the other i_5 changed from
+      range [8, 8] to VR_VARYING.
+      However, fixing this apparent limitation may not be worth the
+      additional checking.  Testing on several code bases (GCC, DLV,
+      MICO, TRAMP3D and SPEC2000) showed that doing this results in
+      4 more predicates folded in SPEC.  */
+   bool sop;
+   /* Evaluate with ranges only (USE_EQUIV_P == false); see the
+      comment above for why equivalences are not consulted here.  */
+   val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
+                                                gimple_cond_lhs (stmt),
+                                                gimple_cond_rhs (stmt),
+                                                false, &sop, NULL);
+   if (val)
+     *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "\nPredicate evaluates to: ");
+       if (val == NULL_TREE)
+       fprintf (dump_file, "DON'T KNOW\n");
+       else
+       print_generic_stmt (dump_file, val);
+     }
+ }
+ /* Searches the case label vector VEC for the ranges of CASE_LABELs that are
+    used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
+    MAX_IDX2.  If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
+    Returns true if the default label is not needed.  */
+ static bool
+ find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
+                       size_t *max_idx1, size_t *min_idx2,
+                       size_t *max_idx2)
+ {
+   size_t i, j, k, l;
+   unsigned int n = gimple_switch_num_labels (stmt);
+   bool take_default;
+   tree case_low, case_high;
+   tree min = vr->min, max = vr->max;
+   gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
+   take_default = !find_case_label_range (stmt, min, max, &i, &j);
+   /* Set second range to empty.  */
+   *min_idx2 = 1;
+   *max_idx2 = 0;
+   if (vr->type == VR_RANGE)
+     {
+       *min_idx1 = i;
+       *max_idx1 = j;
+       return !take_default;
+     }
+   /* Set first range to all case labels.  */
+   *min_idx1 = 1;
+   *max_idx1 = n - 1;
+   if (i > j)
+     return false;
+   /* Make sure all the values of case labels [i , j] are contained in
+      range [MIN, MAX].  */
+   case_low = CASE_LOW (gimple_switch_label (stmt, i));
+   case_high = CASE_HIGH (gimple_switch_label (stmt, j));
+   if (tree_int_cst_compare (case_low, min) < 0)
+     i += 1;
+   if (case_high != NULL_TREE
+       && tree_int_cst_compare (max, case_high) < 0)
+     j -= 1;
+   if (i > j)
+     return false;
+   /* If the range spans case labels [i, j], the corresponding anti-range spans
+      the labels [1, i - 1] and [j + 1, n -  1].  */
+   k = j + 1;
+   l = n - 1;
+   /* Mark the upper sub-range [j + 1, n - 1] empty (k > l) if there
+      are no labels above J.  */
+   if (k > l)
+     {
+       k = 1;
+       l = 0;
+     }
+   j = i - 1;
+   i = 1;
+   /* If the lower sub-range [1, i - 1] is empty, promote the upper
+      sub-range into the first slot and mark the second one empty.  */
+   if (i > j)
+     {
+       i = k;
+       j = l;
+       k = 1;
+       l = 0;
+     }
+   *min_idx1 = i;
+   *max_idx1 = j;
+   *min_idx2 = k;
+   *max_idx2 = l;
+   return false;
+ }
+ /* Visit switch statement STMT.  If we can determine which edge
+    will be taken out of STMT's basic block, record it in
+    *TAKEN_EDGE_P.  Otherwise, *TAKEN_EDGE_P is set to NULL.  */
+ void
+ vr_values::vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
+ {
+   tree op, val;
+   value_range *vr;
+   size_t i = 0, j = 0, k, l;
+   bool take_default;
+   *taken_edge_p = NULL;
+   op = gimple_switch_index (stmt);
+   if (TREE_CODE (op) != SSA_NAME)
+     return;
+   vr = get_value_range (op);
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "\nVisiting switch expression with operand ");
+       print_generic_expr (dump_file, op);
+       fprintf (dump_file, " with known range ");
+       dump_value_range (dump_file, vr);
+       fprintf (dump_file, "\n");
+     }
+   if ((vr->type != VR_RANGE
+        && vr->type != VR_ANTI_RANGE)
+       || symbolic_range_p (vr))
+     return;
+   /* Find the single edge that is taken from the switch expression.  */
+   take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
+   /* Check if the range spans no CASE_LABEL. If so, we only reach the default
+      label */
+   if (j < i)
+     {
+       gcc_assert (take_default);
+       val = gimple_switch_default_label (stmt);
+     }
+   else
+     {
+       /* Check if labels with index i to j and maybe the default label
+        are all reaching the same label.  */
+       val = gimple_switch_label (stmt, i);
+       if (take_default
+         && CASE_LABEL (gimple_switch_default_label (stmt))
+         != CASE_LABEL (val))
+       {
+         if (dump_file && (dump_flags & TDF_DETAILS))
+           fprintf (dump_file, "  not a single destination for this "
+                    "range\n");
+         return;
+       }
+       for (++i; i <= j; ++i)
+         {
+           if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
+           {
+             if (dump_file && (dump_flags & TDF_DETAILS))
+               fprintf (dump_file, "  not a single destination for this "
+                        "range\n");
+             return;
+           }
+         }
+       /* Likewise for the second sub-range [k, l] of case labels
+        computed by find_case_label_ranges.  */
+       for (; k <= l; ++k)
+         {
+           if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
+           {
+             if (dump_file && (dump_flags & TDF_DETAILS))
+               fprintf (dump_file, "  not a single destination for this "
+                        "range\n");
+             return;
+           }
+         }
+     }
+   *taken_edge_p = find_edge (gimple_bb (stmt),
+                            label_to_block (CASE_LABEL (val)));
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "  will take edge to ");
+       print_generic_stmt (dump_file, CASE_LABEL (val));
+     }
+ }
+ /* Evaluate statement STMT.  If the statement produces a useful range,
+    set VR and corresponding OUTPUT_P.
+    If STMT is a conditional branch and we can determine its truth
+    value, the taken edge is recorded in *TAKEN_EDGE_P.  */
+ void
+ vr_values::extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
+                                   tree *output_p, value_range *vr)
+ {
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "\nVisiting statement:\n");
+       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
+     }
+   /* Statements not interesting for VRP are only expected here when
+      they end a basic block.  */
+   if (!stmt_interesting_for_vrp (stmt))
+     gcc_assert (stmt_ends_bb_p (stmt));
+   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
+     vrp_visit_assignment_or_call (stmt, output_p, vr);
+   else if (gimple_code (stmt) == GIMPLE_COND)
+     vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
+   else if (gimple_code (stmt) == GIMPLE_SWITCH)
+     vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
+ }
+ /* Visit all arguments for PHI node PHI that flow through executable
+    edges.  If a valid value range can be derived from all the incoming
+    value ranges, set a new range in VR_RESULT.  */
+ void
+ vr_values::extract_range_from_phi_node (gphi *phi, value_range *vr_result)
+ {
+   size_t i;
+   tree lhs = PHI_RESULT (phi);
+   value_range *lhs_vr = get_value_range (lhs);
+   bool first = true;
+   int edges, old_edges;
+   struct loop *l;
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "\nVisiting PHI node: ");
+       print_gimple_stmt (dump_file, phi, 0, dump_flags);
+     }
+   /* Set when a backedge argument's definition will be simulated
+      again, i.e. this PHI will be revisited via the backedge.  */
+   bool may_simulate_backedge_again = false;
+   edges = 0;
+   for (i = 0; i < gimple_phi_num_args (phi); i++)
+     {
+       edge e = gimple_phi_arg_edge (phi, i);
+       if (dump_file && (dump_flags & TDF_DETAILS))
+       {
+         fprintf (dump_file,
+             "    Argument #%d (%d -> %d %sexecutable)\n",
+             (int) i, e->src->index, e->dest->index,
+             (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
+       }
+       if (e->flags & EDGE_EXECUTABLE)
+       {
+         tree arg = PHI_ARG_DEF (phi, i);
+         value_range vr_arg;
+         ++edges;
+         if (TREE_CODE (arg) == SSA_NAME)
+           {
+             /* See if we are eventually going to change one of the args.  */
+             gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
+             if (! gimple_nop_p (def_stmt)
+                 && prop_simulate_again_p (def_stmt)
+                 && e->flags & EDGE_DFS_BACK)
+               may_simulate_backedge_again = true;
+             vr_arg = *(get_value_range (arg));
+             /* Do not allow equivalences or symbolic ranges to leak in from
+                backedges.  That creates invalid equivalencies.
+                See PR53465 and PR54767.  */
+             if (e->flags & EDGE_DFS_BACK)
+               {
+                 if (vr_arg.type == VR_RANGE
+                     || vr_arg.type == VR_ANTI_RANGE)
+                   {
+                     vr_arg.equiv = NULL;
+                     if (symbolic_range_p (&vr_arg))
+                       {
+                         vr_arg.type = VR_VARYING;
+                         vr_arg.min = NULL_TREE;
+                         vr_arg.max = NULL_TREE;
+                       }
+                   }
+               }
+             else
+               {
+                 /* If the non-backedge arguments range is VR_VARYING then
+                    we can still try recording a simple equivalence.  */
+                 if (vr_arg.type == VR_VARYING)
+                   {
+                     vr_arg.type = VR_RANGE;
+                     vr_arg.min = arg;
+                     vr_arg.max = arg;
+                     vr_arg.equiv = NULL;
+                   }
+               }
+           }
+         else
+           {
+             if (TREE_OVERFLOW_P (arg))
+               arg = drop_tree_overflow (arg);
+             vr_arg.type = VR_RANGE;
+             vr_arg.min = arg;
+             vr_arg.max = arg;
+             vr_arg.equiv = NULL;
+           }
+         if (dump_file && (dump_flags & TDF_DETAILS))
+           {
+             fprintf (dump_file, "\t");
+             print_generic_expr (dump_file, arg, dump_flags);
+             fprintf (dump_file, ": ");
+             dump_value_range (dump_file, &vr_arg);
+             fprintf (dump_file, "\n");
+           }
+         /* Meet this argument's range with the accumulated result.  */
+         if (first)
+           copy_value_range (vr_result, &vr_arg);
+         else
+           vrp_meet (vr_result, &vr_arg);
+         first = false;
+         if (vr_result->type == VR_VARYING)
+           break;
+       }
+     }
+   if (vr_result->type == VR_VARYING)
+     goto varying;
+   else if (vr_result->type == VR_UNDEFINED)
+     goto update_range;
+   old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
+   vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
+   /* To prevent infinite iterations in the algorithm, derive ranges
+      when the new value is slightly bigger or smaller than the
+      previous one.  We don't do this if we have seen a new executable
+      edge; this helps us avoid an infinity for conditionals
+      which are not in a loop.  If the old value-range was VR_UNDEFINED
+      use the updated range and iterate one more time.  If we will not
+      simulate this PHI again via the backedge allow us to iterate.  */
+   if (edges > 0
+       && gimple_phi_num_args (phi) > 1
+       && edges == old_edges
+       && lhs_vr->type != VR_UNDEFINED
+       && may_simulate_backedge_again)
+     {
+       /* Compare old and new ranges, fall back to varying if the
+          values are not comparable.  */
+       int cmp_min = compare_values (lhs_vr->min, vr_result->min);
+       if (cmp_min == -2)
+       goto varying;
+       int cmp_max = compare_values (lhs_vr->max, vr_result->max);
+       if (cmp_max == -2)
+       goto varying;
+       /* For non VR_RANGE or for pointers fall back to varying if
+        the range changed.  */
+       if ((lhs_vr->type != VR_RANGE || vr_result->type != VR_RANGE
+          || POINTER_TYPE_P (TREE_TYPE (lhs)))
+         && (cmp_min != 0 || cmp_max != 0))
+       goto varying;
+       /* If the new minimum is larger than the previous one
+        retain the old value.  If the new minimum value is smaller
+        than the previous one and not -INF go all the way to -INF + 1.
+        In the first case, to avoid infinite bouncing between different
+        minimums, and in the other case to avoid iterating millions of
+        times to reach -INF.  Going to -INF + 1 also lets the following
+        iteration compute whether there will be any overflow, at the
+        expense of one additional iteration.  */
+       if (cmp_min < 0)
+       vr_result->min = lhs_vr->min;
+       else if (cmp_min > 0
+              && !vrp_val_is_min (vr_result->min))
+       vr_result->min
+         = int_const_binop (PLUS_EXPR,
+                            vrp_val_min (TREE_TYPE (vr_result->min)),
+                            build_int_cst (TREE_TYPE (vr_result->min), 1));
+       /* Similarly for the maximum value.  */
+       if (cmp_max > 0)
+       vr_result->max = lhs_vr->max;
+       else if (cmp_max < 0
+              && !vrp_val_is_max (vr_result->max))
+       vr_result->max
+         = int_const_binop (MINUS_EXPR,
+                            vrp_val_max (TREE_TYPE (vr_result->min)),
+                            build_int_cst (TREE_TYPE (vr_result->min), 1));
+       /* If we dropped either bound to +-INF then if this is a loop
+        PHI node SCEV may know more about its value-range.  */
+       if (cmp_min > 0 || cmp_min < 0
+          || cmp_max < 0 || cmp_max > 0)
+       goto scev_check;
+       goto infinite_check;
+     }
+   goto update_range;
+ varying:
+   set_value_range_to_varying (vr_result);
+ scev_check:
+   /* If this is a loop PHI node SCEV may know more about its value-range.
+      scev_check can be reached from two paths, one is a fall through from above
+      "varying" label, the other is direct goto from code block which tries to
+      avoid infinite simulation.  */
+   if (scev_initialized_p ()
+       && (l = loop_containing_stmt (phi))
+       && l->header == gimple_bb (phi))
+     adjust_range_with_scev (vr_result, l, phi, lhs);
+ infinite_check:
+   /* If we will end up with a (-INF, +INF) range, set it to
+      VARYING.  Same if the previous max value was invalid for
+      the type and we end up with vr_result.min > vr_result.max.  */
+   if ((vr_result->type == VR_RANGE || vr_result->type == VR_ANTI_RANGE)
+       && !((vrp_val_is_max (vr_result->max) && vrp_val_is_min (vr_result->min))
+          || compare_values (vr_result->min, vr_result->max) > 0))
+     ;
+   else
+     set_value_range_to_varying (vr_result);
+   /* If the new range is different than the previous value, keep
+      iterating.  */
+ update_range:
+   return;
+ }
+ /* Simplify boolean operations if the source is known
+    to be already a boolean.  */
+ bool
+ vr_values::simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi,
+                                           gimple *stmt)
+ {
+   enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
+   tree lhs, op0, op1;
+   bool need_conversion;
+   /* We handle only !=/== case here.  */
+   gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
+   op0 = gimple_assign_rhs1 (stmt);
+   if (!op_with_boolean_value_range_p (op0))
+     return false;
+   op1 = gimple_assign_rhs2 (stmt);
+   if (!op_with_boolean_value_range_p (op1))
+     return false;
+   /* Reduce number of cases to handle to NE_EXPR.  As there is no
+      BIT_XNOR_EXPR we cannot replace A == B with a single statement.  */
+   if (rhs_code == EQ_EXPR)
+     {
+       if (TREE_CODE (op1) == INTEGER_CST)
+       op1 = int_const_binop (BIT_XOR_EXPR, op1,
+                              build_int_cst (TREE_TYPE (op1), 1))
+       else
+       return false;
+     }
+   lhs = gimple_assign_lhs (stmt);
+   need_conversion
+     = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
+   /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
+   if (need_conversion
+       && !TYPE_UNSIGNED (TREE_TYPE (op0))
+       && TYPE_PRECISION (TREE_TYPE (op0)) == 1
+       && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
+     return false;
+   /* For A != 0 we can substitute A itself.  */
+   if (integer_zerop (op1))
+     gimple_assign_set_rhs_with_ops (gsi,
+                                   need_conversion
+                                   ? NOP_EXPR : TREE_CODE (op0), op0);
+   /* For A != B we substitute A ^ B.  Either with conversion.  */
+   else if (need_conversion)
+     {
+       tree tem = make_ssa_name (TREE_TYPE (op0));
+       gassign *newop
+       = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
+       gsi_insert_before (gsi, newop, GSI_SAME_STMT);
+       /* Record that the XOR of two booleans is itself in [0, 1].  */
+       if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
+         && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
+       set_range_info (tem, VR_RANGE,
+                       wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
+                       wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
+       gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
+     }
+   /* Or without.  */
+   else
+     gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
+   update_stmt (gsi_stmt (*gsi));
+   fold_stmt (gsi, follow_single_use_edges);
+   return true;
+ }
/* Simplify a division or modulo operator to a right shift or bitwise and
   if the first operand is unsigned or is greater than zero and the second
   operand is an exact power of two.  For TRUNC_MOD_EXPR op0 % op1 with
   constant op1 (op1min = op1) or with op1 in [op1min, op1max] range,
   optimize it into just op0 if op0's range is known to be a subset of
   [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned
   modulo.

   GSI points at STMT so the statement can be rewritten in place.
   Returns true if a simplification was made.  */

bool
vr_values::simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi,
					     gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree val = NULL;
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  tree op0min = NULL_TREE, op0max = NULL_TREE;
  tree op1min = op1;
  value_range *vr = NULL;

  /* Determine [op0min, op0max], either from the constant itself or from
     the recorded value range of the SSA name.  */
  if (TREE_CODE (op0) == INTEGER_CST)
    {
      op0min = op0;
      op0max = op0;
    }
  else
    {
      vr = get_value_range (op0);
      if (range_int_cst_p (vr))
	{
	  op0min = vr->min;
	  op0max = vr->max;
	}
    }

  /* For modulo by an SSA name, fall back to the low bound of its range
     as a conservative stand-in for the divisor.  */
  if (rhs_code == TRUNC_MOD_EXPR
      && TREE_CODE (op1) == SSA_NAME)
    {
      value_range *vr1 = get_value_range (op1);
      if (range_int_cst_p (vr1))
	op1min = vr1->min;
    }

  /* op0 % op1 is just op0 when op0 is already within the range the
     modulo would produce (see the header comment above).  */
  if (rhs_code == TRUNC_MOD_EXPR
      && TREE_CODE (op1min) == INTEGER_CST
      && tree_int_cst_sgn (op1min) == 1
      && op0max
      && tree_int_cst_lt (op0max, op1min))
    {
      if (TYPE_UNSIGNED (TREE_TYPE (op0))
	  || tree_int_cst_sgn (op0min) >= 0
	  || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min),
			      op0min))
	{
	  /* If op0 already has the range op0 % op1 has,
	     then TRUNC_MOD_EXPR won't change anything.  */
	  gimple_assign_set_rhs_from_tree (gsi, op0);
	  return true;
	}
    }

  if (TREE_CODE (op0) != SSA_NAME)
    return false;

  if (!integer_pow2p (op1))
    {
      /* X % -Y can be only optimized into X % Y either if
	 X is not INT_MIN, or Y is not -1.  Fold it now, as after
	 remove_range_assertions the range info might be not available
	 anymore.  */
      if (rhs_code == TRUNC_MOD_EXPR
	  && fold_stmt (gsi, follow_single_use_edges))
	return true;
      return false;
    }

  /* The power-of-two transforms below require op0 to be non-negative;
     that is trivially true for unsigned op0, otherwise consult the
     recorded range (possibly warning about relying on undefined signed
     overflow).  */
  if (TYPE_UNSIGNED (TREE_TYPE (op0)))
    val = integer_one_node;
  else
    {
      bool sop = false;

      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);

      if (val
	  && sop
	  && integer_onep (val)
	  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow,
		      "assuming signed overflow does not occur when "
		      "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
	}
    }

  if (val && integer_onep (val))
    {
      tree t;

      if (rhs_code == TRUNC_DIV_EXPR)
	{
	  /* X / 2**N becomes X >> N.  */
	  t = build_int_cst (integer_type_node, tree_log2 (op1));
	  gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}
      else
	{
	  /* X % 2**N becomes X & (2**N - 1).  */
	  t = build_int_cst (TREE_TYPE (op1), 1);
	  t = int_const_binop (MINUS_EXPR, op1, t);
	  t = fold_convert (TREE_TYPE (op0), t);

	  gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}

      update_stmt (stmt);
      fold_stmt (gsi, follow_single_use_edges);
      return true;
    }

  return false;
}
+ /* Simplify a min or max if the ranges of the two operands are
+    disjoint.   Return true if we do simplify.  */
+ bool
+ vr_values::simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi,
+                                            gimple *stmt)
+ {
+   tree op0 = gimple_assign_rhs1 (stmt);
+   tree op1 = gimple_assign_rhs2 (stmt);
+   bool sop = false;
+   tree val;
+   val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
+        (LE_EXPR, op0, op1, &sop));
+   if (!val)
+     {
+       sop = false;
+       val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
+            (LT_EXPR, op0, op1, &sop));
+     }
+   if (val)
+     {
+       if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+       {
+         location_t location;
+         if (!gimple_has_location (stmt))
+           location = input_location;
+         else
+           location = gimple_location (stmt);
+         warning_at (location, OPT_Wstrict_overflow,
+                     "assuming signed overflow does not occur when "
+                     "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
+       }
+       /* VAL == TRUE -> OP0 < or <= op1
+        VAL == FALSE -> OP0 > or >= op1.  */
+       tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
+                 == integer_zerop (val)) ? op0 : op1;
+       gimple_assign_set_rhs_from_tree (gsi, res);
+       return true;
+     }
+   return false;
+ }
/* If the operand to an ABS_EXPR is >= 0, then eliminate the
   ABS_EXPR.  If the operand is <= 0, then simplify the
   ABS_EXPR into a NEGATE_EXPR.  Returns true if STMT was changed.  */

bool
vr_values::simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
{
  tree op = gimple_assign_rhs1 (stmt);
  value_range *vr = get_value_range (op);

  if (vr)
    {
      tree val = NULL;
      bool sop = false;

      /* First ask whether OP <= 0 is decidable from the range.  */
      val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
      if (!val)
	{
	  /* The range is neither <= 0 nor > 0.  Now see if it is
	     either < 0 or >= 0.  */
	  sop = false;
	  val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
					  &sop);
	}

      if (val)
	{
	  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	    {
	      location_t location;

	      if (!gimple_has_location (stmt))
		location = input_location;
	      else
		location = gimple_location (stmt);
	      warning_at (location, OPT_Wstrict_overflow,
			  "assuming signed overflow does not occur when "
			  "simplifying %<abs (X)%> to %<X%> or %<-X%>");
	    }

	  gimple_assign_set_rhs1 (stmt, op);
	  /* VAL false means OP is known non-negative: turn the statement
	     into a plain copy by using the SSA_NAME rhs code; otherwise
	     OP is known non-positive and ABS becomes a negation.  */
	  if (integer_zerop (val))
	    gimple_assign_set_rhs_code (stmt, SSA_NAME);
	  else
	    gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
	  update_stmt (stmt);
	  fold_stmt (gsi, follow_single_use_edges);
	  return true;
	}
    }

  return false;
}
+ /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
+    If all the bits that are being cleared by & are already
+    known to be zero from VR, or all the bits that are being
+    set by | are already known to be one from VR, the bit
+    operation is redundant.  */
+ bool
+ vr_values::simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi,
+                                         gimple *stmt)
+ {
+   tree op0 = gimple_assign_rhs1 (stmt);
+   tree op1 = gimple_assign_rhs2 (stmt);
+   tree op = NULL_TREE;
+   value_range vr0 = VR_INITIALIZER;
+   value_range vr1 = VR_INITIALIZER;
+   wide_int may_be_nonzero0, may_be_nonzero1;
+   wide_int must_be_nonzero0, must_be_nonzero1;
+   wide_int mask;
+   if (TREE_CODE (op0) == SSA_NAME)
+     vr0 = *(get_value_range (op0));
+   else if (is_gimple_min_invariant (op0))
+     set_value_range_to_value (&vr0, op0, NULL);
+   else
+     return false;
+   if (TREE_CODE (op1) == SSA_NAME)
+     vr1 = *(get_value_range (op1));
+   else if (is_gimple_min_invariant (op1))
+     set_value_range_to_value (&vr1, op1, NULL);
+   else
+     return false;
+   if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
+                                 &must_be_nonzero0))
+     return false;
+   if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
+                                 &must_be_nonzero1))
+     return false;
+   switch (gimple_assign_rhs_code (stmt))
+     {
+     case BIT_AND_EXPR:
+       mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
+       if (mask == 0)
+       {
+         op = op0;
+         break;
+       }
+       mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
+       if (mask == 0)
+       {
+         op = op1;
+         break;
+       }
+       break;
+     case BIT_IOR_EXPR:
+       mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
+       if (mask == 0)
+       {
+         op = op1;
+         break;
+       }
+       mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
+       if (mask == 0)
+       {
+         op = op0;
+         break;
+       }
+       break;
+     default:
+       gcc_unreachable ();
+     }
+   if (op == NULL_TREE)
+     return false;
+   gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
+   update_stmt (gsi_stmt (*gsi));
+   return true;
+ }
+ /* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
+    a known value range VR.
+    If there is one and only one value which will satisfy the
+    conditional, then return that value.  Else return NULL.
+    If signed overflow must be undefined for the value to satisfy
+    the conditional, then set *STRICT_OVERFLOW_P to true.  */
+ static tree
+ test_for_singularity (enum tree_code cond_code, tree op0,
+                     tree op1, value_range *vr)
+ {
+   tree min = NULL;
+   tree max = NULL;
+   /* Extract minimum/maximum values which satisfy the conditional as it was
+      written.  */
+   if (cond_code == LE_EXPR || cond_code == LT_EXPR)
+     {
+       min = TYPE_MIN_VALUE (TREE_TYPE (op0));
+       max = op1;
+       if (cond_code == LT_EXPR)
+       {
+         tree one = build_int_cst (TREE_TYPE (op0), 1);
+         max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
+         /* Signal to compare_values_warnv this expr doesn't overflow.  */
+         if (EXPR_P (max))
+           TREE_NO_WARNING (max) = 1;
+       }
+     }
+   else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
+     {
+       max = TYPE_MAX_VALUE (TREE_TYPE (op0));
+       min = op1;
+       if (cond_code == GT_EXPR)
+       {
+         tree one = build_int_cst (TREE_TYPE (op0), 1);
+         min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
+         /* Signal to compare_values_warnv this expr doesn't overflow.  */
+         if (EXPR_P (min))
+           TREE_NO_WARNING (min) = 1;
+       }
+     }
+   /* Now refine the minimum and maximum values using any
+      value range information we have for op0.  */
+   if (min && max)
+     {
+       if (compare_values (vr->min, min) == 1)
+       min = vr->min;
+       if (compare_values (vr->max, max) == -1)
+       max = vr->max;
+       /* If the new min/max values have converged to a single value,
+        then there is only one value which can satisfy the condition,
+        return that value.  */
+       if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
+       return min;
+     }
+   return NULL;
+ }
+ /* Return whether the value range *VR fits in an integer type specified
+    by PRECISION and UNSIGNED_P.  */
+ static bool
+ range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
+ {
+   tree src_type;
+   unsigned src_precision;
+   widest_int tem;
+   signop src_sgn;
+   /* We can only handle integral and pointer types.  */
+   src_type = TREE_TYPE (vr->min);
+   if (!INTEGRAL_TYPE_P (src_type)
+       && !POINTER_TYPE_P (src_type))
+     return false;
+   /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
+      and so is an identity transform.  */
+   src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
+   src_sgn = TYPE_SIGN (src_type);
+   if ((src_precision < dest_precision
+        && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
+       || (src_precision == dest_precision && src_sgn == dest_sgn))
+     return true;
+   /* Now we can only handle ranges with constant bounds.  */
+   if (vr->type != VR_RANGE
+       || TREE_CODE (vr->min) != INTEGER_CST
+       || TREE_CODE (vr->max) != INTEGER_CST)
+     return false;
+   /* For sign changes, the MSB of the wide_int has to be clear.
+      An unsigned value with its MSB set cannot be represented by
+      a signed wide_int, while a negative value cannot be represented
+      by an unsigned wide_int.  */
+   if (src_sgn != dest_sgn
+       && (wi::lts_p (wi::to_wide (vr->min), 0)
+         || wi::lts_p (wi::to_wide (vr->max), 0)))
+     return false;
+   /* Then we can perform the conversion on both ends and compare
+      the result for equality.  */
+   tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
+   if (tem != wi::to_widest (vr->min))
+     return false;
+   tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
+   if (tem != wi::to_widest (vr->max))
+     return false;
+   return true;
+ }
/* Simplify a conditional using a relational operator to an equality
   test if the range information indicates only one value can satisfy
   the original conditional.  Returns true if STMT was changed.  */

bool
vr_values::simplify_cond_using_ranges_1 (gcond *stmt)
{
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);
  enum tree_code cond_code = gimple_cond_code (stmt);

  if (cond_code != NE_EXPR
      && cond_code != EQ_EXPR
      && TREE_CODE (op0) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && is_gimple_min_invariant (op1))
    {
      value_range *vr = get_value_range (op0);

      /* If we have range information for OP0, then we might be
	 able to simplify this conditional. */
      if (vr->type == VR_RANGE)
	{
	  /* If exactly one value satisfies OP0 COND_CODE OP1, replace
	     the relational test with OP0 == that value.  */
	  tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0);
		  fprintf (dump_file, " into ");
		}

	      gimple_cond_set_code (stmt, EQ_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }

	  /* Try again after inverting the condition.  We only deal
	     with integral types here, so no need to worry about
	     issues with inverting FP comparisons.  */
	  new_tree = test_for_singularity
		       (invert_tree_comparison (cond_code, false),
			op0, op1, vr);
	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0);
		  fprintf (dump_file, " into ");
		}

	      /* The inverted condition has a unique value, so the
		 original is OP0 != that value.  */
	      gimple_cond_set_code (stmt, NE_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }
	}
    }

  return false;
}
/* STMT is a conditional at the end of a basic block.

   If the conditional is of the form SSA_NAME op constant and the SSA_NAME
   was set via a type conversion, try to replace the SSA_NAME with the RHS
   of the type conversion.  Doing so makes the conversion dead which helps
   subsequent passes.  */

void
vr_values::simplify_cond_using_ranges_2 (gcond *stmt)
{
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);

  /* If we have a comparison of an SSA_NAME (OP0) against a constant,
     see if OP0 was set by a type conversion where the source of
     the conversion is another SSA_NAME with a range that fits
     into the range of OP0's type.

     If so, the conversion is redundant as the earlier SSA_NAME can be
     used for the comparison directly if we just massage the constant in the
     comparison.  */
  if (TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == INTEGER_CST)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
      tree innerop;

      if (!is_gimple_assign (def_stmt)
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
	return;

      innerop = gimple_assign_rhs1 (def_stmt);

      if (TREE_CODE (innerop) == SSA_NAME
	  && !POINTER_TYPE_P (TREE_TYPE (innerop))
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
	  && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
	{
	  value_range *vr = get_value_range (innerop);

	  /* The rewrite is only valid if INNEROP's whole range fits
	     in OP0's type and the constant fits INNEROP's type.  */
	  if (range_int_cst_p (vr)
	      && range_fits_type_p (vr,
				    TYPE_PRECISION (TREE_TYPE (op0)),
				    TYPE_SIGN (TREE_TYPE (op0)))
	      && int_fits_type_p (op1, TREE_TYPE (innerop)))
	    {
	      tree newconst = fold_convert (TREE_TYPE (innerop), op1);
	      gimple_cond_set_lhs (stmt, innerop);
	      gimple_cond_set_rhs (stmt, newconst);
	      update_stmt (stmt);
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "Folded into: ");
		  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
		  fprintf (dump_file, "\n");
		}
	    }
	}
    }
}
/* Simplify a switch statement using the value range of the switch
   argument.  Case labels are truncated or dropped to match the known
   range; edge removals and the statement update itself are queued
   (to_remove_edges / to_update_switch_stmts) rather than done here.
   Always returns false — the actual CFG/stmt changes happen later.  */

bool
vr_values::simplify_switch_using_ranges (gswitch *stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range *vr = NULL;
  bool take_default;
  edge e;
  edge_iterator ei;
  /* [i, j] and [k, l] are label-index intervals filled in by
     find_case_label_ranges — assumed to be the labels intersecting the
     (anti-)range; TODO confirm against that helper.  */
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if ((vr->type != VR_RANGE
	   && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  /* Empty interval: i > j so the copy loops below do nothing.  */
	  i = 1;
	  j = 0;
	}
      else
	{
	  /* Exactly one matching label.  */
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* We can truncate the case label ranges that partially overlap with OP's
     value range.  */
  size_t min_idx = 1, max_idx = 0;
  if (vr != NULL)
    find_case_label_range (stmt, vr->min, vr->max, &min_idx, &max_idx);
  if (min_idx <= max_idx)
    {
      tree min_label = gimple_switch_label (stmt, min_idx);
      tree max_label = gimple_switch_label (stmt, max_idx);

      /* Avoid changing the type of the case labels when truncating.  */
      tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
      tree vr_min = fold_convert (case_label_type, vr->min);
      tree vr_max = fold_convert (case_label_type, vr->max);

      if (vr->type == VR_RANGE)
	{
	  /* If OP's value range is [2,8] and the low label range is
	     0 ... 3, truncate the label's range to 2 .. 3.  */
	  if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
	      && CASE_HIGH (min_label) != NULL_TREE
	      && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
	    CASE_LOW (min_label) = vr_min;

	  /* If OP's value range is [2,8] and the high label range is
	     7 ... 10, truncate the label's range to 7 .. 8.  */
	  if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
	      && CASE_HIGH (max_label) != NULL_TREE
	      && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
	    CASE_HIGH (max_label) = vr_max;
	}
      else if (vr->type == VR_ANTI_RANGE)
	{
	  tree one_cst = build_one_cst (case_label_type);

	  if (min_label == max_label)
	    {
	      /* If OP's value range is ~[7,8] and the label's range is
		 7 ... 10, truncate the label's range to 9 ... 10.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
		CASE_LOW (min_label)
		  = int_const_binop (PLUS_EXPR, vr_max, one_cst);

	      /* If OP's value range is ~[7,8] and the label's range is
		 5 ... 8, truncate the label's range to 5 ... 6.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
		CASE_HIGH (min_label)
		  = int_const_binop (MINUS_EXPR, vr_min, one_cst);
	    }
	  else
	    {
	      /* If OP's value range is ~[2,8] and the low label range is
		 0 ... 3, truncate the label's range to 0 ... 1.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
		CASE_HIGH (min_label)
		  = int_const_binop (MINUS_EXPR, vr_min, one_cst);

	      /* If OP's value range is ~[2,8] and the high label range is
		 7 ... 10, truncate the label's range to 9 ... 10.  */
	      if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
		  && CASE_HIGH (max_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
		CASE_LOW (max_label)
		  = int_const_binop (PLUS_EXPR, vr_max, one_cst);
	    }
	}

      /* Canonicalize singleton case ranges.  */
      if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
	CASE_HIGH (min_label) = NULL_TREE;
      if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
	CASE_HIGH (max_label) = NULL_TREE;
    }

  /* We can also eliminate case labels that lie completely outside OP's value
     range.  */

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);

  /* Mark needed edges.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      /* Sentinel distinguishing "kept" edges in the scan below.  */
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  e->aux = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "removing unreachable case label\n");
	}
      to_remove_edges.safe_push (e);
      e->flags &= ~EDGE_EXECUTABLE;
    }

  /* And queue an update for the stmt.  */
  su.stmt = stmt;
  su.vec = vec2;
  to_update_switch_stmts.safe_push (su);
  return false;
}
/* Simplify an integral conversion from an SSA name in STMT, i.e. a
   chain inner -> middle -> final, by proving the middle conversion can
   be removed without changing the final value.  Returns true if STMT
   was changed.  */

static bool
simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
{
  tree innerop, middleop, finaltype;
  gimple *def_stmt;
  signop inner_sgn, middle_sgn, final_sgn;
  unsigned inner_prec, middle_prec, final_prec;
  widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
    return false;

  /* Get the value-range of the inner operand.  Use get_range_info in
     case innerop was created during substitute-and-fold.
     NOTE(review): this accepts any non-zero result of get_range_info;
     trunk compares against VR_RANGE explicitly — confirm this merged
     condition is intended.  */
  wide_int imin, imax;
  if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
      || !get_range_info (innerop, &imin, &imax))
    return false;
  innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
  innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));

  /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  */
  if (wi::gtu_p (innermax - innermin,
		 wi::mask <widest_int> (middle_prec, false))
      && middle_prec < final_prec)
    return false;
  /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.  */
  inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
  if (inner_sgn == UNSIGNED)
    innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
  else
    innermed = 0;
  if (wi::cmp (innermin, innermed, inner_sgn) >= 0
      || wi::cmp (innermed, innermax, inner_sgn) >= 0)
    innermed = innermin;

  middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
  middlemin = wi::ext (innermin, middle_prec, middle_sgn);
  middlemed = wi::ext (innermed, middle_prec, middle_sgn);
  middlemax = wi::ext (innermax, middle_prec, middle_sgn);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_sgn = TYPE_SIGN (finaltype);
  if (wi::ext (middlemin, final_prec, final_sgn)
       != wi::ext (innermin, final_prec, final_sgn)
      || wi::ext (middlemed, final_prec, final_sgn)
       != wi::ext (innermed, final_prec, final_sgn)
      || wi::ext (middlemax, final_prec, final_sgn)
       != wi::ext (innermax, final_prec, final_sgn))
    return false;

  /* The middle conversion is provably redundant: convert directly
     from the inner operand.  */
  gimple_assign_set_rhs1 (stmt, innerop);
  fold_stmt (gsi, follow_single_use_edges);
  return true;
}
/* Simplify a conversion from integral SSA name to float in STMT.
   If the value range of RHS1 fits a narrower (or signed) integer mode
   for which the target has a direct int->float instruction, insert a
   truncation/sign-change and convert from that instead.  Returns true
   if STMT was changed.  */

bool
vr_values::simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
						   gimple *stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  value_range *vr = get_value_range (rhs1);
  scalar_float_mode fltmode
    = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  scalar_int_mode mode;
  tree tem;
  gassign *conv;

  /* We can only handle constant ranges.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  scalar_int_mode rhs_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (rhs1));
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && can_float_p (fltmode, rhs_mode, 0) != CODE_FOR_nothing
      && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
    mode = rhs_mode;
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, rhs_mode,
			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = NARROWEST_INT_MODE;
      for (;;)
	{
	  /* If we cannot do a signed conversion to float from mode
	     or if the value-range does not fit in the signed type
	     try with a wider mode.  */
	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
	    break;

	  /* But do not widen the input.  Instead leave that to the
	     optabs expansion code.  */
	  if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
	      || GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
	    return false;
	}
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  */
  tem = make_ssa_name (build_nonstandard_integer_type
			 (GET_MODE_PRECISION (mode), 0));
  conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  fold_stmt (gsi, follow_single_use_edges);
  return true;
}
/* Simplify an internal fn call using ranges if possible.  Handles the
   UBSAN_CHECK_* and *_OVERFLOW internal functions when the ranges of
   the operands prove whether overflow occurs.  Returns true if the
   call was replaced.  */

bool
vr_values::simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi,
						gimple *stmt)
{
  enum tree_code subcode;
  bool is_ubsan = false;
  bool ovf = false;

  /* Map the internal function to its underlying arithmetic tree code.  */
  switch (gimple_call_internal_fn (stmt))
    {
    case IFN_UBSAN_CHECK_ADD:
      subcode = PLUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_SUB:
      subcode = MINUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_MUL:
      subcode = MULT_EXPR;
      is_ubsan = true;
      break;
    case IFN_ADD_OVERFLOW:
      subcode = PLUS_EXPR;
      break;
    case IFN_SUB_OVERFLOW:
      subcode = MINUS_EXPR;
      break;
    case IFN_MUL_OVERFLOW:
      subcode = MULT_EXPR;
      break;
    default:
      return false;
    }

  tree op0 = gimple_call_arg (stmt, 0);
  tree op1 = gimple_call_arg (stmt, 1);
  tree type;
  if (is_ubsan)
    {
      type = TREE_TYPE (op0);
      if (VECTOR_TYPE_P (type))
	return false;
    }
  else if (gimple_call_lhs (stmt) == NULL_TREE)
    return false;
  else
    /* For the *_OVERFLOW fns the arithmetic type is the element type
       of the lhs (presumably a complex pair of result and overflow
       flag — see the COMPLEX_EXPR built below).  */
    type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));

  /* Bail unless the ranges decide overflow; a ubsan check must stay
     if overflow is possible.  */
  if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
      || (is_ubsan && ovf))
    return false;

  gimple *g;
  location_t loc = gimple_location (stmt);
  if (is_ubsan)
    g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
  else
    {
      /* Do the arithmetic in an unsigned type when overflow is possible
	 or the operand types don't match, so the operation itself has
	 defined (wrapping) behavior.  */
      int prec = TYPE_PRECISION (type);
      tree utype = type;
      if (ovf
	  || !useless_type_conversion_p (type, TREE_TYPE (op0))
	  || !useless_type_conversion_p (type, TREE_TYPE (op1)))
	utype = build_nonstandard_integer_type (prec, 1);
      if (TREE_CODE (op0) == INTEGER_CST)
	op0 = fold_convert (utype, op0);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
	{
	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  op0 = gimple_assign_lhs (g);
	}
      if (TREE_CODE (op1) == INTEGER_CST)
	op1 = fold_convert (utype, op1);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
	{
	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  op1 = gimple_assign_lhs (g);
	}
      g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      /* Convert the result back if the arithmetic was done unsigned.  */
      if (utype != type)
	{
	  g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
				   gimple_assign_lhs (g));
	  gimple_set_location (g, loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	}
      /* Replace the call by (result, overflow-flag); OVF is a
	 compile-time constant here.  */
      g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
			       gimple_assign_lhs (g),
			       build_int_cst (type, ovf));
    }
  gimple_set_location (g, loc);
  gsi_replace (gsi, g, false);
  return true;
}
+ /* Return true if VAR is a two-valued variable, i.e. its computed value
+    range contains exactly two values.  When true, store the smaller value
+    in *A and the larger in *B.  Return false otherwise.  */
+ bool
+ vr_values::two_valued_val_range_p (tree var, tree *a, tree *b)
+ {
+   value_range *vr = get_value_range (var);
+   /* Only ranges/anti-ranges with constant integer endpoints qualify.  */
+   if ((vr->type != VR_RANGE
+        && vr->type != VR_ANTI_RANGE)
+       || TREE_CODE (vr->min) != INTEGER_CST
+       || TREE_CODE (vr->max) != INTEGER_CST)
+     return false;
+   /* [VAL, VAL + 1]: a range of exactly two consecutive values.  */
+   if (vr->type == VR_RANGE
+       && wi::to_wide (vr->max) - wi::to_wide (vr->min) == 1)
+     {
+       *a = vr->min;
+       *b = vr->max;
+       return true;
+     }
+   /* ~[TYPE_MIN + 1, TYPE_MAX - 1]: everything excluded except the two
+      extreme values of the type, so VAR is either TYPE_MIN or TYPE_MAX.  */
+   if (vr->type == VR_ANTI_RANGE
+       && (wi::to_wide (vr->min)
+         - wi::to_wide (vrp_val_min (TREE_TYPE (var)))) == 1
+       && (wi::to_wide (vrp_val_max (TREE_TYPE (var)))
+         - wi::to_wide (vr->max)) == 1)
+     {
+       *a = vrp_val_min (TREE_TYPE (var));
+       *b = vrp_val_max (TREE_TYPE (var));
+       return true;
+     }
+   return false;
+ }
+ /* Simplify the statement pointed to by GSI using value ranges, if
+    possible.  Dispatch to a per-code simplification routine for assigns,
+    conditions, switches and internal calls.  Return true if the statement
+    was simplified, false otherwise.  */
+ bool
+ vr_values::simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
+ {
+   gimple *stmt = gsi_stmt (*gsi);
+   if (is_gimple_assign (stmt))
+     {
+       enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
+       tree rhs1 = gimple_assign_rhs1 (stmt);
+       tree rhs2 = gimple_assign_rhs2 (stmt);
+       tree lhs = gimple_assign_lhs (stmt);
+       tree val1 = NULL_TREE, val2 = NULL_TREE;
+       use_operand_p use_p;
+       gimple *use_stmt;
+       /* Convert:
+        LHS = CST BINOP VAR
+        Where VAR is two-valued and LHS is used in GIMPLE_COND only
+        To:
+        LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)
+        Also handles:
+        LHS = VAR BINOP CST
+        Where VAR is two-valued and LHS is used in GIMPLE_COND only
+        To:
+        LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */
+       if (TREE_CODE_CLASS (rhs_code) == tcc_binary
+         && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
+         && ((TREE_CODE (rhs1) == INTEGER_CST
+              && TREE_CODE (rhs2) == SSA_NAME)
+             || (TREE_CODE (rhs2) == INTEGER_CST
+                 && TREE_CODE (rhs1) == SSA_NAME))
+         && single_imm_use (lhs, &use_p, &use_stmt)
+         && gimple_code (use_stmt) == GIMPLE_COND)
+       {
+         tree new_rhs1 = NULL_TREE;
+         tree new_rhs2 = NULL_TREE;
+         tree cmp_var = NULL_TREE;
+         if (TREE_CODE (rhs2) == SSA_NAME
+             && two_valued_val_range_p (rhs2, &val1, &val2))
+           {
+             /* Optimize RHS1 OP [VAL1, VAL2].  */
+             new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
+             new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
+             cmp_var = rhs2;
+           }
+         else if (TREE_CODE (rhs1) == SSA_NAME
+                  && two_valued_val_range_p (rhs1, &val1, &val2))
+           {
+             /* Optimize [VAL1, VAL2] OP RHS2.  */
+             new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
+             new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
+             cmp_var = rhs1;
+           }
+         /* If we could not find two-vals or the optimization is invalid as
+            in divide by zero, new_rhs1 / new_rhs2 will be NULL_TREE.  */
+         if (new_rhs1 && new_rhs2)
+           {
+             tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
+             gimple_assign_set_rhs_with_ops (gsi,
+                                             COND_EXPR, cond,
+                                             new_rhs1,
+                                             new_rhs2);
+             update_stmt (gsi_stmt (*gsi));
+             fold_stmt (gsi, follow_single_use_edges);
+             return true;
+           }
+       }
+       switch (rhs_code)
+       {
+       case EQ_EXPR:
+       case NE_EXPR:
+           /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
+            if the RHS is zero or one, and the LHS are known to be boolean
+            values.  */
+         if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+           return simplify_truth_ops_using_ranges (gsi, stmt);
+         break;
+       /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
+        and BIT_AND_EXPR respectively if the first operand is greater
+        than zero and the second operand is an exact power of two.
+        Also optimize TRUNC_MOD_EXPR away if the second operand is
+        constant and the first operand already has the right value
+        range.  */
+       case TRUNC_DIV_EXPR:
+       case TRUNC_MOD_EXPR:
+         if ((TREE_CODE (rhs1) == SSA_NAME
+              || TREE_CODE (rhs1) == INTEGER_CST)
+             && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+           return simplify_div_or_mod_using_ranges (gsi, stmt);
+         break;
+       /* Transform ABS (X) into X or -X as appropriate.  */
+       case ABS_EXPR:
+         if (TREE_CODE (rhs1) == SSA_NAME
+             && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+           return simplify_abs_using_ranges (gsi, stmt);
+         break;
+       case BIT_AND_EXPR:
+       case BIT_IOR_EXPR:
+         /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
+            if all the bits being cleared are already cleared or
+            all the bits being set are already set.  */
+         if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+           return simplify_bit_ops_using_ranges (gsi, stmt);
+         break;
+       CASE_CONVERT:
+         if (TREE_CODE (rhs1) == SSA_NAME
+             && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+           return simplify_conversion_using_ranges (gsi, stmt);
+         break;
+       case FLOAT_EXPR:
+         if (TREE_CODE (rhs1) == SSA_NAME
+             && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+           return simplify_float_conversion_using_ranges (gsi, stmt);
+         break;
+       case MIN_EXPR:
+       case MAX_EXPR:
+         return simplify_min_or_max_using_ranges (gsi, stmt);
+       default:
+         break;
+       }
+     }
+   else if (gimple_code (stmt) == GIMPLE_COND)
+     return simplify_cond_using_ranges_1 (as_a <gcond *> (stmt));
+   else if (gimple_code (stmt) == GIMPLE_SWITCH)
+     return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
+   else if (is_gimple_call (stmt)
+          && gimple_call_internal_p (stmt))
+     return simplify_internal_call_using_ranges (gsi, stmt);
+   return false;
+ }
+ /* Set the value range VR for SSA name VAR in the lattice.  SSA names
+    whose version is outside the allocated lattice (created after the
+    lattice was sized) are silently ignored.  */
+ void
+ vr_values::set_vr_value (tree var, value_range *vr)
+ {
+   if (SSA_NAME_VERSION (var) >= num_vr_values)
+     return;
+   vr_value[SSA_NAME_VERSION (var)] = vr;
+ }