+2012-11-17 Diego Novillo <dnovillo@google.com>
+
+ VEC API overhaul (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
+
+ * vec.c (register_overhead): Convert it into
+ member function of vec_prefix.
+ (release_overhead): Likewise.
+ (calculate_allocation): Likewise.
+ (vec_heap_free): Remove.
+ (vec_gc_o_reserve_1): Remove.
+ (vec_heap_o_reserve_1): Remove.
+ (vec_stack_o_reserve_1): Remove.
+ (vec_stack_o_reserve_exact): Remove.
+ (register_stack_vec): New.
+ (stack_vec_register_index): New.
+ (unregister_stack_vec): New.
+ (vec_assert_fail): Remove.
+ * vec.h: Conditionally include ggc.h. Document conditional
+ hackery.
+ Update top-level documentation.
+ (ALONE_VEC_CHECK_INFO): Remove.
+ (VEC_CHECK_INFO): Remove.
+ (ALONE_VEC_CHECK_DECL): Remove.
+ (VEC_CHECK_DECL): Remove.
+ (ALONE_VEC_CHECK_PASS): Remove.
+ (VEC_CHECK_PASS): Remove.
+ (VEC_ASSERT): Remove.
+ (vec_prefix): Add friends va_gc, va_gc_atomic, va_heap and
+ va_stack.
+ Mark fields alloc_ and num_ as protected.
+ (struct vec_t): Remove. Remove all function members.
+ (struct vl_embed): Declare.
+ (struct vl_ptr): Declare.
+ (free): Remove.
+ (reserve_exact): Remove.
+ (reserve): Remove.
+ (safe_splice): Remove.
+ (safe_push): Remove.
+ (safe_grow): Remove.
+ (safe_grow_cleared): Remove.
+ (safe_insert): Remove.
+ (DEF_VEC_I): Remove.
+ (DEF_VEC_ALLOC_I): Remove.
+ (DEF_VEC_P): Remove.
+ (DEF_VEC_ALLOC_P): Remove.
+ (DEF_VEC_O): Remove.
+ (DEF_VEC_ALLOC_O): Remove.
+ (DEF_VEC_ALLOC_P_STACK): Remove.
+ (DEF_VEC_ALLOC_O_STACK): Remove.
+ (DEF_VEC_ALLOC_I_STACK): Remove.
+ (DEF_VEC_A): Remove.
+ (DEF_VEC_ALLOC_A): Remove.
+ (vec_stack_p_reserve_exact_1): Remove.
+ (vec_stack_o_reserve): Remove.
+ (vec_stack_o_reserve_exact): Remove.
+ (VEC_length): Remove.
+ (VEC_empty): Remove.
+ (VEC_address): Remove.
+ (vec_address): Remove.
+ (VEC_last): Remove.
+ (VEC_index): Remove.
+ (VEC_iterate): Remove.
+ (VEC_embedded_size): Remove.
+ (VEC_embedded_init): Remove.
+ (VEC_free): Remove.
+ (VEC_copy): Remove.
+ (VEC_space): Remove.
+ (VEC_reserve): Remove.
+ (VEC_reserve_exact): Remove.
+ (VEC_splice): Remove.
+ (VEC_safe_splice): Remove.
+ (VEC_quick_push): Remove.
+ (VEC_safe_push): Remove.
+ (VEC_pop): Remove.
+ (VEC_truncate): Remove.
+ (VEC_safe_grow): Remove.
+ (VEC_replace): Remove.
+ (VEC_quick_insert): Remove.
+ (VEC_safe_insert): Remove.
+ (VEC_ordered_remove): Remove.
+ (VEC_unordered_remove): Remove.
+ (VEC_block_remove): Remove.
+ (VEC_lower_bound): Remove.
+ (VEC_alloc): Remove.
+ (VEC_qsort): Remove.
+
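	For orientation, a minimal sketch of what the removals above amount
	to in user code; the old form is the removed VEC API, the new form
	is the vec template described below (names as in the entries; the
	snippet is illustrative, not part of the patch):

	    /* Old API, removed above: per-type boilerplate plus VEC_* macros.  */
	    DEF_VEC_P (tree);
	    DEF_VEC_ALLOC_P (tree, heap);
	    VEC (tree, heap) *old_v = VEC_alloc (tree, heap, 8);
	    VEC_safe_push (tree, heap, old_v, NULL_TREE);
	    VEC_free (tree, heap, old_v);

	    /* New API: one vec template with member functions, no DEF_VEC_*.  */
	    vec<tree> new_v = vec<tree> ();
	    new_v.safe_push (NULL_TREE);
	    new_v.release ();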
+ (va_heap): Declare.
+ (va_heap::default_layout): New typedef to vl_ptr.
+ (va_heap::reserve): New.
+ (va_heap::release): New.
+ (va_gc): Declare.
+ (va_gc::default_layout): New typedef to vl_embed.
+ (va_gc::reserve): New.
+ (va_gc::release): New.
+ (va_gc_atomic): Declare. Inherit from va_gc.
+ (va_stack): Declare.
+ (va_stack::default_layout): New typedef to vl_ptr.
+ (va_stack::alloc): New.
+ (va_stack::reserve): New.
+ (va_stack::release): New.
+ (register_stack_vec): Declare.
+ (stack_vec_register_index): Declare.
+ (unregister_stack_vec): Declare.
+
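	The allocation strategy is now a template argument; a hedged sketch
	of two of the allocators declared above (exact declarations per
	vec.h; GTY marking as used elsewhere in the tree):

	    vec<int> hv = vec<int> ();            /* va_heap is the default.  */
	    hv.safe_push (1);
	    hv.release ();

	    static GTY(()) vec<tree, va_gc> *gv;  /* va_gc implies the embedded
						     vl_embed layout, accessed via a
						     pointer (see vec_safe_* below).  */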
+ (vec<T, A = va_heap, L = typename A::default_layout>): Declare
+ empty vec template.
+ (vec<T, A, vl_embed>): Partial specialization for embedded
+ layout.
+ (vec<T, A, vl_embed>::allocated): New.
+ (vec<T, A, vl_embed>::length): New.
+ (vec<T, A, vl_embed>::is_empty): New.
+ (vec<T, A, vl_embed>::address): New.
+ (vec<T, A, vl_embed>::operator[]): New.
+	(vec<T, A, vl_embed>::last): New.
+ (vec<T, A, vl_embed>::space): New.
+ (vec<T, A, vl_embed>::iterate): New.
+ (vec<T, A, vl_embed>::iterate): New.
+ (vec<T, A, vl_embed>::copy): New.
+ (vec<T, A, vl_embed>::splice): New.
+	(vec<T, A, vl_embed>::quick_push): New.
+	(vec<T, A, vl_embed>::pop): New.
+ (vec<T, A, vl_embed>::truncate): New.
+ (vec<T, A, vl_embed>::quick_insert): New.
+ (vec<T, A, vl_embed>::ordered_remove): New.
+ (vec<T, A, vl_embed>::unordered_remove): New.
+ (vec<T, A, vl_embed>::block_remove): New.
+ (vec<T, A, vl_embed>::qsort): New.
+ (vec<T, A, vl_embed>::lower_bound): New.
+ (vec<T, A, vl_embed>::embedded_size): New.
+ (vec<T, A, vl_embed>::embedded_init): New.
+ (vec<T, A, vl_embed>::quick_grow): New.
+ (vec<T, A, vl_embed>::quick_grow_cleared): New.
+ (vec_safe_space): New.
+ (vec_safe_length): New.
+ (vec_safe_address): New.
+ (vec_safe_is_empty): New.
+ (vec_safe_reserve): New.
+ (vec_safe_reserve_exact): New.
+ (vec_alloc): New.
+ (vec_free): New.
+ (vec_safe_grow): New.
+ (vec_safe_grow_cleared): New.
+ (vec_safe_iterate): New.
+ (vec_safe_push): New.
+ (vec_safe_insert): New.
+ (vec_safe_truncate): New.
+ (vec_safe_copy): New.
+ (vec_safe_splice): New.
+
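	The vec_safe_* wrappers above take a pointer to an embedded-layout
	vector and treat a null pointer as an empty vector; a sketch:

	    vec<tree, va_gc> *v = NULL;
	    gcc_assert (vec_safe_is_empty (v));  /* NULL reads as empty.  */
	    vec_safe_push (v, NULL_TREE);        /* Allocates V on first use.  */
	    tree first = (*v)[0];                /* V is non-null after the push.  */
	    vec_free (v);                        /* Frees V and nulls the pointer.  */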
+	(vec<T, A, vl_ptr>): New partial specialization for the
+	space-efficient layout.
+ (vec<T, A, vl_ptr>::exists): New.
+ (vec<T, A, vl_ptr>::is_empty): New.
+ (vec<T, A, vl_ptr>::length): New.
+ (vec<T, A, vl_ptr>::address): New.
+ (vec<T, A, vl_ptr>::operator[]): New.
+ (vec<T, A, vl_ptr>::operator!=): New.
+ (vec<T, A, vl_ptr>::operator==): New.
+ (vec<T, A, vl_ptr>::last): New.
+ (vec<T, A, vl_ptr>::space): New.
+ (vec<T, A, vl_ptr>::iterate): New.
+ (vec<T, A, vl_ptr>::copy): New.
+ (vec<T, A, vl_ptr>::reserve): New.
+ (vec<T, A, vl_ptr>::reserve_exact): New.
+ (vec<T, A, vl_ptr>::splice): New.
+ (vec<T, A, vl_ptr>::safe_splice): New.
+ (vec<T, A, vl_ptr>::quick_push): New.
+ (vec<T, A, vl_ptr>::safe_push): New.
+ (vec<T, A, vl_ptr>::pop): New.
+ (vec<T, A, vl_ptr>::truncate): New.
+ (vec<T, A, vl_ptr>::safe_grow): New.
+ (vec<T, A, vl_ptr>::safe_grow_cleared): New.
+ (vec<T, A, vl_ptr>::quick_grow): New.
+ (vec<T, A, vl_ptr>::quick_grow_cleared): New.
+ (vec<T, A, vl_ptr>::quick_insert): New.
+ (vec<T, A, vl_ptr>::safe_insert): New.
+ (vec<T, A, vl_ptr>::ordered_remove): New.
+ (vec<T, A, vl_ptr>::unordered_remove): New.
+ (vec<T, A, vl_ptr>::block_remove): New.
+ (vec<T, A, vl_ptr>::qsort): New.
+ (vec<T, A, vl_ptr>::lower_bound): New.
+ (vec_stack_alloc): Define.
+ (FOR_EACH_VEC_SAFE_ELT): Define.
+ * vecir.h: Remove. Update all users.
+ * vecprim.h: Remove. Update all users.
+ Move uchar to coretypes.h.
+
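	FOR_EACH_VEC_SAFE_ELT, defined above, gives the same null-tolerant
	behavior for iteration; a sketch:

	    unsigned ix;
	    tree t;
	    vec<tree, va_gc> *v = NULL;       /* Possibly never allocated.  */
	    FOR_EACH_VEC_SAFE_ELT (v, ix, t)  /* Iterates zero times on NULL.  */
	      debug_tree (t);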
+ * Makefile.in (VEC_H): Add $(GGC_H).
+ Remove vecir.h and vecprim.h dependencies everywhere.
+
+2012-11-16 Diego Novillo <dnovillo@google.com>
+
+ * gengtype-lex.l (VEC): Remove.
+ Add characters in the set [\!\>\.-].
+ * gengtype-parse.c (token_names): Remove "VEC".
+ (require_template_declaration): Remove handling of VEC_TOKEN.
+ (type): Likewise.
+ Call create_user_defined_type when parsing GTY((user)).
+	* gengtype-state.c (type_lineloc): Handle TYPE_UNDEFINED.
+ (write_state_undefined_type): New.
+ (write_state_type): Call write_state_undefined_type for
+ TYPE_UNDEFINED.
+ (read_state_type): Call read_state_undefined_type for
+ TYPE_UNDEFINED.
+ * gengtype.c (dbgprint_count_type_at): Handle TYPE_UNDEFINED.
+ (create_user_defined_type): Make extern.
+ (type_for_name): Factor out of resolve_typedef.
+	(create_undefined_type): New.
+ (resolve_typedef): Call it when we cannot find a previous
+ typedef and the type is not a template.
+ (find_structure): Accept TYPE_UNDEFINED.
+ (set_gc_used_type): Add argument ALLOWED_UNDEFINED_TYPES,
+ default to false.
+ Emit an error for TYPE_UNDEFINED unless LEVEL is GC_UNUSED or
+ ALLOWED_UNDEFINED_TYPES is set.
+ Set ALLOWED_UNDEFINED_TYPES to true for TYPE_USER_STRUCT.
+ (filter_type_name): Accept templates with more than one
+ argument.
+	(output_mangled_typename): Handle TYPE_UNDEFINED.
+ (walk_type): Likewise.
+ (write_types_process_field): Likewise.
+ (write_func_for_structure): If CHAIN_NEXT is set, ORIG_S
+ should not be a user-defined type.
+ (write_types_local_user_process_field): Handle TYPE_ARRAY,
+ TYPE_NONE and TYPE_UNDEFINED.
+ (write_types_local_process_field): Likewise.
+ (contains_scalar_p): Return 0 for TYPE_USER_STRUCT.
+ (write_root): Reject user-defined types that are not pointers.
+ Handle TYPE_NONE, TYPE_UNDEFINED, TYPE_UNION, TYPE_LANG_STRUCT
+ and TYPE_PARAM_STRUCT.
+ (output_typename): Handle TYPE_NONE, TYPE_UNDEFINED, and
+ TYPE_ARRAY.
+ (dump_typekind): Handle TYPE_UNDEFINED.
+ * gengtype.h (enum typekind): Add TYPE_UNDEFINED.
+ (create_user_defined_type): Declare.
+ (enum gty_token): Remove VEC_TOKEN.
+
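	The gengtype changes above exist so that templates such as vec can
	appear in GTY fields without the old VEC token; a GTY((user)) type
	supplies its own markers, which the generated walkers call.  A
	hedged sketch (my_cache is hypothetical; gt_ggc_mx follows the GCC
	naming convention):

	    struct GTY((user)) my_cache
	    {
	      vec<tree, va_gc> *entries;
	    };

	    /* User-supplied marker invoked by the generated GC code.  */
	    void
	    gt_ggc_mx (my_cache *c)
	    {
	      gt_ggc_mx (c->entries);  /* Overload provided by vec.h for va_gc.  */
	    }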
+2012-11-16 Diego Novillo <dnovillo@google.com>
+
+ Adjust for new vec API (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
+
+ * coretypes.h (uchar): Define.
+ * alias.c: Use new vec API in vec.h.
+ * asan.c: Likewise.
+ * attribs.c: Likewise.
+ * basic-block.h: Likewise.
+ * bb-reorder.c: Likewise.
+ * builtins.c: Likewise.
+ * calls.c: Likewise.
+ * cfg.c: Likewise.
+ * cfganal.c: Likewise.
+ * cfgcleanup.c: Likewise.
+ * cfgexpand.c: Likewise.
+ * cfghooks.c: Likewise.
+ * cfghooks.h: Likewise.
+ * cfgloop.c: Likewise.
+ * cfgloop.h: Likewise.
+ * cfgloopanal.c: Likewise.
+ * cfgloopmanip.c: Likewise.
+ * cfgrtl.c: Likewise.
+ * cgraph.c: Likewise.
+ * cgraph.h: Likewise.
+ * cgraphclones.c: Likewise.
+ * cgraphunit.c: Likewise.
+ * combine.c: Likewise.
+ * compare-elim.c: Likewise.
+ * coverage.c: Likewise.
+ * cprop.c: Likewise.
+ * data-streamer.h: Likewise.
+ * dbxout.c: Likewise.
+ * dce.c: Likewise.
+ * df-core.c: Likewise.
+ * df-problems.c: Likewise.
+ * df-scan.c: Likewise.
+ * dominance.c: Likewise.
+ * domwalk.c: Likewise.
+ * domwalk.h: Likewise.
+ * dse.c: Likewise.
+ * dwarf2cfi.c: Likewise.
+ * dwarf2out.c: Likewise.
+ * dwarf2out.h: Likewise.
+ * emit-rtl.c: Likewise.
+ * except.c: Likewise.
+ * except.h: Likewise.
+ * expr.c: Likewise.
+ * expr.h: Likewise.
+ * final.c: Likewise.
+ * fold-const.c: Likewise.
+ * function.c: Likewise.
+ * function.h: Likewise.
+ * fwprop.c: Likewise.
+ * gcc.c: Likewise.
+ * gcse.c: Likewise.
+ * genattr.c: Likewise.
+ * genattrtab.c: Likewise.
+ * genautomata.c: Likewise.
+ * genextract.c: Likewise.
+	* genopinit.c: Likewise.
+ * ggc-common.c: Likewise.
+ * ggc.h: Likewise.
+ * gimple-low.c: Likewise.
+ * gimple-ssa-strength-reduction.c: Likewise.
+ * gimple-streamer-in.c: Likewise.
+ * gimple.c: Likewise.
+ * gimple.h: Likewise.
+ * gimplify.c: Likewise.
+ * graph.c: Likewise.
+ * graphds.c: Likewise.
+ * graphds.h: Likewise.
+ * graphite-blocking.c: Likewise.
+ * graphite-clast-to-gimple.c: Likewise.
+ * graphite-dependences.c: Likewise.
+ * graphite-interchange.c: Likewise.
+ * graphite-optimize-isl.c: Likewise.
+ * graphite-poly.c: Likewise.
+ * graphite-poly.h: Likewise.
+ * graphite-scop-detection.c: Likewise.
+ * graphite-scop-detection.h: Likewise.
+ * graphite-sese-to-poly.c: Likewise.
+ * graphite.c: Likewise.
+ * godump.c: Likewise.
+ * haifa-sched.c: Likewise.
+ * hw-doloop.c: Likewise.
+ * hw-doloop.h: Likewise.
+ * ifcvt.c: Likewise.
+ * insn-addr.h: Likewise.
+ * ipa-cp.c: Likewise.
+ * ipa-inline-analysis.c: Likewise.
+ * ipa-inline-transform.c: Likewise.
+ * ipa-inline.c: Likewise.
+ * ipa-inline.h: Likewise.
+ * ipa-prop.c: Likewise.
+ * ipa-prop.h: Likewise.
+ * ipa-pure-const.c: Likewise.
+ * ipa-ref-inline.h: Likewise.
+ * ipa-ref.c: Likewise.
+ * ipa-ref.h: Likewise.
+ * ipa-reference.c: Likewise.
+ * ipa-split.c: Likewise.
+ * ipa-utils.c: Likewise.
+ * ipa-utils.h: Likewise.
+ * ipa.c: Likewise.
+ * ira-build.c: Likewise.
+ * ira-color.c: Likewise.
+ * ira-emit.c: Likewise.
+ * ira-int.h: Likewise.
+ * ira.c: Likewise.
+ * loop-invariant.c: Likewise.
+ * loop-unroll.c: Likewise.
+ * lower-subreg.c: Likewise.
+ * lra-lives.c: Likewise.
+ * lra.c: Likewise.
+ * lto-cgraph.c: Likewise.
+ * lto-section-out.c: Likewise.
+ * lto-streamer-in.c: Likewise.
+ * lto-streamer-out.c: Likewise.
+ * lto-streamer.h: Likewise.
+ * lto-symtab.c: Likewise.
+ * mcf.c: Likewise.
+ * modulo-sched.c: Likewise.
+ * omp-low.c: Likewise.
+ * opts-common.c: Likewise.
+ * opts-global.c: Likewise.
+ * opts.c: Likewise.
+ * opts.h: Likewise.
+ * passes.c: Likewise.
+ * predict.c: Likewise.
+ * print-tree.c: Likewise.
+ * profile.c: Likewise.
+ * profile.h: Likewise.
+ * read-rtl.c: Likewise.
+ * ree.c: Likewise.
+ * reg-stack.c: Likewise.
+ * regrename.c: Likewise.
+ * regrename.h: Likewise.
+ * reload.c: Likewise.
+ * reload.h: Likewise.
+ * reload1.c: Likewise.
+ * rtl.h: Likewise.
+ * sched-deps.c: Likewise.
+ * sched-int.h: Likewise.
+ * sdbout.c: Likewise.
+ * sel-sched-dump.c: Likewise.
+ * sel-sched-ir.c: Likewise.
+ * sel-sched-ir.h: Likewise.
+ * sel-sched.c: Likewise.
+ * sese.c: Likewise.
+ * sese.h: Likewise.
+ * statistics.h: Likewise.
+ * stmt.c: Likewise.
+ * stor-layout.c: Likewise.
+ * store-motion.c: Likewise.
+ * tlink.c: Likewise.
+ * toplev.c: Likewise.
+ * trans-mem.c: Likewise.
+ * tree-browser.c: Likewise.
+ * tree-call-cdce.c: Likewise.
+ * tree-cfg.c: Likewise.
+ * tree-cfgcleanup.c: Likewise.
+ * tree-chrec.c: Likewise.
+ * tree-chrec.h: Likewise.
+ * tree-complex.c: Likewise.
+ * tree-data-ref.c: Likewise.
+ * tree-data-ref.h: Likewise.
+ * tree-dfa.c: Likewise.
+ * tree-diagnostic.c: Likewise.
+ * tree-dump.c: Likewise.
+ * tree-eh.c: Likewise.
+ * tree-emutls.c: Likewise.
+ * tree-flow.h: Likewise.
+ * tree-if-conv.c: Likewise.
+ * tree-inline.c: Likewise.
+ * tree-inline.h: Likewise.
+ * tree-into-ssa.c: Likewise.
+ * tree-iterator.c: Likewise.
+ * tree-loop-distribution.c: Likewise.
+ * tree-mudflap.c: Likewise.
+ * tree-optimize.c: Likewise.
+ * tree-outof-ssa.c: Likewise.
+ * tree-parloops.c: Likewise.
+ * tree-phinodes.c: Likewise.
+ * tree-predcom.c: Likewise.
+ * tree-pretty-print.c: Likewise.
+ * tree-scalar-evolution.c: Likewise.
+ * tree-sra.c: Likewise.
+ * tree-ssa-address.c: Likewise.
+ * tree-ssa-alias.c: Likewise.
+ * tree-ssa-ccp.c: Likewise.
+ * tree-ssa-coalesce.c: Likewise.
+ * tree-ssa-dce.c: Likewise.
+ * tree-ssa-dom.c: Likewise.
+ * tree-ssa-forwprop.c: Likewise.
+ * tree-ssa-live.c: Likewise.
+ * tree-ssa-live.h: Likewise.
+ * tree-ssa-loop-im.c: Likewise.
+ * tree-ssa-loop-ivcanon.c: Likewise.
+ * tree-ssa-loop-ivopts.c: Likewise.
+ * tree-ssa-loop-manip.c: Likewise.
+ * tree-ssa-loop-niter.c: Likewise.
+ * tree-ssa-loop-prefetch.c: Likewise.
+ * tree-ssa-math-opts.c: Likewise.
+ * tree-ssa-operands.c: Likewise.
+ * tree-ssa-phiopt.c: Likewise.
+ * tree-ssa-phiprop.c: Likewise.
+ * tree-ssa-pre.c: Likewise.
+ * tree-ssa-propagate.c: Likewise.
+ * tree-ssa-reassoc.c: Likewise.
+ * tree-ssa-sccvn.c: Likewise.
+ * tree-ssa-sccvn.h: Likewise.
+ * tree-ssa-strlen.c: Likewise.
+ * tree-ssa-structalias.c: Likewise.
+ * tree-ssa-tail-merge.c: Likewise.
+ * tree-ssa-threadedge.c: Likewise.
+ * tree-ssa-threadupdate.c: Likewise.
+ * tree-ssa-uncprop.c: Likewise.
+ * tree-ssa-uninit.c: Likewise.
+ * tree-ssa.c: Likewise.
+ * tree-ssanames.c: Likewise.
+ * tree-stdarg.c: Likewise.
+ * tree-streamer-in.c: Likewise.
+ * tree-streamer-out.c: Likewise.
+ * tree-streamer.c: Likewise.
+ * tree-streamer.h: Likewise.
+ * tree-switch-conversion.c: Likewise.
+ * tree-vect-data-refs.c: Likewise.
+ * tree-vect-generic.c: Likewise.
+ * tree-vect-loop-manip.c: Likewise.
+ * tree-vect-loop.c: Likewise.
+ * tree-vect-patterns.c: Likewise.
+ * tree-vect-slp.c: Likewise.
+ * tree-vect-stmts.c: Likewise.
+ * tree-vectorizer.c: Likewise.
+ * tree-vectorizer.h: Likewise.
+ * tree-vrp.c: Likewise.
+ * tree.c: Likewise.
+ * tree.h: Likewise.
+ * value-prof.c: Likewise.
+ * value-prof.h: Likewise.
+ * var-tracking.c: Likewise.
+ * varasm.c: Likewise.
+ * varpool.c: Likewise.
+ * vmsdbgout.c: Likewise.
+ * config/bfin/bfin.c: Likewise.
+ * config/c6x/c6x.c: Likewise.
+ * config/darwin.c: Likewise.
+ * config/i386/i386.c: Likewise.
+ * config/ia64/ia64.c: Likewise.
+ * config/mep/mep.c: Likewise.
+ * config/mips/mips.c: Likewise.
+ * config/pa/pa.c: Likewise.
+ * config/rs6000/rs6000-c.c: Likewise.
+ * config/rs6000/rs6000.c: Likewise.
+ * config/rx/rx.c: Likewise.
+ * config/spu/spu-c.c: Likewise.
+ * config/vms/vms.c: Likewise.
+ * config/vxworks.c: Likewise.
+ * config/epiphany/resolve-sw-modes.c: Likewise.
+
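	The per-file updates listed in this entry are mechanical; a sketch
	of the recurring rewrites (old form in the comments, new form as
	code):

	    vec<tree> v = vec<tree> ();   /* was: VEC(tree,heap) *v = NULL;          */
	    v.safe_push (NULL_TREE);      /* was: VEC_safe_push (tree, heap, v, t);  */
	    unsigned n = v.length ();     /* was: VEC_length (tree, v);              */
	    tree t0 = v[0];               /* was: VEC_index (tree, v, 0);            */
	    v.release ();                 /* was: VEC_free (tree, heap, v);          */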
2012-11-17 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/55236
endif
# Shorthand variables for dependency lists.
-VEC_H = vec.h statistics.h
+VEC_H = vec.h statistics.h $(GGC_H)
HASH_TABLE_H = $(HASHTAB_H) hash-table.h
-EXCEPT_H = except.h $(HASHTAB_H) vecprim.h vecir.h
+EXCEPT_H = except.h $(HASHTAB_H)
TARGET_DEF = target.def target-hooks-macros.h
C_TARGET_DEF = c-family/c-target.def target-hooks-macros.h
COMMON_TARGET_DEF = common/common-target.def target-hooks-macros.h
insn-notes.def $(INPUT_H) $(REAL_H) statistics.h $(VEC_H) \
$(FIXED_VALUE_H) alias.h $(HASHTAB_H)
FIXED_VALUE_H = fixed-value.h $(MACHMODE_H) double-int.h
-RTL_H = $(RTL_BASE_H) $(FLAGS_H) genrtl.h vecir.h
+RTL_H = $(RTL_BASE_H) $(FLAGS_H) genrtl.h
RTL_ERROR_H = rtl-error.h $(RTL_H) $(DIAGNOSTIC_CORE_H)
READ_MD_H = $(OBSTACK_H) $(HASHTAB_H) read-md.h
PARAMS_H = params.h params.def
TREE_H = coretypes.h tree.h all-tree.def tree.def c-family/c-common.def \
$(lang_tree_files) $(MACHMODE_H) tree-check.h $(BUILTINS_DEF) \
$(INPUT_H) statistics.h $(VEC_H) treestruct.def $(HASHTAB_H) \
- double-int.h alias.h $(SYMTAB_H) $(FLAGS_H) vecir.h \
+ double-int.h alias.h $(SYMTAB_H) $(FLAGS_H) \
$(REAL_H) $(FIXED_VALUE_H)
REGSET_H = regset.h $(BITMAP_H) hard-reg-set.h
BASIC_BLOCK_H = basic-block.h $(PREDICT_H) $(VEC_H) $(FUNCTION_H) \
cfg-flags.def cfghooks.h
GIMPLE_H = gimple.h gimple.def gsstruct.def pointer-set.h $(VEC_H) \
- vecir.h $(GGC_H) $(BASIC_BLOCK_H) $(TREE_H) tree-ssa-operands.h \
+ $(GGC_H) $(BASIC_BLOCK_H) $(TREE_H) tree-ssa-operands.h \
tree-ssa-alias.h $(INTERNAL_FN_H)
TRANS_MEM_H = trans-mem.h
GCOV_IO_H = gcov-io.h gcov-iov.h auto-host.h
EMIT_RTL_H = emit-rtl.h
FLAGS_H = flags.h flag-types.h $(OPTIONS_H)
OPTIONS_H = options.h flag-types.h $(OPTIONS_H_EXTRA)
-FUNCTION_H = function.h $(HASHTAB_H) vecprim.h $(TM_H) hard-reg-set.h \
- $(VEC_H) vecir.h $(INPUT_H) $(MACHMODE_H)
+FUNCTION_H = function.h $(HASHTAB_H) $(TM_H) hard-reg-set.h \
+ $(VEC_H) $(INPUT_H) $(MACHMODE_H)
EXPR_H = expr.h insn-config.h $(FUNCTION_H) $(RTL_H) $(FLAGS_H) $(TREE_H) $(MACHMODE_H) $(EMIT_RTL_H)
OPTABS_H = optabs.h insn-codes.h insn-opinit.h
REGS_H = regs.h $(MACHMODE_H) hard-reg-set.h
SCHED_INT_H = sched-int.h $(INSN_ATTR_H) $(BASIC_BLOCK_H) $(RTL_H) $(DF_H) \
- vecprim.h $(REGSET_H)
+ $(REGSET_H)
SEL_SCHED_IR_H = sel-sched-ir.h $(INSN_ATTR_H) $(BASIC_BLOCK_H) $(RTL_H) \
- $(GGC_H) $(BITMAP_H) vecprim.h $(SCHED_INT_H) $(CFGLOOP_H) $(REGSET_H)
+ $(GGC_H) $(BITMAP_H) $(SCHED_INT_H) $(CFGLOOP_H) $(REGSET_H)
SEL_SCHED_DUMP_H = sel-sched-dump.h $(SEL_SCHED_IR_H)
-CFGLOOP_H = cfgloop.h $(BASIC_BLOCK_H) vecprim.h double-int.h \
+CFGLOOP_H = cfgloop.h $(BASIC_BLOCK_H) double-int.h \
$(BITMAP_H) sbitmap.h
IPA_UTILS_H = ipa-utils.h $(TREE_H) $(CGRAPH_H)
IPA_REFERENCE_H = ipa-reference.h $(BITMAP_H) $(TREE_H)
GGC_INTERNAL_H = ggc-internal.h $(GGC_H)
TIMEVAR_H = timevar.h timevar.def
INSN_ATTR_H = insn-attr.h insn-attr-common.h $(INSN_ADDR_H)
-INSN_ADDR_H = $(srcdir)/insn-addr.h vecprim.h
+INSN_ADDR_H = $(srcdir)/insn-addr.h
C_COMMON_H = c-family/c-common.h c-family/c-common.def $(TREE_H) \
$(SPLAY_TREE_H) $(CPPLIB_H) $(GGC_H) $(DIAGNOSTIC_CORE_H)
C_PRAGMA_H = c-family/c-pragma.h $(CPPLIB_H)
$(BITMAP_H) sbitmap.h $(BASIC_BLOCK_H) $(GIMPLE_H) \
$(HASHTAB_H) $(CGRAPH_H) $(IPA_REFERENCE_H) \
tree-ssa-alias.h
-TREE_SSA_LIVE_H = tree-ssa-live.h $(PARTITION_H) vecprim.h
+TREE_SSA_LIVE_H = tree-ssa-live.h $(PARTITION_H)
SSAEXPAND_H = ssaexpand.h $(TREE_SSA_LIVE_H)
PRETTY_PRINT_H = pretty-print.h $(INPUT_H) $(OBSTACK_H)
TREE_PRETTY_PRINT_H = tree-pretty-print.h $(PRETTY_PRINT_H)
SCEV_H = tree-scalar-evolution.h $(GGC_H) tree-chrec.h $(PARAMS_H)
OMEGA_H = omega.h $(PARAMS_H)
TREE_DATA_REF_H = tree-data-ref.h $(OMEGA_H) graphds.h $(SCEV_H)
-TREE_INLINE_H = tree-inline.h vecir.h
+TREE_INLINE_H = tree-inline.h
REAL_H = real.h $(MACHMODE_H)
IRA_INT_H = ira.h ira-int.h $(CFGLOOP_H) alloc-pool.h
LRA_INT_H = lra.h $(BITMAP_H) $(RECOG_H) $(INSN_ATTR_H) insn-codes.h lra-int.h
DBGCNT_H = dbgcnt.h dbgcnt.def
LTO_STREAMER_H = lto-streamer.h $(LINKER_PLUGIN_API_H) $(TARGET_H) \
- $(CGRAPH_H) $(VEC_H) vecprim.h $(TREE_H) $(GIMPLE_H) \
+ $(CGRAPH_H) $(VEC_H) $(TREE_H) $(GIMPLE_H) \
$(GCOV_IO_H) $(DIAGNOSTIC_H) alloc-pool.h
DATA_STREAMER_H = data-streamer.h $(VEC_H) $(LTO_STREAMER_H)
GIMPLE_STREAMER_H = gimple-streamer.h $(LTO_STREAMER_H) $(BASIC_BLOCK_H) \
$(FUNCTION_H) $(TM_H) coretypes.h \
langhooks.h domwalk.h $(TREE_PASS_H) $(PARAMS_H) $(BASIC_BLOCK_H) \
$(BITMAP_H) $(CFGLOOP_H) $(FLAGS_H) $(HASHTAB_H) \
- $(GIMPLE_H) $(TREE_INLINE_H) vecprim.h \
- $(GIMPLE_PRETTY_PRINT_H)
+ $(GIMPLE_H) $(TREE_INLINE_H) $(GIMPLE_PRETTY_PRINT_H)
tree-ssa-ter.o : tree-ssa-ter.c $(TREE_FLOW_H) $(CONFIG_H) $(SYSTEM_H) \
$(TREE_H) $(DIAGNOSTIC_H) $(TM_H) coretypes.h dumpfile.h \
$(TREE_SSA_LIVE_H) $(BITMAP_H) $(FLAGS_H) \
langhooks.h \
$(PARAMS_H) $(BASIC_BLOCK_H) $(DIAGNOSTIC_H) \
$(GIMPLE_H) $(VEC_H) $(TARGET_H) \
- vecprim.h pointer-set.h alloc-pool.h \
+ pointer-set.h alloc-pool.h \
$(TREE_PRETTY_PRINT_H)
tree-ssa-reassoc.o : tree-ssa-reassoc.c $(TREE_FLOW_H) $(CONFIG_H) \
$(SYSTEM_H) $(TREE_H) $(DIAGNOSTIC_H) \
$(OPTABS_H) $(LIBFUNCS_H) $(REGS_H) hard-reg-set.h insn-config.h $(RECOG_H) \
output.h $(EXCEPT_H) $(HASHTAB_H) $(GGC_H) $(TM_P_H) langhooks.h \
gt-function.h $(TARGET_H) $(BASIC_BLOCK_H) $(PREDICT_H) \
- $(TREE_PASS_H) $(DF_H) vecprim.h $(PARAMS_H) bb-reorder.h \
+ $(TREE_PASS_H) $(DF_H) $(PARAMS_H) bb-reorder.h \
$(COMMON_TARGET_H)
statistics.o : statistics.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
$(TREE_PASS_H) $(TREE_DUMP_H) $(HASHTAB_H) statistics.h $(FUNCTION_H)
typeclass.h hard-reg-set.h toplev.h $(DIAGNOSTIC_CORE_H) hard-reg-set.h $(EXCEPT_H) \
reload.h langhooks.h intl.h $(TM_P_H) $(TARGET_H) \
tree-iterator.h gt-expr.h $(MACHMODE_H) $(TIMEVAR_H) $(TREE_FLOW_H) \
- $(TREE_PASS_H) $(DF_H) $(DIAGNOSTIC_H) vecprim.h $(SSAEXPAND_H) \
+ $(TREE_PASS_H) $(DF_H) $(DIAGNOSTIC_H) $(SSAEXPAND_H) \
$(PARAMS_H) $(COMMON_TARGET_H) target-globals.h
dojump.o : dojump.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TM_P_H) \
$(RTL_H) $(TREE_H) \
$(FLAGS_H) $(FUNCTION_H) $(EXPR_H) $(OPTABS_H) $(INSN_ATTR_H) insn-config.h \
- langhooks.h $(GGC_H) gt-dojump.h vecprim.h $(BASIC_BLOCK_H)
+ langhooks.h $(GGC_H) gt-dojump.h $(BASIC_BLOCK_H)
builtins.o : builtins.c builtins.h $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(TREE_H) $(GIMPLE_H) $(FLAGS_H) $(TARGET_H) $(FUNCTION_H) $(REGS_H) \
$(EXPR_H) $(OPTABS_H) insn-config.h $(RECOG_H) output.h typeclass.h \
godump.o : godump.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(DIAGNOSTIC_CORE_H) \
$(TREE_H) $(GGC_H) pointer-set.h $(OBSTACK_H) debug.h gt-godump.h
emit-rtl.o : emit-rtl.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
- $(TREE_H) $(FLAGS_H) $(FUNCTION_H) $(REGS_H) insn-config.h $(RECOG_H) vecprim.h \
+ $(TREE_H) $(FLAGS_H) $(FUNCTION_H) $(REGS_H) insn-config.h $(RECOG_H) \
$(GGC_H) $(EXPR_H) hard-reg-set.h $(BITMAP_H) $(DIAGNOSTIC_CORE_H) $(BASIC_BLOCK_H) \
$(HASHTAB_H) $(TM_P_H) debug.h langhooks.h $(TREE_PASS_H) gt-emit-rtl.h \
$(DF_H) $(PARAMS_H) $(TARGET_H)
df-problems.o : df-problems.c $(CONFIG_H) $(SYSTEM_H) coretypes.h dumpfile.h $(TM_H) \
$(RTL_H) insn-config.h $(RECOG_H) $(FUNCTION_H) $(REGS_H) alloc-pool.h \
hard-reg-set.h $(BASIC_BLOCK_H) $(DF_H) $(BITMAP_H) sbitmap.h $(TIMEVAR_H) \
- $(TM_P_H) $(TARGET_H) $(FLAGS_H) $(EXCEPT_H) dce.h vecprim.h $(VALTRACK_H)
+ $(TM_P_H) $(TARGET_H) $(FLAGS_H) $(EXCEPT_H) dce.h $(VALTRACK_H)
df-scan.o : df-scan.c $(CONFIG_H) $(SYSTEM_H) coretypes.h dumpfile.h $(TM_H) $(RTL_H) \
insn-config.h $(RECOG_H) $(FUNCTION_H) $(REGS_H) alloc-pool.h \
hard-reg-set.h $(BASIC_BLOCK_H) $(DF_H) $(BITMAP_H) sbitmap.h \
$(CFGLOOP_H) $(OBSTACK_H) $(TARGET_H) $(TREE_H) \
$(TREE_PASS_H) $(DF_H) $(GGC_H) $(COMMON_TARGET_H) gt-cfgrtl.h
cfganal.o : cfganal.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(BASIC_BLOCK_H) \
- $(TIMEVAR_H) vecprim.h sbitmap.h $(BITMAP_H)
+ $(TIMEVAR_H) sbitmap.h $(BITMAP_H)
cfgbuild.o : cfgbuild.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
$(FLAGS_H) $(BASIC_BLOCK_H) $(REGS_H) hard-reg-set.h $(DIAGNOSTIC_CORE_H) \
$(FUNCTION_H) $(EXCEPT_H) $(TIMEVAR_H) $(TREE_H) $(EXPR_H) sbitmap.h
$(BASIC_BLOCK_H) hard-reg-set.h $(CFGLOOP_H) $(EXPR_H) $(TM_H) \
$(OBSTACK_H) graphds.h $(PARAMS_H)
graphds.o : graphds.c graphds.h $(CONFIG_H) $(SYSTEM_H) $(BITMAP_H) $(OBSTACK_H) \
- coretypes.h $(VEC_H) vecprim.h
+ coretypes.h $(VEC_H)
loop-iv.o : loop-iv.c $(CONFIG_H) $(SYSTEM_H) coretypes.h dumpfile.h \
$(RTL_H) $(BASIC_BLOCK_H) \
hard-reg-set.h $(CFGLOOP_H) $(EXPR_H) $(TM_H) $(OBSTACK_H) \
$(OBSTACK_H)
dominance.o : dominance.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
hard-reg-set.h $(BASIC_BLOCK_H) et-forest.h $(OBSTACK_H) $(DIAGNOSTIC_CORE_H) \
- $(TIMEVAR_H) graphds.h vecprim.h pointer-set.h $(BITMAP_H)
+ $(TIMEVAR_H) graphds.h pointer-set.h $(BITMAP_H)
et-forest.o : et-forest.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
et-forest.h alloc-pool.h $(BASIC_BLOCK_H)
combine.o : combine.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
$(DIAGNOSTIC_CORE_H) $(TM_P_H) $(TREE_H) $(TARGET_H) \
output.h $(PARAMS_H) $(OPTABS_H) \
insn-codes.h $(TREE_PASS_H) $(DF_H) $(VALTRACK_H) \
- vecprim.h $(CGRAPH_H) $(OBSTACK_H)
+ $(CGRAPH_H) $(OBSTACK_H)
reginfo.o : reginfo.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
hard-reg-set.h $(FLAGS_H) $(BASIC_BLOCK_H) addresses.h $(REGS_H) \
insn-config.h $(RECOG_H) reload.h $(DIAGNOSTIC_CORE_H) \
bt-load.o : bt-load.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(EXCEPT_H) \
$(RTL_H) hard-reg-set.h $(REGS_H) $(TM_P_H) $(FIBHEAP_H) $(EXPR_H) \
$(TARGET_H) $(FLAGS_H) $(INSN_ATTR_H) $(FUNCTION_H) $(TREE_PASS_H) \
- $(DIAGNOSTIC_CORE_H) $(DF_H) vecprim.h $(RECOG_H) $(CFGLOOP_H)
+ $(DIAGNOSTIC_CORE_H) $(DF_H) $(RECOG_H) $(CFGLOOP_H)
reorg.o : reorg.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
conditions.h hard-reg-set.h $(BASIC_BLOCK_H) $(REGS_H) insn-config.h \
$(INSN_ATTR_H) $(EXCEPT_H) $(RECOG_H) $(FUNCTION_H) $(FLAGS_H) output.h \
$(EXCEPT_H) debug.h xcoffout.h toplev.h $(DIAGNOSTIC_CORE_H) reload.h $(DWARF2OUT_H) \
$(TREE_PASS_H) $(BASIC_BLOCK_H) $(TM_P_H) $(TARGET_H) $(EXPR_H) \
dbxout.h $(CGRAPH_H) $(COVERAGE_H) \
- $(DF_H) vecprim.h $(GGC_H) $(CFGLOOP_H) $(PARAMS_H) $(TREE_FLOW_H) \
+ $(DF_H) $(GGC_H) $(CFGLOOP_H) $(PARAMS_H) $(TREE_FLOW_H) \
$(TARGET_DEF_H) $(TREE_PRETTY_PRINT_H)
recog.o : recog.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_ERROR_H) \
$(FUNCTION_H) $(BASIC_BLOCK_H) $(REGS_H) $(RECOG_H) $(EXPR_H) \
$(RTL_ERROR_H) $(TREE_H) $(RECOG_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) \
insn-config.h reload.h $(FUNCTION_H) $(TM_P_H) $(GGC_H) \
$(BASIC_BLOCK_H) \
- $(TREE_PASS_H) $(TARGET_H) vecprim.h $(DF_H) $(EMIT_RTL_H)
+ $(TREE_PASS_H) $(TARGET_H) $(DF_H) $(EMIT_RTL_H)
sreal.o: sreal.c $(CONFIG_H) $(SYSTEM_H) coretypes.h sreal.h
predict.o: predict.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
$(TREE_H) $(FLAGS_H) insn-config.h $(BASIC_BLOCK_H) $(REGS_H) \
fi
GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
- $(srcdir)/vecprim.h $(srcdir)/vecir.h \
$(host_xm_file_list) \
$(tm_file_list) $(HASHTAB_H) $(SPLAY_TREE_H) $(srcdir)/bitmap.h \
$(srcdir)/alias.h $(srcdir)/coverage.c $(srcdir)/rtl.h \
$(SYSTEM_H) coretypes.h $(GTM_H) errors.h $(READ_MD_H) gensupport.h
build/genattrtab.o : genattrtab.c $(RTL_BASE_H) $(OBSTACK_H) \
$(BCONFIG_H) $(SYSTEM_H) coretypes.h $(GTM_H) errors.h $(GGC_H) \
- $(READ_MD_H) gensupport.h vecprim.h $(FNMATCH_H)
+ $(READ_MD_H) gensupport.h $(FNMATCH_H)
build/genautomata.o : genautomata.c $(RTL_BASE_H) $(OBSTACK_H) \
$(BCONFIG_H) $(SYSTEM_H) coretypes.h $(GTM_H) errors.h $(VEC_H) \
$(HASHTAB_H) gensupport.h $(FNMATCH_H)
build/genenums.o : genenums.c $(BCONFIG_H) $(SYSTEM_H) \
coretypes.h errors.h $(READ_MD_H)
build/genextract.o : genextract.c $(RTL_BASE_H) $(BCONFIG_H) \
- $(SYSTEM_H) coretypes.h $(GTM_H) errors.h $(READ_MD_H) gensupport.h \
- vecprim.h
+ $(SYSTEM_H) coretypes.h $(GTM_H) errors.h $(READ_MD_H) gensupport.h
build/genflags.o : genflags.c $(RTL_BASE_H) $(OBSTACK_H) $(BCONFIG_H) \
$(SYSTEM_H) coretypes.h $(GTM_H) errors.h $(READ_MD_H) gensupport.h
build/gengenrtl.o : gengenrtl.c $(BCONFIG_H) $(SYSTEM_H) rtl.def
+2012-11-16 Diego Novillo <dnovillo@google.com>
+
+ Adjust for new vec API (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
+
+ * gcc-interface/decl.c: Use new vec API in vec.h.
+ * gcc-interface/gigi.h: Likewise.
+ * gcc-interface/trans.c: Likewise.
+ * gcc-interface/utils.c: Likewise.
+ * gcc-interface/utils2.c: Likewise.
+
2012-11-09 Eric Botcazou <ebotcazou@adacore.com>
PR other/52438
tree replacement;
} subst_pair;
-DEF_VEC_O(subst_pair);
-DEF_VEC_ALLOC_O(subst_pair,heap);
typedef struct variant_desc_d {
/* The type of the variant. */
tree new_type;
} variant_desc;
-DEF_VEC_O(variant_desc);
-DEF_VEC_ALLOC_O(variant_desc,heap);
/* A hash table used to cache the result of annotate_value. */
static GTY ((if_marked ("tree_int_map_marked_p"),
static Uint annotate_value (tree);
static void annotate_rep (Entity_Id, tree);
static tree build_position_list (tree, bool, tree, tree, unsigned int, tree);
-static VEC(subst_pair,heap) *build_subst_list (Entity_Id, Entity_Id, bool);
-static VEC(variant_desc,heap) *build_variant_list (tree,
- VEC(subst_pair,heap) *,
- VEC(variant_desc,heap) *);
+static vec<subst_pair> build_subst_list (Entity_Id, Entity_Id, bool);
+static vec<variant_desc> build_variant_list (tree,
+ vec<subst_pair>,
+ vec<variant_desc>);
static tree validate_size (Uint, tree, Entity_Id, enum tree_code, bool, bool);
static void set_rm_size (Uint, tree, Entity_Id);
static unsigned int validate_alignment (Uint, Entity_Id, unsigned int);
static void check_ok_for_atomic (tree, Entity_Id, bool);
static tree create_field_decl_from (tree, tree, tree, tree, tree,
- VEC(subst_pair,heap) *);
+ vec<subst_pair>);
static tree create_rep_part (tree, tree, tree);
static tree get_rep_part (tree);
-static tree create_variant_part_from (tree, VEC(variant_desc,heap) *, tree,
- tree, VEC(subst_pair,heap) *);
-static void copy_and_substitute_in_size (tree, tree, VEC(subst_pair,heap) *);
+static tree create_variant_part_from (tree, vec<variant_desc>, tree,
+ tree, vec<subst_pair>);
+static void copy_and_substitute_in_size (tree, tree, vec<subst_pair>);
/* The relevant constituents of a subprogram binding to a GCC builtin. Used
to pass around calls performing profile compatibility checks. */
= TYPE_PADDING_P (gnu_type)
? TYPE_FIELDS (TREE_TYPE (TYPE_FIELDS (gnu_type)))
: TYPE_FIELDS (gnu_type);
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 1);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 1);
tree t = build_template (TREE_TYPE (template_field),
TREE_TYPE (DECL_CHAIN (template_field)),
NULL_TREE);
= TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_alloc_type)));
if (TREE_CODE (gnu_expr) == CONSTRUCTOR
- && 1 == VEC_length (constructor_elt,
- CONSTRUCTOR_ELTS (gnu_expr)))
+ && 1 == vec_safe_length (CONSTRUCTOR_ELTS (gnu_expr)))
gnu_expr = 0;
else
gnu_expr
&& Present (Discriminant_Constraint (gnat_entity))
&& Stored_Constraint (gnat_entity) != No_Elist)
{
- VEC(subst_pair,heap) *gnu_subst_list
+ vec<subst_pair> gnu_subst_list
= build_subst_list (gnat_entity, gnat_base_type, definition);
tree gnu_unpad_base_type, gnu_rep_part, gnu_variant_part, t;
tree gnu_pos_list, gnu_field_list = NULL_TREE;
bool selected_variant = false;
Entity_Id gnat_field;
- VEC(variant_desc,heap) *gnu_variant_list;
+ vec<variant_desc> gnu_variant_list;
gnu_type = make_node (RECORD_TYPE);
TYPE_NAME (gnu_type) = gnu_entity_name;
gnu_variant_list
= build_variant_list (TREE_TYPE (gnu_variant_part),
- gnu_subst_list, NULL);
+ gnu_subst_list,
+ vec<variant_desc>());
/* If all the qualifiers are unconditionally true, the
innermost variant is statically selected. */
selected_variant = true;
- FOR_EACH_VEC_ELT (variant_desc, gnu_variant_list, i, v)
+ FOR_EACH_VEC_ELT (gnu_variant_list, i, v)
if (!integer_onep (v->qual))
{
selected_variant = false;
/* Otherwise, create the new variants. */
if (!selected_variant)
- FOR_EACH_VEC_ELT (variant_desc, gnu_variant_list, i, v)
+ FOR_EACH_VEC_ELT (gnu_variant_list, i, v)
{
tree old_variant = v->type;
tree new_variant = make_node (RECORD_TYPE);
}
else
{
- gnu_variant_list = NULL;
+ gnu_variant_list.create (0);
selected_variant = false;
}
gnu_pos_list
= build_position_list (gnu_unpad_base_type,
- gnu_variant_list && !selected_variant,
+ gnu_variant_list.exists ()
+ && !selected_variant,
size_zero_node, bitsize_zero_node,
BIGGEST_ALIGNMENT, NULL_TREE);
unsigned int i;
t = NULL_TREE;
- FOR_EACH_VEC_ELT (variant_desc, gnu_variant_list, i, v)
+ FOR_EACH_VEC_ELT (gnu_variant_list, i, v)
if (gnu_context == v->type
|| ((gnu_rep_part = get_rep_part (v->type))
&& gnu_context == TREE_TYPE (gnu_rep_part)))
/* If there is a variant list and no selected variant, we need
to create the nest of variant parts from the old nest. */
- if (gnu_variant_list && !selected_variant)
+ if (gnu_variant_list.exists () && !selected_variant)
{
tree new_variant_part
= create_variant_part_from (gnu_variant_part,
gnat_entity);
}
- VEC_free (variant_desc, heap, gnu_variant_list);
- VEC_free (subst_pair, heap, gnu_subst_list);
+ gnu_variant_list.release ();
+ gnu_subst_list.release ();
/* Now we can finalize it. */
rest_of_record_type_compilation (gnu_type);
of operands to SUBSTITUTE_IN_EXPR. DEFINITION is true if this is for
a definition of GNAT_SUBTYPE. */
-static VEC(subst_pair,heap) *
+static vec<subst_pair>
build_subst_list (Entity_Id gnat_subtype, Entity_Id gnat_type, bool definition)
{
- VEC(subst_pair,heap) *gnu_list = NULL;
+ vec<subst_pair> gnu_list = vec<subst_pair>();
Entity_Id gnat_discrim;
Node_Id gnat_value;
get_entity_name (gnat_discrim),
definition, true, false));
subst_pair s = {gnu_field, replacement};
- VEC_safe_push (subst_pair, heap, gnu_list, s);
+ gnu_list.safe_push (s);
}
return gnu_list;
the substitutions described in SUBST_LIST. GNU_LIST is a pre-existing
list to be prepended to the newly created entries. */
-static VEC(variant_desc,heap) *
-build_variant_list (tree qual_union_type, VEC(subst_pair,heap) *subst_list,
- VEC(variant_desc,heap) *gnu_list)
+static vec<variant_desc>
+build_variant_list (tree qual_union_type, vec<subst_pair> subst_list,
+ vec<variant_desc> gnu_list)
{
tree gnu_field;
unsigned int i;
subst_pair *s;
- FOR_EACH_VEC_ELT (subst_pair, subst_list, i, s)
+ FOR_EACH_VEC_ELT (subst_list, i, s)
qual = SUBSTITUTE_IN_EXPR (qual, s->discriminant, s->replacement);
/* If the new qualifier is not unconditionally false, its variant may
tree variant_type = TREE_TYPE (gnu_field), variant_subpart;
variant_desc v = {variant_type, gnu_field, qual, NULL_TREE};
- VEC_safe_push (variant_desc, heap, gnu_list, v);
+ gnu_list.safe_push (v);
/* Recurse on the variant subpart of the variant, if any. */
variant_subpart = get_variant_part (variant_type);
static tree
create_field_decl_from (tree old_field, tree field_type, tree record_type,
tree size, tree pos_list,
- VEC(subst_pair,heap) *subst_list)
+ vec<subst_pair> subst_list)
{
tree t = TREE_VALUE (purpose_member (old_field, pos_list));
tree pos = TREE_VEC_ELT (t, 0), bitpos = TREE_VEC_ELT (t, 2);
subst_pair *s;
if (CONTAINS_PLACEHOLDER_P (pos))
- FOR_EACH_VEC_ELT (subst_pair, subst_list, i, s)
+ FOR_EACH_VEC_ELT (subst_list, i, s)
pos = SUBSTITUTE_IN_EXPR (pos, s->discriminant, s->replacement);
/* If the position is now a constant, we can set it as the position of the
static tree
create_variant_part_from (tree old_variant_part,
- VEC(variant_desc,heap) *variant_list,
+ vec<variant_desc> variant_list,
tree record_type, tree pos_list,
- VEC(subst_pair,heap) *subst_list)
+ vec<subst_pair> subst_list)
{
tree offset = DECL_FIELD_OFFSET (old_variant_part);
tree old_union_type = TREE_TYPE (old_variant_part);
copy_and_substitute_in_size (new_union_type, old_union_type, subst_list);
/* Now finish up the new variants and populate the union type. */
- FOR_EACH_VEC_ELT_REVERSE (variant_desc, variant_list, i, v)
+ FOR_EACH_VEC_ELT_REVERSE (variant_list, i, v)
{
tree old_field = v->field, new_field;
tree old_variant, old_variant_subpart, new_variant, field_list;
static void
copy_and_substitute_in_size (tree new_type, tree old_type,
- VEC(subst_pair,heap) *subst_list)
+ vec<subst_pair> subst_list)
{
unsigned int i;
subst_pair *s;
relate_alias_sets (new_type, old_type, ALIAS_SET_COPY);
if (CONTAINS_PLACEHOLDER_P (TYPE_SIZE (new_type)))
- FOR_EACH_VEC_ELT (subst_pair, subst_list, i, s)
+ FOR_EACH_VEC_ELT (subst_list, i, s)
TYPE_SIZE (new_type)
= SUBSTITUTE_IN_EXPR (TYPE_SIZE (new_type),
s->discriminant, s->replacement);
if (CONTAINS_PLACEHOLDER_P (TYPE_SIZE_UNIT (new_type)))
- FOR_EACH_VEC_ELT (subst_pair, subst_list, i, s)
+ FOR_EACH_VEC_ELT (subst_list, i, s)
TYPE_SIZE_UNIT (new_type)
= SUBSTITUTE_IN_EXPR (TYPE_SIZE_UNIT (new_type),
s->discriminant, s->replacement);
if (CONTAINS_PLACEHOLDER_P (TYPE_ADA_SIZE (new_type)))
- FOR_EACH_VEC_ELT (subst_pair, subst_list, i, s)
+ FOR_EACH_VEC_ELT (subst_list, i, s)
SET_TYPE_ADA_SIZE
(new_type, SUBSTITUTE_IN_EXPR (TYPE_ADA_SIZE (new_type),
s->discriminant, s->replacement));
/* Return a CONSTRUCTOR of TYPE whose elements are V. This is not the
same as build_constructor in the language-independent tree.c. */
-extern tree gnat_build_constructor (tree type, VEC(constructor_elt,gc) *v);
+extern tree gnat_build_constructor (tree type, vec<constructor_elt, va_gc> *v);
/* Return a COMPONENT_REF to access a field that is given by COMPONENT,
an IDENTIFIER_NODE giving the name of the field, FIELD, a FIELD_DECL,
/* Current filename without path. */
const char *ref_filename;
-DEF_VEC_I(Node_Id);
-DEF_VEC_ALLOC_I(Node_Id,heap);
/* List of N_Validate_Unchecked_Conversion nodes in the unit. */
-static VEC(Node_Id,heap) *gnat_validate_uc_list;
+static vec<Node_Id> gnat_validate_uc_list;
/* When not optimizing, we cache the 'First, 'Last and 'Length attributes
of unconstrained array IN parameters to avoid emitting a great deal of
typedef struct parm_attr_d *parm_attr;
-DEF_VEC_P(parm_attr);
-DEF_VEC_ALLOC_P(parm_attr,gc);
struct GTY(()) language_function {
- VEC(parm_attr,gc) *parm_attr_cache;
+ vec<parm_attr, va_gc> *parm_attr_cache;
bitmap named_ret_val;
- VEC(tree,gc) *other_ret_val;
+ vec<tree, va_gc> *other_ret_val;
int gnat_ret;
};
/* Stack of exception pointer variables. Each entry is the VAR_DECL
that stores the address of the raised exception. Nonzero means we
are in an exception handler. Not used in the zero-cost case. */
-static GTY(()) VEC(tree,gc) *gnu_except_ptr_stack;
+static GTY(()) vec<tree, va_gc> *gnu_except_ptr_stack;
/* In ZCX case, current exception pointer. Used to re-raise it. */
static GTY(()) tree gnu_incoming_exc_ptr;
/* Stack for storing the current elaboration procedure decl. */
-static GTY(()) VEC(tree,gc) *gnu_elab_proc_stack;
+static GTY(()) vec<tree, va_gc> *gnu_elab_proc_stack;
/* Stack of labels to be used as a goto target instead of a return in
some functions. See processing for N_Subprogram_Body. */
-static GTY(()) VEC(tree,gc) *gnu_return_label_stack;
+static GTY(()) vec<tree, va_gc> *gnu_return_label_stack;
/* Stack of variable for the return value of a function with copy-in/copy-out
parameters. See processing for N_Subprogram_Body. */
-static GTY(()) VEC(tree,gc) *gnu_return_var_stack;
+static GTY(()) vec<tree, va_gc> *gnu_return_var_stack;
/* Structure used to record information for a range check. */
struct GTY(()) range_check_info_d {
typedef struct range_check_info_d *range_check_info;
-DEF_VEC_P(range_check_info);
-DEF_VEC_ALLOC_P(range_check_info,gc);
/* Structure used to record information for a loop. */
struct GTY(()) loop_info_d {
tree label;
tree loop_var;
- VEC(range_check_info,gc) *checks;
+ vec<range_check_info, va_gc> *checks;
};
typedef struct loop_info_d *loop_info;
-DEF_VEC_P(loop_info);
-DEF_VEC_ALLOC_P(loop_info,gc);
/* Stack of loop_info structures associated with LOOP_STMT nodes. */
-static GTY(()) VEC(loop_info,gc) *gnu_loop_stack;
+static GTY(()) vec<loop_info, va_gc> *gnu_loop_stack;
/* The stacks for N_{Push,Pop}_*_Label. */
-static GTY(()) VEC(tree,gc) *gnu_constraint_error_label_stack;
-static GTY(()) VEC(tree,gc) *gnu_storage_error_label_stack;
-static GTY(()) VEC(tree,gc) *gnu_program_error_label_stack;
+static GTY(()) vec<tree, va_gc> *gnu_constraint_error_label_stack;
+static GTY(()) vec<tree, va_gc> *gnu_storage_error_label_stack;
+static GTY(()) vec<tree, va_gc> *gnu_program_error_label_stack;
/* Map GNAT tree codes to GCC tree codes for simple expressions. */
static enum tree_code gnu_codes[Number_Node_Kinds];
static void insert_code_for (Node_Id);
static void add_cleanup (tree, Node_Id);
static void add_stmt_list (List_Id);
-static void push_exception_label_stack (VEC(tree,gc) **, Entity_Id);
+static void push_exception_label_stack (vec<tree, va_gc> **, Entity_Id);
static tree build_stmt_group (List_Id, bool);
static inline bool stmt_group_may_fallthru (void);
static enum gimplify_status gnat_gimplify_stmt (tree *);
tree null_node = fold_convert (ptr_void_ftype, null_pointer_node);
tree field_list = NULL_TREE;
int j;
- VEC(constructor_elt,gc) *null_vec = NULL;
+ vec<constructor_elt, va_gc> *null_vec = NULL;
constructor_elt *elt;
fdesc_type_node = make_node (RECORD_TYPE);
- VEC_safe_grow (constructor_elt, gc, null_vec,
- TARGET_VTABLE_USES_DESCRIPTORS);
- elt = (VEC_address (constructor_elt,null_vec)
- + TARGET_VTABLE_USES_DESCRIPTORS - 1);
+ vec_safe_grow (null_vec, TARGET_VTABLE_USES_DESCRIPTORS);
+ elt = (null_vec->address () + TARGET_VTABLE_USES_DESCRIPTORS - 1);
for (j = 0; j < TARGET_VTABLE_USES_DESCRIPTORS; j++)
{
user available facilities for Intrinsic imports. */
gnat_install_builtins ();
- VEC_safe_push (tree, gc, gnu_except_ptr_stack, NULL_TREE);
- VEC_safe_push (tree, gc, gnu_constraint_error_label_stack, NULL_TREE);
- VEC_safe_push (tree, gc, gnu_storage_error_label_stack, NULL_TREE);
- VEC_safe_push (tree, gc, gnu_program_error_label_stack, NULL_TREE);
+ vec_safe_push (gnu_except_ptr_stack, NULL_TREE);
+ vec_safe_push (gnu_constraint_error_label_stack, NULL_TREE);
+ vec_safe_push (gnu_storage_error_label_stack, NULL_TREE);
+ vec_safe_push (gnu_program_error_label_stack, NULL_TREE);
/* Process any Pragma Ident for the main unit. */
if (Present (Ident_String (Main_Unit)))
/* Then process the N_Validate_Unchecked_Conversion nodes. We do this at
the very end to avoid having to second-guess the front-end when we run
into dummy nodes during the regular processing. */
- for (i = 0; VEC_iterate (Node_Id, gnat_validate_uc_list, i, gnat_iter); i++)
+ for (i = 0; gnat_validate_uc_list.iterate (i, &gnat_iter); i++)
validate_unchecked_conversion (gnat_iter);
- VEC_free (Node_Id, heap, gnat_validate_uc_list);
+ gnat_validate_uc_list.release ();
/* Finally see if we have any elaboration procedures to deal with. */
for (info = elab_info_list; info; info = info->next)
/* Descriptors can only be built here for top-level functions. */
bool build_descriptor = (global_bindings_p () != 0);
int i;
- VEC(constructor_elt,gc) *gnu_vec = NULL;
+ vec<constructor_elt, va_gc> *gnu_vec = NULL;
constructor_elt *elt;
gnu_result_type = get_unpadded_type (Etype (gnat_node));
gnu_result = build1 (INDIRECT_REF, gnu_result_type, gnu_result);
}
- VEC_safe_grow (constructor_elt, gc, gnu_vec,
- TARGET_VTABLE_USES_DESCRIPTORS);
- elt = (VEC_address (constructor_elt, gnu_vec)
- + TARGET_VTABLE_USES_DESCRIPTORS - 1);
+ vec_safe_grow (gnu_vec, TARGET_VTABLE_USES_DESCRIPTORS);
+ elt = (gnu_vec->address () + TARGET_VTABLE_USES_DESCRIPTORS - 1);
for (gnu_field = TYPE_FIELDS (gnu_result_type), i = 0;
i < TARGET_VTABLE_USES_DESCRIPTORS;
gnu_field = DECL_CHAIN (gnu_field), i++)
and the dimension in the cache and create a new one on failure. */
if (!optimize && Present (gnat_param))
{
- FOR_EACH_VEC_ELT (parm_attr, f_parm_attr_cache, i, pa)
+ FOR_EACH_VEC_SAFE_ELT (f_parm_attr_cache, i, pa)
if (pa->id == gnat_param && pa->dim == Dimension)
break;
pa = ggc_alloc_cleared_parm_attr_d ();
pa->id = gnat_param;
pa->dim = Dimension;
- VEC_safe_push (parm_attr, gc, f_parm_attr_cache, pa);
+ vec_safe_push (f_parm_attr_cache, pa);
}
}
struct loop_info_d *iter = NULL;
unsigned int i;
- if (VEC_empty (loop_info, gnu_loop_stack))
+ if (vec_safe_is_empty (gnu_loop_stack))
return NULL;
var = remove_conversions (var, false);
if (decl_function_context (var) != current_function_decl)
return NULL;
- for (i = VEC_length (loop_info, gnu_loop_stack) - 1;
- VEC_iterate (loop_info, gnu_loop_stack, i, iter);
+ for (i = vec_safe_length (gnu_loop_stack) - 1;
+ vec_safe_iterate (gnu_loop_stack, i, &iter);
i--)
if (var == iter->loop_var)
break;
if (iter)
{
struct range_check_info_d *rci = ggc_alloc_range_check_info_d ();
- VEC_safe_push (range_check_info, gc, iter->checks, rci);
+ vec_safe_push (iter->checks, rci);
return rci;
}
tree gnu_result;
/* Push the loop_info structure associated with the LOOP_STMT. */
- VEC_safe_push (loop_info, gc, gnu_loop_stack, gnu_loop_info);
+ vec_safe_push (gnu_loop_stack, gnu_loop_info);
/* Set location information for statement and end label. */
set_expr_location_from_node (gnu_loop_stmt, gnat_node);
if (Present (gnat_iter_scheme) && No (Condition (gnat_iter_scheme)))
{
struct range_check_info_d *rci;
- unsigned n_checks = VEC_length (range_check_info, gnu_loop_info->checks);
+ unsigned n_checks = vec_safe_length (gnu_loop_info->checks);
unsigned int i;
/* First, if we have computed a small number of invariant conditions for
that can be entirely optimized away in the end. */
if (1 <= n_checks && n_checks <= 4)
for (i = 0;
- VEC_iterate (range_check_info, gnu_loop_info->checks, i, rci);
+ vec_safe_iterate (gnu_loop_info->checks, i, &rci);
i++)
{
tree low_ok
else
gnu_result = gnu_loop_stmt;
- VEC_pop (loop_info, gnu_loop_stack);
+ gnu_loop_stack->pop ();
return gnu_result;
}
{
if (TYPE_IS_FAT_POINTER_P (TREE_TYPE (ret_val)))
ret_val
- = VEC_index (constructor_elt,
- CONSTRUCTOR_ELTS
- (TREE_OPERAND (TREE_OPERAND (ret_val, 0), 1)),
- 1).value;
+ = (*CONSTRUCTOR_ELTS (TREE_OPERAND (TREE_OPERAND (ret_val, 0),
+ 1)))[1].value;
else
ret_val = TREE_OPERAND (TREE_OPERAND (ret_val, 0), 1);
}
tree saved_current_function_decl = current_function_decl;
tree var = DECL_EXPR_DECL (t);
tree alloc, p_array, new_var, new_ret;
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 2);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 2);
/* Create an artificial context to build the allocation. */
current_function_decl = decl_function_context (var);
DECL_INITIAL (new_var)
= build2 (COMPOUND_EXPR, TREE_TYPE (new_var),
TREE_OPERAND (alloc, 0),
- VEC_index (constructor_elt,
- CONSTRUCTOR_ELTS (TREE_OPERAND (alloc, 1)),
- 0).value);
+ (*CONSTRUCTOR_ELTS (TREE_OPERAND (alloc, 1)))[0].value);
/* Build a modified CONSTRUCTOR that references NEW_VAR. */
p_array = TYPE_FIELDS (TREE_TYPE (alloc));
CONSTRUCTOR_APPEND_ELT (v, p_array,
fold_convert (TREE_TYPE (p_array), new_var));
CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (p_array),
- VEC_index (constructor_elt,
- CONSTRUCTOR_ELTS
- (TREE_OPERAND (alloc, 1)),
- 1).value);
+ (*CONSTRUCTOR_ELTS (
+ TREE_OPERAND (alloc, 1)))[1].value);
new_ret = build_constructor (TREE_TYPE (alloc), v);
}
else
the other return values. GNAT_RET is a representative return node. */
static void
-finalize_nrv (tree fndecl, bitmap nrv, VEC(tree,gc) *other, Node_Id gnat_ret)
+finalize_nrv (tree fndecl, bitmap nrv, vec<tree, va_gc> *other, Node_Id gnat_ret)
{
struct cgraph_node *node;
struct nrv_data data;
data.nrv = nrv;
data.result = NULL_TREE;
data.visited = NULL;
- for (i = 0; VEC_iterate(tree, other, i, iter); i++)
+ for (i = 0; vec_safe_iterate (other, i, &iter); i++)
walk_tree_without_duplicates (&iter, prune_nrv_r, &data);
if (bitmap_empty_p (nrv))
return;
totally transparent given the read-compose-write semantics of
assignments from CONSTRUCTORs. */
else if (EXPR_P (ret_val))
- VEC_safe_push (tree, gc, f_other_ret_val, ret_val);
+ vec_safe_push (f_other_ret_val, ret_val);
}
}
else
tree gnu_subprog_type, gnu_subprog_addr, gnu_subprog_call;
tree gnu_subprog_param, gnu_stub_param, gnu_param;
tree gnu_stub_decl = DECL_FUNCTION_STUB (gnu_subprog);
- VEC(tree,gc) *gnu_param_vec = NULL;
+ vec<tree, va_gc> *gnu_param_vec = NULL;
gnu_subprog_type = TREE_TYPE (gnu_subprog);
else
gnu_param = gnu_stub_param;
- VEC_safe_push (tree, gc, gnu_param_vec, gnu_param);
+ vec_safe_push (gnu_param_vec, gnu_param);
}
/* Invoke the internal subprogram. */
tree gnu_return_var_elmt = NULL_TREE;
tree gnu_result;
struct language_function *gnu_subprog_language;
- VEC(parm_attr,gc) *cache;
+ vec<parm_attr, va_gc> *cache;
/* If this is a generic object or if it has been eliminated,
ignore it. */
{
tree gnu_return_var = NULL_TREE;
- VEC_safe_push (tree, gc, gnu_return_label_stack,
+ vec_safe_push (gnu_return_label_stack,
create_artificial_label (input_location));
start_stmt_group ();
TREE_VALUE (gnu_return_var_elmt) = gnu_return_var;
}
- VEC_safe_push (tree, gc, gnu_return_var_stack, gnu_return_var);
+ vec_safe_push (gnu_return_var_stack, gnu_return_var);
/* See whether there are parameters for which we don't have a GCC tree
yet. These must be Out parameters. Make a VAR_DECL for them and
}
}
else
- VEC_safe_push (tree, gc, gnu_return_label_stack, NULL_TREE);
+ vec_safe_push (gnu_return_label_stack, NULL_TREE);
/* Get a tree corresponding to the code for the subprogram. */
start_stmt_group ();
start_stmt_group ();
- FOR_EACH_VEC_ELT (parm_attr, cache, i, pa)
+ FOR_EACH_VEC_ELT (*cache, i, pa)
{
if (pa->first)
add_stmt_with_node_force (pa->first, gnat_node);
add_stmt (gnu_result);
add_stmt (build1 (LABEL_EXPR, void_type_node,
- VEC_last (tree, gnu_return_label_stack)));
+ gnu_return_label_stack->last ()));
if (list_length (gnu_cico_list) == 1)
gnu_retval = TREE_VALUE (gnu_cico_list);
gnu_result = end_stmt_group ();
}
- VEC_pop (tree, gnu_return_label_stack);
+ gnu_return_label_stack->pop ();
/* Attempt setting the end_locus of our GCC body tree, typically a
BIND_EXPR or STATEMENT_LIST, then the end_locus of our GCC subprogram
/* The return type of the FUNCTION_TYPE. */
tree gnu_result_type = TREE_TYPE (gnu_subprog_type);
tree gnu_subprog_addr = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_subprog);
- VEC(tree,gc) *gnu_actual_vec = NULL;
+ vec<tree, va_gc> *gnu_actual_vec = NULL;
tree gnu_name_list = NULL_TREE;
tree gnu_stmt_list = NULL_TREE;
tree gnu_after_list = NULL_TREE;
gnu_actual = convert (DECL_ARG_TYPE (gnu_formal), gnu_actual);
}
- VEC_safe_push (tree, gc, gnu_actual_vec, gnu_actual);
+ vec_safe_push (gnu_actual_vec, gnu_actual);
}
gnu_call
start_stmt_group ();
gnat_pushlevel ();
- VEC_safe_push (tree, gc, gnu_except_ptr_stack,
+ vec_safe_push (gnu_except_ptr_stack,
create_var_decl (get_identifier ("EXCEPT_PTR"), NULL_TREE,
build_pointer_type (except_type_node),
build_call_n_expr (get_excptr_decl, 0),
/* If none of the exception handlers did anything, re-raise but do not
defer abortion. */
gnu_expr = build_call_n_expr (raise_nodefer_decl, 1,
- VEC_last (tree, gnu_except_ptr_stack));
+ gnu_except_ptr_stack->last ());
set_expr_location_from_node
(gnu_expr,
Present (End_Label (gnat_node)) ? End_Label (gnat_node) : gnat_node);
/* End the binding level dedicated to the exception handlers and get the
whole statement group. */
- VEC_pop (tree, gnu_except_ptr_stack);
+ gnu_except_ptr_stack->pop ();
gnat_poplevel ();
gnu_handler = end_stmt_group ();
build_component_ref
(build_unary_op
(INDIRECT_REF, NULL_TREE,
- VEC_last (tree, gnu_except_ptr_stack)),
+ gnu_except_ptr_stack->last ()),
get_identifier ("not_handled_by_others"), NULL_TREE,
false)),
integer_zero_node);
this_choice
= build_binary_op
(EQ_EXPR, boolean_type_node,
- VEC_last (tree, gnu_except_ptr_stack),
- convert (TREE_TYPE (VEC_last (tree, gnu_except_ptr_stack)),
+ gnu_except_ptr_stack->last (),
+ convert (TREE_TYPE (gnu_except_ptr_stack->last ()),
build_unary_op (ADDR_EXPR, NULL_TREE, gnu_expr)));
/* If this is the distinguished exception "Non_Ada_Error" (and we are
tree gnu_comp
= build_component_ref
(build_unary_op (INDIRECT_REF, NULL_TREE,
- VEC_last (tree, gnu_except_ptr_stack)),
+ gnu_except_ptr_stack->last ()),
get_identifier ("lang"), NULL_TREE, false);
this_choice
gnat_unit);
struct elab_info *info;
- VEC_safe_push (tree, gc, gnu_elab_proc_stack, gnu_elab_proc_decl);
+ vec_safe_push (gnu_elab_proc_stack, gnu_elab_proc_decl);
DECL_ELABORATION_PROC_P (gnu_elab_proc_decl) = 1;
/* Initialize the information structure for the function. */
/* Generate elaboration code for this unit, if necessary, and say whether
we did or not. */
- VEC_pop (tree, gnu_elab_proc_stack);
+ gnu_elab_proc_stack->pop ();
/* Invalidate the global renaming pointers. This is necessary because
stabilization of the renamed entities may create SAVE_EXPRs which
int length = String_Length (gnat_string);
int i;
tree gnu_idx = TYPE_MIN_VALUE (TYPE_DOMAIN (gnu_result_type));
- VEC(constructor_elt,gc) *gnu_vec
- = VEC_alloc (constructor_elt, gc, length);
+ vec<constructor_elt, va_gc> *gnu_vec;
+ vec_alloc (gnu_vec, length);
for (i = 0; i < length; i++)
{
gnu_aggr_type = TYPE_REPRESENTATIVE_ARRAY (gnu_result_type);
if (Null_Record_Present (gnat_node))
- gnu_result = gnat_build_constructor (gnu_aggr_type, NULL);
+ gnu_result = gnat_build_constructor (gnu_aggr_type, NULL);
else if (TREE_CODE (gnu_aggr_type) == RECORD_TYPE
|| TREE_CODE (gnu_aggr_type) == UNION_TYPE)
? gnat_to_gnu (Condition (gnat_node)) : NULL_TREE),
(Present (Name (gnat_node))
? get_gnu_tree (Entity (Name (gnat_node)))
- : VEC_last (loop_info, gnu_loop_stack)->label));
+ : gnu_loop_stack->last ()->label));
break;
case N_Simple_Return_Statement:
/* If this function has copy-in/copy-out parameters, get the real
object for the return. See Subprogram_to_gnu. */
if (TYPE_CI_CO_LIST (gnu_subprog_type))
- gnu_ret_obj = VEC_last (tree, gnu_return_var_stack);
+ gnu_ret_obj = gnu_return_var_stack->last ();
else
gnu_ret_obj = DECL_RESULT (current_function_decl);
/* If we have a return label defined, convert this into a branch to
that label. The return proper will be handled elsewhere. */
- if (VEC_last (tree, gnu_return_label_stack))
+ if (gnu_return_label_stack->last ())
{
if (gnu_ret_obj)
add_stmt (build_binary_op (MODIFY_EXPR, NULL_TREE, gnu_ret_obj,
gnu_ret_val));
gnu_result = build1 (GOTO_EXPR, void_type_node,
- VEC_last (tree, gnu_return_label_stack));
+ gnu_return_label_stack->last ());
/* When not optimizing, make sure the return is preserved. */
if (!optimize && Comes_From_Source (gnat_node))
- DECL_ARTIFICIAL (VEC_last (tree, gnu_return_label_stack)) = 0;
+ DECL_ARTIFICIAL (gnu_return_label_stack->last ()) = 0;
}
/* Otherwise, build a regular return. */
break;
case N_Pop_Constraint_Error_Label:
- VEC_pop (tree, gnu_constraint_error_label_stack);
+ gnu_constraint_error_label_stack->pop ();
break;
case N_Pop_Storage_Error_Label:
- VEC_pop (tree, gnu_storage_error_label_stack);
+ gnu_storage_error_label_stack->pop ();
break;
case N_Pop_Program_Error_Label:
- VEC_pop (tree, gnu_program_error_label_stack);
+ gnu_program_error_label_stack->pop ();
break;
/******************************/
/* The only validation we currently do on an unchecked conversion is
that of aliasing assumptions. */
if (flag_strict_aliasing)
- VEC_safe_push (Node_Id, heap, gnat_validate_uc_list, gnat_node);
+ gnat_validate_uc_list.safe_push (gnat_node);
gnu_result = alloc_stmt_list ();
break;
label to push onto the stack. */
static void
-push_exception_label_stack (VEC(tree,gc) **gnu_stack, Entity_Id gnat_label)
+push_exception_label_stack (vec<tree, va_gc> **gnu_stack, Entity_Id gnat_label)
{
tree gnu_label = (Present (gnat_label)
? gnat_to_gnu_entity (gnat_label, NULL_TREE, 0)
: NULL_TREE);
- VEC_safe_push (tree, gc, *gnu_stack, gnu_label);
+ vec_safe_push (*gnu_stack, gnu_label);
}
\f
/* Record the current code position in GNAT_NODE. */
{
tree gnu_index = TYPE_MIN_VALUE (TYPE_DOMAIN (gnu_array_type));
tree gnu_expr;
- VEC(constructor_elt,gc) *gnu_expr_vec = NULL;
+ vec<constructor_elt, va_gc> *gnu_expr_vec = NULL;
for ( ; Present (gnat_expr); gnat_expr = Next (gnat_expr))
{
extract_values (tree values, tree record_type)
{
tree field, tem;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
for (field = TYPE_FIELDS (record_type); field; field = DECL_CHAIN (field))
{
{
value = extract_values (values, TREE_TYPE (field));
if (TREE_CODE (value) == CONSTRUCTOR
- && VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (value)))
+ && vec_safe_is_empty (CONSTRUCTOR_ELTS (value)))
value = 0;
}
else
get_exception_label (char kind)
{
if (kind == N_Raise_Constraint_Error)
- return VEC_last (tree, gnu_constraint_error_label_stack);
+ return gnu_constraint_error_label_stack->last ();
else if (kind == N_Raise_Storage_Error)
- return VEC_last (tree, gnu_storage_error_label_stack);
+ return gnu_storage_error_label_stack->last ();
else if (kind == N_Raise_Program_Error)
- return VEC_last (tree, gnu_program_error_label_stack);
+ return gnu_program_error_label_stack->last ();
else
return NULL_TREE;
}
tree
get_elaboration_procedure (void)
{
- return VEC_last (tree, gnu_elab_proc_stack);
+ return gnu_elab_proc_stack->last ();
}
#include "gt-ada-trans.h"
static GTY(()) tree global_context;
/* An array of global declarations. */
-static GTY(()) VEC(tree,gc) *global_decls;
+static GTY(()) vec<tree, va_gc> *global_decls;
/* An array of builtin function declarations. */
-static GTY(()) VEC(tree,gc) *builtin_decls;
+static GTY(()) vec<tree, va_gc> *builtin_decls;
/* An array of global renaming pointers. */
-static GTY(()) VEC(tree,gc) *global_renaming_pointers;
+static GTY(()) vec<tree, va_gc> *global_renaming_pointers;
/* A chain of unused BLOCK nodes. */
static GTY((deletable)) tree free_block_chain;
if (DECL_EXTERNAL (decl))
{
if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl))
- VEC_safe_push (tree, gc, builtin_decls, decl);
+ vec_safe_push (builtin_decls, decl);
}
else if (global_bindings_p ())
- VEC_safe_push (tree, gc, global_decls, decl);
+ vec_safe_push (global_decls, decl);
else
{
DECL_CHAIN (decl) = BLOCK_VARS (current_binding_level->block);
/* A list of the data type nodes of the subprogram formal parameters.
This list is generated by traversing the input list of PARM_DECL
nodes. */
- VEC(tree,gc) *param_type_list = NULL;
+ vec<tree, va_gc> *param_type_list = NULL;
tree t, type;
for (t = param_decl_list; t; t = DECL_CHAIN (t))
- VEC_safe_push (tree, gc, param_type_list, TREE_TYPE (t));
+ vec_safe_push (param_type_list, TREE_TYPE (t));
type = build_function_type_vec (return_type, param_type_list);
record_global_renaming_pointer (tree decl)
{
gcc_assert (!DECL_LOOP_PARM_P (decl) && DECL_RENAMED_OBJECT (decl));
- VEC_safe_push (tree, gc, global_renaming_pointers, decl);
+ vec_safe_push (global_renaming_pointers, decl);
}
/* Invalidate the global renaming pointers. */
unsigned int i;
tree iter;
- FOR_EACH_VEC_ELT (tree, global_renaming_pointers, i, iter)
+ if (global_renaming_pointers == NULL)
+ return;
+
+ FOR_EACH_VEC_ELT (*global_renaming_pointers, i, iter)
SET_DECL_RENAMED_OBJECT (iter, NULL_TREE);
- VEC_free (tree, gc, global_renaming_pointers);
+ vec_free (global_renaming_pointers);
}
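The NULL check added above is needed because FOR_EACH_VEC_ELT now takes a vector object, so a vec<T, va_gc> * must be dereferenced first; FOR_EACH_VEC_SAFE_ELT, used elsewhere in this patch, folds the check in. A stand-alone toy sketch of the NULL-tolerant iteration idiom (invented names, not the real macros):

#include <cstdio>
#include <vector>

static void
print_int (int x)
{
  std::printf ("%d\n", x);
}

/* Toy analogue of FOR_EACH_VEC_SAFE_ELT: iterating a possibly-NULL
   vector pointer visits zero elements instead of crashing.  */
template <typename T, typename Fn>
static void
toy_for_each_safe (const std::vector<T> *v, Fn fn)
{
  if (!v)
    return;			/* NULL vector: nothing to iterate.  */
  for (unsigned i = 0; i < (unsigned) v->size (); i++)
    fn ((*v)[i]);
}

int
main ()
{
  std::vector<int> *none = 0;
  toy_for_each_safe (none, print_int);	/* No-op, no guard needed.  */

  std::vector<int> three;
  three.push_back (1);
  three.push_back (2);
  three.push_back (3);
  toy_for_each_safe (&three, print_int);	/* Prints 1 2 3.  */
  return 0;
}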
/* Return true if VALUE is known to be a multiple of FACTOR, which must be
tree
build_template (tree template_type, tree array_type, tree expr)
{
- VEC(constructor_elt,gc) *template_elts = NULL;
+ vec<constructor_elt, va_gc> *template_elts = NULL;
tree bound_list = NULL_TREE;
tree field;
tree
fill_vms_descriptor (tree gnu_type, tree gnu_expr, Node_Id gnat_actual)
{
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
tree field;
gnu_expr = maybe_unconstrained_array (gnu_expr);
/* See the head comment of build_vms_descriptor. */
int iklass = TREE_INT_CST_LOW (DECL_INITIAL (klass));
tree lfield, ufield;
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
/* Convert POINTER to the pointer-to-array type. */
gnu_expr64 = convert (p_array_type, gnu_expr64);
case 1: /* Class S */
case 15: /* Class SB */
/* Build {1, LENGTH} template; LENGTH64 is the 5th field. */
- v = VEC_alloc (constructor_elt, gc, 2);
+ vec_alloc (v, 2);
t = DECL_CHAIN (DECL_CHAIN (klass));
t = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
CONSTRUCTOR_APPEND_ELT (v, min_field,
(TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (template_type))), ufield);
/* Build the template in the form of a constructor. */
- v = VEC_alloc (constructor_elt, gc, 2);
+ vec_alloc (v, 2);
CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (template_type), lfield);
CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (template_type)),
ufield);
(TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (template_type))), ufield);
/* Build the template in the form of a constructor. */
- v = VEC_alloc (constructor_elt, gc, 2);
+ vec_alloc (v, 2);
CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (template_type), lfield);
CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (template_type)),
ufield);
}
/* Build the fat pointer in the form of a constructor. */
- v = VEC_alloc (constructor_elt, gc, 2);
+ vec_alloc (v, 2);
CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (gnu_type), gnu_expr64);
CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (gnu_type)),
template_addr);
tree template_tree, template_addr, aflags, dimct, t, u;
/* See the head comment of build_vms_descriptor. */
int iklass = TREE_INT_CST_LOW (DECL_INITIAL (klass));
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
/* Convert POINTER to the pointer-to-array type. */
gnu_expr32 = convert (p_array_type, gnu_expr32);
case 1: /* Class S */
case 15: /* Class SB */
/* Build {1, LENGTH} template; LENGTH is the 1st field. */
- v = VEC_alloc (constructor_elt, gc, 2);
+ vec_alloc (v, 2);
t = TYPE_FIELDS (desc_type);
t = build3 (COMPONENT_REF, TREE_TYPE (t), desc, t, NULL_TREE);
CONSTRUCTOR_APPEND_ELT (v, min_field,
}
/* Build the fat pointer in the form of a constructor. */
- v = VEC_alloc (constructor_elt, gc, 2);
+ vec_alloc (v, 2);
CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (gnu_type), gnu_expr32);
CONSTRUCTOR_APPEND_ELT (v, DECL_CHAIN (TYPE_FIELDS (gnu_type)),
template_addr);
tree p_array_type = TREE_TYPE (TYPE_FIELDS (type));
tree etype = TREE_TYPE (expr);
tree template_tree;
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 2);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 2);
/* If EXPR is null, make a fat pointer that contains a null pointer to the
array (compare_fat_pointers ensures that this is the full discriminant)
{
/* The template type can still be dummy at this point so we build an
empty constructor. The middle-end will fill it in with zeros. */
t = build_constructor (template_type, NULL);
TREE_CONSTANT (t) = TREE_STATIC (t) = 1;
null_bounds = build_unary_op (ADDR_EXPR, NULL_TREE, t);
SET_TYPE_NULL_BOUNDS (ptr_template_type, null_bounds);
constructor to build the record, unless a variable size is involved. */
else if (code == RECORD_TYPE && TYPE_PADDING_P (type))
{
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
/* If we previously converted from another type and our type is
of variable size, remove the conversion to avoid the need for
expr),
false);
- v = VEC_alloc (constructor_elt, gc, 1);
+ vec_alloc (v, 1);
CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (type),
convert (TREE_TYPE (TYPE_FIELDS (type)), expr));
return gnat_build_constructor (type, v);
/* If we have just converted to this padded type, just get the
inner expression. */
if (TREE_CODE (expr) == CONSTRUCTOR
- && !VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (expr))
- && VEC_index (constructor_elt, CONSTRUCTOR_ELTS (expr), 0).index
- == TYPE_FIELDS (etype))
- unpadded
- = VEC_index (constructor_elt, CONSTRUCTOR_ELTS (expr), 0).value;
+ && !vec_safe_is_empty (CONSTRUCTOR_ELTS (expr))
+ && (*CONSTRUCTOR_ELTS (expr))[0].index == TYPE_FIELDS (etype))
+ unpadded = (*CONSTRUCTOR_ELTS (expr))[0].value;
/* Otherwise, build an explicit component reference. */
else
if (code == RECORD_TYPE && TYPE_CONTAINS_TEMPLATE_P (type))
{
tree obj_type = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (type)));
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 2);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 2);
/* If the source already has a template, get a reference to the
associated array only, as we are going to rebuild a template
{
expr = copy_node (expr);
TREE_TYPE (expr) = type;
- CONSTRUCTOR_ELTS (expr)
- = VEC_copy (constructor_elt, gc, CONSTRUCTOR_ELTS (expr));
+ CONSTRUCTOR_ELTS (expr) = vec_safe_copy (CONSTRUCTOR_ELTS (expr));
return expr;
}
|| tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (etype))))
{
- VEC(constructor_elt,gc) *e = CONSTRUCTOR_ELTS (expr);
- unsigned HOST_WIDE_INT len = VEC_length (constructor_elt, e);
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, len);
+ vec<constructor_elt, va_gc> *e = CONSTRUCTOR_ELTS (expr);
+ unsigned HOST_WIDE_INT len = vec_safe_length (e);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, len);
tree efield = TYPE_FIELDS (etype), field = TYPE_FIELDS (type);
unsigned HOST_WIDE_INT idx;
tree index, value;
if (!SAME_FIELD_P (efield, field))
break;
constructor_elt elt = {field, convert (TREE_TYPE (field), value)};
- VEC_quick_push (constructor_elt, v, elt);
+ v->quick_push (elt);
/* If packing has made this field a bitfield and the input
value couldn't be emitted statically any more, we need to
&& gnat_types_compatible_p (TYPE_REPRESENTATIVE_ARRAY (type),
etype))
{
- VEC(constructor_elt,gc) *e = CONSTRUCTOR_ELTS (expr);
- unsigned HOST_WIDE_INT len = VEC_length (constructor_elt, e);
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *e = CONSTRUCTOR_ELTS (expr);
+ unsigned HOST_WIDE_INT len = vec_safe_length (e);
+ vec<constructor_elt, va_gc> *v;
unsigned HOST_WIDE_INT ix;
tree value;
}
/* Otherwise, build a regular vector constructor. */
- v = VEC_alloc (constructor_elt, gc, len);
+ vec_alloc (v, len);
FOR_EACH_CONSTRUCTOR_VALUE (e, ix, value)
{
constructor_elt elt = {NULL_TREE, value};
- VEC_quick_push (constructor_elt, v, elt);
+ v->quick_push (elt);
}
expr = copy_node (expr);
TREE_TYPE (expr) = type;
case RECORD_TYPE:
if (TYPE_JUSTIFIED_MODULAR_P (type) && !AGGREGATE_TYPE_P (etype))
{
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 1);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 1);
CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (type),
convert (TREE_TYPE (TYPE_FIELDS (type)),
&& TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE
&& TYPE_JUSTIFIED_MODULAR_P (TREE_TYPE (exp)))
return
- remove_conversions (VEC_index (constructor_elt,
- CONSTRUCTOR_ELTS (exp), 0).value,
- true);
+ remove_conversions ((*CONSTRUCTOR_ELTS (exp))[0].value, true);
break;
case COMPONENT_REF:
{
tree rec_type = make_node (RECORD_TYPE);
unsigned HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_RM_SIZE (etype));
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 1);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 1);
tree field_type, field;
if (TYPE_UNSIGNED (etype))
/* If we have declared types as used at the global level, insert them in
the global hash table. We use a dummy variable for this purpose. */
- if (!VEC_empty (tree, types_used_by_cur_var_decl))
+ if (types_used_by_cur_var_decl && !types_used_by_cur_var_decl->is_empty ())
{
struct varpool_node *node;
char *label;
node = varpool_node_for_decl (dummy_global);
node->symbol.force_output = 1;
- while (!VEC_empty (tree, types_used_by_cur_var_decl))
+ while (!types_used_by_cur_var_decl->is_empty ())
{
- tree t = VEC_pop (tree, types_used_by_cur_var_decl);
+ tree t = types_used_by_cur_var_decl->pop ();
types_used_by_var_decl_insert (t, dummy_global);
}
}
ensures that global types whose compilation hasn't been finalized yet,
for example pointers to Taft amendment types, have their compilation
finalized in the right context. */
- FOR_EACH_VEC_ELT (tree, global_decls, i, iter)
+ FOR_EACH_VEC_SAFE_ELT (global_decls, i, iter)
if (TREE_CODE (iter) == TYPE_DECL)
debug_hooks->global_decl (iter);
if (!seen_error ())
{
timevar_push (TV_SYMOUT);
- FOR_EACH_VEC_ELT (tree, global_decls, i, iter)
+ FOR_EACH_VEC_SAFE_ELT (global_decls, i, iter)
if (TREE_CODE (iter) != TYPE_DECL)
debug_hooks->global_decl (iter);
timevar_pop (TV_SYMOUT);
unsigned i;
tree decl;
- FOR_EACH_VEC_ELT (tree, builtin_decls, i, decl)
+ FOR_EACH_VEC_SAFE_ELT (builtin_decls, i, decl)
if (DECL_NAME (decl) == name)
return decl;
/* The constant folder doesn't fold fat pointer types so we do it here. */
if (TREE_CODE (p1) == CONSTRUCTOR)
- p1_array = VEC_index (constructor_elt, CONSTRUCTOR_ELTS (p1), 0).value;
+ p1_array = (*CONSTRUCTOR_ELTS (p1))[0].value;
else
p1_array = build_component_ref (p1, NULL_TREE,
TYPE_FIELDS (TREE_TYPE (p1)), true);
null_pointer_node));
if (TREE_CODE (p2) == CONSTRUCTOR)
- p2_array = VEC_index (constructor_elt, CONSTRUCTOR_ELTS (p2), 0).value;
+ p2_array = (*CONSTRUCTOR_ELTS (p2))[0].value;
else
p2_array = build_component_ref (p2, NULL_TREE,
TYPE_FIELDS (TREE_TYPE (p2)), true);
= fold_build2_loc (loc, EQ_EXPR, result_type, p1_array, p2_array);
if (TREE_CODE (p1) == CONSTRUCTOR)
- p1_bounds = VEC_index (constructor_elt, CONSTRUCTOR_ELTS (p1), 1).value;
+ p1_bounds = (*CONSTRUCTOR_ELTS (p1))[1].value;
else
p1_bounds
= build_component_ref (p1, NULL_TREE,
DECL_CHAIN (TYPE_FIELDS (TREE_TYPE (p1))), true);
if (TREE_CODE (p2) == CONSTRUCTOR)
- p2_bounds = VEC_index (constructor_elt, CONSTRUCTOR_ELTS (p2), 1).value;
+ p2_bounds = (*CONSTRUCTOR_ELTS (p2))[1].value;
else
p2_bounds
= build_component_ref (p2, NULL_TREE,
a pointer to our type. */
if (TYPE_IS_PADDING_P (type))
{
- result = VEC_index (constructor_elt,
- CONSTRUCTOR_ELTS (operand),
- 0).value;
+ result = (*CONSTRUCTOR_ELTS (operand))[0].value;
result = convert (build_pointer_type (TREE_TYPE (operand)),
build_unary_op (ADDR_EXPR, NULL_TREE, result));
break;
/* Return a CONSTRUCTOR of TYPE whose elements are V. */
tree
-gnat_build_constructor (tree type, VEC(constructor_elt,gc) *v)
+gnat_build_constructor (tree type, vec<constructor_elt, va_gc> *v)
{
bool allconstant = (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST);
bool side_effects = false;
by increasing bit position. This is necessary to ensure the
constructor can be output as static data. */
if (allconstant && TREE_CODE (type) == RECORD_TYPE && n_elmts > 1)
- VEC_qsort (constructor_elt, v, compare_elmt_bitpos);
+ v->qsort (compare_elmt_bitpos);
result = build_constructor (type, v);
TREE_CONSTANT (result) = TREE_STATIC (result) = allconstant;
if (TREE_CODE (record_variable) == CONSTRUCTOR
&& TYPE_CONTAINS_TEMPLATE_P (TREE_TYPE (record_variable)))
{
- VEC(constructor_elt,gc) *elts = CONSTRUCTOR_ELTS (record_variable);
+ vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (record_variable);
unsigned HOST_WIDE_INT idx;
tree index, value;
FOR_EACH_CONSTRUCTOR_ELT (elts, idx, index, value)
If there is no initializing expression, just set the bounds. */
if (init)
{
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 2);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 2);
CONSTRUCTOR_APPEND_ELT (v, TYPE_FIELDS (storage_type),
build_template (template_type, type, init));
/* Constructors with 1 element are used extensively to formally
convert objects to special wrapping types. */
if (TREE_CODE (type) == RECORD_TYPE
- && VEC_length (constructor_elt, CONSTRUCTOR_ELTS (ref)) == 1)
+ && vec_safe_length (CONSTRUCTOR_ELTS (ref)) == 1)
{
- tree index
- = VEC_index (constructor_elt, CONSTRUCTOR_ELTS (ref), 0).index;
- tree value
- = VEC_index (constructor_elt, CONSTRUCTOR_ELTS (ref), 0).value;
+ tree index = (*CONSTRUCTOR_ELTS (ref))[0].index;
+ tree value = (*CONSTRUCTOR_ELTS (ref))[0].value;
result
= build_constructor_single (type, index,
gnat_stabilize_reference_1 (value,
The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to
indicate this. */
-static GTY(()) VEC(rtx,gc) *reg_base_value;
+static GTY(()) vec<rtx, va_gc> *reg_base_value;
static rtx *new_reg_base_value;
/* The single VOIDmode ADDRESS that represents all argument bases.
/* We preserve a copy of the old array around to reduce the amount of
garbage produced; about 8% of the garbage produced was attributed to
this array. */
-static GTY((deletable)) VEC(rtx,gc) *old_reg_base_value;
+static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value;
/* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special
registers. */
#define static_reg_base_value \
(this_target_rtl->x_static_reg_base_value)
-#define REG_BASE_VALUE(X) \
- (REGNO (X) < VEC_length (rtx, reg_base_value) \
- ? VEC_index (rtx, reg_base_value, REGNO (X)) : 0)
+#define REG_BASE_VALUE(X) \
+ (REGNO (X) < vec_safe_length (reg_base_value) \
+ ? (*reg_base_value)[REGNO (X)] : 0)
/* Vector indexed by N giving the initial (unchanging) value known for
pseudo-register N. This vector is initialized in init_alias_analysis,
and does not change until end_alias_analysis is called. */
-static GTY(()) VEC(rtx,gc) *reg_known_value;
+static GTY(()) vec<rtx, va_gc> *reg_known_value;
/* Vector recording for each reg_known_value whether it is due to a
REG_EQUIV note. Future passes (viz., reload) may replace the
NOTE_INSN_FUNCTION_BEG note. */
static bool copying_arguments;
-DEF_VEC_P(alias_set_entry);
-DEF_VEC_ALLOC_P(alias_set_entry,gc);
/* The splay-tree used to store the various alias set entries. */
-static GTY (()) VEC(alias_set_entry,gc) *alias_sets;
+static GTY (()) vec<alias_set_entry, va_gc> *alias_sets;
\f
/* Build a decomposed reference object for querying the alias-oracle
from the MEM rtx and store it in *REF.
static inline alias_set_entry
get_alias_set_entry (alias_set_type alias_set)
{
- return VEC_index (alias_set_entry, alias_sets, alias_set);
+ return (*alias_sets)[alias_set];
}
/* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
if (flag_strict_aliasing)
{
if (alias_sets == 0)
- VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
- VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
- return VEC_length (alias_set_entry, alias_sets) - 1;
+ vec_safe_push (alias_sets, (alias_set_entry) 0);
+ vec_safe_push (alias_sets, (alias_set_entry) 0);
+ return alias_sets->length () - 1;
}
else
return 0;
ggc_alloc_splay_tree_scalar_scalar_splay_tree_s,
ggc_alloc_splay_tree_scalar_scalar_splay_tree_node_s);
superset_entry->has_zero_child = 0;
- VEC_replace (alias_set_entry, alias_sets, superset, superset_entry);
+ (*alias_sets)[superset] = superset_entry;
}
if (subset == 0)
The test above is not sufficient because the scheduler may move
a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN. */
if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
- && regno < VEC_length (rtx, reg_base_value))
+ && regno < vec_safe_length (reg_base_value))
{
/* If we're inside init_alias_analysis, use new_reg_base_value
to reduce the number of relaxation iterations. */
&& DF_REG_DEF_COUNT (regno) == 1)
return new_reg_base_value[regno];
- if (VEC_index (rtx, reg_base_value, regno))
- return VEC_index (rtx, reg_base_value, regno);
+ if ((*reg_base_value)[regno])
+ return (*reg_base_value)[regno];
}
return 0;
regno = REGNO (dest);
- gcc_checking_assert (regno < VEC_length (rtx, reg_base_value));
+ gcc_checking_assert (regno < reg_base_value->length ());
/* If this spans multiple hard registers, then we must indicate that every
register has an unusable value. */
rtx
get_reg_base_value (unsigned int regno)
{
- return VEC_index (rtx, reg_base_value, regno);
+ return (*reg_base_value)[regno];
}
/* If a value is known for REGNO, return it. */
if (regno >= FIRST_PSEUDO_REGISTER)
{
regno -= FIRST_PSEUDO_REGISTER;
- if (regno < VEC_length (rtx, reg_known_value))
- return VEC_index (rtx, reg_known_value, regno);
+ if (regno < vec_safe_length (reg_known_value))
+ return (*reg_known_value)[regno];
}
return NULL;
}
if (regno >= FIRST_PSEUDO_REGISTER)
{
regno -= FIRST_PSEUDO_REGISTER;
- if (regno < VEC_length (rtx, reg_known_value))
- VEC_replace (rtx, reg_known_value, regno, val);
+ if (regno < vec_safe_length (reg_known_value))
+ (*reg_known_value)[regno] = val;
}
}
if (regno >= FIRST_PSEUDO_REGISTER)
{
regno -= FIRST_PSEUDO_REGISTER;
- if (regno < VEC_length (rtx, reg_known_value))
+ if (regno < vec_safe_length (reg_known_value))
return bitmap_bit_p (reg_known_equiv_p, regno);
}
return false;
if (regno >= FIRST_PSEUDO_REGISTER)
{
regno -= FIRST_PSEUDO_REGISTER;
- if (regno < VEC_length (rtx, reg_known_value))
+ if (regno < vec_safe_length (reg_known_value))
{
if (val)
bitmap_set_bit (reg_known_equiv_p, regno);
timevar_push (TV_ALIAS_ANALYSIS);
- reg_known_value = VEC_alloc (rtx, gc, maxreg - FIRST_PSEUDO_REGISTER);
+ vec_alloc (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
/* If we have memory allocated from the previous run, use it. */
reg_base_value = old_reg_base_value;
if (reg_base_value)
- VEC_truncate (rtx, reg_base_value, 0);
+ reg_base_value->truncate (0);
- VEC_safe_grow_cleared (rtx, gc, reg_base_value, maxreg);
+ vec_safe_grow_cleared (reg_base_value, maxreg);
new_reg_base_value = XNEWVEC (rtx, maxreg);
reg_seen = sbitmap_alloc (maxreg);
for (ui = 0; ui < maxreg; ui++)
{
if (new_reg_base_value[ui]
- && new_reg_base_value[ui] != VEC_index (rtx, reg_base_value, ui)
- && ! rtx_equal_p (new_reg_base_value[ui],
- VEC_index (rtx, reg_base_value, ui)))
+ && new_reg_base_value[ui] != (*reg_base_value)[ui]
+ && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui]))
{
- VEC_replace (rtx, reg_base_value, ui, new_reg_base_value[ui]);
+ (*reg_base_value)[ui] = new_reg_base_value[ui];
changed = 1;
}
}
XDELETEVEC (rpo);
/* Fill in the remaining entries. */
- FOR_EACH_VEC_ELT (rtx, reg_known_value, i, val)
+ FOR_EACH_VEC_ELT (*reg_known_value, i, val)
{
int regno = i + FIRST_PSEUDO_REGISTER;
if (! val)
void
vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
{
- VEC_replace (rtx, reg_base_value, REGNO (reg1), REG_BASE_VALUE (reg2));
+ (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
}
void
end_alias_analysis (void)
{
old_reg_base_value = reg_base_value;
- VEC_free (rtx, gc, reg_known_value);
+ vec_free (reg_known_value);
sbitmap_free (reg_known_equiv_p);
}
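Note the recycling idiom here: end_alias_analysis parks reg_base_value in the GTY((deletable)) old_reg_base_value root, and the next init_alias_analysis truncates it to zero and regrows it cleared instead of reallocating, the pattern the earlier comment credits with roughly 8% of the garbage otherwise produced. A stand-alone toy sketch of truncate-and-regrow (std::vector stands in for vec<rtx, va_gc>):

#include <cstdio>
#include <vector>

/* Toy version of the recycling idiom: one allocation kept alive
   across passes in a cache, truncated and regrown on reuse.  */
static std::vector<int> *old_cache;

static std::vector<int> *
begin_pass (unsigned n)
{
  std::vector<int> *v = old_cache;
  old_cache = 0;
  if (v)
    v->clear ();		/* Like reg_base_value->truncate (0).  */
  else
    v = new std::vector<int> ();
  v->resize (n, 0);		/* Like vec_safe_grow_cleared.  */
  return v;
}

static void
end_pass (std::vector<int> *v)
{
  old_cache = v;		/* Park the allocation for next time.  */
}

int
main ()
{
  std::vector<int> *v = begin_pass (8);
  end_pass (v);
  v = begin_pass (16);		/* Reuses the same allocation.  */
  std::printf ("%u\n", (unsigned) v->size ());
  end_pass (v);
  return 0;
}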
TYPE is __asan_global struct type as returned by asan_global_struct. */
static void
-asan_add_global (tree decl, tree type, VEC(constructor_elt, gc) *v)
+asan_add_global (tree decl, tree type, vec<constructor_elt, va_gc> *v)
{
tree init, uptr = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (type)));
unsigned HOST_WIDE_INT size;
tree str_cst, refdecl = decl;
- VEC(constructor_elt, gc) *vinner = NULL;
+ vec<constructor_elt, va_gc> *vinner = NULL;
if (!asan_pp_initialized)
asan_pp_initialize ();
if (asan_needs_local_alias (decl))
{
char buf[20];
- ASM_GENERATE_INTERNAL_LABEL (buf, "LASAN",
- VEC_length (constructor_elt, v) + 1);
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LASAN", vec_safe_length (v) + 1);
refdecl = build_decl (DECL_SOURCE_LOCATION (decl),
VAR_DECL, get_identifier (buf), TREE_TYPE (decl));
TREE_ADDRESSABLE (refdecl) = TREE_ADDRESSABLE (decl);
tree type = asan_global_struct (), var, ctor, decl;
tree uptr = build_nonstandard_integer_type (POINTER_SIZE, 1);
tree dtor_statements = NULL_TREE;
- VEC(constructor_elt, gc) *v;
+ vec<constructor_elt, va_gc> *v;
char buf[20];
type = build_array_type_nelts (type, gcount);
TREE_PUBLIC (var) = 0;
DECL_ARTIFICIAL (var) = 1;
DECL_IGNORED_P (var) = 1;
- v = VEC_alloc (constructor_elt, gc, gcount);
+ vec_alloc (v, gcount);
FOR_EACH_DEFINED_VARIABLE (vnode)
if (asan_protect_global (vnode->symbol.decl))
asan_add_global (vnode->symbol.decl, TREE_TYPE (type), v);
int length;
};
-DEF_VEC_O (attribute_spec);
-DEF_VEC_ALLOC_O (attribute_spec, heap);
-
/* Scoped attribute name representation. */
struct scoped_attributes
{
const char *ns;
- VEC (attribute_spec, heap) *attributes;
+ vec<attribute_spec> attributes;
htab_t attribute_hash;
};
-DEF_VEC_O (scoped_attributes);
-DEF_VEC_ALLOC_O (scoped_attributes, heap);
-
/* The table of scope attributes. */
-static VEC(scoped_attributes, heap) *attributes_table;
+static vec<scoped_attributes> attributes_table;
static scoped_attributes* find_attribute_namespace (const char*);
static void register_scoped_attribute (const struct attribute_spec *,
/* We don't have any namespace NS yet. Create one. */
scoped_attributes sa;
- if (attributes_table == NULL)
- attributes_table = VEC_alloc (scoped_attributes, heap, 64);
+ if (attributes_table.is_empty ())
+ attributes_table.create (64);
memset (&sa, 0, sizeof (sa));
sa.ns = ns;
- sa.attributes = VEC_alloc (attribute_spec, heap, 64);
- result = VEC_safe_push (scoped_attributes, heap, attributes_table, sa);
+ sa.attributes.create (64);
+ result = attributes_table.safe_push (sa);
result->attribute_hash = htab_create (200, hash_attr, eq_attr, NULL);
}
/* Really add the attributes to their namespace now. */
for (unsigned i = 0; attributes[i].name != NULL; ++i)
{
- VEC_safe_push (attribute_spec, heap,
- result->attributes, attributes[i]);
+ result->attributes.safe_push (attributes[i]);
register_scoped_attribute (&attributes[i], result);
}
unsigned ix;
scoped_attributes *iter;
- FOR_EACH_VEC_ELT (scoped_attributes, attributes_table, ix, iter)
+ FOR_EACH_VEC_ELT (attributes_table, ix, iter)
if (ns == iter->ns
|| (iter->ns != NULL
&& ns != NULL
in profile.c */
};
-DEF_VEC_P(edge);
-DEF_VEC_ALLOC_P(edge,gc);
-DEF_VEC_ALLOC_P(edge,heap);
/* Garbage collection and PCH support for edge_def. */
extern void gt_ggc_mx (edge_def *e);
/* Basic block information indexed by block number. */
struct GTY((chain_next ("%h.next_bb"), chain_prev ("%h.prev_bb"))) basic_block_def {
/* The edges into and out of the block. */
- VEC(edge,gc) *preds;
- VEC(edge,gc) *succs;
+ vec<edge, va_gc> *preds;
+ vec<edge, va_gc> *succs;
/* Auxiliary info specific to a pass. */
PTR GTY ((skip (""))) aux;
[(int)sizeof(struct rtl_bb_info)
- (int)sizeof (struct gimple_bb_info)];
-DEF_VEC_P(basic_block);
-DEF_VEC_ALLOC_P(basic_block,gc);
-DEF_VEC_ALLOC_P(basic_block,heap);
#define BB_FREQ_MAX 10000
basic_block x_exit_block_ptr;
/* Index by basic block number, get basic block struct info. */
- VEC(basic_block,gc) *x_basic_block_info;
+ vec<basic_block, va_gc> *x_basic_block_info;
/* Number of basic blocks in this flow graph. */
int x_n_basic_blocks;
/* Mapping of labels to their associated blocks. At present
only used for the gimple CFG. */
- VEC(basic_block,gc) *x_label_to_block_map;
+ vec<basic_block, va_gc> *x_label_to_block_map;
enum profile_status_d x_profile_status;
#define profile_status_for_function(FN) ((FN)->cfg->x_profile_status)
#define BASIC_BLOCK_FOR_FUNCTION(FN,N) \
- (VEC_index (basic_block, basic_block_info_for_function(FN), (N)))
+ ((*basic_block_info_for_function(FN))[(N)])
#define SET_BASIC_BLOCK_FOR_FUNCTION(FN,N,BB) \
- (VEC_replace (basic_block, basic_block_info_for_function(FN), (N), (BB)))
+ ((*basic_block_info_for_function(FN))[(N)] = (BB))
/* Defines for textual backward source compatibility. */
#define ENTRY_BLOCK_PTR (cfun->cfg->x_entry_block_ptr)
#define label_to_block_map (cfun->cfg->x_label_to_block_map)
#define profile_status (cfun->cfg->x_profile_status)
-#define BASIC_BLOCK(N) (VEC_index (basic_block, basic_block_info, (N)))
-#define SET_BASIC_BLOCK(N,BB) (VEC_replace (basic_block, basic_block_info, (N), (BB)))
+#define BASIC_BLOCK(N) ((*basic_block_info)[(N)])
+#define SET_BASIC_BLOCK(N,BB) ((*basic_block_info)[(N)] = (BB))
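After this rewrite, reading and writing a slot are the same expression: operator[] on the pointed-to vector yields an lvalue, so SET_BASIC_BLOCK is a plain assignment and no separate replace primitive is needed. A toy illustration:

#include <cassert>
#include <vector>

int
main ()
{
  /* Toy stand-in for x_basic_block_info, a vec<basic_block, va_gc> *.  */
  std::vector<int> *info = new std::vector<int> (4, 0);

  /* SET_BASIC_BLOCK (N, BB) -> (*info)[N] = BB: operator[] is an
     lvalue, so assignment works directly.  */
  (*info)[2] = 42;

  /* BASIC_BLOCK (N) -> (*info)[N].  */
  assert ((*info)[2] == 42);

  delete info;
  return 0;
}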
/* For iterating over basic blocks. */
#define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
} ce_if_block_t;
/* This structure maintains an edge list vector. */
-/* FIXME: Make this a VEC(edge). */
+/* FIXME: Make this a vec<edge>. */
struct edge_list
{
int num_edges;
#define EDGE_CRITICAL_P(e) (EDGE_COUNT ((e)->src->succs) >= 2 \
&& EDGE_COUNT ((e)->dest->preds) >= 2)
-#define EDGE_COUNT(ev) VEC_length (edge, (ev))
-#define EDGE_I(ev,i) VEC_index (edge, (ev), (i))
-#define EDGE_PRED(bb,i) VEC_index (edge, (bb)->preds, (i))
-#define EDGE_SUCC(bb,i) VEC_index (edge, (bb)->succs, (i))
+#define EDGE_COUNT(ev) vec_safe_length (ev)
+#define EDGE_I(ev,i) (*ev)[(i)]
+#define EDGE_PRED(bb,i) (*(bb)->preds)[(i)]
+#define EDGE_SUCC(bb,i) (*(bb)->succs)[(i)]
/* Returns true if BB has precisely one successor. */
typedef struct {
unsigned index;
- VEC(edge,gc) **container;
+ vec<edge, va_gc> **container;
} edge_iterator;
-static inline VEC(edge,gc) *
+static inline vec<edge, va_gc> *
ei_container (edge_iterator i)
{
gcc_checking_assert (i.container);
/* Return an iterator pointing to the start of an edge vector. */
static inline edge_iterator
-ei_start_1 (VEC(edge,gc) **ev)
+ei_start_1 (vec<edge, va_gc> **ev)
{
edge_iterator i;
/* Return an iterator pointing to the last element of an edge
vector. */
static inline edge_iterator
-ei_last_1 (VEC(edge,gc) **ev)
+ei_last_1 (vec<edge, va_gc> **ev)
{
edge_iterator i;
basic_block);
extern basic_block get_immediate_dominator (enum cdi_direction, basic_block);
extern bool dominated_by_p (enum cdi_direction, const_basic_block, const_basic_block);
-extern VEC (basic_block, heap) *get_dominated_by (enum cdi_direction, basic_block);
-extern VEC (basic_block, heap) *get_dominated_by_region (enum cdi_direction,
+extern vec<basic_block> get_dominated_by (enum cdi_direction, basic_block);
+extern vec<basic_block> get_dominated_by_region (enum cdi_direction,
basic_block *,
unsigned);
-extern VEC (basic_block, heap) *get_dominated_to_depth (enum cdi_direction,
+extern vec<basic_block> get_dominated_to_depth (enum cdi_direction,
basic_block, int);
-extern VEC (basic_block, heap) *get_all_dominated_blocks (enum cdi_direction,
+extern vec<basic_block> get_all_dominated_blocks (enum cdi_direction,
basic_block);
extern void add_to_dominance_info (enum cdi_direction, basic_block);
extern void delete_from_dominance_info (enum cdi_direction, basic_block);
extern void redirect_immediate_dominators (enum cdi_direction, basic_block,
basic_block);
extern void iterate_fix_dominators (enum cdi_direction,
- VEC (basic_block, heap) *, bool);
vec<basic_block>, bool);
extern void verify_dominators (enum cdi_direction);
extern basic_block first_dom_son (enum cdi_direction, basic_block);
extern basic_block next_dom_son (enum cdi_direction, basic_block);
/* Return the fallthru edge in EDGES if it exists, NULL otherwise. */
static inline edge
-find_fallthru_edge (VEC(edge,gc) *edges)
+find_fallthru_edge (vec<edge, va_gc> *edges)
{
edge e;
edge_iterator ei;
a separate section of the .o file (to cut down on paging and improve
cache locality). Return a vector of all edges that cross. */
-static VEC(edge, heap) *
+static vec<edge>
find_rarely_executed_basic_blocks_and_crossing_edges (void)
{
- VEC(edge, heap) *crossing_edges = NULL;
+ vec<edge> crossing_edges = vec<edge>();
basic_block bb;
edge e;
edge_iterator ei;
unsigned i;
eh_landing_pad lp;
- FOR_EACH_VEC_ELT (eh_landing_pad, cfun->eh->lp_array, i, lp)
+ FOR_EACH_VEC_ELT (*cfun->eh->lp_array, i, lp)
{
bool all_same, all_diff;
&& e->dest != EXIT_BLOCK_PTR
&& BB_PARTITION (e->src) != BB_PARTITION (e->dest))
{
- VEC_safe_push (edge, heap, crossing_edges, e);
+ crossing_edges.safe_push (e);
flags |= EDGE_CROSSING;
}
Convert any easy fall-through crossing edges to unconditional jumps. */
static void
-add_labels_and_missing_jumps (VEC(edge, heap) *crossing_edges)
+add_labels_and_missing_jumps (vec<edge> crossing_edges)
{
size_t i;
edge e;
- FOR_EACH_VEC_ELT (edge, crossing_edges, i, e)
+ FOR_EACH_VEC_ELT (crossing_edges, i, e)
{
basic_block src = e->src;
basic_block dest = e->dest;
static unsigned
partition_hot_cold_basic_blocks (void)
{
- VEC(edge, heap) *crossing_edges;
+ vec<edge> crossing_edges;
if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
return 0;
df_set_flags (DF_DEFER_INSN_RESCAN);
crossing_edges = find_rarely_executed_basic_blocks_and_crossing_edges ();
- if (crossing_edges == NULL)
+ if (!crossing_edges.exists ())
return 0;
/* Make sure the source of any crossing edge ends in a jump and the
/* Clear bb->aux fields that the above routines were using. */
clear_aux_for_blocks ();
- VEC_free (edge, heap, crossing_edges);
+ crossing_edges.release ();
/* ??? FIXME: DF generates the bb info for a block immediately.
And by immediately, I mean *during* creation of the block.
case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
{
unsigned int nargs, z;
- VEC(tree,gc) *vec;
+ vec<tree, va_gc> *vec;
mode =
get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1);
/* If this is turned into an external library call, the weak parameter
must be dropped to match the expected parameter list. */
nargs = call_expr_nargs (exp);
- vec = VEC_alloc (tree, gc, nargs - 1);
+ vec_alloc (vec, nargs - 1);
for (z = 0; z < 3; z++)
- VEC_quick_push (tree, vec, CALL_EXPR_ARG (exp, z));
+ vec->quick_push (CALL_EXPR_ARG (exp, z));
/* Skip the boolean weak parameter. */
for (z = 4; z < 6; z++)
- VEC_quick_push (tree, vec, CALL_EXPR_ARG (exp, z));
+ vec->quick_push (CALL_EXPR_ARG (exp, z));
exp = build_call_vec (TREE_TYPE (exp), CALL_EXPR_FN (exp), vec);
break;
}
VEC. */
tree
-build_call_expr_loc_vec (location_t loc, tree fndecl, VEC(tree,gc) *vec)
+build_call_expr_loc_vec (location_t loc, tree fndecl, vec<tree, va_gc> *vec)
{
- return build_call_expr_loc_array (loc, fndecl, VEC_length (tree, vec),
- VEC_address (tree, vec));
+ return build_call_expr_loc_array (loc, fndecl, vec_safe_length (vec),
+ vec_safe_address (vec));
}
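vec_safe_length and vec_safe_address, like the other vec_safe_* wrappers in this patch, accept a NULL vector, yielding length 0 and a null address, so an empty argument list needs no special case here. Toy stand-ins showing the convention (invented names):

#include <cstdio>
#include <vector>

/* Toy vec_safe_length: length 0 for a NULL vector.  */
static unsigned
toy_safe_length (const std::vector<int> *v)
{
  return v ? (unsigned) v->size () : 0;
}

/* Toy vec_safe_address: null for a NULL (or empty) vector.  */
static const int *
toy_safe_address (const std::vector<int> *v)
{
  return (v && !v->empty ()) ? &(*v)[0] : 0;
}

int
main ()
{
  const std::vector<int> *none = 0;
  std::printf ("%u %p\n", toy_safe_length (none),
	       (const void *) toy_safe_address (none));

  std::vector<int> two;
  two.push_back (10);
  two.push_back (20);
  std::printf ("%u %d\n", toy_safe_length (&two),
	       toy_safe_address (&two)[1]);
  return 0;
}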
void
sizeof_pointer_memaccess_warning (location_t *sizeof_arg_loc, tree callee,
- VEC(tree, gc) *params, tree *sizeof_arg,
+ vec<tree, va_gc> *params, tree *sizeof_arg,
bool (*comp_types) (tree, tree))
{
tree type, dest = NULL_TREE, src = NULL_TREE, tem;
if (TREE_CODE (callee) != FUNCTION_DECL
|| DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
- || VEC_length (tree, params) <= 1)
+ || vec_safe_length (params) <= 1)
return;
switch (DECL_FUNCTION_CODE (callee))
case BUILT_IN_MEMCPY_CHK:
case BUILT_IN_MEMMOVE:
case BUILT_IN_MEMMOVE_CHK:
- if (VEC_length (tree, params) < 3)
+ if (params->length () < 3)
return;
- src = VEC_index (tree, params, 1);
- dest = VEC_index (tree, params, 0);
+ src = (*params)[1];
+ dest = (*params)[0];
idx = 2;
break;
case BUILT_IN_BCOPY:
- if (VEC_length (tree, params) < 3)
+ if (params->length () < 3)
return;
- src = VEC_index (tree, params, 0);
- dest = VEC_index (tree, params, 1);
+ src = (*params)[0];
+ dest = (*params)[1];
idx = 2;
break;
case BUILT_IN_MEMCMP:
case BUILT_IN_BCMP:
- if (VEC_length (tree, params) < 3)
+ if (params->length () < 3)
return;
- src = VEC_index (tree, params, 1);
- dest = VEC_index (tree, params, 0);
+ src = (*params)[1];
+ dest = (*params)[0];
idx = 2;
cmp = true;
break;
case BUILT_IN_MEMSET:
case BUILT_IN_MEMSET_CHK:
- if (VEC_length (tree, params) < 3)
+ if (params->length () < 3)
return;
- dest = VEC_index (tree, params, 0);
+ dest = (*params)[0];
idx = 2;
break;
case BUILT_IN_BZERO:
- dest = VEC_index (tree, params, 0);
+ dest = (*params)[0];
idx = 1;
break;
case BUILT_IN_STRNDUP:
- src = VEC_index (tree, params, 0);
+ src = (*params)[0];
strop = true;
idx = 1;
break;
case BUILT_IN_MEMCHR:
- if (VEC_length (tree, params) < 3)
+ if (params->length () < 3)
return;
- src = VEC_index (tree, params, 0);
+ src = (*params)[0];
idx = 2;
break;
case BUILT_IN_SNPRINTF:
case BUILT_IN_SNPRINTF_CHK:
case BUILT_IN_VSNPRINTF:
case BUILT_IN_VSNPRINTF_CHK:
- dest = VEC_index (tree, params, 0);
+ dest = (*params)[0];
idx = 1;
strop = true;
break;
/* Arguments being collected for optimization. */
-typedef const char *const_char_p; /* For DEF_VEC_P. */
+typedef const char *const_char_p; /* For vec<const_char_p, va_gc>. */
-DEF_VEC_P(const_char_p);
-DEF_VEC_ALLOC_P(const_char_p, gc);
-static GTY(()) VEC(const_char_p, gc) *optimize_args;
+static GTY(()) vec<const_char_p, va_gc> *optimize_args;
/* Inner function to convert a TREE_LIST to argv string to parse the optimize
/* Build up argv vector. Just in case the string is stored away, use garbage
collected strings. */
- VEC_truncate (const_char_p, optimize_args, 0);
- VEC_safe_push (const_char_p, gc, optimize_args, NULL);
+ vec_safe_truncate (optimize_args, 0);
+ vec_safe_push (optimize_args, (const char *) NULL);
for (ap = args; ap != NULL_TREE; ap = TREE_CHAIN (ap))
{
{
char buffer[20];
sprintf (buffer, "-O%ld", (long) TREE_INT_CST_LOW (value));
- VEC_safe_push (const_char_p, gc, optimize_args, ggc_strdup (buffer));
+ vec_safe_push (optimize_args, ggc_strdup (buffer));
}
else if (TREE_CODE (value) == STRING_CST)
memcpy (r, p, len2);
r[len2] = '\0';
- VEC_safe_push (const_char_p, gc, optimize_args, q);
+ vec_safe_push (optimize_args, (const char *) q);
}
}
}
- opt_argc = VEC_length (const_char_p, optimize_args);
+ opt_argc = optimize_args->length ();
opt_argv = (const char **) alloca (sizeof (char *) * (opt_argc + 1));
for (i = 1; i < opt_argc; i++)
- opt_argv[i] = VEC_index (const_char_p, optimize_args, i);
+ opt_argv[i] = (*optimize_args)[i];
saved_flag_strict_aliasing = flag_strict_aliasing;
/* Don't allow changing -fstrict-aliasing. */
flag_strict_aliasing = saved_flag_strict_aliasing;
- VEC_truncate (const_char_p, optimize_args, 0);
+ optimize_args->truncate (0);
return ret;
}
}
else if (TREE_CODE (initial_value) == CONSTRUCTOR)
{
- VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (initial_value);
+ vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (initial_value);
- if (VEC_empty (constructor_elt, v))
+ if (vec_safe_is_empty (v))
{
if (pedantic)
failure = 3;
constructor_elt *ce;
bool fold_p = false;
- if (VEC_index (constructor_elt, v, 0).index)
+ if ((*v)[0].index)
maxindex = fold_convert_loc (input_location, sizetype,
- VEC_index (constructor_elt,
- v, 0).index);
+ (*v)[0].index);
curindex = maxindex;
- for (cnt = 1;
- VEC_iterate (constructor_elt, v, cnt, ce);
- cnt++)
+ for (cnt = 1; vec_safe_iterate (v, cnt, &ce); cnt++)
{
bool curfold_p = false;
if (ce->index)
Returns 0 if an error is encountered. */
static int
-sync_resolve_size (tree function, VEC(tree,gc) *params)
+sync_resolve_size (tree function, vec<tree, va_gc> *params)
{
tree type;
int size;
- if (VEC_empty (tree, params))
+ if (vec_safe_is_empty (params))
{
error ("too few arguments to function %qE", function);
return 0;
}
- type = TREE_TYPE (VEC_index (tree, params, 0));
+ type = TREE_TYPE ((*params)[0]);
if (TREE_CODE (type) != POINTER_TYPE)
goto incompatible;
static bool
sync_resolve_params (location_t loc, tree orig_function, tree function,
- VEC(tree, gc) *params, bool orig_format)
+ vec<tree, va_gc> *params, bool orig_format)
{
function_args_iterator iter;
tree ptype;
as the pointer parameter, so we shouldn't get any complaints from the
call to check_function_arguments whatever type the user used. */
function_args_iter_next (&iter);
- ptype = TREE_TYPE (TREE_TYPE (VEC_index (tree, params, 0)));
+ ptype = TREE_TYPE (TREE_TYPE ((*params)[0]));
/* For the rest of the values, we need to cast these to FTYPE, so that we
don't get warnings for passing pointer types, etc. */
break;
++parmnum;
- if (VEC_length (tree, params) <= parmnum)
+ if (params->length () <= parmnum)
{
error_at (loc, "too few arguments to function %qE", orig_function);
return false;
/* Ideally for the first conversion we'd use convert_for_assignment
so that we get warnings for anything that doesn't match the pointer
type. This isn't portable across the C and C++ front ends atm. */
- val = VEC_index (tree, params, parmnum);
+ val = (*params)[parmnum];
val = convert (ptype, val);
val = convert (arg_type, val);
- VEC_replace (tree, params, parmnum, val);
+ (*params)[parmnum] = val;
}
function_args_iter_next (&iter);
}
/* __atomic routines are not variadic. */
- if (!orig_format && VEC_length (tree, params) != parmnum + 1)
+ if (!orig_format && params->length () != parmnum + 1)
{
error_at (loc, "too many arguments to function %qE", orig_function);
return false;
being "an optional list of variables protected by the memory barrier".
No clue what that's supposed to mean, precisely, but we consider all
call-clobbered variables to be protected so we're safe. */
- VEC_truncate (tree, params, parmnum + 1);
+ params->truncate (parmnum + 1);
return true;
}
0 is returned if the parameters are invalid. */
static int
-get_atomic_generic_size (location_t loc, tree function, VEC(tree,gc) *params)
+get_atomic_generic_size (location_t loc, tree function,
+ vec<tree, va_gc> *params)
{
unsigned int n_param;
unsigned int n_model;
gcc_unreachable ();
}
- if (VEC_length (tree, params) != n_param)
+ if (vec_safe_length (params) != n_param)
{
error_at (loc, "incorrect number of arguments to function %qE", function);
return 0;
}
/* Get type of first parameter, and determine its size. */
- type_0 = TREE_TYPE (VEC_index (tree, params, 0));
+ type_0 = TREE_TYPE ((*params)[0]);
if (TREE_CODE (type_0) != POINTER_TYPE || VOID_TYPE_P (TREE_TYPE (type_0)))
{
error_at (loc, "argument 1 of %qE must be a non-void pointer type",
for (x = 0; x < n_param - n_model; x++)
{
int size;
- tree type = TREE_TYPE (VEC_index (tree, params, x));
+ tree type = TREE_TYPE ((*params)[x]);
/* __atomic_compare_exchange has a bool in the 4th position, skip it. */
if (n_param == 6 && x == 3)
continue;
/* Check memory model parameters for validity. */
for (x = n_param - n_model ; x < n_param; x++)
{
- tree p = VEC_index (tree, params, x);
+ tree p = (*params)[x];
if (TREE_CODE (p) == INTEGER_CST)
{
int i = tree_low_cst (p, 1);
static tree
add_atomic_size_parameter (unsigned n, location_t loc, tree function,
- VEC(tree,gc) *params)
+ vec<tree, va_gc> *params)
{
tree size_node;
/* Insert a SIZE_T parameter as the first param. If there isn't
enough space, allocate a new vector and recursively re-build with that. */
- if (!VEC_space (tree, params, 1))
+ if (!params->space (1))
{
unsigned int z, len;
- VEC(tree,gc) *vec;
+ vec<tree, va_gc> *v;
tree f;
- len = VEC_length (tree, params);
- vec = VEC_alloc (tree, gc, len + 1);
+ len = params->length ();
+ vec_alloc (v, len + 1);
for (z = 0; z < len; z++)
- VEC_quick_push (tree, vec, VEC_index (tree, params, z));
- f = build_function_call_vec (loc, function, vec, NULL);
- VEC_free (tree, gc, vec);
+ v->quick_push ((*params)[z]);
+ f = build_function_call_vec (loc, function, v, NULL);
+ vec_free (v);
return f;
}
/* Add the size parameter and leave as a function call for processing. */
size_node = build_int_cst (size_type_node, n);
- VEC_quick_insert (tree, params, 0, size_node);
+ params->quick_insert (0, size_node);
return NULL_TREE;
}
NEW_RETURN is set to the return value the result is copied into. */
static bool
resolve_overloaded_atomic_exchange (location_t loc, tree function,
- VEC(tree,gc) *params, tree *new_return)
+ vec<tree, va_gc> *params, tree *new_return)
{
tree p0, p1, p2, p3;
tree I_type, I_type_ptr;
into
*return = (T) (fn (In* mem, (In) *desired, model)) */
- p0 = VEC_index (tree, params, 0);
- p1 = VEC_index (tree, params, 1);
- p2 = VEC_index (tree, params, 2);
- p3 = VEC_index (tree, params, 3);
+ p0 = (*params)[0];
+ p1 = (*params)[1];
+ p2 = (*params)[2];
+ p3 = (*params)[3];
/* Create pointer to appropriate size. */
I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
/* Convert object pointer to required type. */
p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
- VEC_replace (tree, params, 0, p0);
+ (*params)[0] = p0;
/* Convert new value to required type, and dereference it. */
p1 = build_indirect_ref (loc, p1, RO_UNARY_STAR);
p1 = build1 (VIEW_CONVERT_EXPR, I_type, p1);
- VEC_replace (tree, params, 1, p1);
+ (*params)[1] = p1;
/* Move memory model to the 3rd position, and end param list. */
- VEC_replace (tree, params, 2, p3);
- VEC_truncate (tree, params, 3);
+ (*params)[2] = p3;
+ params->truncate (3);
/* Convert return pointer and dereference it for later assignment. */
*new_return = build_indirect_ref (loc, p2, RO_UNARY_STAR);
static bool
resolve_overloaded_atomic_compare_exchange (location_t loc, tree function,
- VEC(tree,gc) *params,
+ vec<tree, va_gc> *params,
tree *new_return)
{
tree p0, p1, p2;
there is no danger this will be done twice. */
if (n > 0)
{
- VEC_replace (tree, params, 3, VEC_index (tree, params, 4));
- VEC_replace (tree, params, 4, VEC_index (tree, params, 5));
- VEC_truncate (tree, params, 5);
+ (*params)[3] = (*params)[4];
+ (*params)[4] = (*params)[5];
+ params->truncate (5);
}
*new_return = add_atomic_size_parameter (n, loc, function, params);
return true;
into
bool fn ((In *)mem, (In *)expected, (In) *desired, weak, succ, fail) */
- p0 = VEC_index (tree, params, 0);
- p1 = VEC_index (tree, params, 1);
- p2 = VEC_index (tree, params, 2);
+ p0 = (*params)[0];
+ p1 = (*params)[1];
+ p2 = (*params)[2];
/* Create pointer to appropriate size. */
I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
/* Convert object pointer to required type. */
p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
- VEC_replace (tree, params, 0, p0);
+ (*params)[0] = p0;
/* Convert expected pointer to required type. */
p1 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p1);
- VEC_replace (tree, params, 1, p1);
+ (*params)[1] = p1;
/* Convert desired value to required type, and dereference it. */
p2 = build_indirect_ref (loc, p2, RO_UNARY_STAR);
p2 = build1 (VIEW_CONVERT_EXPR, I_type, p2);
- VEC_replace (tree, params, 2, p2);
+ (*params)[2] = p2;
/* The rest of the parameters are fine. NULL means no special return value
processing. */
static bool
resolve_overloaded_atomic_load (location_t loc, tree function,
- VEC(tree,gc) *params, tree *new_return)
+ vec<tree, va_gc> *params, tree *new_return)
{
tree p0, p1, p2;
tree I_type, I_type_ptr;
into
*return = (T) (fn ((In *) mem, model)) */
- p0 = VEC_index (tree, params, 0);
- p1 = VEC_index (tree, params, 1);
- p2 = VEC_index (tree, params, 2);
+ p0 = (*params)[0];
+ p1 = (*params)[1];
+ p2 = (*params)[2];
/* Create pointer to appropriate size. */
I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
/* Convert object pointer to required type. */
p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
- VEC_replace (tree, params, 0, p0);
+ (*params)[0] = p0;
/* Move memory model to the 2nd position, and end param list. */
- VEC_replace (tree, params, 1, p2);
- VEC_truncate (tree, params, 2);
+ (*params)[1] = p2;
+ params->truncate (2);
/* Convert return pointer and dereference it for later assignment. */
*new_return = build_indirect_ref (loc, p1, RO_UNARY_STAR);
static bool
resolve_overloaded_atomic_store (location_t loc, tree function,
- VEC(tree,gc) *params, tree *new_return)
+ vec<tree, va_gc> *params, tree *new_return)
{
tree p0, p1;
tree I_type, I_type_ptr;
into
fn ((In *) mem, (In) *value, model) */
- p0 = VEC_index (tree, params, 0);
- p1 = VEC_index (tree, params, 1);
+ p0 = (*params)[0];
+ p1 = (*params)[1];
/* Create pointer to appropriate size. */
I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
/* Convert object pointer to required type. */
p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
- VEC_replace (tree, params, 0, p0);
+ (*params)[0] = p0;
/* Convert new value to required type, and dereference it. */
p1 = build_indirect_ref (loc, p1, RO_UNARY_STAR);
p1 = build1 (VIEW_CONVERT_EXPR, I_type, p1);
- VEC_replace (tree, params, 1, p1);
+ (*params)[1] = p1;
/* The memory model is in the right spot already. Return is void. */
*new_return = NULL_TREE;
continue. */
tree
-resolve_overloaded_builtin (location_t loc, tree function, VEC(tree,gc) *params)
+resolve_overloaded_builtin (location_t loc, tree function,
+ vec<tree, va_gc> *params)
{
enum built_in_function orig_code = DECL_FUNCTION_CODE (function);
bool orig_format = true;
orig_format))
return error_mark_node;
- first_param = VEC_index (tree, params, 0);
+ first_param = (*params)[0];
result = build_function_call_vec (loc, new_function, params, NULL);
if (result == error_mark_node)
return result;
{
gcc_assert (decl && DECL_P (decl) && TREE_STATIC (decl));
- while (!VEC_empty (tree, types_used_by_cur_var_decl))
+ while (types_used_by_cur_var_decl && !types_used_by_cur_var_decl->is_empty ())
{
- tree type = VEC_pop (tree, types_used_by_cur_var_decl);
+ tree type = types_used_by_cur_var_decl->pop ();
types_used_by_var_decl_insert (type, decl);
}
}
return;
l = (struct c_language_function *) cfun->language;
- VEC_safe_push (tree, gc, l->local_typedefs, decl);
+ vec_safe_push (l->local_typedefs, decl);
}
/* If T is a TYPE_DECL declared locally, mark it as used. */
if (warn_unused_local_typedefs
&& errorcount == unused_local_typedefs_warn_count)
{
- FOR_EACH_VEC_ELT (tree, l->local_typedefs, i, decl)
+ FOR_EACH_VEC_SAFE_ELT (l->local_typedefs, i, decl)
if (!TREE_USED (decl))
warning_at (DECL_SOURCE_LOCATION (decl),
OPT_Wunused_local_typedefs,
unused_local_typedefs_warn_count = errorcount;
}
- if (l->local_typedefs)
- {
- VEC_free (tree, gc, l->local_typedefs);
- l->local_typedefs = NULL;
- }
+ vec_free (l->local_typedefs);
}
/* The C and C++ parsers both use vectors to hold function arguments.
For efficiency, we keep a cache of unused vectors. This is the
cache. */
-typedef VEC(tree,gc)* tree_gc_vec;
-DEF_VEC_P(tree_gc_vec);
-DEF_VEC_ALLOC_P(tree_gc_vec,gc);
-static GTY((deletable)) VEC(tree_gc_vec,gc) *tree_vector_cache;
+typedef vec<tree, va_gc> *tree_gc_vec;
+static GTY((deletable)) vec<tree_gc_vec, va_gc> *tree_vector_cache;
/* Return a new vector from the cache. If the cache is empty,
allocate a new vector. These vectors are GC'ed, so it is OK if the
pointer is not released. */
-VEC(tree,gc) *
+vec<tree, va_gc> *
make_tree_vector (void)
{
- if (!VEC_empty (tree_gc_vec, tree_vector_cache))
- return VEC_pop (tree_gc_vec, tree_vector_cache);
+ if (tree_vector_cache && !tree_vector_cache->is_empty ())
+ return tree_vector_cache->pop ();
else
{
- /* Passing 0 to VEC_alloc returns NULL, and our callers require
+ /* Passing 0 to vec::alloc returns NULL, and our callers require
that we always return a non-NULL value. The vector code uses
4 when growing a NULL vector, so we do too. */
- return VEC_alloc (tree, gc, 4);
+ vec<tree, va_gc> *v;
+ vec_alloc (v, 4);
+ return v;
}
}
/* Release a vector of trees back to the cache. */
void
-release_tree_vector (VEC(tree,gc) *vec)
+release_tree_vector (vec<tree, va_gc> *vec)
{
if (vec != NULL)
{
- VEC_truncate (tree, vec, 0);
- VEC_safe_push (tree_gc_vec, gc, tree_vector_cache, vec);
+ vec->truncate (0);
+ vec_safe_push (tree_vector_cache, vec);
}
}
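make_tree_vector and release_tree_vector implement a simple free list: a released vector is truncated to length 0 (its capacity is kept) and pushed onto tree_vector_cache, and the next make_tree_vector pops it instead of allocating. A stand-alone toy sketch of the same pattern:

#include <cstdio>
#include <vector>

/* Toy free list of vectors, mirroring tree_vector_cache.  */
static std::vector<std::vector<int> *> cache;

static std::vector<int> *
toy_make_vector (void)
{
  if (!cache.empty ())
    {
      std::vector<int> *v = cache.back ();
      cache.pop_back ();
      return v;			/* Recycled; capacity preserved.  */
    }
  std::vector<int> *v = new std::vector<int> ();
  v->reserve (4);		/* Like vec_alloc (v, 4) above.  */
  return v;
}

static void
toy_release_vector (std::vector<int> *v)
{
  v->clear ();			/* Like vec->truncate (0).  */
  cache.push_back (v);
}

int
main ()
{
  std::vector<int> *a = toy_make_vector ();
  a->push_back (1);
  toy_release_vector (a);
  std::vector<int> *b = toy_make_vector ();
  std::printf ("%s\n", a == b ? "recycled" : "fresh");	/* recycled */
  toy_release_vector (b);
  return 0;
}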
/* Get a new tree vector holding a single tree. */
-VEC(tree,gc) *
+vec<tree, va_gc> *
make_tree_vector_single (tree t)
{
- VEC(tree,gc) *ret = make_tree_vector ();
- VEC_quick_push (tree, ret, t);
+ vec<tree, va_gc> *ret = make_tree_vector ();
+ ret->quick_push (t);
return ret;
}
/* Get a new tree vector of the TREE_VALUEs of a TREE_LIST chain. */
-VEC(tree,gc) *
+vec<tree, va_gc> *
make_tree_vector_from_list (tree list)
{
- VEC(tree,gc) *ret = make_tree_vector ();
+ vec<tree, va_gc> *ret = make_tree_vector ();
for (; list; list = TREE_CHAIN (list))
- VEC_safe_push (tree, gc, ret, TREE_VALUE (list));
+ vec_safe_push (ret, TREE_VALUE (list));
return ret;
}
/* Get a new tree vector which is a copy of an existing one. */
-VEC(tree,gc) *
-make_tree_vector_copy (const VEC(tree,gc) *orig)
+vec<tree, va_gc> *
+make_tree_vector_copy (const vec<tree, va_gc> *orig)
{
- VEC(tree,gc) *ret;
+ vec<tree, va_gc> *ret;
unsigned int ix;
tree t;
ret = make_tree_vector ();
- VEC_reserve (tree, gc, ret, VEC_length (tree, orig));
- FOR_EACH_VEC_ELT (tree, orig, ix, t)
- VEC_quick_push (tree, ret, t);
+ vec_safe_reserve (ret, vec_safe_length (orig));
+ FOR_EACH_VEC_SAFE_ELT (orig, ix, t)
+ ret->quick_push (t);
return ret;
}
struct GTY(()) stmt_tree_s {
/* A stack of statement lists being collected. */
- VEC(tree,gc) *x_cur_stmt_list;
+ vec<tree, va_gc> *x_cur_stmt_list;
/* In C++, nonzero if we should treat statements as full
expressions. In particular, this variable is non-zero if at the
/* Vector of locally defined typedefs, for
-Wunused-local-typedefs. */
- VEC(tree,gc) *local_typedefs;
+ vec<tree, va_gc> *local_typedefs;
};
#define stmt_list_stack (current_stmt_tree ()->x_cur_stmt_list)
/* When building a statement-tree, this is the current statement list
- being collected. We define it in this convoluted way, rather than
- using VEC_last, because it must be an lvalue. */
+ being collected. */
-#define cur_stmt_list \
- (*(VEC_address (tree, stmt_list_stack) \
- + VEC_length (tree, stmt_list_stack) - 1))
-
-#define building_stmt_list_p() (!VEC_empty (tree, stmt_list_stack))
+#define cur_stmt_list (stmt_list_stack->last ())
+#define building_stmt_list_p() \
+ (stmt_list_stack && !stmt_list_stack->is_empty ())
/* Language-specific hooks. */
extern void constant_expression_error (tree);
extern bool strict_aliasing_warning (tree, tree, tree);
extern void sizeof_pointer_memaccess_warning (location_t *, tree,
- VEC(tree, gc) *, tree *,
+ vec<tree, va_gc> *, tree *,
bool (*) (tree, tree));
extern void warnings_for_convert_and_check (tree, tree, tree);
extern tree convert_and_check (tree, tree);
extern tree build_function_call (location_t, tree, tree);
-extern tree build_function_call_vec (location_t, tree,
- VEC(tree,gc) *, VEC(tree,gc) *);
+extern tree build_function_call_vec (location_t, tree, vec<tree, va_gc> *,
+ vec<tree, va_gc> *);
-extern tree resolve_overloaded_builtin (location_t, tree, VEC(tree,gc) *);
+extern tree resolve_overloaded_builtin (location_t, tree, vec<tree, va_gc> *);
extern tree finish_label_address_expr (tree, location_t);
extern void record_locally_defined_typedef (tree);
extern void maybe_record_typedef_use (tree);
extern void maybe_warn_unused_local_typedefs (void);
-extern VEC(tree,gc) *make_tree_vector (void);
-extern void release_tree_vector (VEC(tree,gc) *);
-extern VEC(tree,gc) *make_tree_vector_single (tree);
-extern VEC(tree,gc) *make_tree_vector_from_list (tree);
-extern VEC(tree,gc) *make_tree_vector_copy (const VEC(tree,gc) *);
+extern vec<tree, va_gc> *make_tree_vector (void);
+extern void release_tree_vector (vec<tree, va_gc> *);
+extern vec<tree, va_gc> *make_tree_vector_single (tree);
+extern vec<tree, va_gc> *make_tree_vector_from_list (tree);
+extern vec<tree, va_gc> *make_tree_vector_copy (const vec<tree, va_gc> *);
/* In c-gimplify.c */
extern void c_genericize (tree);
unsigned i;
tree enclosing;
gimple bind;
- VEC(gimple, heap) *stack = gimple_bind_expr_stack ();
+ vec<gimple> stack = gimple_bind_expr_stack ();
- FOR_EACH_VEC_ELT (gimple, stack, i, bind)
+ FOR_EACH_VEC_ELT (stack, i, bind)
if (gimple_bind_block (bind))
break;
#include "tm_p.h" /* For REGISTER_TARGET_PRAGMAS (why is
this not a target hook?). */
#include "vec.h"
-#include "vecprim.h"
#include "target.h"
#include "diagnostic.h"
#include "opts.h"
tree value;
} pending_weak;
-DEF_VEC_O(pending_weak);
-DEF_VEC_ALLOC_O(pending_weak,gc);
-static GTY(()) VEC(pending_weak,gc) *pending_weaks;
+static GTY(()) vec<pending_weak, va_gc> *pending_weaks;
static void apply_pragma_weak (tree, tree);
static void handle_pragma_weak (cpp_reader *);
id = DECL_ASSEMBLER_NAME (decl);
- FOR_EACH_VEC_ELT (pending_weak, pending_weaks, i, pe)
+ FOR_EACH_VEC_ELT (*pending_weaks, i, pe)
if (id == pe->name)
{
apply_pragma_weak (decl, pe->value);
- VEC_unordered_remove (pending_weak, pending_weaks, i);
+ pending_weaks->unordered_remove (i);
break;
}
}
pending_weak *pe;
symtab_node target;
- FOR_EACH_VEC_ELT (pending_weak, pending_weaks, i, pe)
+ if (!pending_weaks)
+ return;
+
+ FOR_EACH_VEC_ELT (*pending_weaks, i, pe)
{
alias_id = pe->name;
id = pe->value;
else
{
pending_weak pe = {name, value};
- VEC_safe_push (pending_weak, gc, pending_weaks, pe);
+ vec_safe_push (pending_weaks, pe);
}
}
tree newname;
} pending_redefinition;
-DEF_VEC_O(pending_redefinition);
-DEF_VEC_ALLOC_O(pending_redefinition,gc);
-static GTY(()) VEC(pending_redefinition,gc) *pending_redefine_extname;
+static GTY(()) vec<pending_redefinition, va_gc> *pending_redefine_extname;
static void handle_pragma_redefine_extname (cpp_reader *);
unsigned ix;
pending_redefinition *p;
- FOR_EACH_VEC_ELT (pending_redefinition, pending_redefine_extname, ix, p)
+ FOR_EACH_VEC_SAFE_ELT (pending_redefine_extname, ix, p)
if (oldname == p->oldname)
{
if (p->newname != newname)
}
pending_redefinition e = {oldname, newname};
- VEC_safe_push (pending_redefinition, gc, pending_redefine_extname, e);
+ vec_safe_push (pending_redefine_extname, e);
}
/* The current prefix set by #pragma extern_prefix. */
"conflict with previous rename");
/* Take any pending redefine_extname off the list. */
- FOR_EACH_VEC_ELT (pending_redefinition, pending_redefine_extname, ix, p)
+ FOR_EACH_VEC_SAFE_ELT (pending_redefine_extname, ix, p)
if (DECL_NAME (decl) == p->oldname)
{
/* Only warn if there is a conflict. */
warning (OPT_Wpragmas, "#pragma redefine_extname ignored due to "
"conflict with previous rename");
- VEC_unordered_remove (pending_redefinition,
- pending_redefine_extname, ix);
+ pending_redefine_extname->unordered_remove (ix);
break;
}
return 0;
}
/* Find out if we have a pending #pragma redefine_extname. */
- FOR_EACH_VEC_ELT (pending_redefinition, pending_redefine_extname, ix, p)
+ FOR_EACH_VEC_SAFE_ELT (pending_redefine_extname, ix, p)
if (DECL_NAME (decl) == p->oldname)
{
tree newname = p->newname;
- VEC_unordered_remove (pending_redefinition,
- pending_redefine_extname, ix);
+ pending_redefine_extname->unordered_remove (ix);
/* If we already have an asmname, #pragma redefine_extname is
ignored (with a warning if it conflicts). */
static void handle_pragma_visibility (cpp_reader *);
-static VEC (int, heap) *visstack;
+static vec<int> visstack;
/* Push the visibility indicated by STR onto the top of the #pragma
visibility stack. KIND is 0 for #pragma GCC visibility, 1 for
void
push_visibility (const char *str, int kind)
{
- VEC_safe_push (int, heap, visstack,
- ((int) default_visibility) | (kind << 8));
+ visstack.safe_push (((int) default_visibility) | (kind << 8));
if (!strcmp (str, "default"))
default_visibility = VISIBILITY_DEFAULT;
else if (!strcmp (str, "internal"))
bool
pop_visibility (int kind)
{
- if (!VEC_length (int, visstack))
+ if (!visstack.length ())
return false;
- if ((VEC_last (int, visstack) >> 8) != kind)
+ if ((visstack.last () >> 8) != kind)
return false;
default_visibility
- = (enum symbol_visibility) (VEC_pop (int, visstack) & 0xff);
+ = (enum symbol_visibility) (visstack.pop () & 0xff);
visibility_options.inpragma
- = VEC_length (int, visstack) != 0;
+ = visstack.length () != 0;
return true;
}
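push_visibility and pop_visibility pack two fields into each int on visstack: the saved visibility in the low byte and the pragma kind in bits 8 and up, unpacked above with >> 8 and & 0xff. A minimal stand-alone sketch of the packing (toy enum; the real one is symbol_visibility):

#include <cassert>

enum toy_visibility { VIS_DEFAULT, VIS_INTERNAL, VIS_HIDDEN, VIS_PROTECTED };

/* Pack visibility (low byte) and pragma kind (bits 8 and up), as
   push_visibility does with ((int) default_visibility) | (kind << 8).  */
static int
pack (enum toy_visibility vis, int kind)
{
  return ((int) vis) | (kind << 8);
}

int
main ()
{
  int packed = pack (VIS_HIDDEN, 1);
  assert ((packed >> 8) == 1);	/* Kind, as pop_visibility checks.  */
  assert ((enum toy_visibility) (packed & 0xff) == VIS_HIDDEN);
  return 0;
}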
}
/* A vector of registered pragma callbacks, which is never freed. */
-DEF_VEC_O (internal_pragma_handler);
-DEF_VEC_ALLOC_O (internal_pragma_handler, heap);
-static VEC(internal_pragma_handler, heap) *registered_pragmas;
+static vec<internal_pragma_handler> registered_pragmas;
typedef struct
{
const char *name;
} pragma_ns_name;
-DEF_VEC_O (pragma_ns_name);
-DEF_VEC_ALLOC_O (pragma_ns_name, heap);
-static VEC(pragma_ns_name, heap) *registered_pp_pragmas;
+static vec<pragma_ns_name> registered_pp_pragmas;
struct omp_pragma_def { const char *name; unsigned int id; };
static const struct omp_pragma_def omp_pragmas[] = {
}
if (id >= PRAGMA_FIRST_EXTERNAL
- && (id < PRAGMA_FIRST_EXTERNAL
- + VEC_length (pragma_ns_name, registered_pp_pragmas)))
+ && (id < PRAGMA_FIRST_EXTERNAL + registered_pp_pragmas.length ()))
{
- *space = VEC_index (pragma_ns_name, registered_pp_pragmas,
- id - PRAGMA_FIRST_EXTERNAL).space;
- *name = VEC_index (pragma_ns_name, registered_pp_pragmas,
- id - PRAGMA_FIRST_EXTERNAL).name;
+ *space = registered_pp_pragmas[id - PRAGMA_FIRST_EXTERNAL].space;
+ *name = registered_pp_pragmas[id - PRAGMA_FIRST_EXTERNAL].name;
return;
}
ns_name.space = space;
ns_name.name = name;
- VEC_safe_push (pragma_ns_name, heap, registered_pp_pragmas, ns_name);
- id = VEC_length (pragma_ns_name, registered_pp_pragmas);
+ registered_pp_pragmas.safe_push (ns_name);
+ id = registered_pp_pragmas.length ();
id += PRAGMA_FIRST_EXTERNAL - 1;
}
else
{
- VEC_safe_push (internal_pragma_handler, heap, registered_pragmas,
- ihandler);
- id = VEC_length (internal_pragma_handler, registered_pragmas);
+ registered_pragmas.safe_push (ihandler);
+ id = registered_pragmas.length ();
id += PRAGMA_FIRST_EXTERNAL - 1;
/* The C++ front end allocates 6 bits in cp_token; the C front end
pragma_handler_2arg handler_2arg;
id -= PRAGMA_FIRST_EXTERNAL;
- ihandler = &VEC_index (internal_pragma_handler, registered_pragmas, id);
+ ihandler = &registered_pragmas[id];
if (ihandler->extra_data)
{
handler_2arg = ihandler->handler.handler_2arg;
/* Print out V, which contains the elements of a constructor. */
void
-pp_c_constructor_elts (c_pretty_printer *pp, VEC(constructor_elt,gc) *v)
+pp_c_constructor_elts (c_pretty_printer *pp, vec<constructor_elt, va_gc> *v)
{
unsigned HOST_WIDE_INT ix;
tree value;
FOR_EACH_CONSTRUCTOR_VALUE (v, ix, value)
{
pp_expression (pp, value);
- if (ix != VEC_length (constructor_elt, v) - 1)
+ if (ix != vec_safe_length (v) - 1)
pp_separate_with (pp, ',');
}
}
void pp_c_expression (c_pretty_printer *, tree);
void pp_c_logical_or_expression (c_pretty_printer *, tree);
void pp_c_expression_list (c_pretty_printer *, tree);
-void pp_c_constructor_elts (c_pretty_printer *, VEC(constructor_elt,gc) *);
+void pp_c_constructor_elts (c_pretty_printer *, vec<constructor_elt, va_gc> *);
void pp_c_call_argument_list (c_pretty_printer *, tree);
void pp_c_unary_expression (c_pretty_printer *, tree);
void pp_c_cast_expression (c_pretty_printer *, tree);
{
tree t;
t = alloc_stmt_list ();
- VEC_safe_push (tree, gc, stmt_list_stack, t);
+ vec_safe_push (stmt_list_stack, t);
return t;
}
nestings will be due to outstanding cleanups. */
while (1)
{
- u = VEC_pop (tree, stmt_list_stack);
- if (!VEC_empty (tree, stmt_list_stack))
+ u = stmt_list_stack->pop ();
+ if (!stmt_list_stack->is_empty ())
{
- tree x = VEC_last (tree, stmt_list_stack);
+ tree x = stmt_list_stack->last ();
STATEMENT_LIST_HAS_LABEL (x) |= STATEMENT_LIST_HAS_LABEL (u);
}
if (t == u)
+2012-11-16 Diego Novillo <dnovillo@google.com>
+
+ Adjust for new vec API (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
+
+ * c-common.c: Use new vec API in vec.h.
+ * c-common.h: Likewise.
+ * c-gimplify.c: Likewise.
+ * c-pragma.c: Likewise.
+ * c-pretty-print.c: Likewise.
+ * c-pretty-print.h: Likewise.
+ * c-semantics.c: Likewise.
+ * c-decl.c: Likewise.
+ * c-parser.c: Likewise.
+ * c-tree.h: Likewise.
+ * c-typeck.c: Likewise.
+
2012-10-29 Jonathan Wakely <jwakely.gcc@gmail.com>
PR c++/54930
};
typedef struct c_goto_bindings *c_goto_bindings_p;
-DEF_VEC_P(c_goto_bindings_p);
-DEF_VEC_ALLOC_P(c_goto_bindings_p,gc);
/* The additional information we keep track of for a label binding.
These fields are updated as scopes are popped. */
warn if a goto branches to this label from later in the function.
Decls are added to this list as scopes are popped. We only add
the decls that matter. */
- VEC(tree,gc) *decls_in_scope;
+ vec<tree, va_gc> *decls_in_scope;
/* A list of goto statements to this label. This is only used for
goto statements seen before the label was defined, so that we can
issue appropriate warnings for them. */
- VEC(c_goto_bindings_p,gc) *gotos;
+ vec<c_goto_bindings_p, va_gc> *gotos;
};
/* Each c_scope structure describes the complete contents of one
static bool next_is_function_body;
-/* A VEC of pointers to c_binding structures. */
+/* A vector of pointers to c_binding structures. */
typedef struct c_binding *c_binding_ptr;
-DEF_VEC_P(c_binding_ptr);
-DEF_VEC_ALLOC_P(c_binding_ptr,heap);
/* Information that we keep for a struct or union while it is being
parsed. */
{
/* If warn_cxx_compat, a list of types defined within this
struct. */
- VEC(tree,heap) *struct_types;
+ vec<tree> struct_types;
/* If warn_cxx_compat, a list of field names which have bindings,
and which are defined in this struct, but which are not defined
in any enclosing struct. This is used to clear the in_struct
field of the c_bindings structure. */
- VEC(c_binding_ptr,heap) *fields;
+ vec<c_binding_ptr> fields;
/* If warn_cxx_compat, a list of typedef names used when defining
fields in this struct. */
- VEC(tree,heap) *typedefs_seen;
+ vec<tree> typedefs_seen;
};
/* Information for the struct or union currently being parsed, or
of B1, if any. Save it to issue a
warning if needed. */
if (decl_jump_unsafe (b1->decl))
- VEC_safe_push (tree, gc,
- label_vars->decls_in_scope,
- b1->decl);
+ vec_safe_push (label_vars->decls_in_scope, b1->decl);
}
}
}
/* Update the bindings of any goto statements associated
with this label. */
- FOR_EACH_VEC_ELT (c_goto_bindings_p, label_vars->gotos, ix, g)
+ FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
update_spot_bindings (scope, &g->goto_bindings);
}
}
continue;
label_vars = b->u.label;
++label_vars->label_bindings.stmt_exprs;
- FOR_EACH_VEC_ELT (c_goto_bindings_p, label_vars->gotos, ix, g)
+ FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
++g->goto_bindings.stmt_exprs;
}
}
label_vars->label_bindings.left_stmt_expr = true;
label_vars->label_bindings.stmt_exprs = 0;
}
- FOR_EACH_VEC_ELT (c_goto_bindings_p, label_vars->gotos, ix, g)
+ FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
{
--g->goto_bindings.stmt_exprs;
if (g->goto_bindings.stmt_exprs < 0)
label_vars->shadowed = NULL;
set_spot_bindings (&label_vars->label_bindings, defining);
label_vars->decls_in_scope = make_tree_vector ();
- label_vars->gotos = VEC_alloc (c_goto_bindings_p, gc, 0);
+ label_vars->gotos = NULL;
*p_label_vars = label_vars;
return label;
g = ggc_alloc_c_goto_bindings ();
g->loc = loc;
set_spot_bindings (&g->goto_bindings, true);
- VEC_safe_push (c_goto_bindings_p, gc, label_vars->gotos, g);
+ vec_safe_push (label_vars->gotos, g);
return label;
}
...
goto lab;
Issue a warning or error. */
- FOR_EACH_VEC_ELT (tree, label_vars->decls_in_scope, ix, decl)
+ FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl)
warn_about_goto (loc, label, decl);
if (label_vars->label_bindings.left_stmt_expr)
unsigned int ix;
struct c_goto_bindings *g;
- FOR_EACH_VEC_ELT (c_goto_bindings_p, label_vars->gotos, ix, g)
+ FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
{
struct c_binding *b;
struct c_scope *scope;
/* Now that the label is defined, we will issue warnings about
subsequent gotos to this label when we see them. */
- VEC_truncate (c_goto_bindings_p, label_vars->gotos, 0);
+ vec_safe_truncate (label_vars->gotos, 0);
label_vars->gotos = NULL;
}
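
The _SAFE_ iteration macro is what makes the lazily allocated goto lists
above work: FOR_EACH_VEC_ELT assumes the vector exists, while
FOR_EACH_VEC_SAFE_ELT also tolerates a null vec<T, va_gc> * and simply
iterates zero times.  Sketch (walk_fn is a placeholder):

    vec<tree, va_gc> *lazy = NULL;       /* Possibly never allocated.  */
    unsigned ix;
    tree t;
    FOR_EACH_VEC_SAFE_ELT (lazy, ix, t)  /* No-op on a NULL vector.  */
      walk_fn (t);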
{
tree elt, type;
- if (VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (init)))
+ if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init)))
return;
- elt = VEC_last (constructor_elt, CONSTRUCTOR_ELTS (init)).value;
+ elt = CONSTRUCTOR_ELTS (init)->last ().value;
type = TREE_TYPE (elt);
if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_SIZE (type) == NULL_TREE
tree cleanup_id = TREE_VALUE (TREE_VALUE (attr));
tree cleanup_decl = lookup_name (cleanup_id);
tree cleanup;
- VEC(tree,gc) *vec;
+ vec<tree, va_gc> *v;
/* Build "cleanup(&decl)" for the destructor. */
cleanup = build_unary_op (input_location, ADDR_EXPR, decl, 0);
- vec = VEC_alloc (tree, gc, 1);
- VEC_quick_push (tree, vec, cleanup);
+ vec_alloc (v, 1);
+ v->quick_push (cleanup);
cleanup = build_function_call_vec (DECL_SOURCE_LOCATION (decl),
- cleanup_decl, vec, NULL);
- VEC_free (tree, gc, vec);
+ cleanup_decl, v, NULL);
+ vec_free (v);
/* Don't warn about decl unused; the cleanup uses it. */
TREE_USED (decl) = 1;
c_arg_tag *tag;
unsigned ix;
- FOR_EACH_VEC_ELT_REVERSE (c_arg_tag, arg_info->tags, ix, tag)
+ FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
TYPE_CONTEXT (tag->type) = type;
}
break;
struct c_arg_info *arg_info = build_arg_info ();
tree parms = 0;
- VEC(c_arg_tag,gc) *tags = NULL;
+ vec<c_arg_tag, va_gc> *tags = NULL;
tree types = 0;
tree others = 0;
tag.id = b->id;
tag.type = decl;
- VEC_safe_push (c_arg_tag, gc, tags, tag);
+ vec_safe_push (tags, tag);
break;
case CONST_DECL:
*enclosing_struct_parse_info = struct_parse_info;
struct_parse_info = XNEW (struct c_struct_parse_info);
- struct_parse_info->struct_types = VEC_alloc (tree, heap, 0);
- struct_parse_info->fields = VEC_alloc (c_binding_ptr, heap, 0);
- struct_parse_info->typedefs_seen = VEC_alloc (tree, heap, 0);
+ struct_parse_info->struct_types.create (0);
+ struct_parse_info->fields.create (0);
+ struct_parse_info->typedefs_seen.create (0);
/* FIXME: This will issue a warning for a use of a type defined
within a statement expr used within sizeof, et al. This is not
to be cleared when this struct is finished. */
if (!b->in_struct)
{
- VEC_safe_push (c_binding_ptr, heap,
- struct_parse_info->fields, b);
+ struct_parse_info->fields.safe_push (b);
b->in_struct = 1;
}
}
because the flag is used to issue visibility warnings, and we
only want to issue those warnings if the type is referenced
outside of the struct declaration. */
- FOR_EACH_VEC_ELT (tree, struct_parse_info->struct_types, ix, x)
+ FOR_EACH_VEC_ELT (struct_parse_info->struct_types, ix, x)
C_TYPE_DEFINED_IN_STRUCT (x) = 1;
/* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of
not parse in C++, because the C++ lookup rules say that the
typedef name would be looked up in the context of the struct, and
would thus be the field rather than the typedef. */
- if (!VEC_empty (tree, struct_parse_info->typedefs_seen)
+ if (!struct_parse_info->typedefs_seen.is_empty ()
&& fieldlist != NULL_TREE)
{
/* Use a pointer_set using the name of the typedef. We can use
a pointer_set because identifiers are interned. */
struct pointer_set_t *tset = pointer_set_create ();
- FOR_EACH_VEC_ELT (tree, struct_parse_info->typedefs_seen, ix, x)
+ FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, ix, x)
pointer_set_insert (tset, DECL_NAME (x));
for (x = fieldlist; x != NULL_TREE; x = DECL_CHAIN (x))
/* For each field which has a binding and which was not defined in
an enclosing struct, clear the in_struct field. */
- FOR_EACH_VEC_ELT (c_binding_ptr, struct_parse_info->fields, ix, b)
+ FOR_EACH_VEC_ELT (struct_parse_info->fields, ix, b)
b->in_struct = 0;
}
if (warn_cxx_compat)
warn_cxx_compat_finish_struct (fieldlist);
- VEC_free (tree, heap, struct_parse_info->struct_types);
- VEC_free (c_binding_ptr, heap, struct_parse_info->fields);
- VEC_free (tree, heap, struct_parse_info->typedefs_seen);
+ struct_parse_info->struct_types.release ();
+ struct_parse_info->fields.release ();
+ struct_parse_info->typedefs_seen.release ();
XDELETE (struct_parse_info);
struct_parse_info = enclosing_struct_parse_info;
if (warn_cxx_compat
&& struct_parse_info != NULL
&& !in_sizeof && !in_typeof && !in_alignof)
- VEC_safe_push (tree, heap, struct_parse_info->struct_types, t);
+ struct_parse_info->struct_types.safe_push (t);
return t;
}
if (warn_cxx_compat
&& struct_parse_info != NULL
&& !in_sizeof && !in_typeof && !in_alignof)
- VEC_safe_push (tree, heap, struct_parse_info->struct_types, enumtype);
+ struct_parse_info->struct_types.safe_push (enumtype);
return enumtype;
}
}
/* And all the tag declarations. */
- FOR_EACH_VEC_ELT_REVERSE (c_arg_tag, arg_info->tags, ix, tag)
+ FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
if (tag->id)
bind (tag->id, tag->type, current_scope,
/*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
cfun->language = p = ggc_alloc_cleared_language_function ();
p->base.x_stmt_tree = c_stmt_tree;
- c_stmt_tree.x_cur_stmt_list
- = VEC_copy (tree, gc, c_stmt_tree.x_cur_stmt_list);
+ c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list);
p->x_break_label = c_break_label;
p->x_cont_label = c_cont_label;
p->x_switch_stack = c_switch_stack;
/* If we are parsing a struct, record that a struct field
used a typedef. */
if (warn_cxx_compat && struct_parse_info != NULL)
- VEC_safe_push (tree, heap, struct_parse_info->typedefs_seen, type);
+ struct_parse_info->typedefs_seen.safe_push (type);
}
}
else if (TREE_CODE (type) == IDENTIFIER_NODE)
tree t;
unsigned i;
- FOR_EACH_VEC_ELT (tree, all_translation_units, i, t)
+ FOR_EACH_VEC_ELT (*all_translation_units, i, t)
collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (t)), source_file);
collect_ada_nodes (BLOCK_VARS (ext_block), source_file);
tree decl;
unsigned i;
- FOR_EACH_VEC_ELT (tree, all_translation_units, i, t)
+ FOR_EACH_VEC_ELT (*all_translation_units, i, t)
{
decls = DECL_INITIAL (t);
for (decl = BLOCK_VARS (decls); decl; decl = TREE_CHAIN (decl))
/* Process all file scopes in this compilation, and the external_scope,
through wrapup_global_declarations and check_global_declarations. */
- FOR_EACH_VEC_ELT (tree, all_translation_units, i, t)
+ FOR_EACH_VEC_ELT (*all_translation_units, i, t)
c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t)));
c_write_global_declarations_1 (BLOCK_VARS (ext_block));
if (!seen_error ())
{
timevar_push (TV_SYMOUT);
- FOR_EACH_VEC_ELT (tree, all_translation_units, i, t)
+ FOR_EACH_VEC_ELT (*all_translation_units, i, t)
c_write_global_declarations_2 (BLOCK_VARS (DECL_INITIAL (t)));
c_write_global_declarations_2 (BLOCK_VARS (ext_block));
timevar_pop (TV_SYMOUT);
static tree c_parser_transaction_cancel (c_parser *);
static struct c_expr c_parser_expression (c_parser *);
static struct c_expr c_parser_expression_conv (c_parser *);
-static VEC(tree,gc) *c_parser_expr_list (c_parser *, bool, bool,
- VEC(tree,gc) **, location_t *,
- tree *);
+static vec<tree, va_gc> *c_parser_expr_list (c_parser *, bool, bool,
+ vec<tree, va_gc> **, location_t *,
+ tree *);
static void c_parser_omp_construct (c_parser *);
static void c_parser_omp_threadprivate (c_parser *);
static void c_parser_omp_barrier (c_parser *);
|| c_parser_next_token_is (parser, CPP_KEYWORD))
{
tree attr, attr_name, attr_args;
- VEC(tree,gc) *expr_list;
+ vec<tree, va_gc> *expr_list;
if (c_parser_next_token_is (parser, CPP_COMMA))
{
c_parser_consume_token (parser);
stores the arguments in CEXPR_LIST. */
static bool
c_parser_get_builtin_args (c_parser *parser, const char *bname,
- VEC(c_expr_t,gc) **ret_cexpr_list)
+ vec<c_expr_t, va_gc> **ret_cexpr_list)
{
location_t loc = c_parser_peek_token (parser)->location;
- VEC (c_expr_t,gc) *cexpr_list;
+ vec<c_expr_t, va_gc> *cexpr_list;
c_expr_t expr;
*ret_cexpr_list = NULL;
}
expr = c_parser_expr_no_commas (parser, NULL);
- cexpr_list = VEC_alloc (c_expr_t, gc, 1);
+ vec_alloc (cexpr_list, 1);
C_EXPR_APPEND (cexpr_list, expr);
while (c_parser_next_token_is (parser, CPP_COMMA))
{
break;
case RID_CHOOSE_EXPR:
{
- VEC (c_expr_t, gc) *cexpr_list;
+ vec<c_expr_t, va_gc> *cexpr_list;
c_expr_t *e1_p, *e2_p, *e3_p;
tree c;
break;
}
- if (VEC_length (c_expr_t, cexpr_list) != 3)
+ if (vec_safe_length (cexpr_list) != 3)
{
error_at (loc, "wrong number of arguments to "
"%<__builtin_choose_expr%>");
break;
}
- e1_p = &VEC_index (c_expr_t, cexpr_list, 0);
- e2_p = &VEC_index (c_expr_t, cexpr_list, 1);
- e3_p = &VEC_index (c_expr_t, cexpr_list, 2);
+ e1_p = &(*cexpr_list)[0];
+ e2_p = &(*cexpr_list)[1];
+ e3_p = &(*cexpr_list)[2];
c = e1_p->value;
mark_exp_read (e2_p->value);
break;
case RID_BUILTIN_COMPLEX:
{
- VEC(c_expr_t, gc) *cexpr_list;
+ vec<c_expr_t, va_gc> *cexpr_list;
c_expr_t *e1_p, *e2_p;
c_parser_consume_token (parser);
break;
}
- if (VEC_length (c_expr_t, cexpr_list) != 2)
+ if (vec_safe_length (cexpr_list) != 2)
{
error_at (loc, "wrong number of arguments to "
"%<__builtin_complex%>");
break;
}
- e1_p = &VEC_index (c_expr_t, cexpr_list, 0);
- e2_p = &VEC_index (c_expr_t, cexpr_list, 1);
+ e1_p = &(*cexpr_list)[0];
+ e2_p = &(*cexpr_list)[1];
mark_exp_read (e1_p->value);
if (TREE_CODE (e1_p->value) == EXCESS_PRECISION_EXPR)
}
case RID_BUILTIN_SHUFFLE:
{
- VEC(c_expr_t,gc) *cexpr_list;
+ vec<c_expr_t, va_gc> *cexpr_list;
unsigned int i;
c_expr_t *p;
break;
}
- FOR_EACH_VEC_ELT (c_expr_t, cexpr_list, i, p)
+ FOR_EACH_VEC_SAFE_ELT (cexpr_list, i, p)
mark_exp_read (p->value);
- if (VEC_length (c_expr_t, cexpr_list) == 2)
+ if (vec_safe_length (cexpr_list) == 2)
expr.value =
c_build_vec_perm_expr
- (loc, VEC_index (c_expr_t, cexpr_list, 0).value,
- NULL_TREE, VEC_index (c_expr_t, cexpr_list, 1).value);
+ (loc, (*cexpr_list)[0].value,
+ NULL_TREE, (*cexpr_list)[1].value);
- else if (VEC_length (c_expr_t, cexpr_list) == 3)
+ else if (vec_safe_length (cexpr_list) == 3)
expr.value =
c_build_vec_perm_expr
- (loc, VEC_index (c_expr_t, cexpr_list, 0).value,
- VEC_index (c_expr_t, cexpr_list, 1).value,
- VEC_index (c_expr_t, cexpr_list, 2).value);
+ (loc, (*cexpr_list)[0].value,
+ (*cexpr_list)[1].value,
+ (*cexpr_list)[2].value);
else
{
error_at (loc, "wrong number of arguments to "
location_t sizeof_arg_loc[3];
tree sizeof_arg[3];
unsigned int i;
- VEC(tree,gc) *exprlist;
- VEC(tree,gc) *origtypes;
+ vec<tree, va_gc> *exprlist;
+ vec<tree, va_gc> *origtypes;
while (true)
{
location_t op_loc = c_parser_peek_token (parser)->location;
&& DECL_FUNCTION_CODE (orig_expr.value) == BUILT_IN_CONSTANT_P)
expr.original_code = C_MAYBE_CONST_EXPR;
expr.original_type = NULL;
- if (exprlist != NULL)
+ if (exprlist)
{
release_tree_vector (exprlist);
release_tree_vector (origtypes);
nonempty-expr-list , assignment-expression
*/
-static VEC(tree,gc) *
+static vec<tree, va_gc> *
c_parser_expr_list (c_parser *parser, bool convert_p, bool fold_p,
- VEC(tree,gc) **p_orig_types, location_t *sizeof_arg_loc,
- tree *sizeof_arg)
+ vec<tree, va_gc> **p_orig_types,
+ location_t *sizeof_arg_loc, tree *sizeof_arg)
{
- VEC(tree,gc) *ret;
- VEC(tree,gc) *orig_types;
+ vec<tree, va_gc> *ret;
+ vec<tree, va_gc> *orig_types;
struct c_expr expr;
location_t loc = c_parser_peek_token (parser)->location;
location_t cur_sizeof_arg_loc = UNKNOWN_LOCATION;
expr = default_function_array_read_conversion (loc, expr);
if (fold_p)
expr.value = c_fully_fold (expr.value, false, NULL);
- VEC_quick_push (tree, ret, expr.value);
- if (orig_types != NULL)
- VEC_quick_push (tree, orig_types, expr.original_type);
+ ret->quick_push (expr.value);
+ if (orig_types)
+ orig_types->quick_push (expr.original_type);
if (sizeof_arg != NULL
&& cur_sizeof_arg_loc != UNKNOWN_LOCATION
&& expr.original_code == SIZEOF_EXPR)
expr = default_function_array_read_conversion (loc, expr);
if (fold_p)
expr.value = c_fully_fold (expr.value, false, NULL);
- VEC_safe_push (tree, gc, ret, expr.value);
- if (orig_types != NULL)
- VEC_safe_push (tree, gc, orig_types, expr.original_type);
+ vec_safe_push (ret, expr.value);
+ if (orig_types)
+ vec_safe_push (orig_types, expr.original_type);
if (++idx < 3
&& sizeof_arg != NULL
&& cur_sizeof_arg_loc != UNKNOWN_LOCATION
sizeof_arg_loc[idx] = cur_sizeof_arg_loc;
}
}
- if (orig_types != NULL)
+ if (orig_types)
*p_orig_types = orig_types;
return ret;
}
c_parser_objc_keywordexpr (c_parser *parser)
{
tree ret;
- VEC(tree,gc) *expr_list = c_parser_expr_list (parser, true, true,
+ vec<tree, va_gc> *expr_list = c_parser_expr_list (parser, true, true,
NULL, NULL, NULL);
- if (VEC_length (tree, expr_list) == 1)
+ if (vec_safe_length (expr_list) == 1)
{
/* Just return the expression, remove a level of
indirection. */
- ret = VEC_index (tree, expr_list, 0);
+ ret = (*expr_list)[0];
}
else
{
bool fail = false, open_brace_parsed = false;
int i, collapse = 1, nbraces = 0;
location_t for_loc;
- VEC(tree,gc) *for_block = make_tree_vector ();
+ vec<tree, va_gc> *for_block = make_tree_vector ();
for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
if (c_parser_next_tokens_start_declaration (parser))
{
if (i > 0)
- VEC_safe_push (tree, gc, for_block, c_begin_compound_stmt (true));
+ vec_safe_push (for_block, c_begin_compound_stmt (true));
c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL);
decl = check_for_loop_decls (for_loc, flag_isoc99);
if (decl == NULL)
ret = stmt;
}
pop_scopes:
- while (!VEC_empty (tree, for_block))
+ while (!for_block->is_empty ())
{
/* FIXME diagnostics: LOC below should be the actual location of
this particular for block. We need to build a list of
locations to go along with FOR_BLOCK. */
- stmt = c_end_compound_stmt (loc, VEC_pop (tree, for_block), true);
+ stmt = c_end_compound_stmt (loc, for_block->pop (), true);
add_stmt (stmt);
}
release_tree_vector (for_block);
typedef struct c_expr c_expr_t;
/* A varray of c_expr_t. */
-DEF_VEC_O (c_expr_t);
-DEF_VEC_ALLOC_O (c_expr_t, gc);
-DEF_VEC_ALLOC_O (c_expr_t, heap);
/* Append a new c_expr_t element to V. */
#define C_EXPR_APPEND(V, ELEM) \
do { \
c_expr_t __elem = (ELEM); \
- VEC_safe_push (c_expr_t, gc, V, __elem); \
+ vec_safe_push (V, __elem); \
} while (0)
/* A kind of type specifier. Note that this information is currently
tree type;
} c_arg_tag;
-DEF_VEC_O(c_arg_tag);
-DEF_VEC_ALLOC_O(c_arg_tag,gc);
/* Information about the parameters in a function declarator. */
struct c_arg_info {
/* A list of parameter decls. */
tree parms;
/* A list of structure, union and enum tags defined. */
- VEC(c_arg_tag,gc) *tags;
+ vec<c_arg_tag, va_gc> *tags;
/* A list of argument types to go in the FUNCTION_TYPE. */
tree types;
/* A list of non-parameter decls (notably enumeration constants)
bool *);
static int type_lists_compatible_p (const_tree, const_tree, bool *, bool *);
static tree lookup_field (tree, tree);
-static int convert_arguments (tree, VEC(tree,gc) *, VEC(tree,gc) *, tree,
- tree);
+static int convert_arguments (tree, vec<tree, va_gc> *, vec<tree, va_gc> *,
+ tree, tree);
static tree pointer_diff (location_t, tree, tree);
static tree convert_for_assignment (location_t, tree, tree, tree,
enum impl_conv, bool, tree, tree, int);
tree
build_function_call (location_t loc, tree function, tree params)
{
- VEC(tree,gc) *vec;
+ vec<tree, va_gc> *v;
tree ret;
- vec = VEC_alloc (tree, gc, list_length (params));
+ vec_alloc (v, list_length (params));
for (; params; params = TREE_CHAIN (params))
- VEC_quick_push (tree, vec, TREE_VALUE (params));
- ret = build_function_call_vec (loc, function, vec, NULL);
- VEC_free (tree, gc, vec);
+ v->quick_push (TREE_VALUE (params));
+ ret = build_function_call_vec (loc, function, v, NULL);
+ vec_free (v);
return ret;
}
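
build_function_call is the template for bridging old TREE_LIST interfaces to
the vector-based ones: size the vector exactly with vec_alloc, fill it with
quick_push (no capacity checks needed, since the capacity is known), then
vec_free it after the call.  The shape, reduced to its essentials:

    vec<tree, va_gc> *args;
    vec_alloc (args, list_length (params));   /* Exact capacity up front.  */
    for (tree p = params; p; p = TREE_CHAIN (p))
      args->quick_push (TREE_VALUE (p));      /* Cannot exceed capacity.  */
    ret = build_function_call_vec (loc, function, args, NULL);
    vec_free (args);                          /* Release the temporary.  */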
PARAMS. */
tree
-build_function_call_vec (location_t loc, tree function, VEC(tree,gc) *params,
- VEC(tree,gc) *origtypes)
+build_function_call_vec (location_t loc, tree function,
+ vec<tree, va_gc> *params,
+ vec<tree, va_gc> *origtypes)
{
tree fntype, fundecl = 0;
tree name = NULL_TREE, result;
/* For Objective-C, convert any calls via a cast to OBJC_TYPE_REF
expressions, like those used for ObjC messenger dispatches. */
- if (!VEC_empty (tree, params))
- function = objc_rewrite_function_call (function,
- VEC_index (tree, params, 0));
+ if (params && !params->is_empty ())
+ function = objc_rewrite_function_call (function, (*params)[0]);
function = c_fully_fold (function, false, NULL);
/* Before the abort, allow the function arguments to exit or
call longjmp. */
for (i = 0; i < nargs; i++)
- trap = build2 (COMPOUND_EXPR, void_type_node,
- VEC_index (tree, params, i), trap);
+ trap = build2 (COMPOUND_EXPR, void_type_node, (*params)[i], trap);
if (VOID_TYPE_P (return_type))
{
if (AGGREGATE_TYPE_P (return_type))
rhs = build_compound_literal (loc, return_type,
- build_constructor (return_type, 0),
+ build_constructor (return_type,
+ NULL),
false);
else
rhs = build_zero_cst (return_type);
}
}
- argarray = VEC_address (tree, params);
+ argarray = vec_safe_address (params);
/* Check that arguments to builtin functions match the expectations. */
if (fundecl
failure. */
static int
-convert_arguments (tree typelist, VEC(tree,gc) *values,
- VEC(tree,gc) *origtypes, tree function, tree fundecl)
+convert_arguments (tree typelist, vec<tree, va_gc> *values,
+ vec<tree, va_gc> *origtypes, tree function, tree fundecl)
{
tree typetail, val;
unsigned int parmnum;
converted arguments. */
for (typetail = typelist, parmnum = 0;
- VEC_iterate (tree, values, parmnum, val);
+ values && values->iterate (parmnum, &val);
++parmnum)
{
tree type = typetail ? TREE_VALUE (typetail) : 0;
sake of better warnings from convert_and_check. */
if (excess_precision)
val = build1 (EXCESS_PRECISION_EXPR, valtype, val);
- origtype = (origtypes == NULL
- ? NULL_TREE
- : VEC_index (tree, origtypes, parmnum));
+ origtype = (!origtypes) ? NULL_TREE : (*origtypes)[parmnum];
parmval = convert_for_assignment (input_location, type, val,
origtype, ic_argpass, npc,
fundecl, function,
/* Convert `short' and `char' to full-size `int'. */
parmval = default_conversion (val);
- VEC_replace (tree, values, parmnum, parmval);
+ (*values)[parmnum] = parmval;
if (parmval == error_mark_node)
error_args = true;
typetail = TREE_CHAIN (typetail);
}
- gcc_assert (parmnum == VEC_length (tree, values));
+ gcc_assert (parmnum == vec_safe_length (values));
if (typetail != 0 && TREE_VALUE (typetail) != void_type_node)
{
/* If we are saving up the elements rather than allocating them,
this is the list of elements so far (in reverse order,
most recent first). */
-static VEC(constructor_elt,gc) *constructor_elements;
+static vec<constructor_elt, va_gc> *constructor_elements;
/* 1 if constructor should be incrementally stored into a constructor chain,
0 if all the elements should be kept in AVL tree. */
tree unfilled_index;
tree unfilled_fields;
tree bit_index;
- VEC(constructor_elt,gc) *elements;
+ vec<constructor_elt, va_gc> *elements;
struct init_node *pending_elts;
int offset;
int depth;
tree decl;
struct constructor_stack *constructor_stack;
struct constructor_range_stack *constructor_range_stack;
- VEC(constructor_elt,gc) *elements;
+ vec<constructor_elt, va_gc> *elements;
struct spelling *spelling;
struct spelling *spelling_base;
int spelling_size;
constructor_simple = 1;
constructor_nonconst = 0;
constructor_depth = SPELLING_DEPTH ();
- constructor_elements = 0;
+ constructor_elements = NULL;
constructor_pending_elts = 0;
constructor_type = type;
constructor_incremental = 1;
constructor_simple = 1;
constructor_nonconst = 0;
constructor_depth = SPELLING_DEPTH ();
- constructor_elements = 0;
+ constructor_elements = NULL;
constructor_incremental = 1;
constructor_designated = 0;
constructor_pending_elts = 0;
constructor_simple = TREE_STATIC (value);
constructor_nonconst = CONSTRUCTOR_NON_CONST (value);
constructor_elements = CONSTRUCTOR_ELTS (value);
- if (!VEC_empty (constructor_elt, constructor_elements)
+ if (!vec_safe_is_empty (constructor_elements)
&& (TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == ARRAY_TYPE))
set_nonincremental_init (braced_init_obstack);
&& constructor_unfilled_fields)
{
bool constructor_zeroinit =
- (VEC_length (constructor_elt, constructor_elements) == 1
- && integer_zerop
- (VEC_index (constructor_elt, constructor_elements, 0).value));
+ (vec_safe_length (constructor_elements) == 1
+ && integer_zerop ((*constructor_elements)[0].value));
/* Do not warn for flexible array members or zero-length arrays. */
while (constructor_unfilled_fields
{
/* A nonincremental scalar initializer--just return
the element, after verifying there is just one. */
- if (VEC_empty (constructor_elt,constructor_elements))
+ if (vec_safe_is_empty (constructor_elements))
{
if (!constructor_erroneous)
error_init ("empty scalar initializer");
ret.value = error_mark_node;
}
- else if (VEC_length (constructor_elt,constructor_elements) != 1)
+ else if (vec_safe_length (constructor_elements) != 1)
{
error_init ("extra elements in scalar initializer");
- ret.value = VEC_index (constructor_elt,constructor_elements,0).value;
+ ret.value = (*constructor_elements)[0].value;
}
else
- ret.value = VEC_index (constructor_elt,constructor_elements,0).value;
+ ret.value = (*constructor_elements)[0].value;
}
else
{
add_pending_init (index, value, NULL_TREE, true,
braced_init_obstack);
}
- constructor_elements = 0;
+ constructor_elements = NULL;
if (TREE_CODE (constructor_type) == RECORD_TYPE)
{
constructor_unfilled_fields = TYPE_FIELDS (constructor_type);
}
else if (TREE_CODE (constructor_type) == UNION_TYPE)
{
- if (!VEC_empty (constructor_elt, constructor_elements)
- && (VEC_last (constructor_elt, constructor_elements).index
- == field))
- return VEC_last (constructor_elt, constructor_elements).value;
+ if (!vec_safe_is_empty (constructor_elements)
+ && (constructor_elements->last ().index == field))
+ return constructor_elements->last ().value;
}
return 0;
}
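
Element access on a GC vector pointer goes through an explicit dereference,
(*v)[i], while last () and is_empty () are method calls on the pointer; the
vec_safe_* forms are used wherever the vector may not exist at all.  In
miniature (use_fn is a placeholder):

    vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (init);
    if (!vec_safe_is_empty (elts))        /* NULL-tolerant emptiness test.  */
      {
        tree first = (*elts)[0].value;    /* Index via dereference.  */
        tree last = elts->last ().value;  /* Methods via ->.  */
        use_fn (first, last);
      }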
return;
}
else if (TREE_CODE (constructor_type) == UNION_TYPE
- && !VEC_empty (constructor_elt, constructor_elements))
+ && !vec_safe_is_empty (constructor_elements))
{
if (!implicit)
{
- if (TREE_SIDE_EFFECTS (VEC_last (constructor_elt,
- constructor_elements).value))
+ if (TREE_SIDE_EFFECTS (constructor_elements->last ().value))
warning_init (0,
"initialized field with side-effects overwritten");
else if (warn_override_init)
}
/* We can have just one union field set. */
- constructor_elements = 0;
+ constructor_elements = NULL;
}
/* Otherwise, output this element either to
constructor_elements or to the assembler file. */
constructor_elt celt = {field, value};
- VEC_safe_push (constructor_elt, gc, constructor_elements, celt);
+ vec_safe_push (constructor_elements, celt);
/* Advance the variable that indicates sequential elements output. */
if (TREE_CODE (constructor_type) == ARRAY_TYPE)
based on crtl->args.internal_arg_pointer. The element is NULL_RTX if the
pseudo isn't based on it, a CONST_INT offset if the pseudo is based on it
with fixed offset, or PC if this is with variable or unknown offset. */
- VEC(rtx, heap) *cache;
+ vec<rtx> cache;
} internal_arg_pointer_exp_state;
static rtx internal_arg_pointer_based_exp (rtx, bool);
rtx val = NULL_RTX;
unsigned int idx = REGNO (SET_DEST (set)) - FIRST_PSEUDO_REGISTER;
/* Punt on pseudos set multiple times. */
- if (idx < VEC_length (rtx, internal_arg_pointer_exp_state.cache)
- && (VEC_index (rtx, internal_arg_pointer_exp_state.cache, idx)
+ if (idx < internal_arg_pointer_exp_state.cache.length ()
+ && (internal_arg_pointer_exp_state.cache[idx]
!= NULL_RTX))
val = pc_rtx;
else
val = internal_arg_pointer_based_exp (SET_SRC (set), false);
if (val != NULL_RTX)
{
- if (idx
- >= VEC_length (rtx, internal_arg_pointer_exp_state.cache))
- VEC_safe_grow_cleared (rtx, heap,
- internal_arg_pointer_exp_state.cache,
- idx + 1);
- VEC_replace (rtx, internal_arg_pointer_exp_state.cache,
- idx, val);
+ if (idx >= internal_arg_pointer_exp_state.cache.length ())
+ internal_arg_pointer_exp_state.cache.safe_grow_cleared (idx + 1);
+ internal_arg_pointer_exp_state.cache[idx] = val;
}
}
if (NEXT_INSN (insn) == NULL_RTX)
if (REG_P (rtl))
{
unsigned int idx = REGNO (rtl) - FIRST_PSEUDO_REGISTER;
- if (idx < VEC_length (rtx, internal_arg_pointer_exp_state.cache))
- return VEC_index (rtx, internal_arg_pointer_exp_state.cache, idx);
+ if (idx < internal_arg_pointer_exp_state.cache.length ())
+ return internal_arg_pointer_exp_state.cache[idx];
return NULL_RTX;
}
sbitmap_free (stored_args_map);
internal_arg_pointer_exp_state.scan_start = NULL_RTX;
- VEC_free (rtx, heap, internal_arg_pointer_exp_state.cache);
+ internal_arg_pointer_exp_state.cache.release ();
}
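
The pseudo-register cache above relies on safe_grow_cleared for sparse,
on-demand growth: extending the vector to idx + 1 zero-fills the new slots,
so pseudos that were never recorded read back as NULL_RTX.  The pattern in
isolation (cache_set/cache_get are illustrative names):

    static vec<rtx> cache;

    static void
    cache_set (unsigned idx, rtx val)
    {
      if (idx >= cache.length ())
        cache.safe_grow_cleared (idx + 1);  /* New slots become NULL_RTX.  */
      cache[idx] = val;
    }

    static rtx
    cache_get (unsigned idx)
    {
      return idx < cache.length () ? cache[idx] : NULL_RTX;
    }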
else
{
{
FOR_EACH_EDGE (e, ei, bb->succs)
free_edge (e);
- VEC_truncate (edge, bb->succs, 0);
- VEC_truncate (edge, bb->preds, 0);
+ vec_safe_truncate (bb->succs, 0);
+ vec_safe_truncate (bb->preds, 0);
}
FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
free_edge (e);
- VEC_truncate (edge, EXIT_BLOCK_PTR->preds, 0);
- VEC_truncate (edge, ENTRY_BLOCK_PTR->succs, 0);
+ vec_safe_truncate (EXIT_BLOCK_PTR->preds, 0);
+ vec_safe_truncate (ENTRY_BLOCK_PTR->succs, 0);
gcc_assert (!n_edges);
}
static inline void
connect_src (edge e)
{
- VEC_safe_push (edge, gc, e->src->succs, e);
+ vec_safe_push (e->src->succs, e);
df_mark_solutions_dirty ();
}
connect_dest (edge e)
{
basic_block dest = e->dest;
- VEC_safe_push (edge, gc, dest->preds, e);
+ vec_safe_push (dest->preds, e);
e->dest_idx = EDGE_COUNT (dest->preds) - 1;
df_mark_solutions_dirty ();
}
{
if (tmp == e)
{
- VEC_unordered_remove (edge, src->succs, ei.index);
+ src->succs->unordered_remove (ei.index);
df_mark_solutions_dirty ();
return;
}
basic_block dest = e->dest;
unsigned int dest_idx = e->dest_idx;
- VEC_unordered_remove (edge, dest->preds, dest_idx);
+ dest->preds->unordered_remove (dest_idx);
/* If we removed an edge in the middle of the edge vector, we need
to update dest_idx of the edge that moved into the "hole". */
#include "coretypes.h"
#include "basic-block.h"
#include "vec.h"
-#include "vecprim.h"
#include "bitmap.h"
#include "sbitmap.h"
#include "timevar.h"
{
bitmap_iterator bi;
unsigned bb_index, i;
- VEC(int,heap) *work_stack;
+ vec<int> work_stack;
bitmap phi_insertion_points;
- work_stack = VEC_alloc (int, heap, n_basic_blocks);
+ work_stack.create (n_basic_blocks);
phi_insertion_points = BITMAP_ALLOC (NULL);
/* Seed the work list with all the blocks in DEF_BLOCKS. We use
- VEC_quick_push here for speed. This is safe because we know that
+ vec::quick_push here for speed. This is safe because we know that
the number of definition blocks is no greater than the number of
basic blocks, which is the initial capacity of WORK_STACK. */
EXECUTE_IF_SET_IN_BITMAP (def_blocks, 0, bb_index, bi)
- VEC_quick_push (int, work_stack, bb_index);
+ work_stack.quick_push (bb_index);
/* Pop a block off the worklist, add every block that appears in
the original block's DF that we have not already processed to
the worklist. Iterate until the worklist is empty. Blocks
which are added to the worklist are potential sites for
PHI nodes. */
- while (VEC_length (int, work_stack) > 0)
+ while (work_stack.length () > 0)
{
- bb_index = VEC_pop (int, work_stack);
+ bb_index = work_stack.pop ();
/* Since the registration of NEW -> OLD name mappings is done
separately from the call to update_ssa, when updating the SSA
/* Use a safe push because if there is a definition of VAR
in every basic block, then WORK_STACK may eventually have
more than N_BASIC_BLOCK entries. */
- VEC_safe_push (int, heap, work_stack, i);
+ work_stack.safe_push (i);
bitmap_set_bit (phi_insertion_points, i);
}
}
- VEC_free (int, heap, work_stack);
+ work_stack.release ();
return phi_insertion_points;
}
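
The PHI-insertion worklist shows the full by-value lifecycle: create with a
known capacity bound, quick_push while provably within it, safe_push once the
bound no longer holds, and release when done.  Distilled (bound, seed and
expand_fn are placeholders):

    vec<int> work;
    work.create (bound);              /* Reserve capacity up front.  */
    work.quick_push (seed);           /* Known to fit the reservation.  */
    while (work.length () > 0)
      {
        int item = work.pop ();
        if (expand_fn (item))
          work.safe_push (item + 1);  /* May exceed BOUND; grows safely.  */
      }
    work.release ();                  /* Heap vecs are freed explicitly.  */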
delete_basic_block (b);
else
{
- VEC (basic_block, heap) *h
+ vec<basic_block> h
= get_all_dominated_blocks (CDI_DOMINATORS, b);
- while (VEC_length (basic_block, h))
+ while (h.length ())
{
- b = VEC_pop (basic_block, h);
+ b = h.pop ();
prev_bb = b->prev_bb;
delete_basic_block (b);
}
- VEC_free (basic_block, heap, h);
+ h.release ();
}
changed = true;
set_rtl (decl, x);
}
-DEF_VEC_I(HOST_WIDE_INT);
-DEF_VEC_ALLOC_I(HOST_WIDE_INT,heap);
-
struct stack_vars_data
{
/* Vector of offset pairs, always end of some padding followed
by start of the padding that needs Address Sanitizer protection.
The vector is in reversed order; the highest offset pairs come first. */
- VEC(HOST_WIDE_INT, heap) *asan_vec;
+ vec<HOST_WIDE_INT> asan_vec;
/* Vector of partition representative decls in between the paddings. */
- VEC(tree, heap) *asan_decl_vec;
+ vec<tree> asan_decl_vec;
};
/* A subroutine of expand_used_vars. Give each partition representative
= alloc_stack_frame_space (stack_vars[i].size
+ ASAN_RED_ZONE_SIZE,
MAX (alignb, ASAN_RED_ZONE_SIZE));
- VEC_safe_push (HOST_WIDE_INT, heap, data->asan_vec,
- prev_offset);
- VEC_safe_push (HOST_WIDE_INT, heap, data->asan_vec,
- offset + stack_vars[i].size);
+ data->asan_vec.safe_push (prev_offset);
+ data->asan_vec.safe_push (offset + stack_vars[i].size);
/* Find best representative of the partition.
Prefer those with DECL_NAME, even better
satisfying asan_protect_stack_decl predicate. */
repr_decl = stack_vars[j].decl;
if (repr_decl == NULL_TREE)
repr_decl = stack_vars[i].decl;
- VEC_safe_push (tree, heap, data->asan_decl_vec, repr_decl);
+ data->asan_decl_vec.safe_push (repr_decl);
}
else
offset = alloc_stack_frame_space (stack_vars[i].size, alignb);
expand_used_vars (void)
{
tree var, outer_block = DECL_INITIAL (current_function_decl);
- VEC(tree,heap) *maybe_local_decls = NULL;
+ vec<tree> maybe_local_decls = vec<tree>();
rtx var_end_seq = NULL_RTX;
struct pointer_map_t *ssa_name_decls;
unsigned i;
/* At this point all variables on the local_decls with TREE_USED
set are not associated with any block scope. Lay them out. */
- len = VEC_length (tree, cfun->local_decls);
+ len = vec_safe_length (cfun->local_decls);
FOR_EACH_LOCAL_DECL (cfun, i, var)
{
bool expand_now = false;
/* If rtl isn't set yet, which can happen e.g. with
-fstack-protector, retry before returning from this
function. */
- VEC_safe_push (tree, heap, maybe_local_decls, var);
+ maybe_local_decls.safe_push (var);
}
}
We just want the duplicates, as those are the artificial
non-ignored vars that we want to keep until instantiate_decls.
Move them down and truncate the array. */
- if (!VEC_empty (tree, cfun->local_decls))
- VEC_block_remove (tree, cfun->local_decls, 0, len);
+ if (!vec_safe_is_empty (cfun->local_decls))
+ cfun->local_decls->block_remove (0, len);
/* At this point, all variables within the block tree with TREE_USED
set are actually used by the optimized function. Lay them out. */
{
struct stack_vars_data data;
- data.asan_vec = NULL;
- data.asan_decl_vec = NULL;
+ data.asan_vec = vec<HOST_WIDE_INT>();
+ data.asan_decl_vec = vec<tree>();
/* Reorder decls to be protected by iterating over the variables
array multiple times, and allocating out of each phase in turn. */
in addition to phase 1 and 2. */
expand_stack_vars (asan_decl_phase_3, &data);
- if (!VEC_empty (HOST_WIDE_INT, data.asan_vec))
+ if (!data.asan_vec.is_empty ())
{
HOST_WIDE_INT prev_offset = frame_offset;
HOST_WIDE_INT offset
= alloc_stack_frame_space (ASAN_RED_ZONE_SIZE,
ASAN_RED_ZONE_SIZE);
- VEC_safe_push (HOST_WIDE_INT, heap, data.asan_vec, prev_offset);
- VEC_safe_push (HOST_WIDE_INT, heap, data.asan_vec, offset);
+ data.asan_vec.safe_push (prev_offset);
+ data.asan_vec.safe_push (offset);
var_end_seq
= asan_emit_stack_protection (virtual_stack_vars_rtx,
- VEC_address (HOST_WIDE_INT,
- data.asan_vec),
- VEC_address (tree,
- data.asan_decl_vec),
- VEC_length (HOST_WIDE_INT,
- data.asan_vec));
+ data.asan_vec.address (),
+ data.asan_decl_vec.address (),
+ data.asan_vec.length ());
}
expand_stack_vars (NULL, &data);
- VEC_free (HOST_WIDE_INT, heap, data.asan_vec);
- VEC_free (tree, heap, data.asan_decl_vec);
+ data.asan_vec.release ();
+ data.asan_decl_vec.release ();
}
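
vec::address exposes the underlying contiguous buffer, so a vector can feed
any C-style pointer-plus-length interface, as the call to
asan_emit_stack_protection does above.  Generic shape (consume_fn, lo and hi
are placeholders for the callee and previously computed offsets):

    vec<HOST_WIDE_INT> offsets = vec<HOST_WIDE_INT>();
    offsets.safe_push (lo);
    offsets.safe_push (hi);
    consume_fn (offsets.address (), offsets.length ());
    offsets.release ();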
fini_vars_expansion ();
/* If there were any artificial non-ignored vars without rtl
found earlier, see if deferred stack allocation hasn't assigned
rtl to them. */
- FOR_EACH_VEC_ELT_REVERSE (tree, maybe_local_decls, i, var)
+ FOR_EACH_VEC_ELT_REVERSE (maybe_local_decls, i, var)
{
rtx rtl = DECL_RTL_IF_SET (var);
if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT))
add_local_decl (cfun, var);
}
- VEC_free (tree, heap, maybe_local_decls);
+ maybe_local_decls.release ();
/* If the target requires that FRAME_OFFSET be aligned, do it. */
if (STACK_ALIGNMENT_NEEDED)
/* Ensure RTL is created for debug args. */
if (decl && DECL_HAS_DEBUG_ARGS_P (decl))
{
- VEC(tree, gc) **debug_args = decl_debug_args_lookup (decl);
+ vec<tree, va_gc> **debug_args = decl_debug_args_lookup (decl);
unsigned int ix;
tree dtemp;
if (debug_args)
- for (ix = 1; VEC_iterate (tree, *debug_args, ix, dtemp); ix += 2)
+ for (ix = 1; (*debug_args)->iterate (ix, &dtemp); ix += 2)
{
gcc_assert (TREE_CODE (dtemp) == DEBUG_EXPR_DECL);
expand_debug_expr (dtemp);
if (DECL_CONTEXT (aexp)
== DECL_ABSTRACT_ORIGIN (current_function_decl))
{
- VEC(tree, gc) **debug_args;
+ vec<tree, va_gc> **debug_args;
unsigned int ix;
tree ddecl;
debug_args = decl_debug_args_lookup (current_function_decl);
if (debug_args != NULL)
{
- for (ix = 0; VEC_iterate (tree, *debug_args, ix, ddecl);
+ for (ix = 0; vec_safe_iterate (*debug_args, ix, &ddecl);
ix += 2)
if (ddecl == aexp)
return gen_rtx_DEBUG_PARAMETER_REF (mode, aexp);
if (dom_info_available_p (CDI_DOMINATORS))
{
- VEC (basic_block, heap) *doms_to_fix = VEC_alloc (basic_block, heap, 2);
- VEC_quick_push (basic_block, doms_to_fix, dummy);
- VEC_quick_push (basic_block, doms_to_fix, bb);
+ vec<basic_block> doms_to_fix;
+ doms_to_fix.create (2);
+ doms_to_fix.quick_push (dummy);
+ doms_to_fix.quick_push (bb);
iterate_fix_dominators (CDI_DOMINATORS, doms_to_fix, false);
- VEC_free (basic_block, heap, doms_to_fix);
+ doms_to_fix.release ();
}
if (current_loops != NULL)
cfg_hook_duplicate_loop_to_header_edge (struct loop *loop, edge e,
unsigned int ndupl,
sbitmap wont_exit, edge orig,
- VEC (edge, heap) **to_remove,
+ vec<edge> *to_remove,
int flags)
{
gcc_assert (cfg_hooks->cfg_hook_duplicate_loop_to_header_edge);
in loop versioning. */
bool (*cfg_hook_duplicate_loop_to_header_edge) (struct loop *, edge,
unsigned, sbitmap,
- edge, VEC (edge, heap) **,
+ edge, vec<edge> *,
int);
/* Add condition to new basic block and update CFG used in loop
unsigned int ndupl,
sbitmap wont_exit,
edge orig,
- VEC (edge, heap) **to_remove,
+ vec<edge> *to_remove,
int flags);
extern void lv_flush_pending_stmts (edge);
unsigned odepth = loop_depth (outer);
return (loop_depth (loop) > odepth
- && VEC_index (loop_p, loop->superloops, odepth) == outer);
+ && (*loop->superloops)[odepth] == outer);
}
/* Returns the loop such that LOOP is nested DEPTH (indexed from zero)
if (depth == ldepth)
return loop;
- return VEC_index (loop_p, loop->superloops, depth);
+ return (*loop->superloops)[depth];
}
/* Returns the list of the latch edges of LOOP. */
-static VEC (edge, heap) *
+static vec<edge>
get_loop_latch_edges (const struct loop *loop)
{
edge_iterator ei;
edge e;
- VEC (edge, heap) *ret = NULL;
+ vec<edge> ret = vec<edge>();
FOR_EACH_EDGE (e, ei, loop->header->preds)
{
if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
- VEC_safe_push (edge, heap, ret, e);
+ ret.safe_push (e);
}
return ret;
{
basic_block *bbs;
unsigned i;
- VEC (edge, heap) *latches;
+ vec<edge> latches;
edge e;
if (! loop || ! loop->header)
{
fprintf (file, "multiple latches:");
latches = get_loop_latch_edges (loop);
- FOR_EACH_VEC_ELT (edge, latches, i, e)
+ FOR_EACH_VEC_ELT (latches, i, e)
fprintf (file, " %d", e->src->index);
- VEC_free (edge, heap, latches);
+ latches.release ();
fprintf (file, "\n");
}
{
struct loop_exit *exit, *next;
- VEC_free (loop_p, gc, loop->superloops);
+ vec_free (loop->superloops);
/* Break the list of the loop exit records. They will be freed when the
corresponding edge is rescanned or removed, and this avoids
loop_p loop;
/* Free the loop descriptors. */
- FOR_EACH_VEC_ELT (loop_p, loops->larray, i, loop)
+ FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
{
if (!loop)
continue;
flow_loop_free (loop);
}
- VEC_free (loop_p, gc, loops->larray);
+ vec_free (loops->larray);
}
}
int
flow_loop_nodes_find (basic_block header, struct loop *loop)
{
- VEC (basic_block, heap) *stack = NULL;
+ vec<basic_block> stack = vec<basic_block>();
int num_nodes = 1;
edge latch;
edge_iterator latch_ei;
continue;
num_nodes++;
- VEC_safe_push (basic_block, heap, stack, latch->src);
+ stack.safe_push (latch->src);
latch->src->loop_father = loop;
- while (!VEC_empty (basic_block, stack))
+ while (!stack.is_empty ())
{
basic_block node;
edge e;
edge_iterator ei;
- node = VEC_pop (basic_block, stack);
+ node = stack.pop ();
FOR_EACH_EDGE (e, ei, node->preds)
{
{
ancestor->loop_father = loop;
num_nodes++;
- VEC_safe_push (basic_block, heap, stack, ancestor);
+ stack.safe_push (ancestor);
}
}
}
}
- VEC_free (basic_block, heap, stack);
+ stack.release ();
return num_nodes;
}
unsigned depth = loop_depth (father) + 1;
unsigned i;
- VEC_truncate (loop_p, loop->superloops, 0);
- VEC_reserve (loop_p, gc, loop->superloops, depth);
- FOR_EACH_VEC_ELT (loop_p, father->superloops, i, ploop)
- VEC_quick_push (loop_p, loop->superloops, ploop);
- VEC_quick_push (loop_p, loop->superloops, father);
+ loop->superloops = 0;
+ vec_alloc (loop->superloops, depth);
+ FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
+ loop->superloops->quick_push (ploop);
+ loop->superloops->quick_push (father);
for (ploop = loop->inner; ploop; ploop = ploop->next)
establish_preds (ploop, loop);
prev->next = loop->next;
}
- VEC_truncate (loop_p, loop->superloops, 0);
+ loop->superloops = NULL;
}
/* Allocates and returns new loop structure. */
struct loop *root;
memset (loops, 0, sizeof *loops);
- loops->larray = VEC_alloc (loop_p, gc, num_loops);
+ vec_alloc (loops->larray, num_loops);
/* Dummy loop containing whole function. */
root = alloc_loop ();
ENTRY_BLOCK_PTR->loop_father = root;
EXIT_BLOCK_PTR->loop_father = root;
- VEC_quick_push (loop_p, loops->larray, root);
+ loops->larray->quick_push (root);
loops->tree_root = root;
}
header = BASIC_BLOCK (rc_order[b]);
loop = alloc_loop ();
- VEC_quick_push (loop_p, loops->larray, loop);
+ loops->larray->quick_push (loop);
loop->header = header;
loop->num = num_loops;
sbitmap_free (headers);
loops->exits = NULL;
- return VEC_length (loop_p, loops->larray);
+ return loops->larray->length ();
}
/* Ratio of frequencies of edges so that one of more latch edges is
derive the loop structure from it). */
static edge
-find_subloop_latch_edge_by_profile (VEC (edge, heap) *latches)
+find_subloop_latch_edge_by_profile (vec<edge> latches)
{
unsigned i;
edge e, me = NULL;
gcov_type mcount = 0, tcount = 0;
- FOR_EACH_VEC_ELT (edge, latches, i, e)
+ FOR_EACH_VEC_ELT (latches, i, e)
{
if (e->count > mcount)
{
another edge. */
static edge
-find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, VEC (edge, heap) *latches)
+find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
{
- edge e, latch = VEC_index (edge, latches, 0);
+ edge e, latch = latches[0];
unsigned i;
gimple phi;
gimple_stmt_iterator psi;
basic_block bb;
/* Find the candidate for the latch edge. */
- for (i = 1; VEC_iterate (edge, latches, i, e); i++)
+ for (i = 1; latches.iterate (i, &e); i++)
if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
latch = e;
/* Verify that it dominates all the latch edges. */
- FOR_EACH_VEC_ELT (edge, latches, i, e)
+ FOR_EACH_VEC_ELT (latches, i, e)
if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
return NULL;
if (!bb || !flow_bb_inside_loop_p (loop, bb))
continue;
- FOR_EACH_VEC_ELT (edge, latches, i, e)
+ FOR_EACH_VEC_ELT (latches, i, e)
if (e != latch
&& PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
return NULL;
static edge
find_subloop_latch_edge (struct loop *loop)
{
- VEC (edge, heap) *latches = get_loop_latch_edges (loop);
+ vec<edge> latches = get_loop_latch_edges (loop);
edge latch = NULL;
- if (VEC_length (edge, latches) > 1)
+ if (latches.length () > 1)
{
latch = find_subloop_latch_edge_by_profile (latches);
latch = find_subloop_latch_edge_by_ivs (loop, latches);
}
- VEC_free (edge, heap, latches);
+ latches.release ();
return latch;
}
static void
merge_latch_edges (struct loop *loop)
{
- VEC (edge, heap) *latches = get_loop_latch_edges (loop);
+ vec<edge> latches = get_loop_latch_edges (loop);
edge latch, e;
unsigned i;
- gcc_assert (VEC_length (edge, latches) > 0);
+ gcc_assert (latches.length () > 0);
- if (VEC_length (edge, latches) == 1)
- loop->latch = VEC_index (edge, latches, 0)->src;
+ if (latches.length () == 1)
+ loop->latch = latches[0]->src;
else
{
if (dump_file)
fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
mfb_reis_set = pointer_set_create ();
- FOR_EACH_VEC_ELT (edge, latches, i, e)
+ FOR_EACH_VEC_ELT (latches, i, e)
pointer_set_insert (mfb_reis_set, e);
latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
NULL);
loop->latch = latch->src;
}
- VEC_free (edge, heap, latches);
+ latches.release ();
}
/* LOOP may have several latch edges. Transform it into (possibly several)
/* Returns the list of the exit edges of a LOOP. */
-VEC (edge, heap) *
+vec<edge>
get_loop_exit_edges (const struct loop *loop)
{
- VEC (edge, heap) *edges = NULL;
+ vec<edge> edges = vec<edge>();
edge e;
unsigned i;
basic_block *body;
if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
{
for (exit = loop->exits->next; exit->e; exit = exit->next)
- VEC_safe_push (edge, heap, edges, exit->e);
+ edges.safe_push (exit->e);
}
else
{
FOR_EACH_EDGE (e, ei, body[i]->succs)
{
if (!flow_bb_inside_loop_p (loop, e->dest))
- VEC_safe_push (edge, heap, edges, e);
+ edges.safe_push (e);
}
free (body);
}
gcc_assert (bb->loop_father == NULL);
bb->loop_father = loop;
loop->num_nodes++;
- FOR_EACH_VEC_ELT (loop_p, loop->superloops, i, ploop)
+ FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
ploop->num_nodes++;
FOR_EACH_EDGE (e, ei, bb->succs)
void
remove_bb_from_loops (basic_block bb)
{
- int i;
+ unsigned i;
struct loop *loop = bb->loop_father;
loop_p ploop;
edge_iterator ei;
gcc_assert (loop != NULL);
loop->num_nodes--;
- FOR_EACH_VEC_ELT (loop_p, loop->superloops, i, ploop)
+ FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
ploop->num_nodes--;
bb->loop_father = NULL;
ddepth = loop_depth (loop_d);
if (sdepth < ddepth)
- loop_d = VEC_index (loop_p, loop_d->superloops, sdepth);
+ loop_d = (*loop_d->superloops)[sdepth];
else if (sdepth > ddepth)
- loop_s = VEC_index (loop_p, loop_s->superloops, ddepth);
+ loop_s = (*loop_s->superloops)[ddepth];
while (loop_s != loop_d)
{
flow_loop_tree_node_remove (loop);
/* Remove loop from loops array. */
- VEC_replace (loop_p, current_loops->larray, loop->num, NULL);
+ (*current_loops->larray)[loop->num] = NULL;
/* Free loop data. */
flow_loop_free (loop);
#define GCC_CFGLOOP_H
#include "basic-block.h"
-#include "vecprim.h"
#include "double-int.h"
#include "bitmap.h"
};
typedef struct loop *loop_p;
-DEF_VEC_P (loop_p);
-DEF_VEC_ALLOC_P (loop_p, heap);
-DEF_VEC_ALLOC_P (loop_p, gc);
/* An integer estimation of the number of iterations. Estimate_state
describes what is the state of the estimation. */
unsigned num_nodes;
/* Superloops of the loop, starting with the outermost loop. */
- VEC (loop_p, gc) *superloops;
+ vec<loop_p, va_gc> *superloops;
/* The first inner (child) loop or NULL if innermost loop. */
struct loop *inner;
int state;
/* Array of the loops. */
- VEC (loop_p, gc) *larray;
+ vec<loop_p, va_gc> *larray;
/* Maps edges to the list of their descriptions as loop exits. Edges
whose sources or destinations have loop_father == NULL (which may
extern basic_block *get_loop_body_in_custom_order (const struct loop *,
int (*) (const void *, const void *));
-extern VEC (edge, heap) *get_loop_exit_edges (const struct loop *);
+extern vec<edge> get_loop_exit_edges (const struct loop *);
extern edge single_exit (const struct loop *);
extern edge single_likely_exit (struct loop *loop);
extern unsigned num_loop_branches (const struct loop *);
extern void duplicate_subloops (struct loop *, struct loop *);
extern bool duplicate_loop_to_header_edge (struct loop *, edge,
unsigned, sbitmap, edge,
- VEC (edge, heap) **, int);
+ vec<edge> *, int);
extern struct loop *loopify (edge, edge,
basic_block, edge, edge, bool,
unsigned, unsigned);
static inline struct loop *
get_loop (unsigned num)
{
- return VEC_index (loop_p, current_loops->larray, num);
+ return (*current_loops->larray)[num];
}
/* Returns the number of superloops of LOOP. */
static inline unsigned
loop_depth (const struct loop *loop)
{
- return VEC_length (loop_p, loop->superloops);
+ return vec_safe_length (loop->superloops);
}
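
vec_safe_length is the null-tolerant counterpart of vec::length: a loop whose
superloops vector was never allocated reads as depth zero instead of
dereferencing a null pointer.  The distinction, spelled out:

    vec<loop_p, va_gc> *v = NULL;
    unsigned n = vec_safe_length (v);  /* 0: NULL means "empty".  */
    /* v->length () here would dereference a null pointer.  */
    vec_safe_push (v, loop);           /* Allocates; v is now non-NULL.  */
    n = v->length ();                  /* Safe once allocated.  */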
/* Returns the loop depth of the loop BB belongs to. */
static inline struct loop *
loop_outer (const struct loop *loop)
{
- unsigned n = VEC_length (loop_p, loop->superloops);
+ unsigned n = vec_safe_length (loop->superloops);
if (n == 0)
return NULL;
- return VEC_index (loop_p, loop->superloops, n - 1);
+ return (*loop->superloops)[n - 1];
}
/* Returns true if LOOP has at least one exit edge. */
/* Returns the list of loops in current_loops. */
-static inline VEC (loop_p, gc) *
+static inline vec<loop_p, va_gc> *
get_loops (void)
{
if (!current_loops)
if (!current_loops)
return 0;
- return VEC_length (loop_p, current_loops->larray);
+ return vec_safe_length (current_loops->larray);
}
/* Returns true if state of the loops satisfies all properties
typedef struct
{
/* The list of loops to visit. */
- VEC(int,heap) *to_visit;
+ vec<int> to_visit;
/* The index of the actual loop. */
unsigned idx;
{
int anum;
- while (VEC_iterate (int, li->to_visit, li->idx, anum))
+ while (li->to_visit.iterate (li->idx, &anum))
{
li->idx++;
*loop = get_loop (anum);
return;
}
- VEC_free (int, heap, li->to_visit);
+ li->to_visit.release ();
*loop = NULL;
}
li->idx = 0;
if (!current_loops)
{
- li->to_visit = NULL;
+ li->to_visit.create (0);
*loop = NULL;
return;
}
- li->to_visit = VEC_alloc (int, heap, number_of_loops ());
+ li->to_visit.create (number_of_loops ());
mn = (flags & LI_INCLUDE_ROOT) ? 0 : 1;
if (flags & LI_ONLY_INNERMOST)
{
- for (i = 0; VEC_iterate (loop_p, current_loops->larray, i, aloop); i++)
+ for (i = 0; vec_safe_iterate (current_loops->larray, i, &aloop); i++)
if (aloop != NULL
&& aloop->inner == NULL
&& aloop->num >= mn)
- VEC_quick_push (int, li->to_visit, aloop->num);
+ li->to_visit.quick_push (aloop->num);
}
else if (flags & LI_FROM_INNERMOST)
{
while (1)
{
if (aloop->num >= mn)
- VEC_quick_push (int, li->to_visit, aloop->num);
+ li->to_visit.quick_push (aloop->num);
if (aloop->next)
{
while (1)
{
if (aloop->num >= mn)
- VEC_quick_push (int, li->to_visit, aloop->num);
+ li->to_visit.quick_push (aloop->num);
if (aloop->inner != NULL)
aloop = aloop->inner;
#define FOR_EACH_LOOP_BREAK(LI) \
{ \
- VEC_free (int, heap, (LI).to_visit); \
+ (LI).to_visit.release (); \
break; \
}
extern void move_loop_invariants (void);
extern bool finite_loop_p (struct loop *);
extern void scale_loop_profile (struct loop *loop, int scale, int iteration_bound);
-extern VEC (basic_block, heap) * get_loop_hot_path (const struct loop *loop);
+extern vec<basic_block> get_loop_hot_path (const struct loop *loop);
/* Returns the outermost loop of the loop nest that contains LOOP.*/
static inline struct loop *
loop_outermost (struct loop *loop)
{
-
- unsigned n = VEC_length (loop_p, loop->superloops);
+ unsigned n = vec_safe_length (loop->superloops);
if (n <= 1)
return loop;
- return VEC_index (loop_p, loop->superloops, 1);
+ return (*loop->superloops)[1];
}
if (depth == loop_depth (act->loop_father))
cloop = act->loop_father;
else
- cloop = VEC_index (loop_p, act->loop_father->superloops, depth);
+ cloop = (*act->loop_father->superloops)[depth];
src = LOOP_REPR (cloop);
}
single_likely_exit (struct loop *loop)
{
edge found = single_exit (loop);
- VEC (edge, heap) *exits;
+ vec<edge> exits;
unsigned i;
edge ex;
if (found)
return found;
exits = get_loop_exit_edges (loop);
- FOR_EACH_VEC_ELT (edge, exits, i, ex)
+ FOR_EACH_VEC_ELT (exits, i, ex)
{
if (ex->flags & (EDGE_EH | EDGE_ABNORMAL_CALL))
continue;
found = ex;
else
{
- VEC_free (edge, heap, exits);
+ exits.release ();
return NULL;
}
}
- VEC_free (edge, heap, exits);
+ exits.release ();
return found;
}
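
get_loop_exit_edges now returns a vec<edge> by value, but the element storage
still lives on the heap, so every caller must call release () on every exit
path, as single_likely_exit does above.  Caller-side sketch (process_fn is a
placeholder):

    vec<edge> exits = get_loop_exit_edges (loop);
    unsigned i;
    edge ex;
    FOR_EACH_VEC_ELT (exits, i, ex)
      process_fn (ex);
    exits.release ();                 /* Owner must free the payload.  */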
order against direction of edges from latch. Specifically, if
header != latch, latch is the first block. */
-VEC (basic_block, heap) *
+vec<basic_block>
get_loop_hot_path (const struct loop *loop)
{
basic_block bb = loop->header;
- VEC (basic_block, heap) *path = NULL;
+ vec<basic_block> path = vec<basic_block>();
bitmap visited = BITMAP_ALLOC (NULL);
while (true)
edge e;
edge best = NULL;
- VEC_safe_push (basic_block, heap, path, bb);
+ path.safe_push (bb);
bitmap_set_bit (visited, bb->index);
FOR_EACH_EDGE (e, ei, bb->succs)
if ((!best || e->probability > best->probability)
{
unsigned i;
edge e;
- VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ vec<edge> exits = get_loop_exit_edges (loop);
struct loop *father = current_loops->tree_root, *act;
bool ret = false;
- FOR_EACH_VEC_ELT (edge, exits, i, e)
+ FOR_EACH_VEC_ELT (exits, i, e)
{
act = find_common_loop (loop, e->dest->loop_father);
if (flow_loop_nested_p (father, act))
 /* The exit edges of LOOP no longer exit its original immediate
superloops; remove them from the appropriate exit lists. */
- FOR_EACH_VEC_ELT (edge, exits, i, e)
+ FOR_EACH_VEC_ELT (exits, i, e)
rescan_loop_exit (e, false, false);
ret = true;
}
- VEC_free (edge, heap, exits);
+ exits.release ();
return ret;
}
{
edge ae;
basic_block *rem_bbs, *bord_bbs, from, bb;
- VEC (basic_block, heap) *dom_bbs;
+ vec<basic_block> dom_bbs;
int i, nrem, n_bord_bbs;
sbitmap seen;
bool irred_invalidated = false;
/* Remove the path. */
from = e->src;
remove_branch (e);
- dom_bbs = NULL;
+ dom_bbs.create (0);
/* Cancel loops contained in the path. */
for (i = 0; i < nrem; i++)
ldom;
ldom = next_dom_son (CDI_DOMINATORS, ldom))
if (!dominated_by_p (CDI_DOMINATORS, from, ldom))
- VEC_safe_push (basic_block, heap, dom_bbs, ldom);
+ dom_bbs.safe_push (ldom);
}
free (seen);
/* Recount dominators. */
iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, true);
- VEC_free (basic_block, heap, dom_bbs);
+ dom_bbs.release ();
free (bord_bbs);
/* Fix placements of basic blocks inside loops and the placement of
place_new_loop (struct loop *loop)
{
loop->num = number_of_loops ();
- VEC_safe_push (loop_p, gc, current_loops->larray, loop);
+ vec_safe_push (current_loops->larray, loop);
}
/* Given LOOP structure with filled header and latch, find the body of the
static void
update_dominators_in_loop (struct loop *loop)
{
- VEC (basic_block, heap) *dom_bbs = NULL;
+ vec<basic_block> dom_bbs = vec<basic_block>();
sbitmap seen;
basic_block *body;
unsigned i;
if (!bitmap_bit_p (seen, ldom->index))
{
bitmap_set_bit (seen, ldom->index);
- VEC_safe_push (basic_block, heap, dom_bbs, ldom);
+ dom_bbs.safe_push (ldom);
}
}
iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
free (body);
free (seen);
- VEC_free (basic_block, heap, dom_bbs);
+ dom_bbs.release ();
}
/* Creates an if region as shown above. CONDITION is used to create
bool
duplicate_loop_to_header_edge (struct loop *loop, edge e,
unsigned int ndupl, sbitmap wont_exit,
- edge orig, VEC (edge, heap) **to_remove,
+ edge orig, vec<edge> *to_remove,
int flags)
{
struct loop *target, *aloop;
if (orig && bitmap_bit_p (wont_exit, j + 1))
{
if (to_remove)
- VEC_safe_push (edge, heap, *to_remove, new_spec_edges[SE_ORIG]);
+ to_remove->safe_push (new_spec_edges[SE_ORIG]);
set_zero_probability (new_spec_edges[SE_ORIG]);
/* Scale the frequencies of the blocks dominated by the exit. */
if (orig && bitmap_bit_p (wont_exit, 0))
{
if (to_remove)
- VEC_safe_push (edge, heap, *to_remove, orig);
+ to_remove->safe_push (orig);
set_zero_probability (orig);
/* Scale the frequencies of the blocks dominated by the exit. */
for (i = 0; i < n; i++)
{
basic_block dominated, dom_bb;
- VEC (basic_block, heap) *dom_bbs;
+ vec<basic_block> dom_bbs;
unsigned j;
bb = bbs[i];
bb->aux = 0;
dom_bbs = get_dominated_by (CDI_DOMINATORS, bb);
- FOR_EACH_VEC_ELT (basic_block, dom_bbs, j, dominated)
+ FOR_EACH_VEC_ELT (dom_bbs, j, dominated)
{
if (flow_bb_inside_loop_p (loop, dominated))
continue;
CDI_DOMINATORS, first_active[i], first_active_latch);
set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb);
}
- VEC_free (basic_block, heap, dom_bbs);
+ dom_bbs.release ();
}
free (first_active);
basic_block bb;
/* Grow the basic block array if needed. */
- if ((size_t) last_basic_block >= VEC_length (basic_block, basic_block_info))
+ if ((size_t) last_basic_block >= basic_block_info->length ())
{
size_t new_size = last_basic_block + (last_basic_block + 3) / 4;
- VEC_safe_grow_cleared (basic_block, gc, basic_block_info, new_size);
+ vec_safe_grow_cleared (basic_block_info, new_size);
}
n_basic_blocks++;
{
if (tmp == e)
{
- VEC_unordered_remove (edge, ENTRY_BLOCK_PTR->succs, ei.index);
+ ENTRY_BLOCK_PTR->succs->unordered_remove (ei.index);
found = true;
break;
}
gcc_assert (found);
- VEC_safe_push (edge, gc, bb->succs, e);
+ vec_safe_push (bb->succs, e);
make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
}
}
free_histograms ();
 pop_cfun ();
gimple_set_body (node->symbol.decl, NULL);
- VEC_free (ipa_opt_pass, heap,
- node->ipa_transforms_to_apply);
+ node->ipa_transforms_to_apply.release ();
 /* Struct function hangs a lot of data off it that would leak if we
 didn't remove all pointers to it. */
ggc_free (DECL_STRUCT_FUNCTION (node->symbol.decl));
cgraph_call_node_removal_hooks (node);
cgraph_node_remove_callers (node);
cgraph_node_remove_callees (node);
- VEC_free (ipa_opt_pass, heap,
- node->ipa_transforms_to_apply);
+ node->ipa_transforms_to_apply.release ();
 /* Incremental inlining accesses removed nodes stored in the postorder list.
*/
static bool
collect_callers_of_node_1 (struct cgraph_node *node, void *data)
{
- VEC (cgraph_edge_p, heap) ** redirect_callers = (VEC (cgraph_edge_p, heap) **)data;
+ vec<cgraph_edge_p> *redirect_callers = (vec<cgraph_edge_p> *)data;
struct cgraph_edge *cs;
enum availability avail;
cgraph_function_or_thunk_node (node, &avail);
if (avail > AVAIL_OVERWRITABLE)
for (cs = node->callers; cs != NULL; cs = cs->next_caller)
if (!cs->indirect_inlining_edge)
- VEC_safe_push (cgraph_edge_p, heap, *redirect_callers, cs);
+ redirect_callers->safe_push (cs);
return false;
}
/* Collect all callers of NODE and its aliases that are known to lead to NODE
(i.e. are not overwritable). */
-VEC (cgraph_edge_p, heap) *
+vec<cgraph_edge_p>
collect_callers_of_node (struct cgraph_node *node)
{
- VEC (cgraph_edge_p, heap) * redirect_callers = NULL;
+ vec<cgraph_edge_p> redirect_callers = vec<cgraph_edge_p>();
cgraph_for_node_and_aliases (node, collect_callers_of_node_1,
&redirect_callers, false);
return redirect_callers;
/* FIXME: Inline-analysis sets frequency to 0 when edge is optimized out.
Remove this once edges are actually removed from the function at that time. */
&& (e->frequency
- || (inline_edge_summary_vec
- && ((VEC_length(inline_edge_summary_t, inline_edge_summary_vec)
- <= (unsigned) e->uid)
+ || (inline_edge_summary_vec.exists ()
+ && ((inline_edge_summary_vec.length () <= (unsigned) e->uid)
|| !inline_edge_summary (e)->predicate)))
&& (e->frequency
!= compute_call_stmt_bb_frequency (e->caller->symbol.decl,
bool ref_p;
};
typedef struct ipa_replace_map *ipa_replace_map_p;
-DEF_VEC_P(ipa_replace_map_p);
-DEF_VEC_ALLOC_P(ipa_replace_map_p,gc);
struct GTY(()) cgraph_clone_info
{
- VEC(ipa_replace_map_p,gc)* tree_map;
+ vec<ipa_replace_map_p, va_gc> *tree_map;
bitmap args_to_skip;
bitmap combined_args_to_skip;
};
/* Interprocedural passes scheduled to have their transform functions
 applied next time we execute a local pass on them. We maintain it
per-function in order to allow IPA passes to introduce new functions. */
- VEC(ipa_opt_pass,heap) * GTY((skip)) ipa_transforms_to_apply;
+ vec<ipa_opt_pass> GTY((skip)) ipa_transforms_to_apply;
struct cgraph_local_info local;
struct cgraph_global_info global;
unsigned dispatcher_function : 1;
};
-DEF_VEC_P(symtab_node);
-DEF_VEC_ALLOC_P(symtab_node,heap);
-DEF_VEC_ALLOC_P(symtab_node,gc);
typedef struct cgraph_node *cgraph_node_ptr;
-DEF_VEC_P(cgraph_node_ptr);
-DEF_VEC_ALLOC_P(cgraph_node_ptr,heap);
-DEF_VEC_ALLOC_P(cgraph_node_ptr,gc);
/* Function Multiversioning info. */
struct GTY(()) cgraph_function_version_info {
struct cgraph_node_set_def
{
struct pointer_map_t *map;
- VEC(cgraph_node_ptr, heap) *nodes;
+ vec<cgraph_node_ptr> nodes;
};
typedef struct varpool_node *varpool_node_ptr;
-DEF_VEC_P(varpool_node_ptr);
-DEF_VEC_ALLOC_P(varpool_node_ptr,heap);
-DEF_VEC_ALLOC_P(varpool_node_ptr,gc);
/* A varpool node set is a collection of varpool nodes. A varpool node
can appear in multiple sets. */
struct varpool_node_set_def
{
struct pointer_map_t * map;
- VEC(varpool_node_ptr, heap) *nodes;
+ vec<varpool_node_ptr> nodes;
};
typedef struct cgraph_node_set_def *cgraph_node_set;
-DEF_VEC_P(cgraph_node_set);
-DEF_VEC_ALLOC_P(cgraph_node_set,gc);
-DEF_VEC_ALLOC_P(cgraph_node_set,heap);
typedef struct varpool_node_set_def *varpool_node_set;
-DEF_VEC_P(varpool_node_set);
-DEF_VEC_ALLOC_P(varpool_node_set,gc);
-DEF_VEC_ALLOC_P(varpool_node_set,heap);
/* Iterator structure for cgraph node sets. */
typedef struct
typedef struct cgraph_edge *cgraph_edge_p;
-DEF_VEC_P(cgraph_edge_p);
-DEF_VEC_ALLOC_P(cgraph_edge_p,heap);
/* The varpool data structure.
Each static variable decl has assigned varpool_node. */
bool cgraph_for_node_and_aliases (struct cgraph_node *,
bool (*) (struct cgraph_node *, void *),
void *, bool);
-VEC (cgraph_edge_p, heap) * collect_callers_of_node (struct cgraph_node *node);
+vec<cgraph_edge_p> collect_callers_of_node (struct cgraph_node *node);
void verify_cgraph (void);
void verify_cgraph_node (struct cgraph_node *);
void cgraph_mark_address_taken_node (struct cgraph_node *);
struct cgraph_node *, gimple,
unsigned, gcov_type, int, bool);
struct cgraph_node * cgraph_clone_node (struct cgraph_node *, tree, gcov_type,
- int, bool, VEC(cgraph_edge_p,heap) *,
+ int, bool, vec<cgraph_edge_p>,
bool);
tree clone_function_name (tree decl, const char *);
struct cgraph_node * cgraph_create_virtual_clone (struct cgraph_node *old_node,
- VEC(cgraph_edge_p,heap)*,
- VEC(ipa_replace_map_p,gc)* tree_map,
+ vec<cgraph_edge_p>,
+ vec<ipa_replace_map_p, va_gc> *tree_map,
bitmap args_to_skip,
const char *clone_name);
struct cgraph_node *cgraph_find_replacement_node (struct cgraph_node *);
cgraph_inline_failed_t);
void cgraph_materialize_all_clones (void);
struct cgraph_node * cgraph_copy_node_for_versioning (struct cgraph_node *,
- tree, VEC(cgraph_edge_p,heap)*, bitmap);
+ tree, vec<cgraph_edge_p>, bitmap);
struct cgraph_node *cgraph_function_versioning (struct cgraph_node *,
- VEC(cgraph_edge_p,heap)*,
- VEC(ipa_replace_map_p,gc)*,
+ vec<cgraph_edge_p>,
+ vec<ipa_replace_map_p, va_gc> *,
bitmap, bool, bitmap,
basic_block, const char *);
-void tree_function_versioning (tree, tree, VEC (ipa_replace_map_p,gc)*,
+void tree_function_versioning (tree, tree, vec<ipa_replace_map_p, va_gc> *,
bool, bitmap, bool, bitmap, basic_block);
/* In cgraphbuild.c */
static inline bool
csi_end_p (cgraph_node_set_iterator csi)
{
- return csi.index >= VEC_length (cgraph_node_ptr, csi.set->nodes);
+ return csi.index >= csi.set->nodes.length ();
}
/* Advance iterator CSI. */
static inline struct cgraph_node *
csi_node (cgraph_node_set_iterator csi)
{
- return VEC_index (cgraph_node_ptr, csi.set->nodes, csi.index);
+ return csi.set->nodes[csi.index];
}
/* Return an iterator to the first node in SET. */
static inline size_t
cgraph_node_set_size (cgraph_node_set set)
{
- return VEC_length (cgraph_node_ptr, set->nodes);
+ return set->nodes.length ();
}
/* Return true if iterator VSI points to nothing. */
static inline bool
vsi_end_p (varpool_node_set_iterator vsi)
{
- return vsi.index >= VEC_length (varpool_node_ptr, vsi.set->nodes);
+ return vsi.index >= vsi.set->nodes.length ();
}
/* Advance iterator VSI. */
static inline struct varpool_node *
vsi_node (varpool_node_set_iterator vsi)
{
- return VEC_index (varpool_node_ptr, vsi.set->nodes, vsi.index);
+ return vsi.set->nodes[vsi.index];
}
/* Return an iterator to the first node in SET. */
static inline size_t
varpool_node_set_size (varpool_node_set set)
{
- return VEC_length (varpool_node_ptr, set->nodes);
+ return set->nodes.length ();
}
/* Uniquize all constants that appear in memory.
static inline bool
cgraph_node_set_nonempty_p (cgraph_node_set set)
{
- return !VEC_empty (cgraph_node_ptr, set->nodes);
+ return !set->nodes.is_empty ();
}
/* Return true if set is nonempty. */
static inline bool
varpool_node_set_nonempty_p (varpool_node_set set)
{
- return !VEC_empty (varpool_node_ptr, set->nodes);
+ return !set->nodes.is_empty ();
}
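The node-set accessors above are mechanical renames; for reference, the
old-to-new mapping these hunks assume for vectors held by value:

    VEC_length (T, v)           ->  v.length ()
    VEC_empty (T, v)            ->  v.is_empty ()
    VEC_index (T, v, i)         ->  v[i]
    VEC_safe_push (T, A, v, x)  ->  v.safe_push (x)
    VEC_free (T, A, v)          ->  v.release ()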
/* Return true when function NODE is only called directly or it has an alias.
struct cgraph_node *
cgraph_clone_node (struct cgraph_node *n, tree decl, gcov_type count, int freq,
bool update_original,
- VEC(cgraph_edge_p,heap) *redirect_callers,
+ vec<cgraph_edge_p> redirect_callers,
bool call_duplication_hook)
{
struct cgraph_node *new_node = cgraph_create_empty_node ();
new_node->count = count;
new_node->frequency = n->frequency;
new_node->clone = n->clone;
- new_node->clone.tree_map = 0;
+ new_node->clone.tree_map = NULL;
if (n->count)
{
if (new_node->count > n->count)
n->count = 0;
}
- FOR_EACH_VEC_ELT (cgraph_edge_p, redirect_callers, i, e)
+ FOR_EACH_VEC_ELT (redirect_callers, i, e)
{
/* Redirect calls to the old version node to point to its new
version. */
*/
struct cgraph_node *
cgraph_create_virtual_clone (struct cgraph_node *old_node,
- VEC(cgraph_edge_p,heap) *redirect_callers,
- VEC(ipa_replace_map_p,gc) *tree_map,
+ vec<cgraph_edge_p> redirect_callers,
+ vec<ipa_replace_map_p, va_gc> *tree_map,
bitmap args_to_skip,
const char * suffix)
{
DECL_STATIC_DESTRUCTOR (new_node->symbol.decl) = 0;
new_node->clone.tree_map = tree_map;
new_node->clone.args_to_skip = args_to_skip;
- FOR_EACH_VEC_ELT (ipa_replace_map_p, tree_map, i, map)
+ FOR_EACH_VEC_SAFE_ELT (tree_map, i, map)
{
tree var = map->new_tree;
symtab_node ref_node;
struct cgraph_node *
cgraph_copy_node_for_versioning (struct cgraph_node *old_version,
tree new_decl,
- VEC(cgraph_edge_p,heap) *redirect_callers,
+ vec<cgraph_edge_p> redirect_callers,
bitmap bbs_to_copy)
{
struct cgraph_node *new_version;
e->lto_stmt_uid, REG_BR_PROB_BASE,
CGRAPH_FREQ_BASE,
true);
- FOR_EACH_VEC_ELT (cgraph_edge_p, redirect_callers, i, e)
+ FOR_EACH_VEC_ELT (redirect_callers, i, e)
{
/* Redirect calls to the old version node to point to its new
version. */
struct cgraph_node *
cgraph_function_versioning (struct cgraph_node *old_version_node,
- VEC(cgraph_edge_p,heap) *redirect_callers,
- VEC (ipa_replace_map_p,gc)* tree_map,
+ vec<cgraph_edge_p> redirect_callers,
+ vec<ipa_replace_map_p, va_gc> *tree_map,
bitmap args_to_skip,
bool skip_return,
bitmap bbs_to_copy,
{
unsigned int i;
fprintf (cgraph_dump_file, " replace map: ");
- for (i = 0; i < VEC_length (ipa_replace_map_p,
- node->clone.tree_map);
- i++)
+ for (i = 0;
+ i < vec_safe_length (node->clone.tree_map);
+ i++)
{
struct ipa_replace_map *replace_info;
- replace_info = VEC_index (ipa_replace_map_p,
- node->clone.tree_map,
- i);
+ replace_info = (*node->clone.tree_map)[i];
print_generic_expr (cgraph_dump_file, replace_info->old_tree, 0);
fprintf (cgraph_dump_file, " -> ");
print_generic_expr (cgraph_dump_file, replace_info->new_tree, 0);
input_location = saved_loc;
return;
}
- if (!VEC_length (ipa_ref_t, node->symbol.ref_list.references))
+ if (!vec_safe_length (node->symbol.ref_list.references))
ipa_record_reference ((symtab_node)node, (symtab_node)tgt,
IPA_REF_ALIAS, NULL);
if (node->same_body_alias)
struct cgraph_node *node;
FOR_EACH_FUNCTION (node)
if (node->same_body_alias
- && !VEC_length (ipa_ref_t, node->symbol.ref_list.references))
+ && !vec_safe_length (node->symbol.ref_list.references))
{
struct cgraph_node *tgt = cgraph_get_node (node->thunk.alias);
ipa_record_reference ((symtab_node)node, (symtab_node)tgt,
alias_pair *p;
unsigned i;
- for (i = 0; VEC_iterate (alias_pair, alias_pairs, i, p);)
+ for (i = 0; alias_pairs && alias_pairs->iterate (i, &p);)
{
symtab_node target_node = symtab_node_for_asm (p->target);
else
varpool_get_node (p->decl)->alias = true;
DECL_EXTERNAL (p->decl) = 1;
- VEC_unordered_remove (alias_pair, alias_pairs, i);
+ alias_pairs->unordered_remove (i);
continue;
}
else if (!target_node)
{
error ("%q+D aliased to undefined symbol %qE", p->decl, p->target);
- VEC_unordered_remove (alias_pair, alias_pairs, i);
+ alias_pairs->unordered_remove (i);
continue;
}
if (src_node && src_node->local.finalized)
cgraph_reset_node (src_node);
cgraph_create_function_alias (p->decl, target_node->symbol.decl);
- VEC_unordered_remove (alias_pair, alias_pairs, i);
+ alias_pairs->unordered_remove (i);
}
else if (TREE_CODE (p->decl) == VAR_DECL
&& target_node && is_a <varpool_node> (target_node))
{
varpool_create_variable_alias (p->decl, target_node->symbol.decl);
- VEC_unordered_remove (alias_pair, alias_pairs, i);
+ alias_pairs->unordered_remove (i);
}
else
{
p->decl);
warning (0, "%q+D aliased declaration",
target_node->symbol.decl);
- VEC_unordered_remove (alias_pair, alias_pairs, i);
+ alias_pairs->unordered_remove (i);
}
}
- VEC_free (alias_pair, gc, alias_pairs);
+ vec_free (alias_pairs);
}
int i;
tree resdecl;
tree restmp = NULL;
- VEC(tree, heap) *vargs;
+ vec<tree> vargs;
gimple call;
gimple ret;
for (arg = a; arg; arg = DECL_CHAIN (arg))
nargs++;
- vargs = VEC_alloc (tree, heap, nargs);
+ vargs.create (nargs);
if (this_adjusting)
- VEC_quick_push (tree, vargs,
- thunk_adjust (&bsi,
- a, 1, fixed_offset,
- virtual_offset));
+ vargs.quick_push (thunk_adjust (&bsi, a, 1, fixed_offset,
+ virtual_offset));
else
- VEC_quick_push (tree, vargs, a);
+ vargs.quick_push (a);
for (i = 1, arg = DECL_CHAIN (a); i < nargs; i++, arg = DECL_CHAIN (arg))
- VEC_quick_push (tree, vargs, arg);
+ vargs.quick_push (arg);
call = gimple_build_call_vec (build_fold_addr_expr_loc (0, alias), vargs);
- VEC_free (tree, heap, vargs);
+ vargs.release ();
gimple_call_set_from_thunk (call, true);
if (restmp)
gimple_call_set_lhs (call, restmp);
ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
} reg_stat_type;
-DEF_VEC_O(reg_stat_type);
-DEF_VEC_ALLOC_O(reg_stat_type,heap);
-static VEC(reg_stat_type,heap) *reg_stat;
+static vec<reg_stat_type> reg_stat;
/* Record the luid of the last insn that invalidated memory
(anything that writes memory, and subroutine calls, but not pushes). */
ret = split_insns (pattern, insn);
nregs = max_reg_num ();
- if (nregs > VEC_length (reg_stat_type, reg_stat))
- VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
+ if (nregs > reg_stat.length ())
+ reg_stat.safe_grow_cleared (nregs);
return ret;
}
rtl_hooks = combine_rtl_hooks;
- VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
+ reg_stat.safe_grow_cleared (nregs);
init_recog_no_volatile ();
obstack_free (&insn_link_obstack, NULL);
free (uid_log_links);
free (uid_insn_cost);
- VEC_free (reg_stat_type, heap, reg_stat);
+ reg_stat.release ();
{
struct undo *undo, *next;
unsigned int i;
reg_stat_type *p;
- FOR_EACH_VEC_ELT (reg_stat_type, reg_stat, i, p)
+ FOR_EACH_VEC_ELT (reg_stat, i, p)
memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
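reg_stat is now a heap vector of objects held by value. Two details the
hunks below rely on: FOR_EACH_VEC_ELT over an object vector binds a
pointer into the vector, so the memset above clears elements in place,
and &VEC_index (T, v, i) becomes &v[i]. A minimal sketch (the sizing
call is illustrative):

    static vec<reg_stat_type> reg_stat;

    reg_stat.safe_grow_cleared (max_reg_num ());  /* zero-fills new slots */
    reg_stat_type *rsp = &reg_stat[regno];        /* pointer into the vec */
    reg_stat.release ();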
\f
(DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
&& HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
{
- reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, REGNO (x));
+ reg_stat_type *rsp = &reg_stat[REGNO (x)];
if (set == 0 || GET_CODE (set) == CLOBBER)
{
&& GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
&& ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
(REG_P (temp)
- && VEC_index (reg_stat_type, reg_stat,
- REGNO (temp)).nonzero_bits != 0
+ && reg_stat[REGNO (temp)].nonzero_bits != 0
&& GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
&& GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
- && (VEC_index (reg_stat_type, reg_stat,
- REGNO (temp)).nonzero_bits
+ && (reg_stat[REGNO (temp)].nonzero_bits
!= GET_MODE_MASK (word_mode))))
&& ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
&& (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
(REG_P (temp)
- && VEC_index (reg_stat_type, reg_stat,
- REGNO (temp)).nonzero_bits != 0
+ && reg_stat[REGNO (temp)].nonzero_bits != 0
&& GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
&& GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
- && (VEC_index (reg_stat_type, reg_stat,
- REGNO (temp)).nonzero_bits
+ && (reg_stat[REGNO (temp)].nonzero_bits
!= GET_MODE_MASK (word_mode)))))
&& ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
SET_SRC (XVECEXP (newpat, 0, 1)))
value. Otherwise, use the previously-computed global nonzero bits
for this register. */
- rsp = &VEC_index (reg_stat_type, reg_stat, REGNO (x));
+ rsp = &reg_stat[REGNO (x)];
if (rsp->last_set_value != 0
&& (rsp->last_set_mode == mode
|| (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
rtx tem;
reg_stat_type *rsp;
- rsp = &VEC_index (reg_stat_type, reg_stat, REGNO (x));
+ rsp = &reg_stat[REGNO (x)];
if (rsp->last_set_value != 0
&& rsp->last_set_mode == mode
&& ((rsp->last_set_label >= label_tick_ebb_start
for (r = regno; r < endregno; r++)
{
- reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, r);
+ reg_stat_type *rsp = &reg_stat[r];
rsp->last_set_table_tick = label_tick;
}
register. */
for (i = regno; i < endregno; i++)
{
- rsp = &VEC_index (reg_stat_type, reg_stat, i);
+ rsp = &reg_stat[i];
if (insn)
rsp->last_set = insn;
for (i = regno; i < endregno; i++)
{
- rsp = &VEC_index (reg_stat_type, reg_stat, i);
+ rsp = &reg_stat[i];
rsp->last_set_label = label_tick;
if (!insn
|| (value && rsp->last_set_table_tick >= label_tick_ebb_start))
/* The value being assigned might refer to X (like in "x++;"). In that
case, we must replace it with (clobber (const_int 0)) to prevent
infinite loops. */
- rsp = &VEC_index (reg_stat_type, reg_stat, regno);
+ rsp = &reg_stat[regno];
if (value && !get_last_value_validate (&value, insn, label_tick, 0))
{
value = copy_rtx (value);
{
reg_stat_type *rsp;
- rsp = &VEC_index (reg_stat_type, reg_stat, i);
+ rsp = &reg_stat[i];
rsp->last_death = insn;
}
}
{
reg_stat_type *rsp;
- rsp = &VEC_index (reg_stat_type, reg_stat, i);
+ rsp = &reg_stat[i];
rsp->last_set_invalid = 1;
rsp->last_set = insn;
rsp->last_set_value = 0;
continue;
}
- rsp = &VEC_index (reg_stat_type, reg_stat, regno);
+ rsp = &reg_stat[regno];
if (rsp->last_set == insn)
{
if (SUBREG_PROMOTED_UNSIGNED_P (subreg) > 0)
static bool
reg_truncated_to_mode (enum machine_mode mode, const_rtx x)
{
- reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, REGNO (x));
+ reg_stat_type *rsp = &reg_stat[REGNO (x)];
enum machine_mode truncated = rsp->truncated_to_mode;
if (truncated == 0
else
return 0;
- rsp = &VEC_index (reg_stat_type, reg_stat, REGNO (x));
+ rsp = &reg_stat[REGNO (x)];
if (rsp->truncated_to_mode == 0
|| rsp->truncation_label < label_tick_ebb_start
|| (GET_MODE_SIZE (truncated_mode)
for (j = regno; j < endregno; j++)
{
- reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, j);
+ reg_stat_type *rsp = &reg_stat[j];
if (rsp->last_set_invalid
/* If this is a pseudo-register that was only set once and not
live at the beginning of the function, it is always valid. */
return 0;
regno = REGNO (x);
- rsp = &VEC_index (reg_stat_type, reg_stat, regno);
+ rsp = &reg_stat[regno];
value = rsp->last_set_value;
/* If we don't have a value, or if it isn't for this basic block and
#endif
for (; regno < endreg; regno++)
{
- reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, regno);
+ reg_stat_type *rsp = &reg_stat[regno];
if (rsp->last_set
&& rsp->last_set_label == label_tick
&& DF_INSN_LUID (rsp->last_set) > from_luid)
if (code == REG)
{
unsigned int regno = REGNO (x);
- rtx where_dead = VEC_index (reg_stat_type, reg_stat, regno).last_death;
+ rtx where_dead = reg_stat[regno].last_death;
/* Don't move the register if it gets killed in between from and to. */
if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
if (place && REG_NOTE_KIND (note) == REG_DEAD)
{
unsigned int regno = REGNO (XEXP (note, 0));
- reg_stat_type *rsp = &VEC_index (reg_stat_type, reg_stat, regno);
+ reg_stat_type *rsp = &reg_stat[regno];
if (dead_or_set_p (place, XEXP (note, 0))
|| reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
};
typedef struct comparison *comparison_struct_p;
-DEF_VEC_P(comparison_struct_p);
-DEF_VEC_ALLOC_P(comparison_struct_p, heap);
-static VEC(comparison_struct_p, heap) *all_compares;
+static vec<comparison_struct_p> all_compares;
/* Look for a "conforming" comparison, as defined above. If valid, return
the rtx for the COMPARE itself. */
last_cmp->in_a = XEXP (src, 0);
last_cmp->in_b = XEXP (src, 1);
last_cmp->orig_mode = src_mode;
- VEC_safe_push (comparison_struct_p, heap, all_compares, last_cmp);
+ all_compares.safe_push (last_cmp);
/* It's unusual, but be prepared for comparison patterns that
also clobber an input, or perhaps a scratch. */
{
df_analyze ();
- gcc_checking_assert (all_compares == NULL);
+ gcc_checking_assert (!all_compares.exists ());
/* Locate all comparisons and their uses, and eliminate duplicates. */
find_comparisons ();
- if (all_compares)
+ if (all_compares.exists ())
{
struct comparison *cmp;
size_t i;
/* Eliminate comparisons that are redundant with flags computation. */
- FOR_EACH_VEC_ELT (comparison_struct_p, all_compares, i, cmp)
+ FOR_EACH_VEC_ELT (all_compares, i, cmp)
{
try_eliminate_compare (cmp);
XDELETE (cmp);
}
- VEC_free (comparison_struct_p, heap, all_compares);
- all_compares = NULL;
+ all_compares.release ();
}
return 0;
insn = BB_END (loop->incoming_src);
/* If we have to insert the LSETUP before a jump, count that jump in the
length. */
- if (VEC_length (edge, loop->incoming) > 1
- || !(VEC_last (edge, loop->incoming)->flags & EDGE_FALLTHRU))
+ if (vec_safe_length (loop->incoming) > 1
+ || !(loop->incoming->last ()->flags & EDGE_FALLTHRU))
{
gcc_assert (JUMP_P (insn));
insn = PREV_INSN (insn);
if (loop->incoming_src)
{
rtx prev = BB_END (loop->incoming_src);
- if (VEC_length (edge, loop->incoming) > 1
- || !(VEC_last (edge, loop->incoming)->flags & EDGE_FALLTHRU))
+ if (vec_safe_length (loop->incoming) > 1
+ || !(loop->incoming->last ()->flags & EDGE_FALLTHRU))
{
gcc_assert (JUMP_P (prev));
prev = PREV_INSN (prev);
unsigned int unit_mask;
} c6x_sched_insn_info;
-DEF_VEC_O(c6x_sched_insn_info);
-DEF_VEC_ALLOC_O(c6x_sched_insn_info, heap);
/* Record a c6x_sched_insn_info structure for every insn in the function. */
-static VEC(c6x_sched_insn_info, heap) *insn_info;
+static vec<c6x_sched_insn_info> insn_info;
-#define INSN_INFO_LENGTH (VEC_length (c6x_sched_insn_info, insn_info))
-#define INSN_INFO_ENTRY(N) (VEC_index (c6x_sched_insn_info, insn_info, (N)))
+#define INSN_INFO_LENGTH (insn_info).length ()
+#define INSN_INFO_ENTRY(N) (insn_info[(N)])
static bool done_cfi_sections;
{
enum attr_units units;
- if (insn_info)
+ if (insn_info.exists ())
{
int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation;
return c6x_unit_names[unit][0];
return;
}
- if (insn_info)
+ if (insn_info.exists ())
{
int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation;
fputs (".", file);
int i;
unsigned tmp_mask;
int best_reg, old_reg;
- VEC (du_head_p, heap) *involved_chains = NULL;
+ vec<du_head_p> involved_chains = vec<du_head_p>();
unit_req_table new_reqs;
for (i = 0, tmp_mask = op_mask; tmp_mask; i++)
if (info->op_info[i].n_chains != 1)
goto out_fail;
op_chain = regrename_chain_from_id (info->op_info[i].heads[0]->id);
- VEC_safe_push (du_head_p, heap, involved_chains, op_chain);
+ involved_chains.safe_push (op_chain);
tmp_mask &= ~(1 << i);
}
- if (VEC_length (du_head_p, involved_chains) > 1)
+ if (involved_chains.length () > 1)
goto out_fail;
- this_head = VEC_index (du_head_p, involved_chains, 0);
+ this_head = involved_chains[0];
if (this_head->cannot_rename)
goto out_fail;
{
unsigned int mask1, mask2, mask_changed;
int count, side1, side2, req1, req2;
- insn_rr_info *this_rr = &VEC_index (insn_rr_info, insn_rr,
- INSN_UID (chain->insn));
+ insn_rr_info *this_rr = &insn_rr[INSN_UID (chain->insn)];
count = get_unit_reqs (chain->insn, &req1, &side1, &req2, &side2);
memcpy (reqs, new_reqs, sizeof (unit_req_table));
out_fail:
- VEC_free (du_head_p, heap, involved_chains);
+ involved_chains.release ();
}
/* Find insns in LOOP which would, if shifted to the other side
if (!get_unit_operand_masks (insn, &mask1, &mask2))
continue;
- info = &VEC_index (insn_rr_info, insn_rr, INSN_UID (insn));
+ info = &insn_rr[INSN_UID (insn)];
if (info->op_info == NULL)
continue;
unsigned uid = INSN_UID (insn);
if (uid >= INSN_INFO_LENGTH)
- VEC_safe_grow (c6x_sched_insn_info, heap, insn_info, uid * 5 / 4 + 10);
+ insn_info.safe_grow (uid * 5 / 4 + 10);
INSN_INFO_ENTRY (uid).clock = cycle;
INSN_INFO_ENTRY (uid).new_cond = NULL;
ss.last_scheduled_iter0 = insn;
if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER)
ss.issued_this_cycle++;
- if (insn_info)
+ if (insn_info.exists ())
{
state_t st_after = alloca (dfa_state_size);
int curr_clock = ss.curr_sched_clock;
gcc_assert (loop->incoming_dest == loop->head);
entry_edge = NULL;
- FOR_EACH_VEC_ELT (edge, loop->incoming, i, entry_edge)
+ FOR_EACH_VEC_SAFE_ELT (loop->incoming, i, entry_edge)
if (entry_edge->flags & EDGE_FALLTHRU)
break;
if (entry_edge == NULL)
seq = get_insns ();
- if (!single_succ_p (entry_bb) || VEC_length (edge, loop->incoming) > 1)
+ if (!single_succ_p (entry_bb) || vec_safe_length (loop->incoming) > 1)
{
basic_block new_bb;
edge e;
end_sequence ();
/* Make sure we don't try to schedule this loop again. */
- for (ix = 0; VEC_iterate (basic_block, loop->blocks, ix, bb); ix++)
+ for (ix = 0; loop->blocks.iterate (ix, &bb); ix++)
bb->flags |= BB_DISABLE_SCHEDULE;
return true;
{
int sz = get_max_uid () * 3 / 2 + 1;
- insn_info = VEC_alloc (c6x_sched_insn_info, heap, sz);
+ insn_info.create (sz);
}
/* Make sure the real-jump insns we create are not deleted. When modulo-
{
c6x_output_fn_unwind (file);
- if (insn_info)
- VEC_free (c6x_sched_insn_info, heap, insn_info);
- insn_info = NULL;
+ insn_info.release ();
if (!flag_inhibit_size_directive)
ASM_OUTPUT_MEASURED_SIZE (file, fname);
typedef struct GTY (()) darwin_lto_section_e {
const char *sectname;
} darwin_lto_section_e ;
-DEF_VEC_O(darwin_lto_section_e);
-DEF_VEC_ALLOC_O(darwin_lto_section_e, gc);
-static GTY (()) VEC (darwin_lto_section_e, gc) * lto_section_names;
+static GTY (()) vec<darwin_lto_section_e, va_gc> *lto_section_names;
/* Segment for LTO data. */
#define LTO_SEGMENT_NAME "__GNU_LTO"
 TODO: check that we do not revisit sections; that would break
the assumption of how this is done. */
if (lto_section_names == NULL)
- lto_section_names = VEC_alloc (darwin_lto_section_e, gc, 16);
- VEC_safe_push (darwin_lto_section_e, gc, lto_section_names, e);
+ vec_alloc (lto_section_names, 16);
+ vec_safe_push (lto_section_names, e);
}
else if (strncmp (name, "__DWARF,", 8) == 0)
darwin_asm_dwarf_section (name, flags, decl);
"not supported in this configuration; ignored");
}
-/* VEC Used by darwin_asm_dwarf_section.
+/* vec used by darwin_asm_dwarf_section.
Maybe a hash tab would be better here - but the intention is that this is
a very short list (fewer than 16 items) and each entry should (ideally,
eventually) only be presented once.
}
dwarf_sect_used_entry;
-DEF_VEC_O(dwarf_sect_used_entry);
-DEF_VEC_ALLOC_O(dwarf_sect_used_entry, gc);
/* A list of used __DWARF sections. */
-static GTY (()) VEC (dwarf_sect_used_entry, gc) * dwarf_sect_names_table;
+static GTY (()) vec<dwarf_sect_used_entry, va_gc> *dwarf_sect_names_table;
/* This is called when we are asked to assemble a named section and the
name begins with __DWARF,. We keep a list of the section names (without
namelen = strchr (sname, ',') - sname;
gcc_assert (namelen);
if (dwarf_sect_names_table == NULL)
- dwarf_sect_names_table = VEC_alloc (dwarf_sect_used_entry, gc, 16);
+ vec_alloc (dwarf_sect_names_table, 16);
else
for (i = 0;
- VEC_iterate (dwarf_sect_used_entry, dwarf_sect_names_table, i, ref);
+ dwarf_sect_names_table->iterate (i, &ref);
i++)
{
if (!ref)
fprintf (asm_out_file, "Lsection%.*s:\n", namelen, sname);
e.count = 1;
e.name = xstrdup (sname);
- VEC_safe_push (dwarf_sect_used_entry, gc, dwarf_sect_names_table, e);
+ vec_safe_push (dwarf_sect_names_table, e);
}
}
}
/* Output the names and indices. */
- if (lto_section_names && VEC_length (darwin_lto_section_e, lto_section_names))
+ if (lto_section_names && lto_section_names->length ())
{
int count;
darwin_lto_section_e *ref;
/* Emit the names. */
fprintf (asm_out_file, "\t.section %s,%s,regular,debug\n",
LTO_SEGMENT_NAME, LTO_NAMES_SECTION);
- FOR_EACH_VEC_ELT (darwin_lto_section_e, lto_section_names, count, ref)
+ FOR_EACH_VEC_ELT (*lto_section_names, count, ref)
{
fprintf (asm_out_file, "L_GNU_LTO_NAME%d:\n", count);
/* We have to jump through hoops to get the values of the intra-section
fputs ("\t.align\t2\n", asm_out_file);
fputs ("# Section offset, Section length, Name offset, Name length\n",
asm_out_file);
- FOR_EACH_VEC_ELT (darwin_lto_section_e, lto_section_names, count, ref)
+ FOR_EACH_VEC_ELT (*lto_section_names, count, ref)
{
fprintf (asm_out_file, "%s L$gnu$lto$offs%d\t;# %s\n",
op, count, ref->sectname);
if (!desc)
{
tree var, constructor, field;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
int length = TREE_STRING_LENGTH (str) - 1;
if (darwin_warn_nonportable_cfstrings)
{
basic_block bb;
rtx insn, src;
- VEC (basic_block, heap) *todo;
+ vec<basic_block> todo;
sbitmap pushed;
bool need_commit = false;
bool finalize_fp_sets = (MACHINE_FUNCTION (cfun)->unknown_mode_sets == 0);
- todo = VEC_alloc (basic_block, heap, last_basic_block);
+ todo.create (last_basic_block);
pushed = sbitmap_alloc (last_basic_block);
bitmap_clear (pushed);
if (!finalize_fp_sets)
checking the total frequency of the affected edges. */
selected_mode = (enum attr_fp_mode) epiphany_normal_fp_rounding;
- VEC_quick_push (basic_block, todo, bb);
+ todo.quick_push (bb);
bitmap_set_bit (pushed, bb->index);
}
XVECEXP (XEXP (src, 0), 0, 0) = GEN_INT (selected_mode);
SET_SRC (XVECEXP (PATTERN (insn), 0, 2)) = copy_rtx (src);
df_insn_rescan (insn);
}
- while (VEC_length (basic_block, todo))
+ while (todo.length ())
{
- basic_block bb = VEC_pop (basic_block, todo);
+ basic_block bb = todo.pop ();
int selected_reg, jilted_reg;
enum attr_fp_mode jilted_mode;
edge e;
{
if (bitmap_bit_p (pushed, succ->index))
continue;
- VEC_quick_push (basic_block, todo, succ);
+ todo.quick_push (succ);
bitmap_set_bit (pushed, bb->index);
continue;
}
insert_insn_on_edge (seq, e);
}
}
- VEC_free (basic_block, heap, todo);
+ todo.release ();
sbitmap_free (pushed);
if (need_commit)
commit_edge_insertions ();
gimple_seq gseq;
int ix;
tree ele;
- VEC (tree, heap) *fndecls;
+ vec<tree> *fndecls;
unsigned int num_versions = 0;
unsigned int actual_versions = 0;
unsigned int i;
&& empty_bb != NULL);
 /* fndecls_p is actually a vector. */
- fndecls = (VEC (tree, heap) *)fndecls_p;
+ fndecls = static_cast<vec<tree> *> (fndecls_p);
/* At least one more version other than the default. */
- num_versions = VEC_length (tree, fndecls);
+ num_versions = fndecls->length ();
gcc_assert (num_versions >= 2);
function_version_info = (struct _function_version_info *)
XNEWVEC (struct _function_version_info, (num_versions - 1));
/* The first version in the vector is the default decl. */
- default_decl = VEC_index (tree, fndecls, 0);
+ default_decl = (*fndecls)[0];
push_cfun (DECL_STRUCT_FUNCTION (dispatch_decl));
/* Function version dispatch is via IFUNC. IFUNC resolvers fire before
 constructors, so explicitly call __builtin_cpu_init here. */
ifunc_cpu_init_stmt = gimple_build_call_vec (
- ix86_builtins [(int) IX86_BUILTIN_CPU_INIT], NULL);
+ ix86_builtins [(int) IX86_BUILTIN_CPU_INIT], vec<tree>());
gimple_seq_add_stmt (&gseq, ifunc_cpu_init_stmt);
gimple_set_bb (ifunc_cpu_init_stmt, *empty_bb);
set_bb_seq (*empty_bb, gseq);
pop_cfun ();
- for (ix = 1; VEC_iterate (tree, fndecls, ix, ele); ++ix)
+ for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
{
tree version_decl = ele;
tree predicate_chain = NULL_TREE;
{
tree resolver_decl;
basic_block empty_bb;
- VEC (tree, heap) *fn_ver_vec = NULL;
+ vec<tree> fn_ver_vec = vec<tree>();
tree default_ver_decl;
struct cgraph_node *versn;
struct cgraph_node *node;
push_cfun (DECL_STRUCT_FUNCTION (resolver_decl));
- fn_ver_vec = VEC_alloc (tree, heap, 2);
+ fn_ver_vec.create (2);
for (versn_info = node_version_info->next; versn_info;
versn_info = versn_info->next)
if (DECL_VINDEX (versn->symbol.decl))
error_at (DECL_SOURCE_LOCATION (versn->symbol.decl),
"Virtual function multiversioning not supported");
- VEC_safe_push (tree, heap, fn_ver_vec, versn->symbol.decl);
+ fn_ver_vec.safe_push (versn->symbol.decl);
}
- dispatch_function_versions (resolver_decl, fn_ver_vec, &empty_bb);
+ dispatch_function_versions (resolver_decl, &fn_ver_vec, &empty_bb);
rebuild_cgraph_edges ();
pop_cfun ();
{
unsigned int i;
cl_deferred_option *opt;
- VEC(cl_deferred_option,heap) *vec
- = (VEC(cl_deferred_option,heap) *) ia64_deferred_options;
+ vec<cl_deferred_option> *v
+ = (vec<cl_deferred_option> *) ia64_deferred_options;
- FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
- {
- switch (opt->opt_index)
- {
- case OPT_mfixed_range_:
- fix_range (opt->arg);
- break;
+ if (v)
+ FOR_EACH_VEC_ELT (*v, i, opt)
+ {
+ switch (opt->opt_index)
+ {
+ case OPT_mfixed_range_:
+ fix_range (opt->arg);
+ break;
- default:
- gcc_unreachable ();
- }
- }
+ default:
+ gcc_unreachable ();
+ }
+ }
if (TARGET_AUTO_PIC)
target_flags |= MASK_CONST_GP;
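The deferred-option hunks here and in the mep, pa and rx back ends share
one shape: the void * stashed by the option machinery is cast to a vec
pointer, which is legitimately NULL when no such option was seen, so the
walk gains a guard. A sketch of the pattern, with a hypothetical options
pointer and handler:

    vec<cl_deferred_option> *v
      = (vec<cl_deferred_option> *) xyz_deferred_options;  /* may be NULL */
    unsigned int i;
    cl_deferred_option *opt;

    if (v)
      FOR_EACH_VEC_ELT (*v, i, opt)     /* opt points into the vector */
        handle_option (opt);            /* hypothetical */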
unsigned int i;
int j;
cl_deferred_option *opt;
- VEC(cl_deferred_option,heap) *vec
- = (VEC(cl_deferred_option,heap) *) mep_deferred_options;
+ vec<cl_deferred_option> *v = (vec<cl_deferred_option> *) mep_deferred_options;
- FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
- {
- switch (opt->opt_index)
- {
- case OPT_mivc2:
- for (j = 0; j < 32; j++)
- fixed_regs[j + 48] = 0;
- for (j = 0; j < 32; j++)
- call_used_regs[j + 48] = 1;
- for (j = 6; j < 8; j++)
- call_used_regs[j + 48] = 0;
+ if (v)
+ FOR_EACH_VEC_ELT (*v, i, opt)
+ {
+ switch (opt->opt_index)
+ {
+ case OPT_mivc2:
+ for (j = 0; j < 32; j++)
+ fixed_regs[j + 48] = 0;
+ for (j = 0; j < 32; j++)
+ call_used_regs[j + 48] = 1;
+ for (j = 6; j < 8; j++)
+ call_used_regs[j + 48] = 0;
#define RN(n,s) reg_names[FIRST_CCR_REGNO + n] = s
- RN (0, "$csar0");
- RN (1, "$cc");
- RN (4, "$cofr0");
- RN (5, "$cofr1");
- RN (6, "$cofa0");
- RN (7, "$cofa1");
- RN (15, "$csar1");
-
- RN (16, "$acc0_0");
- RN (17, "$acc0_1");
- RN (18, "$acc0_2");
- RN (19, "$acc0_3");
- RN (20, "$acc0_4");
- RN (21, "$acc0_5");
- RN (22, "$acc0_6");
- RN (23, "$acc0_7");
-
- RN (24, "$acc1_0");
- RN (25, "$acc1_1");
- RN (26, "$acc1_2");
- RN (27, "$acc1_3");
- RN (28, "$acc1_4");
- RN (29, "$acc1_5");
- RN (30, "$acc1_6");
- RN (31, "$acc1_7");
+ RN (0, "$csar0");
+ RN (1, "$cc");
+ RN (4, "$cofr0");
+ RN (5, "$cofr1");
+ RN (6, "$cofa0");
+ RN (7, "$cofa1");
+ RN (15, "$csar1");
+
+ RN (16, "$acc0_0");
+ RN (17, "$acc0_1");
+ RN (18, "$acc0_2");
+ RN (19, "$acc0_3");
+ RN (20, "$acc0_4");
+ RN (21, "$acc0_5");
+ RN (22, "$acc0_6");
+ RN (23, "$acc0_7");
+
+ RN (24, "$acc1_0");
+ RN (25, "$acc1_1");
+ RN (26, "$acc1_2");
+ RN (27, "$acc1_3");
+ RN (28, "$acc1_4");
+ RN (29, "$acc1_5");
+ RN (30, "$acc1_6");
+ RN (31, "$acc1_7");
#undef RN
- break;
+ break;
- default:
- gcc_unreachable ();
- }
- }
+ default:
+ gcc_unreachable ();
+ }
+ }
if (flag_pic == 1)
warning (OPT_fpic, "-fpic is not supported");
};
typedef struct mips_multi_member mips_multi_member;
-/* Vector definitions for the above. */
-DEF_VEC_O(mips_multi_member);
-DEF_VEC_ALLOC_O(mips_multi_member, heap);
-
/* The instructions that make up the current multi-insn sequence. */
-static VEC (mips_multi_member, heap) *mips_multi_members;
+static vec<mips_multi_member> mips_multi_members;
/* How many instructions (as opposed to labels) are in the current
multi-insn sequence. */
static void
mips_multi_start (void)
{
- VEC_truncate (mips_multi_member, mips_multi_members, 0);
+ mips_multi_members.truncate (0);
mips_multi_num_insns = 0;
}
mips_multi_add (void)
{
mips_multi_member empty;
- return VEC_safe_push (mips_multi_member, heap, mips_multi_members, empty);
+ return mips_multi_members.safe_push (empty);
}
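One API point worth flagging: safe_push now returns the address of the
element it just pushed, which is what lets mips_multi_add hand the new
slot straight back to its callers. In sketch form:

    mips_multi_member *m = mips_multi_members.safe_push (empty);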
/* Add a normal insn with the given asm format to the current multi-insn
static unsigned int
mips_multi_last_index (void)
{
- return VEC_length (mips_multi_member, mips_multi_members) - 1;
+ return mips_multi_members.length () - 1;
}
/* Add a copy of an existing instruction to the current multi-insn
struct mips_multi_member *member;
member = mips_multi_add ();
- memcpy (member, &VEC_index (mips_multi_member, mips_multi_members, i),
- sizeof (*member));
+ memcpy (member, &mips_multi_members[i], sizeof (*member));
gcc_assert (!member->is_label_p);
}
static void
mips_multi_set_operand (unsigned int i, unsigned int op, rtx x)
{
- VEC_index (mips_multi_member, mips_multi_members, i).operands[op] = x;
+ mips_multi_members[i].operands[op] = x;
}
/* Write out the asm code for the current multi-insn sequence. */
struct mips_multi_member *member;
unsigned int i;
- FOR_EACH_VEC_ELT (mips_multi_member, mips_multi_members, i, member)
+ FOR_EACH_VEC_ELT (mips_multi_members, i, member)
if (member->is_label_p)
fprintf (asm_out_file, "%s\n", member->format);
else
{
unsigned int i;
cl_deferred_option *opt;
- VEC(cl_deferred_option,heap) *vec
- = (VEC(cl_deferred_option,heap) *) pa_deferred_options;
+ vec<cl_deferred_option> *v
+ = (vec<cl_deferred_option> *) pa_deferred_options;
- FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
- {
- switch (opt->opt_index)
- {
- case OPT_mfixed_range_:
- fix_range (opt->arg);
- break;
+ if (v)
+ FOR_EACH_VEC_ELT (*v, i, opt)
+ {
+ switch (opt->opt_index)
+ {
+ case OPT_mfixed_range_:
+ fix_range (opt->arg);
+ break;
- default:
- gcc_unreachable ();
- }
- }
+ default:
+ gcc_unreachable ();
+ }
+ }
/* Unconditional branches in the delay slot are not compatible with dwarf2
call frame information. There is no benefit in using this optimization
/* Vector of funcdef numbers. */
-static VEC(int,heap) *funcdef_nos;
+static vec<int> funcdef_nos;
/* Output deferred profile counters. */
static void
unsigned int i;
int align, n;
- if (VEC_empty (int, funcdef_nos))
+ if (funcdef_nos.is_empty ())
return;
switch_to_section (data_section);
align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
- for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
+ for (i = 0; funcdef_nos.iterate (i, &n); i++)
{
targetm.asm_out.internal_label (asm_out_file, "LP", n);
assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
}
- VEC_free (int, heap, funcdef_nos);
+ funcdef_nos.release ();
}
void
rtx count_label_rtx, addr, r24;
char count_label_name[16];
- VEC_safe_push (int, heap, funcdef_nos, label_no);
+ funcdef_nos.safe_push (label_no);
ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
} extern_symbol;
/* Define gc'd vector type for extern_symbol. */
-DEF_VEC_O(extern_symbol);
-DEF_VEC_ALLOC_O(extern_symbol,gc);
/* Vector of extern_symbol pointers. */
-static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
+static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
{
gcc_assert (file == asm_out_file);
extern_symbol p = {decl, name};
- VEC_safe_push (extern_symbol, gc, extern_symbols, p);
+ vec_safe_push (extern_symbols, p);
}
/* Output text required at the end of an assembler file.
output_deferred_plabels ();
- for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
+ for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
{
tree decl = p->decl;
ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
}
- VEC_free (extern_symbol, gc, extern_symbols);
+ vec_free (extern_symbols);
}
#endif
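A note on the GTY pattern in this hunk and the darwin ones: only va_gc
vectors may sit behind GTY(()) roots, where gengtype walks them through
the pointer; heap vectors embedded in GC'd structures must be hidden
with GTY((skip)), as ipa_transforms_to_apply is earlier in this patch.
Both declarations, side by side:

    static GTY(()) vec<extern_symbol, va_gc> *extern_symbols; /* GC walks this */
    vec<ipa_opt_pass> GTY((skip)) ipa_transforms_to_apply;    /* GC ignores this */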
altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
void *passed_arglist)
{
- VEC(tree,gc) *arglist = (VEC(tree,gc) *) passed_arglist;
- unsigned int nargs = VEC_length (tree, arglist);
+ vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
+ unsigned int nargs = vec_safe_length (arglist);
enum rs6000_builtins fcode
= (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
int size;
int i;
bool unsigned_p;
- VEC(constructor_elt,gc) *vec;
+ vec<constructor_elt, va_gc> *vec;
const char *name = fcode == ALTIVEC_BUILTIN_VEC_SPLATS ? "vec_splats": "vec_promote";
if (nargs == 0)
}
/* Ignore promote's element argument. */
if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE
- && !INTEGRAL_TYPE_P (TREE_TYPE (VEC_index (tree, arglist, 1))))
+ && !INTEGRAL_TYPE_P (TREE_TYPE ((*arglist)[1])))
goto bad;
- arg = VEC_index (tree, arglist, 0);
+ arg = (*arglist)[0];
type = TREE_TYPE (arg);
if (!SCALAR_FLOAT_TYPE_P (type)
&& !INTEGRAL_TYPE_P (type))
goto bad;
}
arg = save_expr (fold_convert (TREE_TYPE (type), arg));
- vec = VEC_alloc (constructor_elt, gc, size);
+ vec_alloc (vec, size);
 for (i = 0; i < size; i++)
{
constructor_elt elt = {NULL_TREE, arg};
- VEC_quick_push (constructor_elt, vec, elt);
+ vec->quick_push (elt);
}
return build_constructor (type, vec);
}
return error_mark_node;
}
- arg2 = VEC_index (tree, arglist, 1);
- arg1 = VEC_index (tree, arglist, 0);
+ arg2 = (*arglist)[1];
+ arg1 = (*arglist)[0];
arg1_type = TREE_TYPE (arg1);
if (TREE_CODE (arg1_type) != VECTOR_TYPE)
return error_mark_node;
}
- arg0 = VEC_index (tree, arglist, 0);
- arg1 = VEC_index (tree, arglist, 1);
+ arg0 = (*arglist)[0];
+ arg1 = (*arglist)[1];
arg1_type = TREE_TYPE (arg1);
- arg2 = VEC_index (tree, arglist, 2);
+ arg2 = (*arglist)[2];
if (TREE_CODE (arg1_type) != VECTOR_TYPE)
goto bad;
fnargs = TREE_CHAIN (fnargs), n++)
{
tree decl_type = TREE_VALUE (fnargs);
- tree arg = VEC_index (tree, arglist, n);
+ tree arg = (*arglist)[n];
tree type;
if (arg == error_mark_node)
int line_number;
} branch_island;
-DEF_VEC_O(branch_island);
-DEF_VEC_ALLOC_O(branch_island,gc);
-static VEC(branch_island,gc) *branch_islands;
+static vec<branch_island, va_gc> *branch_islands;
/* Remember to generate a branch island for far calls to the given
function. */
int line_number)
{
branch_island bi = {function_name, label_name, line_number};
- VEC_safe_push (branch_island, gc, branch_islands, bi);
+ vec_safe_push (branch_islands, bi);
}
/* Generate far-jump branch islands for everything recorded in
{
char tmp_buf[512];
- while (!VEC_empty (branch_island, branch_islands))
+ while (!vec_safe_is_empty (branch_islands))
{
- branch_island *bi = &VEC_last (branch_island, branch_islands);
+ branch_island *bi = &branch_islands->last ();
const char *label = IDENTIFIER_POINTER (bi->label_name);
const char *name = IDENTIFIER_POINTER (bi->function_name);
char name_buf[512];
if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
- VEC_pop (branch_island, branch_islands);
+ branch_islands->pop ();
}
}
branch_island *bi;
unsigned ix;
- FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
+ FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
if (function_name == bi->function_name)
return 0;
return 1;
branch_island *bi;
unsigned ix;
- FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
+ FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
if (function_name == bi->function_name)
return bi->label_name;
return NULL_TREE;
{
unsigned int i;
cl_deferred_option *opt;
- VEC(cl_deferred_option,heap) *vec
- = (VEC(cl_deferred_option,heap) *) rx_deferred_options;
+ vec<cl_deferred_option> *v = (vec<cl_deferred_option> *) rx_deferred_options;
- FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
- {
- switch (opt->opt_index)
- {
- case OPT_mint_register_:
- switch (opt->value)
- {
- case 4:
- fixed_regs[10] = call_used_regs [10] = 1;
- /* Fall through. */
- case 3:
- fixed_regs[11] = call_used_regs [11] = 1;
- /* Fall through. */
- case 2:
- fixed_regs[12] = call_used_regs [12] = 1;
- /* Fall through. */
- case 1:
- fixed_regs[13] = call_used_regs [13] = 1;
- /* Fall through. */
- case 0:
- rx_num_interrupt_regs = opt->value;
- break;
- default:
- rx_num_interrupt_regs = 0;
- /* Error message already given because rx_handle_option
- returned false. */
- break;
- }
- break;
+ if (v)
+ FOR_EACH_VEC_ELT (*v, i, opt)
+ {
+ switch (opt->opt_index)
+ {
+ case OPT_mint_register_:
+ switch (opt->value)
+ {
+ case 4:
+ fixed_regs[10] = call_used_regs [10] = 1;
+ /* Fall through. */
+ case 3:
+ fixed_regs[11] = call_used_regs [11] = 1;
+ /* Fall through. */
+ case 2:
+ fixed_regs[12] = call_used_regs [12] = 1;
+ /* Fall through. */
+ case 1:
+ fixed_regs[13] = call_used_regs [13] = 1;
+ /* Fall through. */
+ case 0:
+ rx_num_interrupt_regs = opt->value;
+ break;
+ default:
+ rx_num_interrupt_regs = 0;
+ /* Error message already given because rx_handle_option
+ returned false. */
+ break;
+ }
+ break;
- default:
- gcc_unreachable ();
- }
- }
+ default:
+ gcc_unreachable ();
+ }
+ }
/* This target defaults to strict volatile bitfields. */
if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
#define SCALAR_TYPE_P(t) (INTEGRAL_TYPE_P (t) \
|| SCALAR_FLOAT_TYPE_P (t) \
|| POINTER_TYPE_P (t))
- VEC(tree,gc) *fnargs = (VEC(tree,gc) *) passed_args;
- unsigned int nargs = VEC_length (tree, fnargs);
+ vec<tree, va_gc> *fnargs = static_cast <vec<tree, va_gc> *> (passed_args);
+ unsigned int nargs = vec_safe_length (fnargs);
int new_fcode, fcode = DECL_FUNCTION_CODE (fndecl);
struct spu_builtin_description *desc;
tree match = NULL_TREE;
return error_mark_node;
}
- var = VEC_index (tree, fnargs, p);
+ var = (*fnargs)[p];
if (TREE_CODE (var) == NON_LVALUE_EXPR)
var = TREE_OPERAND (var, 0);
/* List of aliased identifiers. They must be persistent across gc. */
-static GTY(()) VEC(tree,gc) *aliases_id;
+static GTY(()) vec<tree, va_gc> *aliases_id;
/* Add a CRTL translation. This simply uses the transparent alias
mechanism, which is platform independent and works with the
IDENTIFIER_TRANSPARENT_ALIAS (targ) = 1;
TREE_CHAIN (targ) = get_identifier_with_length (id_str, id_len);
- VEC_safe_push (tree, gc, aliases_id, targ);
+ vec_safe_push (aliases_id, targ);
}
/* Do VMS specific stuff on builtins: disable the ones that are not
static tree
vxworks_emutls_var_init (tree var, tree decl, tree tmpl_addr)
{
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 3);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 3);
tree type = TREE_TYPE (var);
tree field = TYPE_FIELDS (type);
constructor_elt elt = {field, fold_convert (TREE_TYPE (field), tmpl_addr)};
- VEC_quick_push (constructor_elt, v, elt);
+ v->quick_push (elt);
field = DECL_CHAIN (field);
elt.index = field;
elt.value = build_int_cst (TREE_TYPE (field), 0);
- VEC_quick_push (constructor_elt, v, elt);
+ v->quick_push (elt);
field = DECL_CHAIN (field);
elt.index = field;
elt.value = fold_convert (TREE_TYPE (field), DECL_SIZE_UNIT (decl));
- VEC_quick_push (constructor_elt, v, elt);
+ v->quick_push (elt);
return build_constructor (type, v);
}
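The constructor_elt hunks all build GC'd element vectors the same way:
vec_alloc when the final size is known, quick_push to fill without bounds
checks, then hand the vector to build_constructor, which keeps it. A
sketch, with a hypothetical field and value:

    vec<constructor_elt, va_gc> *v;
    vec_alloc (v, 3);                        /* exact size known up front */
    constructor_elt elt = {field, value};    /* hypothetical index/value pair */
    v->quick_push (elt);
    tree ctor = build_constructor (type, v); /* ctor retains v */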
is a pointer to a pointer, the second a cookie. */
typedef void (*gt_pointer_operator) (void *, void *);
-#endif /* coretypes.h */
+#if !defined (HAVE_UCHAR)
+typedef unsigned char uchar;
+#endif
+#endif /* coretypes.h */
static tree build_fn_info (const struct coverage_data *, tree, tree);
static tree build_info (tree, tree);
static bool coverage_obj_init (void);
-static VEC(constructor_elt,gc) *coverage_obj_fn
-(VEC(constructor_elt,gc) *, tree, struct coverage_data const *);
-static void coverage_obj_finish (VEC(constructor_elt,gc) *);
+static vec<constructor_elt, va_gc> *coverage_obj_fn
+(vec<constructor_elt, va_gc> *, tree, struct coverage_data const *);
+static void coverage_obj_finish (vec<constructor_elt, va_gc> *);
\f
/* Return the type node for gcov_type. */
tree fields = TYPE_FIELDS (type);
tree ctr_type;
unsigned ix;
- VEC(constructor_elt,gc) *v1 = NULL;
- VEC(constructor_elt,gc) *v2 = NULL;
+ vec<constructor_elt, va_gc> *v1 = NULL;
+ vec<constructor_elt, va_gc> *v2 = NULL;
/* key */
CONSTRUCTOR_APPEND_ELT (v1, fields,
for (ix = 0; ix != GCOV_COUNTERS; ix++)
if (prg_ctr_mask & (1 << ix))
{
- VEC(constructor_elt,gc) *ctr = NULL;
+ vec<constructor_elt, va_gc> *ctr = NULL;
tree var = data->ctr_vars[ix];
unsigned count = 0;
unsigned ix;
tree filename_string;
int da_file_name_len;
- VEC(constructor_elt,gc) *v1 = NULL;
- VEC(constructor_elt,gc) *v2 = NULL;
+ vec<constructor_elt, va_gc> *v1 = NULL;
+ vec<constructor_elt, va_gc> *v2 = NULL;
/* Version ident */
CONSTRUCTOR_APPEND_ELT (v1, info_fields,
/* Generate the coverage function info for FN and DATA. Append a
pointer to that object to CTOR and return the appended CTOR. */
-static VEC(constructor_elt,gc) *
-coverage_obj_fn (VEC(constructor_elt,gc) *ctor, tree fn,
+static vec<constructor_elt, va_gc> *
+coverage_obj_fn (vec<constructor_elt, va_gc> *ctor, tree fn,
struct coverage_data const *data)
{
tree init = build_fn_info (data, gcov_fn_info_type, gcov_info_var);
function objects from CTOR. Generate the gcov_info initializer. */
static void
-coverage_obj_finish (VEC(constructor_elt,gc) *ctor)
+coverage_obj_finish (vec<constructor_elt, va_gc> *ctor)
{
- unsigned n_functions = VEC_length(constructor_elt, ctor);
+ unsigned n_functions = vec_safe_length (ctor);
tree fn_info_ary_type = build_array_type
(build_qualified_type (gcov_fn_info_ptr_type, TYPE_QUAL_CONST),
build_index_type (size_int (n_functions - 1)));
if (coverage_obj_init ())
{
- VEC(constructor_elt,gc) *fn_ctor = NULL;
+ vec<constructor_elt, va_gc> *fn_ctor = NULL;
struct coverage_data *fn;
for (fn = functions_head; fn; fn = fn->next)
+2012-11-17 Diego Novillo <dnovillo@google.com>
+
+ Adjust for new vec API (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
+
+ * Make-lang.in: Remove dependencies on vecir.h and vecprim.h everywhere.
+ * call.c: Use new vec API in vec.h.
+ * class.c: Likewise.
+ * cp-gimplify.c: Likewise.
+ * cp-tree.h: Likewise.
+ * cvt.c: Likewise.
+ * decl.c: Likewise.
+ * decl2.c: Likewise.
+ * error.c: Likewise.
+ * except.c: Likewise.
+ * init.c: Likewise.
+ * mangle.c: Likewise.
+ * method.c: Likewise.
+ * name-lookup.c: Likewise.
+ * name-lookup.h: Likewise.
+ * parser.c: Likewise.
+ * parser.h: Likewise.
+ * pt.c: Likewise.
+ * repo.c: Likewise.
+ * rtti.c: Likewise.
+ * search.c: Likewise.
+ * semantics.c: Likewise.
+ * tree.c: Likewise.
+ * typeck.c: Likewise.
+ * typeck2.c: Likewise.
+
2012-11-17 Gabriel Dos Reis <gdr@integrable-solutions.net>
* semantics.c (finish_id_expression): Tidy diagnostic message.
cp/cfns.h $(TREE_INLINE_H) $(TARGET_H) gt-cp-except.h
cp/expr.o: cp/expr.c $(CXX_TREE_H) $(TM_H) $(FLAGS_H) $(TM_P_H)
cp/pt.o: cp/pt.c $(CXX_TREE_H) $(TM_H) cp/decl.h cp/cp-objcp-common.h \
- toplev.h $(TREE_INLINE_H) pointer-set.h gt-cp-pt.h vecprim.h intl.h \
+ toplev.h $(TREE_INLINE_H) pointer-set.h gt-cp-pt.h intl.h \
c-family/c-objc.h
cp/error.o: cp/error.c $(CXX_TREE_H) $(TM_H) $(DIAGNOSTIC_H) \
$(FLAGS_H) $(REAL_H) $(LANGHOOKS_DEF_H) $(CXX_PRETTY_PRINT_H) \
static struct z_candidate *splice_viable (struct z_candidate *, bool, bool *);
static bool any_strictly_viable (struct z_candidate *);
static struct z_candidate *add_template_candidate
- (struct z_candidate **, tree, tree, tree, tree, const VEC(tree,gc) *,
+ (struct z_candidate **, tree, tree, tree, tree, const vec<tree, va_gc> *,
tree, tree, tree, int, unification_kind_t, tsubst_flags_t);
static struct z_candidate *add_template_candidate_real
- (struct z_candidate **, tree, tree, tree, tree, const VEC(tree,gc) *,
+ (struct z_candidate **, tree, tree, tree, tree, const vec<tree, va_gc> *,
tree, tree, tree, int, tree, unification_kind_t, tsubst_flags_t);
static struct z_candidate *add_template_conv_candidate
- (struct z_candidate **, tree, tree, tree, const VEC(tree,gc) *, tree,
- tree, tree, tsubst_flags_t);
+ (struct z_candidate **, tree, tree, tree, const vec<tree, va_gc> *,
+ tree, tree, tree, tsubst_flags_t);
static void add_builtin_candidates
(struct z_candidate **, enum tree_code, enum tree_code,
tree, tree *, int, tsubst_flags_t);
(struct z_candidate **, tree, tree, tree, tree *, tree *,
int, tsubst_flags_t);
static struct z_candidate *add_conv_candidate
- (struct z_candidate **, tree, tree, tree, const VEC(tree,gc) *, tree,
+ (struct z_candidate **, tree, tree, tree, const vec<tree, va_gc> *, tree,
tree, tsubst_flags_t);
static struct z_candidate *add_function_candidate
- (struct z_candidate **, tree, tree, tree, const VEC(tree,gc) *, tree,
+ (struct z_candidate **, tree, tree, tree, const vec<tree, va_gc> *, tree,
tree, int, tsubst_flags_t);
static conversion *implicit_conversion (tree, tree, tree, bool, int,
tsubst_flags_t);
static conversion *maybe_handle_ref_bind (conversion **);
static void maybe_handle_implicit_object (conversion **);
static struct z_candidate *add_candidate
- (struct z_candidate **, tree, tree, const VEC(tree,gc) *, size_t,
+ (struct z_candidate **, tree, tree, const vec<tree, va_gc> *, size_t,
conversion **, tree, tree, int, struct rejection_reason *);
static tree source_type (conversion *);
static void add_warning (struct z_candidate *, struct z_candidate *);
static conversion *conditional_conversion (tree, tree, tsubst_flags_t);
static char *name_as_c_string (tree, tree, bool *);
static tree prep_operand (tree);
-static void add_candidates (tree, tree, const VEC(tree,gc) *, tree, tree, bool,
- tree, tree, int, struct z_candidate **,
+static void add_candidates (tree, tree, const vec<tree, va_gc> *, tree, tree,
+ bool, tree, tree, int, struct z_candidate **,
tsubst_flags_t);
static conversion *merge_conversion_sequences (conversion *, conversion *);
static bool magic_varargs_p (tree);
/* The rest of the arguments to use when calling this function. If
there are no further arguments this may be NULL or it may be an
empty vector. */
- const VEC(tree,gc) *args;
+ const vec<tree, va_gc> *args;
/* The implicit conversion sequences for each of the arguments to
FN. */
conversion **convs;
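Worth spelling out for readers following the conversion: under the new API a GC vector is just a pointer to a vec<tree, va_gc>, and a NULL pointer is a legitimate empty vector, which is what lets ARGS above be "NULL or ... an empty vector". A minimal sketch of the NULL-safe accessors this series relies on (variable name hypothetical):

/* Sketch: NULL doubles as the empty vector under the new API.  */
vec<tree, va_gc> *args = NULL;
gcc_assert (vec_safe_length (args) == 0);  /* NULL-safe length.  */
gcc_assert (vec_safe_is_empty (args));     /* NULL-safe emptiness test.  */
vec_safe_push (args, integer_zero_node);   /* Allocates on first push.  */
gcc_assert (args->length () == 1);         /* Member form requires non-NULL.  */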
static struct z_candidate *
add_candidate (struct z_candidate **candidates,
- tree fn, tree first_arg, const VEC(tree,gc) *args,
+ tree fn, tree first_arg, const vec<tree, va_gc> *args,
size_t num_convs, conversion **convs,
tree access_path, tree conversion_path,
int viable, struct rejection_reason *reason)
static struct z_candidate *
add_function_candidate (struct z_candidate **candidates,
tree fn, tree ctype, tree first_arg,
- const VEC(tree,gc) *args, tree access_path,
+ const vec<tree, va_gc> *args, tree access_path,
tree conversion_path, int flags,
tsubst_flags_t complain)
{
else
skip = 0;
- len = VEC_length (tree, args) - skip + (first_arg != NULL_TREE ? 1 : 0);
+ len = vec_safe_length (args) - skip + (first_arg != NULL_TREE ? 1 : 0);
convs = alloc_conversions (len);
/* 13.3.2 - Viable functions [over.match.viable]
arg = first_arg;
else
arg = CONST_CAST_TREE (
- VEC_index (tree, args,
- i + skip - (first_arg != NULL_TREE ? 1 : 0)));
+ (*args)[i + skip - (first_arg != NULL_TREE ? 1 : 0)]);
argtype = lvalue_type (arg);
is_this = (i == 0 && DECL_NONSTATIC_MEMBER_FUNCTION_P (fn)
static struct z_candidate *
add_conv_candidate (struct z_candidate **candidates, tree fn, tree obj,
- tree first_arg, const VEC(tree,gc) *arglist,
+ tree first_arg, const vec<tree, va_gc> *arglist,
tree access_path, tree conversion_path,
tsubst_flags_t complain)
{
parmlist = TREE_TYPE (parmlist);
parmlist = TYPE_ARG_TYPES (parmlist);
- len = VEC_length (tree, arglist) + (first_arg != NULL_TREE ? 1 : 0) + 1;
+ len = vec_safe_length (arglist) + (first_arg != NULL_TREE ? 1 : 0) + 1;
convs = alloc_conversions (len);
parmnode = parmlist;
viable = 1;
else if (i == 1 && first_arg != NULL_TREE)
arg = first_arg;
else
- arg = VEC_index (tree, arglist,
- i - (first_arg != NULL_TREE ? 1 : 0) - 1);
+ arg = (*arglist)[i - (first_arg != NULL_TREE ? 1 : 0) - 1];
argtype = lvalue_type (arg);
if (i == 0)
tree type, argtypes[3], t;
/* TYPES[i] is the set of possible builtin-operator parameter types
we will consider for the Ith argument. */
- VEC(tree,gc) *types[2];
+ vec<tree, va_gc> *types[2];
unsigned ix;
for (i = 0; i < 3; ++i)
if (code == COND_EXPR)
{
if (real_lvalue_p (args[i]))
- VEC_safe_push (tree, gc, types[i],
- build_reference_type (argtypes[i]));
- VEC_safe_push (tree, gc, types[i],
- TYPE_MAIN_VARIANT (argtypes[i]));
+ vec_safe_push (types[i], build_reference_type (argtypes[i]));
+ vec_safe_push (types[i], TYPE_MAIN_VARIANT (argtypes[i]));
}
else if (! convs)
continue;
if (code == COND_EXPR && TREE_CODE (type) == REFERENCE_TYPE)
- VEC_safe_push (tree, gc, types[i], type);
+ vec_safe_push (types[i], type);
type = non_reference (type);
if (i != 0 || ! ref1)
{
type = cv_unqualified (type_decays_to (type));
if (enum_p && TREE_CODE (type) == ENUMERAL_TYPE)
- VEC_safe_push (tree, gc, types[i], type);
+ vec_safe_push (types[i], type);
if (INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (type))
type = type_promotes_to (type);
}
if (! vec_member (type, types[i]))
- VEC_safe_push (tree, gc, types[i], type);
+ vec_safe_push (types[i], type);
}
}
else
{
if (code == COND_EXPR && real_lvalue_p (args[i]))
- VEC_safe_push (tree, gc, types[i],
- build_reference_type (argtypes[i]));
+ vec_safe_push (types[i], build_reference_type (argtypes[i]));
type = non_reference (argtypes[i]);
if (i != 0 || ! ref1)
{
type = cv_unqualified (type_decays_to (type));
if (enum_p && UNSCOPED_ENUM_P (type))
- VEC_safe_push (tree, gc, types[i], type);
+ vec_safe_push (types[i], type);
if (INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (type))
type = type_promotes_to (type);
}
- VEC_safe_push (tree, gc, types[i], type);
+ vec_safe_push (types[i], type);
}
}
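The push conversions above all follow one rule: VEC_safe_push (tree, gc, v, x) becomes vec_safe_push (v, x), with the element type and allocation strategy now carried by the vector's own template parameters. A hedged sketch:

/* Sketch: vec_safe_push takes the vector pointer by reference, so a
   reallocation that moves the storage also updates the caller's pointer.  */
vec<tree, va_gc> *types = NULL;
vec_safe_push (types, void_type_node);
vec_safe_push (types, char_type_node);
/* TYPES now points at GC storage holding two elements.  */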
/* Run through the possible parameter types of both arguments,
creating candidates with those parameter types. */
- FOR_EACH_VEC_ELT_REVERSE (tree, types[0], ix, t)
+ FOR_EACH_VEC_ELT_REVERSE (*(types[0]), ix, t)
{
unsigned jx;
tree u;
- if (!VEC_empty (tree, types[1]))
- FOR_EACH_VEC_ELT_REVERSE (tree, types[1], jx, u)
+ if (!types[1]->is_empty ())
+ FOR_EACH_VEC_ELT_REVERSE (*(types[1]), jx, u)
add_builtin_candidate
(candidates, code, code2, fnname, t,
u, args, argtypes, flags, complain);
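FOR_EACH_VEC_ELT_REVERSE likewise drops the element-type tag and takes the vector object itself, hence the *(types[0]) dereference above. A sketch, assuming V is non-NULL and was populated elsewhere:

/* Sketch: walk a vector back to front.  */
vec<tree, va_gc> *v /* = ... */;
unsigned ix;
tree t;
FOR_EACH_VEC_ELT_REVERSE (*v, ix, t)
  debug_tree (t);  /* Visits the last element first.  */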
static struct z_candidate*
add_template_candidate_real (struct z_candidate **candidates, tree tmpl,
tree ctype, tree explicit_targs, tree first_arg,
- const VEC(tree,gc) *arglist, tree return_type,
+ const vec<tree, va_gc> *arglist, tree return_type,
tree access_path, tree conversion_path,
int flags, tree obj, unification_kind_t strict,
tsubst_flags_t complain)
{
int ntparms = DECL_NTPARMS (tmpl);
tree targs = make_tree_vec (ntparms);
- unsigned int len = VEC_length (tree, arglist);
+ unsigned int len = vec_safe_length (arglist);
unsigned int nargs = (first_arg == NULL_TREE ? 0 : 1) + len;
unsigned int skip_without_in_chrg = 0;
tree first_arg_without_in_chrg = first_arg;
++ia;
}
for (ix = skip_without_in_chrg;
- VEC_iterate (tree, arglist, ix, arg);
+ vec_safe_iterate (arglist, ix, &arg);
++ix)
{
args_without_in_chrg[ia] = arg;
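vec_safe_iterate now returns the element through a pointer out-parameter, which is why the call above gains the &arg; like the rest of the *_safe_* family it treats a NULL vector as empty. Sketch (process is a hypothetical consumer):

/* Sketch: the new iteration idiom for possibly-NULL vectors.  */
unsigned ix;
tree arg;
for (ix = 0; vec_safe_iterate (arglist, ix, &arg); ++ix)
  process (arg);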
static struct z_candidate *
add_template_candidate (struct z_candidate **candidates, tree tmpl, tree ctype,
tree explicit_targs, tree first_arg,
- const VEC(tree,gc) *arglist, tree return_type,
+ const vec<tree, va_gc> *arglist, tree return_type,
tree access_path, tree conversion_path, int flags,
unification_kind_t strict, tsubst_flags_t complain)
{
static struct z_candidate *
add_template_conv_candidate (struct z_candidate **candidates, tree tmpl,
tree obj, tree first_arg,
- const VEC(tree,gc) *arglist,
+ const vec<tree, va_gc> *arglist,
tree return_type, tree access_path,
tree conversion_path, tsubst_flags_t complain)
{
non-list constructor.
Parameters are as for add_candidates, except that the arguments are in
- the form of a CONSTRUCTOR (the initializer list) rather than a VEC, and
+ the form of a CONSTRUCTOR (the initializer list) rather than a vector, and
the RETURN_TYPE parameter is replaced by TOTYPE, the desired type. */
static void
struct z_candidate **candidates,
tsubst_flags_t complain)
{
- VEC(tree,gc) *args;
+ vec<tree, va_gc> *args;
gcc_assert (*candidates == NULL);
tree conv_fns = NULL_TREE;
conversion *conv = NULL;
tree first_arg = NULL_TREE;
- VEC(tree,gc) *args = NULL;
+ vec<tree, va_gc> *args = NULL;
bool any_viable_p;
int convflags;
/* Do any initial processing on the arguments to a function call. */
-static VEC(tree,gc) *
-resolve_args (VEC(tree,gc) *args, tsubst_flags_t complain)
+static vec<tree, va_gc> *
+resolve_args (vec<tree, va_gc> *args, tsubst_flags_t complain)
{
unsigned int ix;
tree arg;
- FOR_EACH_VEC_ELT (tree, args, ix, arg)
+ FOR_EACH_VEC_SAFE_ELT (args, ix, arg)
{
if (error_operand_p (arg))
return NULL;
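FOR_EACH_VEC_ELT becomes FOR_EACH_VEC_SAFE_ELT when the vector pointer may be NULL; the SAFE variant is built on vec_safe_iterate rather than the member call. Sketch:

/* Sketch: NULL-tolerant element walk; a no-op when ARGS == NULL.  */
unsigned int ix;
tree arg;
FOR_EACH_VEC_SAFE_ELT (args, ix, arg)
  if (error_operand_p (arg))
    return NULL;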
static struct z_candidate *
perform_overload_resolution (tree fn,
- const VEC(tree,gc) *args,
+ const vec<tree, va_gc> *args,
struct z_candidate **candidates,
bool *any_viable_p, tsubst_flags_t complain)
{
functions. */
static void
-print_error_for_call_failure (tree fn, VEC(tree,gc) *args, bool any_viable_p,
+print_error_for_call_failure (tree fn, vec<tree, va_gc> *args,
+ bool any_viable_p,
struct z_candidate *candidates)
{
tree name = DECL_NAME (OVL_CURRENT (fn));
ARGS. */
tree
-build_new_function_call (tree fn, VEC(tree,gc) **args, bool koenig_p,
+build_new_function_call (tree fn, vec<tree, va_gc> **args, bool koenig_p,
tsubst_flags_t complain)
{
struct z_candidate *candidates, *cand;
function called. */
tree
-build_operator_new_call (tree fnname, VEC(tree,gc) **args,
+build_operator_new_call (tree fnname, vec<tree, va_gc> **args,
tree *size, tree *cookie_size, tree size_check,
tree *fn, tsubst_flags_t complain)
{
if (size_check != NULL_TREE)
*size = fold_build3 (COND_EXPR, sizetype, size_check,
original_size, TYPE_MAX_VALUE (sizetype));
- VEC_safe_insert (tree, gc, *args, 0, *size);
+ vec_safe_insert (*args, 0, *size);
*args = resolve_args (*args, complain);
if (*args == NULL)
return error_mark_node;
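vec_safe_insert shifts the existing elements up to make room at the given index, allocating if the vector is still NULL. A small sketch:

/* Sketch: prepend an element.  */
vec<tree, va_gc> *v = NULL;
vec_safe_push (v, size_one_node);
vec_safe_insert (v, 0, size_zero_node);  /* V now holds {0, 1}.  */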
/* In G++ 3.2, the check was implemented incorrectly; it
looked at the placement expression, rather than the
type of the function. */
- if (VEC_length (tree, *args) == 2
- && same_type_p (TREE_TYPE (VEC_index (tree, *args, 1)),
- ptr_type_node))
+ if ((*args)->length () == 2
+ && same_type_p (TREE_TYPE ((**args)[1]), ptr_type_node))
use_cookie = false;
}
else
*size = fold_build3 (COND_EXPR, sizetype, size_check,
*size, TYPE_MAX_VALUE (sizetype));
/* Update the argument list to reflect the adjusted size. */
- VEC_replace (tree, *args, 0, *size);
+ (**args)[0] = *size;
}
else
*cookie_size = NULL_TREE;
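Reads and writes both go through operator[] now: VEC_index (tree, *args, 1) became (**args)[1] above, and VEC_replace (tree, *args, 0, *size) became the plain assignment (**args)[0] = *size. The double dereference is only because ARGS is itself a pointer to the vector pointer; on a single-level pointer the idiom is:

/* Sketch: element access on a vec<tree, va_gc> *.  */
tree first = (*v)[0];  /* Read, replaces VEC_index.  */
(*v)[0] = NULL_TREE;   /* Write, replaces VEC_replace.  */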
/* Build a new call to operator(). This may change ARGS. */
static tree
-build_op_call_1 (tree obj, VEC(tree,gc) **args, tsubst_flags_t complain)
+build_op_call_1 (tree obj, vec<tree, va_gc> **args, tsubst_flags_t complain)
{
struct z_candidate *candidates = 0, *cand;
tree fns, convs, first_mem_arg = NULL_TREE;
/* Wrapper for above. */
tree
-build_op_call (tree obj, VEC(tree,gc) **args, tsubst_flags_t complain)
+build_op_call (tree obj, vec<tree, va_gc> **args, tsubst_flags_t complain)
{
tree ret;
bool subtime = timevar_cond_start (TV_OVERLOAD);
add_function_candidate. */
static void
-add_candidates (tree fns, tree first_arg, const VEC(tree,gc) *args,
+add_candidates (tree fns, tree first_arg, const vec<tree, va_gc> *args,
tree return_type,
tree explicit_targs, bool template_only,
tree conversion_path, tree access_path,
tsubst_flags_t complain)
{
tree ctype;
- const VEC(tree,gc) *non_static_args;
+ const vec<tree, va_gc> *non_static_args;
bool check_list_ctor;
bool check_converting;
unification_kind_t strict;
for (; fns; fns = OVL_NEXT (fns))
{
tree fn_first_arg;
- const VEC(tree,gc) *fn_args;
+ const vec<tree, va_gc> *fn_args;
fn = OVL_CURRENT (fns);
{
unsigned int ix;
tree arg;
- VEC(tree,gc) *tempvec
- = VEC_alloc (tree, gc, VEC_length (tree, args) - 1);
- for (ix = 1; VEC_iterate (tree, args, ix, arg); ++ix)
- VEC_quick_push (tree, tempvec, arg);
+ vec<tree, va_gc> *tempvec;
+ vec_alloc (tempvec, args->length () - 1);
+ for (ix = 1; args->iterate (ix, &arg); ++ix)
+ tempvec->quick_push (arg);
non_static_args = tempvec;
- first_arg = build_this (VEC_index (tree, args, 0));
+ first_arg = build_this ((*args)[0]);
}
fn_first_arg = first_arg;
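vec_alloc reserves capacity up front, which is what makes the quick_push calls above safe: quick_* operations skip the reallocation check and assume space already exists. Sketch:

/* Sketch: preallocate, then push without per-element checks.  */
vec<tree, va_gc> *tempvec;
vec_alloc (tempvec, 2);                   /* Room for two elements.  */
tempvec->quick_push (integer_zero_node);  /* Cannot overflow the...  */
tempvec->quick_push (integer_one_node);   /* ...reserved capacity here.  */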
tree arg2, tree arg3, tree *overload, tsubst_flags_t complain)
{
struct z_candidate *candidates = 0, *cand;
- VEC(tree,gc) *arglist;
+ vec<tree, va_gc> *arglist;
tree fnname;
tree args[3];
tree result = NULL_TREE;
if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
arg2 = integer_zero_node;
- arglist = VEC_alloc (tree, gc, 3);
- VEC_quick_push (tree, arglist, arg1);
+ vec_alloc (arglist, 3);
+ arglist->quick_push (arg1);
if (arg2 != NULL_TREE)
- VEC_quick_push (tree, arglist, arg2);
+ arglist->quick_push (arg2);
if (arg3 != NULL_TREE)
- VEC_quick_push (tree, arglist, arg3);
+ arglist->quick_push (arg3);
/* Get the high-water mark for the CONVERSION_OBSTACK. */
p = conversion_obstack_alloc (0);
else
{
tree ret;
- VEC(tree,gc) *args = VEC_alloc (tree, gc, 2);
- VEC_quick_push (tree, args, addr);
+ vec<tree, va_gc> *args;
+ vec_alloc (args, 2);
+ args->quick_push (addr);
if (FUNCTION_ARG_CHAIN (fn) != void_list_node)
- VEC_quick_push (tree, args, size);
+ args->quick_push (size);
ret = cp_build_function_call_vec (fn, &args, complain);
- VEC_free (tree, gc, args);
+ vec_free (args);
return ret;
}
}
diagnostic_t *diagnostic_kind, tsubst_flags_t complain)
{
int savew, savee;
- VEC(tree,gc) *args;
+ vec<tree, va_gc> *args;
savew = warningcount, savee = errorcount;
args = make_tree_vector_single (expr);
tree new_ctor = build_constructor (init_list_type_node, NULL);
unsigned len = CONSTRUCTOR_NELTS (expr);
tree array, val, field;
- VEC(constructor_elt,gc) *vec = NULL;
+ vec<constructor_elt, va_gc> *vec = NULL;
unsigned ix;
/* Convert all the elements. */
zero-based argument number. Do any required conversions. Return
the converted value. */
-static GTY(()) VEC(tree,gc) *default_arg_context;
+static GTY(()) vec<tree, va_gc> *default_arg_context;
void
push_defarg_context (tree fn)
-{ VEC_safe_push (tree, gc, default_arg_context, fn); }
+{ vec_safe_push (default_arg_context, fn); }
+
void
pop_defarg_context (void)
-{ VEC_pop (tree, default_arg_context); }
+{ default_arg_context->pop (); }
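The pair above shows the stack idiom on a GC-rooted vector: the GTY(()) root keeps every pushed tree alive, vec_safe_push grows the stack, and the member pop () shrinks it. A sketch with a hypothetical root variable:

/* Sketch: a GC-rooted stack of trees.  */
static GTY(()) vec<tree, va_gc> *ctx_stack;

static void
push_ctx (tree fn)
{
  vec_safe_push (ctx_stack, fn);
}

static tree
pop_ctx (void)
{
  return ctx_stack->pop ();  /* Assumes a matching push happened.  */
}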
tree
convert_default_arg (tree type, tree arg, tree fn, int parmnum,
fn = DECL_ORIGIN (fn);
/* Detect recursion. */
- FOR_EACH_VEC_ELT (tree, default_arg_context, i, t)
+ FOR_EACH_VEC_SAFE_ELT (default_arg_context, i, t)
if (t == fn)
{
if (complain & tf_error)
build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain)
{
tree fn = cand->fn;
- const VEC(tree,gc) *args = cand->args;
+ const vec<tree, va_gc> *args = cand->args;
tree first_arg = cand->first_arg;
conversion **convs = cand->convs;
conversion *conv;
unsigned int nargs;
return_type = TREE_TYPE (TREE_TYPE (fn));
- nargs = VEC_length (tree, args);
+ nargs = vec_safe_length (args);
if (first_arg == NULL_TREE)
- argarray = VEC_address (tree, CONST_CAST (VEC(tree,gc) *, args));
+ argarray = args->address ();
else
{
tree *alcarray;
++nargs;
alcarray = XALLOCAVEC (tree, nargs);
alcarray[0] = first_arg;
- FOR_EACH_VEC_ELT (tree, args, ix, arg)
+ FOR_EACH_VEC_SAFE_ELT (args, ix, arg)
alcarray[ix + 1] = arg;
argarray = alcarray;
}
/* Find maximum size of vector to hold converted arguments. */
parmlen = list_length (parm);
- nargs = VEC_length (tree, args) + (first_arg != NULL_TREE ? 1 : 0);
+ nargs = vec_safe_length (args) + (first_arg != NULL_TREE ? 1 : 0);
if (parmlen > nargs)
nargs = parmlen;
argarray = XALLOCAVEC (tree, nargs);
}
else
{
- argarray[j++] = VEC_index (tree, args, arg_index);
+ argarray[j++] = (*args)[arg_index];
++arg_index;
}
parm = TREE_CHAIN (parm);
if (DECL_HAS_VTT_PARM_P (fn))
{
- argarray[j++] = VEC_index (tree, args, arg_index);
+ argarray[j++] = (*args)[arg_index];
++arg_index;
parm = TREE_CHAIN (parm);
}
tree parmtype = TREE_VALUE (parm);
tree arg = (first_arg != NULL_TREE
? first_arg
- : VEC_index (tree, args, arg_index));
+ : (*args)[arg_index]);
tree argtype = TREE_TYPE (arg);
tree converted_arg;
tree base_binfo;
}
gcc_assert (first_arg == NULL_TREE);
- for (; arg_index < VEC_length (tree, args) && parm;
+ for (; arg_index < vec_safe_length (args) && parm;
parm = TREE_CHAIN (parm), ++arg_index, ++i)
{
tree type = TREE_VALUE (parm);
- tree arg = VEC_index (tree, args, arg_index);
+ tree arg = (*args)[arg_index];
bool conversion_warning = true;
conv = convs[i];
}
/* Ellipsis */
- for (; arg_index < VEC_length (tree, args); ++arg_index)
+ for (; arg_index < vec_safe_length (args); ++arg_index)
{
- tree a = VEC_index (tree, args, arg_index);
+ tree a = (*args)[arg_index];
if (magic_varargs_p (fn))
/* Do no conversions for magic varargs. */
a = mark_type_use (a);
store the newly constructed object into a VAR_DECL. */
tree
-build_special_member_call (tree instance, tree name, VEC(tree,gc) **args,
+build_special_member_call (tree instance, tree name, vec<tree, va_gc> **args,
tree binfo, int flags, tsubst_flags_t complain)
{
tree fns;
/* The type of the subobject to be constructed or destroyed. */
tree class_type;
- VEC(tree,gc) *allocated = NULL;
+ vec<tree, va_gc> *allocated = NULL;
tree ret;
gcc_assert (name == complete_ctor_identifier
if (name == complete_dtor_identifier
|| name == base_dtor_identifier
|| name == deleting_dtor_identifier)
- gcc_assert (args == NULL || VEC_empty (tree, *args));
+ gcc_assert (args == NULL || vec_safe_is_empty (*args));
/* Convert to the base class, if necessary. */
if (!same_type_ignoring_top_level_qualifiers_p
args = &allocated;
}
- VEC_safe_insert (tree, gc, *args, 0, sub_vtt);
+ vec_safe_insert (*args, 0, sub_vtt);
}
ret = build_new_method_call (instance, fns, args,
This may change ARGS. */
static tree
-build_new_method_call_1 (tree instance, tree fns, VEC(tree,gc) **args,
+build_new_method_call_1 (tree instance, tree fns, vec<tree, va_gc> **args,
tree conversion_path, int flags,
tree *fn_p, tsubst_flags_t complain)
{
tree instance_ptr;
tree name;
bool skip_first_for_error;
- VEC(tree,gc) *user_args;
+ vec<tree, va_gc> *user_args;
tree call;
tree fn;
int template_only = 0;
bool any_viable_p;
tree orig_instance;
tree orig_fns;
- VEC(tree,gc) *orig_args = NULL;
+ vec<tree, va_gc> *orig_args = NULL;
void *p;
gcc_assert (instance != NULL_TREE);
/* If CONSTRUCTOR_IS_DIRECT_INIT is set, this was a T{ } form
initializer, not T({ }). */
- if (DECL_CONSTRUCTOR_P (fn) && args != NULL && !VEC_empty (tree, *args)
- && BRACE_ENCLOSED_INITIALIZER_P (VEC_index (tree, *args, 0))
- && CONSTRUCTOR_IS_DIRECT_INIT (VEC_index (tree, *args, 0)))
+ if (DECL_CONSTRUCTOR_P (fn) && args != NULL && !vec_safe_is_empty (*args)
+ && BRACE_ENCLOSED_INITIALIZER_P ((**args)[0])
+ && CONSTRUCTOR_IS_DIRECT_INIT ((**args)[0]))
{
- tree init_list = VEC_index (tree, *args, 0);
+ tree init_list = (**args)[0];
tree init = NULL_TREE;
- gcc_assert (VEC_length (tree, *args) == 1
+ gcc_assert ((*args)->length () == 1
&& !(flags & LOOKUP_ONLYCONVERTING));
/* If the initializer list has no elements and T is a class type with
/* Wrapper for above. */
tree
-build_new_method_call (tree instance, tree fns, VEC(tree,gc) **args,
+build_new_method_call (tree instance, tree fns, vec<tree, va_gc> **args,
tree conversion_path, int flags,
tree *fn_p, tsubst_flags_t complain)
{
ill-formed. */
if (CLASS_TYPE_P (type))
{
- VEC(tree,gc) *args = make_tree_vector_single (expr);
+ vec<tree, va_gc> *args = make_tree_vector_single (expr);
expr = build_special_member_call (NULL_TREE, complete_ctor_identifier,
&args, type, LOOKUP_NORMAL, complain);
release_tree_vector (args);
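The make_tree_vector_single / release_tree_vector pairing above is the idiomatic way to pass a single argument: the vector comes from a cache of reusable GC vectors rather than a fresh allocation, so releasing it promptly matters. A sketch of the pattern, with EXPR, TYPE and COMPLAIN as in the surrounding code:

/* Sketch: single-argument call via a borrowed vector.  */
vec<tree, va_gc> *args = make_tree_vector_single (expr);
tree result = build_special_member_call (NULL_TREE, complete_ctor_identifier,
                                         &args, type, LOOKUP_NORMAL, complain);
release_tree_vector (args);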
code to initialize the new variable is returned through INITP. */
static tree
-set_up_extended_ref_temp (tree decl, tree expr, VEC(tree,gc) **cleanups,
+set_up_extended_ref_temp (tree decl, tree expr, vec<tree, va_gc> **cleanups,
tree *initp)
{
tree init;
{
tree cleanup = cxx_maybe_build_cleanup (var, tf_warning_or_error);
if (cleanup)
- VEC_safe_push (tree, gc, *cleanups, cleanup);
+ vec_safe_push (*cleanups, cleanup);
}
/* We must be careful to destroy the temporary only
which is bound either to a reference or a std::initializer_list. */
static tree
-extend_ref_init_temps_1 (tree decl, tree init, VEC(tree,gc) **cleanups)
+extend_ref_init_temps_1 (tree decl, tree init, vec<tree, va_gc> **cleanups)
{
tree sub = init;
tree *p;
lifetime to match that of DECL. */
tree
-extend_ref_init_temps (tree decl, tree init, VEC(tree,gc) **cleanups)
+extend_ref_init_temps (tree decl, tree init, vec<tree, va_gc> **cleanups)
{
tree type = TREE_TYPE (init);
if (processing_template_decl)
{
unsigned i;
constructor_elt *p;
- VEC(constructor_elt,gc) *elts = CONSTRUCTOR_ELTS (init);
- FOR_EACH_VEC_ELT (constructor_elt, elts, i, p)
+ vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (init);
+ FOR_EACH_VEC_SAFE_ELT (elts, i, p)
p->value = extend_ref_init_temps (decl, p->value, cleanups);
}
tree rtti_binfo;
/* The negative-index vtable initializers built up so far. These
are in order from least negative index to most negative index. */
- VEC(constructor_elt,gc) *inits;
+ vec<constructor_elt, va_gc> *inits;
/* The binfo for the virtual base for which we're building
vcall offset initializers. */
tree vbase;
/* The functions in vbase for which we have already provided vcall
offsets. */
- VEC(tree,gc) *fns;
+ vec<tree, va_gc> *fns;
/* The vtable index of the next vcall or vbase offset. */
tree index;
/* Nonzero if we are building the initializer for the primary
/* An array of all local classes present in this translation unit, in
declaration order. */
-VEC(tree,gc) *local_classes;
+vec<tree, va_gc> *local_classes;
static tree get_vfield_name (tree);
static void finish_struct_anon (tree);
static tree build_simple_base_path (tree expr, tree binfo);
static tree build_vtbl_ref_1 (tree, tree);
static void build_vtbl_initializer (tree, tree, tree, tree, int *,
- VEC(constructor_elt,gc) **);
+ vec<constructor_elt, va_gc> **);
static int count_fields (tree);
static int add_fields_to_record_type (tree, struct sorted_fields_type*, int);
static void insert_into_classtype_sorted_fields (tree, tree, int);
static void dump_vtt (tree, tree);
static void dump_thunk (FILE *, int, tree);
static tree build_vtable (tree, tree, tree);
-static void initialize_vtable (tree, VEC(constructor_elt,gc) *);
+static void initialize_vtable (tree, vec<constructor_elt, va_gc> *);
static void layout_nonempty_base_or_field (record_layout_info,
tree, tree, splay_tree);
static tree end_of_class (tree, int);
static bool layout_empty_base (record_layout_info, tree, tree, splay_tree);
static void accumulate_vtbl_inits (tree, tree, tree, tree, tree,
- VEC(constructor_elt,gc) **);
+ vec<constructor_elt, va_gc> **);
static void dfs_accumulate_vtbl_inits (tree, tree, tree, tree, tree,
- VEC(constructor_elt,gc) **);
+ vec<constructor_elt, va_gc> **);
static void build_rtti_vtbl_entries (tree, vtbl_init_data *);
static void build_vcall_and_vbase_vtbl_entries (tree, vtbl_init_data *);
static void clone_constructors_and_destructors (tree);
static void build_ctor_vtbl_group (tree, tree);
static void build_vtt (tree);
static tree binfo_ctor_vtable (tree);
-static void build_vtt_inits (tree, tree, VEC(constructor_elt,gc) **, tree *);
+static void build_vtt_inits (tree, tree, vec<constructor_elt, va_gc> **,
+ tree *);
static tree dfs_build_secondary_vptr_vtt_inits (tree, void *);
static tree dfs_fixup_binfo_vtbls (tree, void *);
static int record_subobject_offset (tree, tree, splay_tree);
tree overload;
bool template_conv_p = false;
bool conv_p;
- VEC(tree,gc) *method_vec;
+ vec<tree, va_gc> *method_vec;
bool complete_p;
bool insert_p = false;
tree current_fns;
allocate at least two (for constructors and destructors), and
we're going to end up with an assignment operator at some
point as well. */
- method_vec = VEC_alloc (tree, gc, 8);
+ vec_alloc (method_vec, 8);
/* Create slots for constructors and destructors. */
- VEC_quick_push (tree, method_vec, NULL_TREE);
- VEC_quick_push (tree, method_vec, NULL_TREE);
+ method_vec->quick_push (NULL_TREE);
+ method_vec->quick_push (NULL_TREE);
CLASSTYPE_METHOD_VEC (type) = method_vec;
}
insert_p = true;
/* See if we already have an entry with this name. */
for (slot = CLASSTYPE_FIRST_CONVERSION_SLOT;
- VEC_iterate (tree, method_vec, slot, m);
+ vec_safe_iterate (method_vec, slot, &m);
++slot)
{
m = OVL_CURRENT (m);
break;
}
}
- current_fns = insert_p ? NULL_TREE : VEC_index (tree, method_vec, slot);
+ current_fns = insert_p ? NULL_TREE : (*method_vec)[slot];
/* Check to see if we've already got this method. */
for (fns = current_fns; fns; fns = OVL_NEXT (fns))
/* We only expect to add a few methods in the COMPLETE_P case, so
just make room for one more method in that case. */
if (complete_p)
- reallocated = VEC_reserve_exact (tree, gc, method_vec, 1);
+ reallocated = vec_safe_reserve_exact (method_vec, 1);
else
- reallocated = VEC_reserve (tree, gc, method_vec, 1);
+ reallocated = vec_safe_reserve (method_vec, 1);
if (reallocated)
CLASSTYPE_METHOD_VEC (type) = method_vec;
- if (slot == VEC_length (tree, method_vec))
- VEC_quick_push (tree, method_vec, overload);
+ if (slot == method_vec->length ())
+ method_vec->quick_push (overload);
else
- VEC_quick_insert (tree, method_vec, slot, overload);
+ method_vec->quick_insert (slot, overload);
}
else
/* Replace the current slot. */
- VEC_replace (tree, method_vec, slot, overload);
+ (*method_vec)[slot] = overload;
return true;
}
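Because reserving can reallocate, vec_safe_reserve and vec_safe_reserve_exact return whether the storage moved; the code above uses that to refresh the pointer cached in CLASSTYPE_METHOD_VEC before the quick_push / quick_insert, which assume capacity is already present. Sketch:

/* Sketch: reserve, refresh any cached copy, then insert cheaply.  */
bool moved = vec_safe_reserve (method_vec, 1);
if (moved)
  CLASSTYPE_METHOD_VEC (type) = method_vec;  /* Storage was reallocated.  */
method_vec->quick_insert (slot, overload);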
gt_pointer_operator new_value,
void* cookie)
{
- VEC(tree,gc) *method_vec = (VEC(tree,gc) *) obj;
- int len = VEC_length (tree, method_vec);
+ vec<tree, va_gc> *method_vec = (vec<tree, va_gc> *) obj;
+ int len = vec_safe_length (method_vec);
size_t slot;
tree fn;
/* The type conversion ops have to live at the front of the vec, so we
can't sort them. */
for (slot = CLASSTYPE_FIRST_CONVERSION_SLOT;
- VEC_iterate (tree, method_vec, slot, fn);
+ vec_safe_iterate (method_vec, slot, &fn);
++slot)
if (!DECL_CONV_FN_P (OVL_CURRENT (fn)))
break;
{
resort_data.new_value = new_value;
resort_data.cookie = cookie;
- qsort (VEC_address (tree, method_vec) + slot, len - slot, sizeof (tree),
+ qsort (method_vec->address () + slot, len - slot, sizeof (tree),
resort_method_name_cmp);
}
}
finish_struct_methods (tree t)
{
tree fn_fields;
- VEC(tree,gc) *method_vec;
+ vec<tree, va_gc> *method_vec;
int slot, len;
method_vec = CLASSTYPE_METHOD_VEC (t);
if (!method_vec)
return;
- len = VEC_length (tree, method_vec);
+ len = method_vec->length ();
/* Clear DECL_IN_AGGR_P for all functions. */
for (fn_fields = TYPE_METHODS (t); fn_fields;
/* The type conversion ops have to live at the front of the vec, so we
can't sort them. */
for (slot = CLASSTYPE_FIRST_CONVERSION_SLOT;
- VEC_iterate (tree, method_vec, slot, fn_fields);
+ method_vec->iterate (slot, &fn_fields);
++slot)
if (!DECL_CONV_FN_P (OVL_CURRENT (fn_fields)))
break;
if (len - slot > 1)
- qsort (VEC_address (tree, method_vec) + slot,
+ qsort (method_vec->address () + slot,
len-slot, sizeof (tree), method_name_cmp);
}
/* The candidate overriders. */
tree candidates;
/* Path to most derived. */
- VEC(tree,heap) *path;
+ vec<tree> path;
} find_final_overrider_data;
/* Add the overrider along the current path to FFOD->CANDIDATES.
{
depth--;
if (dfs_find_final_overrider_1
- (VEC_index (tree, ffod->path, depth), ffod, depth))
+ (ffod->path[depth], ffod, depth))
return true;
}
find_final_overrider_data *ffod = (find_final_overrider_data *) data;
if (binfo == ffod->declaring_base)
- dfs_find_final_overrider_1 (binfo, ffod, VEC_length (tree, ffod->path));
- VEC_safe_push (tree, heap, ffod->path, binfo);
+ dfs_find_final_overrider_1 (binfo, ffod, ffod->path.length ());
+ ffod->path.safe_push (binfo);
return NULL_TREE;
}
dfs_find_final_overrider_post (tree /*binfo*/, void *data)
{
find_final_overrider_data *ffod = (find_final_overrider_data *) data;
- VEC_pop (tree, ffod->path);
+ ffod->path.pop ();
return NULL_TREE;
}
ffod.fn = fn;
ffod.declaring_base = binfo;
ffod.candidates = NULL_TREE;
- ffod.path = VEC_alloc (tree, heap, 30);
+ ffod.path.create (30);
dfs_walk_all (derived, dfs_find_final_overrider_pre,
dfs_find_final_overrider_post, &ffod);
- VEC_free (tree, heap, ffod.path);
+ ffod.path.release ();
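find_final_overrider_data now holds its scratch vector by value: for these heap-allocated vec<tree> objects, create replaces VEC_alloc, safe_push / pop manage the contents, and release replaces VEC_free. Sketch:

/* Sketch: an automatic heap vector of trees.  */
vec<tree> path;
path.create (30);            /* Reserve 30 slots up front.  */
path.safe_push (NULL_TREE);  /* Grows beyond the reservation as needed.  */
tree top = path.pop ();
path.release ();             /* Frees the storage; exists () is false after.  */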
/* If there was no winner, issue an error message. */
if (!ffod.candidates || TREE_CHAIN (ffod.candidates))
static tree
get_vcall_index (tree fn, tree type)
{
- VEC(tree_pair_s,gc) *indices = CLASSTYPE_VCALL_INDICES (type);
+ vec<tree_pair_s, va_gc> *indices = CLASSTYPE_VCALL_INDICES (type);
tree_pair_p p;
unsigned ix;
- FOR_EACH_VEC_ELT (tree_pair_s, indices, ix, p)
+ FOR_EACH_VEC_SAFE_ELT (indices, ix, p)
if ((DECL_DESTRUCTOR_P (fn) && DECL_DESTRUCTOR_P (p->purpose))
|| same_signature_p (fn, p->purpose))
return p->value;
/* Find virtual functions in T with the indicated NAME. */
i = lookup_fnfields_1 (t, name);
if (i != -1)
- for (methods = VEC_index (tree, CLASSTYPE_METHOD_VEC (t), i);
+ for (methods = (*CLASSTYPE_METHOD_VEC (t))[i];
methods;
methods = OVL_NEXT (methods))
{
static void
warn_hidden (tree t)
{
- VEC(tree,gc) *method_vec = CLASSTYPE_METHOD_VEC (t);
+ vec<tree, va_gc> *method_vec = CLASSTYPE_METHOD_VEC (t);
tree fns;
size_t i;
/* We go through each separately named virtual function. */
for (i = CLASSTYPE_FIRST_CONVERSION_SLOT;
- VEC_iterate (tree, method_vec, i, fns);
+ vec_safe_iterate (method_vec, i, &fns);
++i)
{
tree fn;
if (abi_version_at_least (2) && CLASSTYPE_VBASECLASSES (type))
{
unsigned ix;
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
/* Iterate through the virtual base classes of TYPE. In G++
3.2, we included virtual bases in the direct base class
working with the most derived type. */
if (vbases_p)
for (vbases = CLASSTYPE_VBASECLASSES (type), ix = 0;
- VEC_iterate (tree, vbases, ix, binfo); ix++)
+ vec_safe_iterate (vbases, ix, &binfo); ix++)
{
r = walk_subobject_offsets (binfo,
f,
{
TYPE_POLYMORPHIC_P (t) = 1;
if (DECL_PURE_VIRTUAL_P (x))
- VEC_safe_push (tree, gc, CLASSTYPE_PURE_VIRTUALS (t), x);
+ vec_safe_push (CLASSTYPE_PURE_VIRTUALS (t), x);
}
/* All user-provided destructors are non-trivial.
Constructors and assignment ops are handled in
end_of_class (tree t, int include_virtuals_p)
{
tree result = size_zero_node;
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
tree binfo;
tree base_binfo;
tree offset;
/* G++ 3.2 did not check indirect virtual bases. */
if (abi_version_at_least (2) && include_virtuals_p)
for (vbases = CLASSTYPE_VBASECLASSES (t), i = 0;
- VEC_iterate (tree, vbases, i, base_binfo); i++)
+ vec_safe_iterate (vbases, i, &base_binfo); i++)
{
offset = end_of_base (base_binfo);
if (INT_CST_LT_UNSIGNED (result, offset))
warn_about_ambiguous_bases (tree t)
{
int i;
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
tree basetype;
tree binfo;
tree base_binfo;
/* Check for ambiguous virtual bases. */
if (extra_warnings)
for (vbases = CLASSTYPE_VBASECLASSES (t), i = 0;
- VEC_iterate (tree, vbases, i, binfo); i++)
+ vec_safe_iterate (vbases, i, &binfo); i++)
{
basetype = BINFO_TYPE (binfo);
CLASSTYPE_PURE_VIRTUALS (t) = NULL;
for (x = TYPE_METHODS (t); x; x = DECL_CHAIN (x))
if (DECL_PURE_VIRTUAL_P (x))
- VEC_safe_push (tree, gc, CLASSTYPE_PURE_VIRTUALS (t), x);
+ vec_safe_push (CLASSTYPE_PURE_VIRTUALS (t), x);
complete_vars (t);
/* We need to add the target functions to the CLASSTYPE_METHOD_VEC if
an enclosing scope is a template class, so that this function be
current_class_stack_size = 10;
current_class_stack
= XNEWVEC (struct class_stack_node, current_class_stack_size);
- local_classes = VEC_alloc (tree, gc, 8);
+ vec_alloc (local_classes, 8);
sizeof_biggest_empty_class = size_zero_node;
ridpointers[(int) RID_PUBLIC] = access_public_node;
int
current_lang_depth (void)
{
- return VEC_length (tree, current_lang_base);
+ return vec_safe_length (current_lang_base);
}
/* Set global variables CURRENT_LANG_NAME to appropriate value
void
push_lang_context (tree name)
{
- VEC_safe_push (tree, gc, current_lang_base, current_lang_name);
+ vec_safe_push (current_lang_base, current_lang_name);
if (name == lang_name_cplusplus)
{
void
pop_lang_context (void)
{
- current_lang_name = VEC_pop (tree, current_lang_base);
+ current_lang_name = current_lang_base->pop ();
}
\f
/* Type instantiation routines. */
finish_vtbls (tree t)
{
tree vbase;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
tree vtable = BINFO_VTABLE (TYPE_BINFO (t));
/* We lay out the primary and secondary vtables in one contiguous
/* Initialize the vtable for BINFO with the INITS. */
static void
-initialize_vtable (tree binfo, VEC(constructor_elt,gc) *inits)
+initialize_vtable (tree binfo, vec<constructor_elt, va_gc> *inits)
{
tree decl;
- layout_vtable_decl (binfo, VEC_length (constructor_elt, inits));
+ layout_vtable_decl (binfo, vec_safe_length (inits));
decl = get_vtbl_decl_for_binfo (binfo);
initialize_artificial_var (decl, inits);
dump_vtable (BINFO_TYPE (binfo), binfo, decl);
tree type;
tree vtt;
tree index;
- VEC(constructor_elt,gc) *inits;
+ vec<constructor_elt, va_gc> *inits;
/* Build up the initializers for the VTT. */
inits = NULL;
/* Figure out the type of the VTT. */
type = build_array_of_n_type (const_ptr_type_node,
- VEC_length (constructor_elt, inits));
+ inits->length ());
/* Now, build the VTT object itself. */
vtt = build_vtable (t, mangle_vtt_for_type (t), type);
tree index;
/* Vector of initializers built up. */
- VEC(constructor_elt,gc) *inits;
+ vec<constructor_elt, va_gc> *inits;
/* The type being constructed by this secondary VTT. */
tree type_being_constructed;
vtables for the BINFO-in-T variant. */
static void
-build_vtt_inits (tree binfo, tree t, VEC(constructor_elt,gc) **inits, tree *index)
+build_vtt_inits (tree binfo, tree t, vec<constructor_elt, va_gc> **inits,
+ tree *index)
{
int i;
tree b;
tree vtbl;
tree id;
tree vbase;
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
/* See if we've already created this construction vtable group. */
id = mangle_ctor_vtbl_for_type (t, binfo);
}
/* Figure out the type of the construction vtable. */
- type = build_array_of_n_type (vtable_entry_type,
- VEC_length (constructor_elt, v));
+ type = build_array_of_n_type (vtable_entry_type, v->length ());
layout_type (type);
TREE_TYPE (vtbl) = type;
DECL_SIZE (vtbl) = DECL_SIZE_UNIT (vtbl) = NULL_TREE;
tree rtti_binfo,
tree vtbl,
tree t,
- VEC(constructor_elt,gc) **inits)
+ vec<constructor_elt, va_gc> **inits)
{
int i;
tree base_binfo;
tree rtti_binfo,
tree orig_vtbl,
tree t,
- VEC(constructor_elt,gc) **l)
+ vec<constructor_elt, va_gc> **l)
{
tree vtbl = NULL_TREE;
int ctor_vtbl_p = !SAME_BINFO_TYPE_P (BINFO_TYPE (rtti_binfo), t);
else if (!BINFO_NEW_VTABLE_MARKED (orig_binfo))
return;
- n_inits = VEC_length (constructor_elt, *l);
+ n_inits = vec_safe_length (*l);
if (!vtbl)
{
BINFO_VTABLE (binfo) = tree_cons (rtti_binfo, vtbl, BINFO_VTABLE (binfo));
else if (BINFO_PRIMARY_P (binfo) && BINFO_VIRTUAL_P (binfo))
/* Throw away any unneeded initializers. */
- VEC_truncate (constructor_elt, *l, n_inits);
+ (*l)->truncate (n_inits);
else
/* For an ordinary vtable, set BINFO_VTABLE. */
BINFO_VTABLE (binfo) = vtbl;
tree t,
tree rtti_binfo,
int* non_fn_entries_p,
- VEC(constructor_elt,gc) **inits)
+ vec<constructor_elt, va_gc> **inits)
{
tree v;
vtbl_init_data vid;
unsigned ix, jx;
tree vbinfo;
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
constructor_elt *e;
/* Initialize VID. */
/* Create an array for keeping track of the functions we've
processed. When we see multiple functions with the same
signature, we share the vcall offsets. */
- vid.fns = VEC_alloc (tree, gc, 32);
+ vec_alloc (vid.fns, 32);
/* Add the vcall and vbase offset entries. */
build_vcall_and_vbase_vtbl_entries (binfo, &vid);
/* Clear BINFO_VTABLE_PATH_MARKED; it's set by
build_vbase_offset_vtbl_entries. */
for (vbases = CLASSTYPE_VBASECLASSES (t), ix = 0;
- VEC_iterate (tree, vbases, ix, vbinfo); ix++)
+ vec_safe_iterate (vbases, ix, &vbinfo); ix++)
BINFO_VTABLE_PATH_MARKED (vbinfo) = 0;
/* If the target requires padding between data entries, add that now. */
if (TARGET_VTABLE_DATA_ENTRY_DISTANCE > 1)
{
- int n_entries = VEC_length (constructor_elt, vid.inits);
+ int n_entries = vec_safe_length (vid.inits);
- VEC_safe_grow (constructor_elt, gc, vid.inits,
- TARGET_VTABLE_DATA_ENTRY_DISTANCE * n_entries);
+ vec_safe_grow (vid.inits, TARGET_VTABLE_DATA_ENTRY_DISTANCE * n_entries);
/* Move data entries into their new positions and add padding
after the new positions. Iterate backwards so we don't
overwrite entries that we would need to process later. */
for (ix = n_entries - 1;
- VEC_iterate (constructor_elt, vid.inits, ix, e);
+ vid.inits->iterate (ix, &e);
ix--)
{
int j;
int new_position = (TARGET_VTABLE_DATA_ENTRY_DISTANCE * ix
+ (TARGET_VTABLE_DATA_ENTRY_DISTANCE - 1));
- VEC_replace (constructor_elt, vid.inits, new_position, *e);
+ (*vid.inits)[new_position] = *e;
for (j = 1; j < TARGET_VTABLE_DATA_ENTRY_DISTANCE; ++j)
{
- constructor_elt *f = &VEC_index (constructor_elt, vid.inits,
- new_position - j);
+ constructor_elt *f = &(*vid.inits)[new_position - j];
f->index = NULL_TREE;
f->value = build1 (NOP_EXPR, vtable_entry_type,
null_pointer_node);
}
if (non_fn_entries_p)
- *non_fn_entries_p = VEC_length (constructor_elt, vid.inits);
+ *non_fn_entries_p = vec_safe_length (vid.inits);
/* The initializers for virtual functions were built up in reverse
order. Straighten them out and add them to the running list in one
step. */
- jx = VEC_length (constructor_elt, *inits);
- VEC_safe_grow (constructor_elt, gc, *inits,
- (jx + VEC_length (constructor_elt, vid.inits)));
+ jx = vec_safe_length (*inits);
+ vec_safe_grow (*inits, jx + vid.inits->length ());
- for (ix = VEC_length (constructor_elt, vid.inits) - 1;
- VEC_iterate (constructor_elt, vid.inits, ix, e);
+ for (ix = vid.inits->length () - 1;
+ vid.inits->iterate (ix, &e);
ix--, jx++)
- VEC_replace (constructor_elt, *inits, jx, *e);
+ (**inits)[jx] = *e;
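vec_safe_grow extends *inits to the requested length without initializing the new slots, and the reversed loop above then fills them; note that the loop exploits unsigned wraparound: once ix passes zero it wraps to UINT_MAX and iterate () returns false. A sketch of the same straightening idiom, with DST and SRC as hypothetical stand-ins for INITS and vid.inits:

/* Sketch: append SRC to *DST in reverse order.  */
unsigned jx = vec_safe_length (*dst);
vec_safe_grow (*dst, jx + src->length ());
unsigned ix;
constructor_elt *e;
for (ix = src->length () - 1; src->iterate (ix, &e); ix--, jx++)
  (**dst)[jx] = *e;  /* The last element of SRC lands first.  */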
/* Go through all the ordinary virtual functions, building up
initializers. */
signature as FN, then we do not need a second vcall offset.
Check the list of functions already present in the derived
class vtable. */
- FOR_EACH_VEC_ELT (tree, vid->fns, i, derived_entry)
+ FOR_EACH_VEC_SAFE_ELT (vid->fns, i, derived_entry)
{
if (same_signature_p (derived_entry, orig_fn)
/* We only use one vcall offset for virtual destructors,
if (vid->binfo == TYPE_BINFO (vid->derived))
{
tree_pair_s elt = {orig_fn, vid->index};
- VEC_safe_push (tree_pair_s, gc, CLASSTYPE_VCALL_INDICES (vid->derived),
- elt);
+ vec_safe_push (CLASSTYPE_VCALL_INDICES (vid->derived), elt);
}
/* The next vcall offset will be found at a more negative
ssize_int (TARGET_VTABLE_DATA_ENTRY_DISTANCE));
/* Keep track of this function. */
- VEC_safe_push (tree, gc, vid->fns, orig_fn);
+ vec_safe_push (vid->fns, orig_fn);
if (vid->generate_vcall_entries)
{
struct cp_genericize_data
{
struct pointer_set_t *p_set;
- VEC (tree, heap) *bind_expr_stack;
+ vec<tree> bind_expr_stack;
struct cp_genericize_omp_taskreg *omp_ctx;
};
: OMP_CLAUSE_DEFAULT_PRIVATE);
}
}
- VEC_safe_push (tree, heap, wtd->bind_expr_stack, stmt);
+ wtd->bind_expr_stack.safe_push (stmt);
cp_walk_tree (&BIND_EXPR_BODY (stmt),
cp_genericize_r, data, NULL);
- VEC_pop (tree, wtd->bind_expr_stack);
+ wtd->bind_expr_stack.pop ();
}
else if (TREE_CODE (stmt) == USING_STMT)
/* Get the innermost enclosing GIMPLE_BIND that has a non-NULL
BLOCK, and append an IMPORTED_DECL to its
BLOCK_VARS chained list. */
- if (wtd->bind_expr_stack)
+ if (wtd->bind_expr_stack.exists ())
{
int i;
- for (i = VEC_length (tree, wtd->bind_expr_stack) - 1; i >= 0; i--)
- if ((block = BIND_EXPR_BLOCK (VEC_index (tree,
- wtd->bind_expr_stack, i))))
+ for (i = wtd->bind_expr_stack.length () - 1; i >= 0; i--)
+ if ((block = BIND_EXPR_BLOCK (wtd->bind_expr_stack[i])))
break;
}
if (block)
struct cp_genericize_data wtd;
wtd.p_set = pointer_set_create ();
- wtd.bind_expr_stack = NULL;
+ wtd.bind_expr_stack.create (0);
wtd.omp_ctx = NULL;
cp_walk_tree (t_p, cp_genericize_r, &wtd, NULL);
pointer_set_destroy (wtd.p_set);
- VEC_free (tree, heap, wtd.bind_expr_stack);
+ wtd.bind_expr_stack.release ();
}
void
struct GTY (()) tree_default_arg {
struct tree_common common;
struct cp_token_cache *tokens;
- VEC(tree,gc) *instantiations;
+ vec<tree, va_gc> *instantiations;
};
tree return_type;
tree extra_scope;
tree closure;
- VEC(tree,gc)* pending_proxies;
+ vec<tree, va_gc> *pending_proxies;
location_t locus;
enum cp_lambda_default_capture_mode_type default_capture_mode;
int discriminator;
location_t locus;
};
typedef struct qualified_typedef_usage_s qualified_typedef_usage_t;
-DEF_VEC_O (qualified_typedef_usage_t);
-DEF_VEC_ALLOC_O (qualified_typedef_usage_t,gc);
/* Non-zero if this template specialization has access violations that
should be rechecked when the function is instantiated outside argument
struct GTY(()) tree_template_info {
struct tree_common common;
- VEC(qualified_typedef_usage_t,gc) *typedefs_needing_access_checking;
+ vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};
enum cp_tree_node_structure_enum {
/* Global state. */
struct GTY(()) saved_scope {
- VEC(cxx_saved_binding,gc) *old_bindings;
+ vec<cxx_saved_binding, va_gc> *old_bindings;
tree old_namespace;
- VEC(tree,gc) *decl_ns_list;
+ vec<tree, va_gc> *decl_ns_list;
tree class_name;
tree class_type;
tree access_specifier;
tree function_decl;
- VEC(tree,gc) *lang_base;
+ vec<tree, va_gc> *lang_base;
tree lang_name;
tree template_parms;
cp_binding_level *x_previous_class_level;
htab_t GTY((param_is(struct named_label_entry))) x_named_labels;
cp_binding_level *bindings;
- VEC(tree,gc) *x_local_names;
+ vec<tree, va_gc> *x_local_names;
htab_t GTY((param_is (struct cxx_int_tree_map))) extern_decl_map;
};
tree value;
} tree_pair_s;
typedef tree_pair_s *tree_pair_p;
-DEF_VEC_O (tree_pair_s);
-DEF_VEC_ALLOC_O (tree_pair_s,gc);
/* This is a few header flags for 'struct lang_type'. Actually,
all but the first are used only for lang_type_class; they
unsigned dummy : 2;
tree primary_base;
- VEC(tree_pair_s,gc) *vcall_indices;
+ vec<tree_pair_s, va_gc> *vcall_indices;
tree vtables;
tree typeinfo_var;
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
binding_table nested_udts;
tree as_base;
- VEC(tree,gc) *pure_virtuals;
+ vec<tree, va_gc> *pure_virtuals;
tree friend_classes;
- VEC(tree,gc) * GTY((reorder ("resort_type_method_vec"))) methods;
+ vec<tree, va_gc> * GTY((reorder ("resort_type_method_vec"))) methods;
tree key_method;
tree decl_list;
tree template_info;
/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
are the constructors that take an in-charge parameter. */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
- (VEC_index (tree, CLASSTYPE_METHOD_VEC (NODE), CLASSTYPE_CONSTRUCTOR_SLOT))
+ ((*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_CONSTRUCTOR_SLOT])
/* A FUNCTION_DECL for the destructor for NODE. These are the
destructors that take an in-charge parameter. If
until the destructor is created with lazily_declare_fn. */
#define CLASSTYPE_DESTRUCTORS(NODE) \
(CLASSTYPE_METHOD_VEC (NODE) \
- ? VEC_index (tree, CLASSTYPE_METHOD_VEC (NODE), CLASSTYPE_DESTRUCTOR_SLOT) \
+ ? (*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_DESTRUCTOR_SLOT] \
: NULL_TREE)
/* A dictionary of the nested user-defined-types (class-types, or enums)
#define TYPE_JAVA_INTERFACE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->java_interface)
-/* A VEC(tree) of virtual functions which cannot be inherited by
+/* A vec<tree> of virtual functions which cannot be inherited by
derived classes. When deriving from this type, the derived
class must provide its own definition for each of these functions. */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
/* Used by various search routines. */
#define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_0 (NODE)
\f
-/* A VEC(tree_pair_s) of the vcall indices associated with the class
+/* A vec<tree_pair_s> of the vcall indices associated with the class
NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual
function. The VALUE is the index into the virtual table where the
vcall offset for that function is stored, when NODE is a virtual
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
- && VEC_empty (constructor_elt, \
- CONSTRUCTOR_ELTS (NODE)) \
+ && vec_safe_is_empty (CONSTRUCTOR_ELTS (NODE)) \
&& !TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is an init-list used as a direct-initializer, i.e.
/* An array of all local classes present in this translation unit, in
declaration order. */
-extern GTY(()) VEC(tree,gc) *local_classes;
+extern GTY(()) vec<tree, va_gc> *local_classes;
\f
/* Here's where we control how name mangling takes place. */
extern tree type_decays_to (tree);
extern tree build_user_type_conversion (tree, tree, int,
tsubst_flags_t);
-extern tree build_new_function_call (tree, VEC(tree,gc) **, bool,
+extern tree build_new_function_call (tree, vec<tree, va_gc> **, bool,
tsubst_flags_t);
-extern tree build_operator_new_call (tree, VEC(tree,gc) **, tree *,
+extern tree build_operator_new_call (tree, vec<tree, va_gc> **, tree *,
tree *, tree, tree *,
tsubst_flags_t);
-extern tree build_new_method_call (tree, tree, VEC(tree,gc) **,
+extern tree build_new_method_call (tree, tree, vec<tree, va_gc> **,
tree, int, tree *,
tsubst_flags_t);
-extern tree build_special_member_call (tree, tree, VEC(tree,gc) **,
+extern tree build_special_member_call (tree, tree, vec<tree, va_gc> **,
tree, int, tsubst_flags_t);
extern tree build_new_op (location_t, enum tree_code,
int, tree, tree, tree, tree *,
tsubst_flags_t);
-extern tree build_op_call (tree, VEC(tree,gc) **,
+extern tree build_op_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_op_delete_call (enum tree_code, tree, tree,
bool, tree, tree,
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, int,
tsubst_flags_t);
-extern tree extend_ref_init_temps (tree, tree, VEC(tree,gc)**);
+extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern tree strip_top_quals (tree);
extern bool reference_related_p (tree, tree);
extern void warn_extern_redeclared_static (tree, tree);
extern tree cxx_comdat_group (tree);
extern bool cp_missing_noreturn_ok_p (tree);
-extern void initialize_artificial_var (tree, VEC(constructor_elt,gc) *);
+extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *);
extern tree check_var_type (tree, tree);
extern tree reshape_init (tree, tree, tsubst_flags_t);
extern tree next_initializable_field (tree);
extern bool defer_mark_used_calls;
-extern GTY(()) VEC(tree, gc) *deferred_mark_used_calls;
+extern GTY(()) vec<tree, va_gc> *deferred_mark_used_calls;
extern tree finish_case_label (location_t, tree, tree);
extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t);
extern void constrain_class_visibility (tree);
extern void import_export_decl (tree);
extern tree build_cleanup (tree);
-extern tree build_offset_ref_call_from_tree (tree, VEC(tree,gc) **,
+extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool decl_constant_var_p (tree);
extern bool decl_maybe_constant_var_p (tree);
extern tree build_value_init (tree, tsubst_flags_t);
extern tree build_value_init_noctor (tree, tsubst_flags_t);
extern tree build_offset_ref (tree, tree, bool);
-extern tree build_new (VEC(tree,gc) **, tree, tree,
- VEC(tree,gc) **, int,
+extern tree build_new (vec<tree, va_gc> **, tree, tree,
+ vec<tree, va_gc> **, int,
tsubst_flags_t);
extern tree get_temp_regvar (tree, tree);
extern tree build_vec_init (tree, tree, tree, bool, int,
extern bool check_for_bare_parameter_packs (tree);
extern tree build_template_info (tree, tree);
extern tree get_template_info (const_tree);
-extern VEC(qualified_typedef_usage_t,gc)* get_types_needing_access_check (tree);
+extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree);
extern int template_class_depth (tree);
extern int is_specialization_of (tree, tree);
extern bool is_specialization_of_friend (tree, tree);
extern bool dependent_template_p (tree);
extern bool dependent_template_id_p (tree, tree);
extern bool type_dependent_expression_p (tree);
-extern bool any_type_dependent_arguments_p (const VEC(tree,gc) *);
+extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *);
extern bool any_type_dependent_elements_p (const_tree);
extern bool type_dependent_expression_p_push (tree);
extern bool value_dependent_expression_p (tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
-extern void make_args_non_dependent (VEC(tree,gc) *);
+extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
extern tree fold_non_dependent_expr (tree);
extern tree fold_non_dependent_expr_sfinae (tree, tsubst_flags_t);
/* in rtti.c */
/* A vector of all tinfo decls that haven't been emitted yet. */
-extern GTY(()) VEC(tree,gc) *unemitted_tinfo_decls;
+extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls;
extern void init_rtti_processing (void);
extern tree build_typeid (tree);
/* The location of this access. */
location_t loc;
} deferred_access_check;
-DEF_VEC_O(deferred_access_check);
-DEF_VEC_ALLOC_O(deferred_access_check,gc);
/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
-extern VEC (deferred_access_check,gc)* get_deferred_access_checks (void);
+extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void pop_to_parent_deferring_access_checks (void);
-extern bool perform_access_checks (VEC (deferred_access_check,gc)*,
+extern bool perform_access_checks (vec<deferred_access_check, va_gc> *,
tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree,
extern bool is_sub_constant_expr (tree);
extern bool reduced_constant_expression_p (tree);
extern void explain_invalid_constexpr_fn (tree);
-extern VEC(tree,heap)* cx_error_context (void);
+extern vec<tree> cx_error_context (void);
enum {
BCS_NO_SCOPE = 1,
extern tree finish_stmt_expr (tree, bool);
extern tree stmt_expr_value_expr (tree);
bool empty_expr_stmt_p (tree);
-extern tree perform_koenig_lookup (tree, VEC(tree,gc) *, bool,
+extern tree perform_koenig_lookup (tree, vec<tree, va_gc> *, bool,
tsubst_flags_t);
-extern tree finish_call_expr (tree, VEC(tree,gc) **, bool,
+extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool,
bool, tsubst_flags_t);
extern tree finish_increment_expr (tree, enum tree_code);
extern tree finish_this_expr (void);
extern tree build_min_nt_loc (location_t, enum tree_code,
...);
extern tree build_min_non_dep (enum tree_code, tree, ...);
-extern tree build_min_non_dep_call_vec (tree, tree, VEC(tree,gc) *);
+extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *);
extern tree build_cplus_new (tree, tree, tsubst_flags_t);
extern tree build_aggr_init_expr (tree, tree, tsubst_flags_t);
extern tree get_target_expr (tree);
extern tree cp_build_function_call (tree, tree, tsubst_flags_t);
extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...)
ATTRIBUTE_SENTINEL;
-extern tree cp_build_function_call_vec (tree, VEC(tree,gc) **,
+extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_x_binary_op (location_t,
enum tree_code, tree,
tsubst_flags_t);
extern tree build_x_compound_expr_from_list (tree, expr_list_kind,
tsubst_flags_t);
-extern tree build_x_compound_expr_from_vec (VEC(tree,gc) *, const char *,
- tsubst_flags_t);
+extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *,
+ const char *, tsubst_flags_t);
extern tree build_x_compound_expr (location_t, tree, tree,
tsubst_flags_t);
extern tree build_compound_expr (location_t, tree, tree);
extern int abstract_virtuals_error (tree, tree);
extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t);
-extern tree store_init_value (tree, tree, VEC(tree,gc)**, int);
+extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
extern void check_narrowing (tree, tree);
extern tree digest_init (tree, tree, tsubst_flags_t);
extern tree digest_init_flags (tree, tree, int);
tree type = TREE_TYPE (expr);
if (MAYBE_CLASS_TYPE_P (type) && TREE_CODE (expr) != TARGET_EXPR)
{
- VEC(tree,gc) *args = make_tree_vector_single (expr);
+ vec<tree, va_gc> *args = make_tree_vector_single (expr);
expr = build_special_member_call (NULL_TREE, complete_ctor_identifier,
&args, type, LOOKUP_NORMAL, complain);
release_tree_vector (args);
ctor = build_user_type_conversion (type, ctor, flags, complain);
else
{
- VEC(tree,gc) *ctor_vec = make_tree_vector_single (ctor);
+ vec<tree, va_gc> *ctor_vec = make_tree_vector_single (ctor);
ctor = build_special_member_call (NULL_TREE,
complete_ctor_identifier,
&ctor_vec,
static int walk_namespaces_r (tree, walk_namespaces_fn, void *);
static void maybe_deduce_size_from_array_init (tree, tree);
static void layout_var_decl (tree);
-static tree check_initializer (tree, tree, int, VEC(tree,gc) **);
+static tree check_initializer (tree, tree, int, vec<tree, va_gc> **);
static void make_rtl_for_nonlocal_decl (tree, tree, const char *);
static void save_function_data (tree);
static void copy_type_enum (tree , tree);
tree names_in_scope;
/* A vector of all decls from all binding levels that would be
crossed by a backward branch to the label. */
- VEC(tree,gc) *bad_decls;
+ vec<tree, va_gc> *bad_decls;
/* A list of uses of the label, before the label is defined. */
struct named_label_use_entry *uses;
/* To avoid unwanted recursion, finish_function defers all mark_used calls
encountered during its execution until it finishes. */
bool defer_mark_used_calls;
-VEC(tree, gc) *deferred_mark_used_calls;
+vec<tree, va_gc> *deferred_mark_used_calls;
/* States indicating how grokdeclarator() should handle declspecs marked
with __attribute__((deprecated)). An object declared as
tree incomplete_type;
} incomplete_var;
-DEF_VEC_O(incomplete_var);
-DEF_VEC_ALLOC_O(incomplete_var,gc);
-static GTY(()) VEC(incomplete_var,gc) *incomplete_vars;
+static GTY(()) vec<incomplete_var, va_gc> *incomplete_vars;
\f
/* Returns the kind of template specialization we are currently
processing, given that its declaration contained N_CLASS_SCOPES
? DECL_CHAIN (decl)
: TREE_CHAIN (decl)))
if (decl_jump_unsafe (decl))
- VEC_safe_push (tree, gc, ent->bad_decls, decl);
+ vec_safe_push (ent->bad_decls, decl);
ent->binding_level = obl;
ent->names_in_scope = obl->names;
functionbody = 0;
subblocks = functionbody >= 0 ? current_binding_level->blocks : 0;
- gcc_assert (!VEC_length(cp_class_binding,
- current_binding_level->class_shadowed));
+ gcc_assert (!vec_safe_length (current_binding_level->class_shadowed));
/* We used to use KEEP == 2 to indicate that the new block should go
at the beginning of the list of blocks at this binding level,
/* Add it to the list of dead variables in the next
outermost binding so that we can remove these when we
leave that binding. */
- VEC_safe_push (tree, gc,
- current_binding_level->level_chain->dead_vars_from_for,
- link);
+ vec_safe_push (current_binding_level->level_chain->dead_vars_from_for,
+ link);
/* Although we don't pop the cxx_binding, we do clear
its SCOPE since the scope is going away now. */
/* Remove declarations for any `for' variables from inner scopes
that we kept around. */
- FOR_EACH_VEC_ELT_REVERSE (tree, current_binding_level->dead_vars_from_for,
- ix, decl)
+ FOR_EACH_VEC_SAFE_ELT_REVERSE (current_binding_level->dead_vars_from_for,
+ ix, decl)
pop_binding (DECL_NAME (decl), decl);
/* Restore the IDENTIFIER_TYPE_VALUEs. */
SET_IDENTIFIER_TYPE_VALUE (TREE_PURPOSE (link), TREE_VALUE (link));
/* Restore the IDENTIFIER_LABEL_VALUEs for local labels. */
- FOR_EACH_VEC_ELT_REVERSE (cp_label_binding,
- current_binding_level->shadowed_labels,
- ix, label_bind)
+ FOR_EACH_VEC_SAFE_ELT_REVERSE (current_binding_level->shadowed_labels,
+ ix, label_bind)
pop_local_label (label_bind->label, label_bind->prev_value);
/* There may be OVERLOADs (wrapped in TREE_LISTs) on the BLOCK_VARs
wrapup_globals_for_namespace (tree name_space, void* data)
{
cp_binding_level *level = NAMESPACE_LEVEL (name_space);
- VEC(tree,gc) *statics = level->static_decls;
- tree *vec = VEC_address (tree, statics);
- int len = VEC_length (tree, statics);
+ vec<tree, va_gc> *statics = level->static_decls;
+ tree *vec = statics->address ();
+ int len = statics->length ();
int last_time = (data != 0);
if (last_time)
name = DECL_NAME (decl);
- nelts = VEC_length (tree, local_names);
+ nelts = vec_safe_length (local_names);
for (i = 0; i < nelts; i++)
{
- t = VEC_index (tree, local_names, i);
+ t = (*local_names)[i];
if (DECL_NAME (t) == name)
{
if (!DECL_LANG_SPECIFIC (decl))
else
DECL_DISCRIMINATOR (decl) = 1;
- VEC_replace (tree, local_names, i, decl);
+ (*local_names)[i] = decl;
timevar_stop (TV_NAME_LOOKUP);
return;
}
}
- VEC_safe_push (tree, gc, local_names, decl);
+ vec_safe_push (local_names, decl);
timevar_stop (TV_NAME_LOOKUP);
}
\f
decl = make_label_decl (id, /*local_p=*/1);
bind.label = decl;
- VEC_safe_push (cp_label_binding, gc, current_binding_level->shadowed_labels,
- bind);
+ vec_safe_push (current_binding_level->shadowed_labels, bind);
return decl;
}
}
if (ent->in_try_scope || ent->in_catch_scope
- || ent->in_omp_scope || !VEC_empty (tree, ent->bad_decls))
+ || ent->in_omp_scope || !vec_safe_is_empty (ent->bad_decls))
{
permerror (input_location, "jump to label %q+D", decl);
permerror (input_location, " from here");
identified = true;
}
- FOR_EACH_VEC_ELT (tree, ent->bad_decls, ix, bad)
+ FOR_EACH_VEC_SAFE_ELT (ent->bad_decls, ix, bad)
{
int u = decl_jump_unsafe (bad);
initializer. */
if (initializer && TREE_CODE (initializer) == CONSTRUCTOR)
{
- VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (initializer);
+ vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (initializer);
constructor_elt *ce;
HOST_WIDE_INT i;
- FOR_EACH_VEC_ELT (constructor_elt, v, i, ce)
+ FOR_EACH_VEC_SAFE_ELT (v, i, ce)
if (!check_array_designated_initializer (ce, i))
failure = 1;
}
}
else if (first_initializer_p && d->cur != d->end)
{
- VEC(constructor_elt, gc) *v = 0;
+ vec<constructor_elt, va_gc> *v = 0;
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, d->cur->value);
if (has_designator_problem (d, complain))
element (as allowed by [dcl.init.string]). */
if (!first_initializer_p
&& TREE_CODE (str_init) == CONSTRUCTOR
- && VEC_length (constructor_elt, CONSTRUCTOR_ELTS (str_init)) == 1)
+ && vec_safe_length (CONSTRUCTOR_ELTS (str_init)) == 1)
{
- str_init = VEC_index (constructor_elt,
- CONSTRUCTOR_ELTS (str_init), 0).value;
+ str_init = (*CONSTRUCTOR_ELTS (str_init))[0].value;
}
/* If it's a string literal, then it's the initializer for the array
struct S { int a; int b; };
struct S a[] = { 1, 2, 3, 4 };
- Here INIT will hold a VEC of four elements, rather than a
- VEC of two elements, each itself a VEC of two elements. This
+ Here INIT will hold a vector of four elements, rather than a
+ vector of two elements, each itself a vector of two elements. This
routine transforms INIT from the former form into the latter. The
revised CONSTRUCTOR node is returned. */
tree
reshape_init (tree type, tree init, tsubst_flags_t complain)
{
- VEC(constructor_elt, gc) *v;
+ vec<constructor_elt, va_gc> *v;
reshape_iter d;
tree new_init;
/* An empty constructor does not need reshaping, and it is always a valid
initializer. */
- if (VEC_empty (constructor_elt, v))
+ if (vec_safe_is_empty (v))
return init;
/* Recurse on this CONSTRUCTOR. */
- d.cur = &VEC_index (constructor_elt, v, 0);
- d.end = d.cur + VEC_length (constructor_elt, v);
+ d.cur = &(*v)[0];
+ d.end = d.cur + v->length ();
new_init = reshape_init_r (type, &d, true, complain);
if (new_init == error_mark_node)
evaluated dynamically to initialize DECL. */
static tree
-check_initializer (tree decl, tree init, int flags, VEC(tree,gc) **cleanups)
+check_initializer (tree decl, tree init, int flags, vec<tree, va_gc> **cleanups)
{
tree type = TREE_TYPE (decl);
tree init_code = NULL;
if (init && BRACE_ENCLOSED_INITIALIZER_P (init))
{
- int init_len = VEC_length (constructor_elt, CONSTRUCTOR_ELTS (init));
+ int init_len = vec_safe_length (CONSTRUCTOR_ELTS (init));
if (SCALAR_TYPE_P (type))
{
if (init_len == 0)
back end. */
void
-initialize_artificial_var (tree decl, VEC(constructor_elt,gc) *v)
+initialize_artificial_var (tree decl, vec<constructor_elt, va_gc> *v)
{
tree init;
gcc_assert (DECL_ARTIFICIAL (decl));
else if (TREE_CODE (init) == CONSTRUCTOR)
/* A brace-enclosed initializer, e.g.: int i = { 3 }; ? */
{
- VEC(constructor_elt, gc) *elts;
+ vec<constructor_elt, va_gc> *elts;
size_t nelts;
size_t i;
elts = CONSTRUCTOR_ELTS (init);
- nelts = VEC_length (constructor_elt, elts);
+ nelts = vec_safe_length (elts);
for (i = 0; i < nelts; ++i)
- if (type_dependent_init_p (VEC_index (constructor_elt,
- elts, i).value))
+ if (type_dependent_init_p ((*elts)[i].value))
return true;
}
else
else if (TREE_CODE (init) == CONSTRUCTOR)
/* A brace-enclosed initializer, e.g.: int i = { 3 }; ? */
{
- VEC(constructor_elt, gc) *elts;
+ vec<constructor_elt, va_gc> *elts;
size_t nelts;
size_t i;
elts = CONSTRUCTOR_ELTS (init);
- nelts = VEC_length (constructor_elt, elts);
+ nelts = vec_safe_length (elts);
for (i = 0; i < nelts; ++i)
- if (value_dependent_init_p (VEC_index (constructor_elt,
- elts, i).value))
+ if (value_dependent_init_p ((*elts)[i].value))
return true;
}
else
tree asmspec_tree, int flags)
{
tree type;
- VEC(tree,gc) *cleanups = NULL;
+ vec<tree, va_gc> *cleanups = NULL;
const char *asmspec = NULL;
int was_readonly = 0;
bool var_definition_p = false;
if (cleanups)
{
unsigned i; tree t;
- FOR_EACH_VEC_ELT (tree, cleanups, i, t)
+ FOR_EACH_VEC_ELT (*cleanups, i, t)
push_cleanup (decl, t, false);
release_tree_vector (cleanups);
}
/* Find the destructor. */
idx = lookup_fnfields_1 (type, complete_dtor_identifier);
gcc_assert (idx >= 0);
- cleanup = VEC_index (tree, CLASSTYPE_METHOD_VEC (type), idx);
+ cleanup = (*CLASSTYPE_METHOD_VEC (type))[idx];
/* Make sure it is accessible. */
perform_or_defer_access_check (TYPE_BINFO (type), cleanup, cleanup,
tf_warning_or_error);
we should just call reshape_init here? */
if (char_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (*ptype)))
&& TREE_CODE (initial_value) == CONSTRUCTOR
- && !VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (initial_value)))
+ && !vec_safe_is_empty (CONSTRUCTOR_ELTS (initial_value)))
{
- VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (initial_value);
- tree value = VEC_index (constructor_elt, v, 0).value;
+ vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (initial_value);
+ tree value = (*v)[0].value;
if (TREE_CODE (value) == STRING_CST
- && VEC_length (constructor_elt, v) == 1)
+ && v->length () == 1)
initial_value = value;
}
if (TREE_TYPE (*basep))
max_vbases++;
if (CLASS_TYPE_P (basetype))
- max_vbases += VEC_length (tree, CLASSTYPE_VBASECLASSES (basetype));
+ max_vbases += vec_safe_length (CLASSTYPE_VBASECLASSES (basetype));
basep = &TREE_CHAIN (*basep);
}
}
if (max_bases)
{
- BINFO_BASE_ACCESSES (binfo) = VEC_alloc (tree, gc, max_bases);
+ vec_alloc (BINFO_BASE_ACCESSES (binfo), max_bases);
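/* Allocation sketch (hypothetical V): VEC_alloc returned the new
   vector, whereas vec_alloc assigns through its first argument:

     vec<tree, va_gc> *v;
     vec_alloc (v, 32);        // was: v = VEC_alloc (tree, gc, 32);  */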
/* An aggregate cannot have baseclasses. */
CLASSTYPE_NON_AGGREGATE (ref) = 1;
if (max_vbases)
{
- CLASSTYPE_VBASECLASSES (ref) = VEC_alloc (tree, gc, max_vbases);
+ vec_alloc (CLASSTYPE_VBASECLASSES (ref), max_vbases);
if (TYPE_FOR_JAVA (ref))
{
BINFO_BASE_ACCESS_APPEND (binfo, access);
}
- if (VEC_length (tree, CLASSTYPE_VBASECLASSES (ref)) < max_vbases)
+ if (vec_safe_length (CLASSTYPE_VBASECLASSES (ref)) < max_vbases)
/* If we didn't get max_vbases vbases, we must have shared at
least one of them, and are therefore diamond shaped. */
CLASSTYPE_DIAMOND_SHAPED_P (ref) = 1;
unsigned int i;
tree decl;
- FOR_EACH_VEC_ELT (tree, deferred_mark_used_calls, i, decl)
+ FOR_EACH_VEC_SAFE_ELT (deferred_mark_used_calls, i, decl)
mark_used (decl);
- VEC_free (tree, gc, deferred_mark_used_calls);
+ vec_free (deferred_mark_used_calls);
}
return fndecl;
&& TYPE_BEING_DEFINED (inner_type)))
{
incomplete_var iv = {var, inner_type};
- VEC_safe_push (incomplete_var, gc, incomplete_vars, iv);
+ vec_safe_push (incomplete_vars, iv);
}
}
}
unsigned ix;
incomplete_var *iv;
- for (ix = 0; VEC_iterate (incomplete_var, incomplete_vars, ix, iv); )
+ for (ix = 0; vec_safe_iterate (incomplete_vars, ix, &iv); )
{
if (same_type_p (type, iv->incomplete_type))
{
complete_type (type);
cp_apply_type_quals_to_decl (cp_type_quals (type), var);
/* Remove this entry from the list. */
- VEC_unordered_remove (incomplete_var, incomplete_vars, ix);
+ incomplete_vars->unordered_remove (ix);
}
else
ix++;
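/* Removal-while-iterating sketch (hypothetical discard_p):
   vec_safe_iterate stores the current element through its last
   argument and tolerates a NULL vector; unordered_remove fills the
   hole with the last element in O(1), so IX only advances when
   nothing was removed:

     for (ix = 0; vec_safe_iterate (v, ix, &elt); )
       if (discard_p (elt))
         v->unordered_remove (ix);   // does not preserve order
       else
         ix++;  */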
/* A list of static class variables. This is needed, because a
static class variable can be declared inside the class without
an initializer, and then initialized, statically, outside the class. */
-static GTY(()) VEC(tree,gc) *pending_statics;
+static GTY(()) vec<tree, va_gc> *pending_statics;
/* A list of functions which were declared inline, but which we
may need to emit outline anyway. */
-static GTY(()) VEC(tree,gc) *deferred_fns;
+static GTY(()) vec<tree, va_gc> *deferred_fns;
/* A list of decls that use types with no linkage, which we need to make
sure are defined. */
-static GTY(()) VEC(tree,gc) *no_linkage_decls;
+static GTY(()) vec<tree, va_gc> *no_linkage_decls;
/* Nonzero if we're done parsing and into end-of-file activities. */
ix = class_method_index_for_fn (complete_type (ctype), function);
if (ix >= 0)
{
- VEC(tree,gc) *methods = CLASSTYPE_METHOD_VEC (ctype);
+ vec<tree, va_gc> *methods = CLASSTYPE_METHOD_VEC (ctype);
tree fndecls, fndecl = 0;
bool is_conv_op;
const char *format = NULL;
- for (fndecls = VEC_index (tree, methods, ix);
+ for (fndecls = (*methods)[ix];
fndecls; fndecls = OVL_NEXT (fndecls))
{
tree p1, p2;
if (is_conv_op)
ix = CLASSTYPE_FIRST_CONVERSION_SLOT;
- fndecls = VEC_index (tree, methods, ix);
+ fndecls = (*methods)[ix];
while (fndecls)
{
fndecl = OVL_CURRENT (fndecls);
if (!fndecls && is_conv_op)
{
- if (VEC_length (tree, methods) > (size_t) ++ix)
+ if (methods->length () > (size_t) ++ix)
{
- fndecls = VEC_index (tree, methods, ix);
+ fndecls = (*methods)[ix];
if (!DECL_CONV_FN_P (OVL_CURRENT (fndecls)))
{
fndecls = NULL_TREE;
note_vague_linkage_fn (tree decl)
{
DECL_DEFER_OUTPUT (decl) = 1;
- VEC_safe_push (tree, gc, deferred_fns, decl);
+ vec_safe_push (deferred_fns, decl);
}
/* We have just processed the DECL, which is a static data member.
the right thing, namely, to put this decl out straight away. */
if (! processing_template_decl)
- VEC_safe_push (tree, gc, pending_statics, decl);
+ vec_safe_push (pending_statics, decl);
if (LOCAL_CLASS_P (current_class_type)
/* We already complained about the template definition. */
if (TREE_TYPE (DECL_INITIAL (vtbl)) == 0)
{
- VEC(tree,gc)* cleanups = NULL;
+ vec<tree, va_gc> *cleanups = NULL;
tree expr = store_init_value (vtbl, DECL_INITIAL (vtbl), &cleanups,
LOOKUP_NORMAL);
/* All the static storage duration functions created in this
translation unit. */
-static GTY(()) VEC(tree,gc) *ssdf_decls;
+static GTY(()) vec<tree, va_gc> *ssdf_decls;
/* A map from priority levels to information about that priority
level. There may be many such levels, so efficient lookup is
static constructors and destructors. */
if (!ssdf_decls)
{
- ssdf_decls = VEC_alloc (tree, gc, 32);
+ vec_alloc (ssdf_decls, 32);
/* Take this opportunity to initialize the map from priority
numbers to information about that priority level. */
get_priority_info (DEFAULT_INIT_PRIORITY);
}
- VEC_safe_push (tree, gc, ssdf_decls, ssdf_decl);
+ vec_safe_push (ssdf_decls, ssdf_decl);
/* Create the argument list. */
initialize_p_decl = cp_build_parm_decl
/* Call the static storage duration function with appropriate
arguments. */
- FOR_EACH_VEC_ELT (tree, ssdf_decls, i, fndecl)
+ FOR_EACH_VEC_SAFE_ELT (ssdf_decls, i, fndecl)
{
/* Calls to pure or const functions will expand to nothing. */
if (! (flags_from_decl_or_type (fndecl) & (ECF_CONST | ECF_PURE)))
/* Bad parse errors. Just forget about it. */
if (! global_bindings_p () || current_class_type
- || !VEC_empty (tree,decl_namespace_list))
+ || !vec_safe_is_empty (decl_namespace_list))
return;
if (pch_file)
cause other variables to be needed. New elements will be
appended, and we remove from the vector those that actually
get emitted. */
- for (i = VEC_length (tree, unemitted_tinfo_decls);
- VEC_iterate (tree, unemitted_tinfo_decls, --i, t);)
+ for (i = unemitted_tinfo_decls->length ();
+ unemitted_tinfo_decls->iterate (--i, &t);)
if (emit_tinfo_decl (t))
{
reconsider = true;
- VEC_unordered_remove (tree, unemitted_tinfo_decls, i);
+ unemitted_tinfo_decls->unordered_remove (i);
}
/* The list of objects with static storage duration is built up
/* Go through the set of inline functions whose bodies have not
been emitted yet. If out-of-line copies of these functions
are required, emit them. */
- FOR_EACH_VEC_ELT (tree, deferred_fns, i, decl)
+ FOR_EACH_VEC_SAFE_ELT (deferred_fns, i, decl)
{
/* Does it need synthesizing? */
if (DECL_DEFAULTED_FN (decl) && ! DECL_INITIAL (decl)
reconsider = true;
/* Static data members are just like namespace-scope globals. */
- FOR_EACH_VEC_ELT (tree, pending_statics, i, decl)
+ FOR_EACH_VEC_SAFE_ELT (pending_statics, i, decl)
{
if (var_finalized_p (decl) || DECL_REALLY_EXTERN (decl)
/* Don't write it out if we haven't seen a definition. */
if (DECL_NOT_REALLY_EXTERN (decl) && decl_needed_p (decl))
DECL_EXTERNAL (decl) = 0;
}
- if (VEC_length (tree, pending_statics) != 0
- && wrapup_global_declarations (VEC_address (tree, pending_statics),
- VEC_length (tree, pending_statics)))
+ if (vec_safe_length (pending_statics) != 0
+ && wrapup_global_declarations (pending_statics->address (),
+ pending_statics->length ()))
reconsider = true;
retries++;
while (reconsider);
/* All used inline functions must have a definition at this point. */
- FOR_EACH_VEC_ELT (tree, deferred_fns, i, decl)
+ FOR_EACH_VEC_SAFE_ELT (deferred_fns, i, decl)
{
if (/* Check only inline functions that were actually used.
DECL_ODR_USED (decl) && DECL_DECLARED_INLINE_P (decl)
}
/* So must decls that use a type with no linkage. */
- FOR_EACH_VEC_ELT (tree, no_linkage_decls, i, decl)
+ FOR_EACH_VEC_SAFE_ELT (no_linkage_decls, i, decl)
if (!decl_defined_p (decl))
no_linkage_error (decl);
/* Now, issue warnings about static, but not defined, functions,
etc., and emit debugging information. */
walk_namespaces (wrapup_globals_for_namespace, /*data=*/&reconsider);
- if (VEC_length (tree, pending_statics) != 0)
+ if (vec_safe_length (pending_statics) != 0)
{
- check_global_declarations (VEC_address (tree, pending_statics),
- VEC_length (tree, pending_statics));
- emit_debug_global_declarations (VEC_address (tree, pending_statics),
- VEC_length (tree, pending_statics));
+ check_global_declarations (pending_statics->address (),
+ pending_statics->length ());
+ emit_debug_global_declarations (pending_statics->address (),
+ pending_statics->length ());
}
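/* Bridging sketch: code that feeds array-based interfaces still gets
   a plain array via address () and length (), with vec_safe_length
   guarding the NULL case first, as above:

     if (vec_safe_length (pending_statics) != 0)
       wrapup_global_declarations (pending_statics->address (),
                                   pending_statics->length ());  */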
perform_deferred_noexcept_checks ();
ARGS. */
tree
-build_offset_ref_call_from_tree (tree fn, VEC(tree,gc) **args,
+build_offset_ref_call_from_tree (tree fn, vec<tree, va_gc> **args,
tsubst_flags_t complain)
{
tree orig_fn;
- VEC(tree,gc) *orig_args = NULL;
+ vec<tree, va_gc> *orig_args = NULL;
tree expr;
tree object;
{
if (TREE_CODE (fn) == DOTSTAR_EXPR)
object = cp_build_addr_expr (object, complain);
- VEC_safe_insert (tree, gc, *args, 0, object);
+ vec_safe_insert (*args, 0, object);
}
/* Now that the arguments are done, transform FN. */
fn = build_non_dependent_expr (fn);
fn = TREE_OPERAND (fn, 1);
fn = get_member_function_from_ptrfunc (&object_addr, fn,
complain);
- VEC_safe_insert (tree, gc, *args, 0, object_addr);
+ vec_safe_insert (*args, 0, object_addr);
}
if (CLASS_TYPE_P (TREE_TYPE (fn)))
finishes, otherwise it might recurse. */
if (defer_mark_used_calls)
{
- VEC_safe_push (tree, gc, deferred_mark_used_calls, decl);
+ vec_safe_push (deferred_mark_used_calls, decl);
return true;
}
the vector interferes with GC, so give an error now. */
no_linkage_error (decl);
else
- VEC_safe_push (tree, gc, no_linkage_decls, decl);
+ vec_safe_push (no_linkage_decls, decl);
}
if (TREE_CODE (decl) == FUNCTION_DECL && DECL_DECLARED_INLINE_P (decl)
static void dump_template_argument (tree, int);
static void dump_template_argument_list (tree, int);
static void dump_template_parameter (tree, int);
-static void dump_template_bindings (tree, tree, VEC(tree,gc) *);
+static void dump_template_bindings (tree, tree, vec<tree, va_gc> *);
static void dump_scope (tree, int);
static void dump_template_parms (tree, int, int);
static int get_non_default_template_args_count (tree, int);
TREE_VEC. */
static void
-dump_template_bindings (tree parms, tree args, VEC(tree,gc)* typenames)
+dump_template_bindings (tree parms, tree args, vec<tree, va_gc> *typenames)
{
bool need_semicolon = false;
int i;
}
/* Don't bother with typenames for a partial instantiation. */
- if (VEC_empty (tree, typenames) || uses_template_parms (args))
+ if (vec_safe_is_empty (typenames) || uses_template_parms (args))
return;
- FOR_EACH_VEC_ELT (tree, typenames, i, t)
+ FOR_EACH_VEC_SAFE_ELT (typenames, i, t)
{
if (need_semicolon)
pp_separate_with_semicolon (cxx_pp);
}
/* find_typenames looks through the type of the function template T
- and returns a VEC containing any typedefs, decltypes or TYPENAME_TYPEs
+ and returns a vec containing any typedefs, decltypes or TYPENAME_TYPEs
it finds. */
struct find_typenames_t
{
struct pointer_set_t *p_set;
- VEC (tree,gc) *typenames;
+ vec<tree, va_gc> *typenames;
};
static tree
mv = TYPE_MAIN_VARIANT (*tp);
if (mv && (mv == *tp || !pointer_set_insert (d->p_set, mv)))
- VEC_safe_push (tree, gc, d->typenames, mv);
+ vec_safe_push (d->typenames, mv);
/* Search into class template arguments, which cp_walk_subtrees
doesn't do. */
return NULL_TREE;
}
-static VEC(tree,gc) *
+static vec<tree, va_gc> *
find_typenames (tree t)
{
struct find_typenames_t ft;
int show_return = flags & TFF_RETURN_TYPE || flags & TFF_DECL_SPECIFIERS;
int do_outer_scope = ! (flags & TFF_UNQUALIFIED_NAME);
tree exceptions;
- VEC(tree,gc) *typenames = NULL;
+ vec<tree, va_gc> *typenames = NULL;
if (DECL_NAME (t) && LAMBDA_FUNCTION_P (t))
{
/* Print out a vector of initializers (subr of dump_expr). */
static void
-dump_expr_init_vec (VEC(constructor_elt,gc) *v, int flags)
+dump_expr_init_vec (vec<constructor_elt, va_gc> *v, int flags)
{
unsigned HOST_WIDE_INT idx;
tree value;
FOR_EACH_CONSTRUCTOR_VALUE (v, idx, value)
{
dump_expr (value, flags | TFF_EXPR_IN_PARENS);
- if (idx != VEC_length (constructor_elt, v) - 1)
+ if (idx != v->length () - 1)
pp_separate_with_comma (cxx_pp);
}
}
void
maybe_print_constexpr_context (diagnostic_context *context)
{
- VEC(tree,heap) *call_stack = cx_error_context ();
+ vec<tree> call_stack = cx_error_context ();
unsigned ix;
tree t;
- FOR_EACH_VEC_ELT (tree, call_stack, ix, t)
+ FOR_EACH_VEC_ELT (call_stack, ix, t)
{
expanded_location xloc = expand_location (EXPR_LOCATION (t));
const char *s = expr_as_string (t, 0);
if (CLASS_TYPE_P (temp_type))
{
int flags = LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING;
- VEC(tree,gc) *exp_vec;
+ vec<tree, va_gc> *exp_vec;
/* Under C++0x [12.8/16 class.copy], a thrown lvalue is sometimes
treated as an rvalue for the purposes of overload resolution
tree fn;
location_t loc;
} pending_noexcept;
-DEF_VEC_O(pending_noexcept);
-DEF_VEC_ALLOC_O(pending_noexcept,gc);
-static GTY(()) VEC(pending_noexcept,gc) *pending_noexcept_checks;
+static GTY(()) vec<pending_noexcept, va_gc> *pending_noexcept_checks;
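/* Declaration sketch: vectors of user-defined structures no longer
   need the DEF_VEC_O/DEF_VEC_ALLOC_O boilerplate deleted above; the
   template instantiation is the whole declaration:

     static GTY(()) vec<pending_noexcept, va_gc> *pending_noexcept_checks;  */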
/* FN is a FUNCTION_DECL that caused a noexcept-expr to be false. Warn if
it can't throw. */
int i;
pending_noexcept *p;
location_t saved_loc = input_location;
- FOR_EACH_VEC_ELT (pending_noexcept, pending_noexcept_checks, i, p)
+ FOR_EACH_VEC_SAFE_ELT (pending_noexcept_checks, i, p)
{
input_location = p->loc;
maybe_noexcept_warning (p->fn);
{
/* Not defined yet; check again at EOF. */
pending_noexcept p = {fn, input_location};
- VEC_safe_push (pending_noexcept, gc, pending_noexcept_checks, p);
+ vec_safe_push (pending_noexcept_checks, p);
}
else
maybe_noexcept_warning (fn);
else if (CLASS_TYPE_P (type))
{
tree field;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* Iterate over the fields, building initializations. */
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
else if (TREE_CODE (type) == ARRAY_TYPE)
{
tree max_index;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* Iterate over the array elements, building initializations. */
if (nelts)
{
constructor_elt ce;
- v = VEC_alloc (constructor_elt, gc, 1);
+ vec_alloc (v, 1);
/* If this is a one element array, we just use a regular init. */
if (tree_int_cst_equal (size_zero_node, max_index))
ce.value = build_zero_init_1 (TREE_TYPE (type),
/*nelts=*/NULL_TREE,
static_storage_p, NULL_TREE);
- VEC_quick_push (constructor_elt, v, ce);
+ v->quick_push (ce);
}
/* Build a constructor to contain the initializations. */
if (TREE_CODE (type) != UNION_TYPE)
{
tree field;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* Iterate over the fields, building initializations. */
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
}
else if (TREE_CODE (type) == ARRAY_TYPE)
{
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* Iterate over the array elements, building initializations. */
tree max_index = array_type_nelts (type);
{
constructor_elt ce;
- v = VEC_alloc (constructor_elt, gc, 1);
+ vec_alloc (v, 1);
/* If this is a one element array, we just use a regular init. */
if (tree_int_cst_equal (size_zero_node, max_index))
ce.index = build2 (RANGE_EXPR, sizetype, size_zero_node, max_index);
ce.value = build_value_init (TREE_TYPE (type), complain);
- VEC_quick_push (constructor_elt, v, ce);
+ v->quick_push (ce);
if (ce.value == error_mark_node)
return error_mark_node;
reference member in a constructor's ctor-initializer (12.6.2)
persists until the constructor exits." */
unsigned i; tree t;
- VEC(tree,gc) *cleanups = make_tree_vector ();
+ vec<tree, va_gc> *cleanups = make_tree_vector ();
if (TREE_CODE (init) == TREE_LIST)
init = build_x_compound_expr_from_list (init, ELK_MEM_INIT,
tf_warning_or_error);
init = build_vec_init_expr (type, init, tf_warning_or_error);
init = build2 (INIT_EXPR, type, decl, init);
finish_expr_stmt (init);
- FOR_EACH_VEC_ELT (tree, cleanups, i, t)
+ FOR_EACH_VEC_ELT (*cleanups, i, t)
push_cleanup (decl, t, false);
release_tree_vector (cleanups);
}
tree base, binfo, base_binfo;
tree sorted_inits;
tree next_subobject;
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
int i;
int uses_unions_p = 0;
/* Process the virtual bases. */
for (vbases = CLASSTYPE_VBASECLASSES (t), i = 0;
- VEC_iterate (tree, vbases, i, base); i++)
+ vec_safe_iterate (vbases, i, &base); i++)
sorted_inits = tree_cons (base, NULL_TREE, sorted_inits);
/* Process the direct bases. */
followed by initialization by X. If neither of these work
out, then look hard. */
tree rval;
- VEC(tree,gc) *parms;
+ vec<tree, va_gc> *parms;
/* If we have direct-initialization from an initializer list, pull
it out of the TREE_LIST so the code below can see it. */
{
parms = make_tree_vector ();
for (; init != NULL_TREE; init = TREE_CHAIN (init))
- VEC_safe_push (tree, gc, parms, TREE_VALUE (init));
+ vec_safe_push (parms, TREE_VALUE (init));
}
else
parms = make_tree_vector_single (init);
tree elt; unsigned i;
/* Unshare the arguments for the second call. */
- VEC(tree,gc) *parms2 = make_tree_vector ();
- FOR_EACH_VEC_ELT (tree, parms, i, elt)
+ vec<tree, va_gc> *parms2 = make_tree_vector ();
+ FOR_EACH_VEC_SAFE_ELT (parms, i, elt)
{
elt = break_out_target_exprs (elt);
- VEC_safe_push (tree, gc, parms2, elt);
+ vec_safe_push (parms2, elt);
}
complete = build_special_member_call (exp, complete_ctor_identifier,
&parms2, binfo, flags,
if (init && TREE_CODE (exp) == VAR_DECL
&& COMPOUND_LITERAL_P (init))
{
- VEC(tree,gc)* cleanups = NULL;
+ vec<tree, va_gc> *cleanups = NULL;
/* If store_init_value returns NULL_TREE, the INIT has been
recorded as the DECL_INITIAL for EXP. That means there's
nothing more we have to do. */
creates and returns a NEW_EXPR. */
static tree
-build_raw_new_expr (VEC(tree,gc) *placement, tree type, tree nelts,
- VEC(tree,gc) *init, int use_global_new)
+build_raw_new_expr (vec<tree, va_gc> *placement, tree type, tree nelts,
+ vec<tree, va_gc> *init, int use_global_new)
{
tree init_list;
tree new_expr;
int" from an empty initializer "new int()". */
if (init == NULL)
init_list = NULL_TREE;
- else if (VEC_empty (tree, init))
+ else if (init->is_empty ())
init_list = void_zero_node;
else
init_list = build_tree_list_vec (init);
build_raw_new_expr. This may change PLACEMENT and INIT. */
static tree
-build_new_1 (VEC(tree,gc) **placement, tree type, tree nelts,
- VEC(tree,gc) **init, bool globally_qualified_p,
+build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
+ vec<tree, va_gc> **init, bool globally_qualified_p,
tsubst_flags_t complain)
{
tree size, rval;
reference, prepare to capture it in a temporary variable. Do
this now, since PLACEMENT will change in the calls below. */
placement_first = NULL_TREE;
- if (VEC_length (tree, *placement) == 1
- && (TREE_CODE (TREE_TYPE (VEC_index (tree, *placement, 0)))
- == POINTER_TYPE))
- placement_first = VEC_index (tree, *placement, 0);
+ if (vec_safe_length (*placement) == 1
+ && (TREE_CODE (TREE_TYPE ((**placement)[0])) == POINTER_TYPE))
+ placement_first = (**placement)[0];
/* Allocate the object. */
- if (VEC_empty (tree, *placement) && TYPE_FOR_JAVA (elt_type))
+ if (vec_safe_is_empty (*placement) && TYPE_FOR_JAVA (elt_type))
{
tree class_addr;
tree class_decl = build_java_class_ref (elt_type);
size = fold_build3 (COND_EXPR, sizetype, outer_nelts_check,
size, TYPE_MAX_VALUE (sizetype));
/* Create the argument list. */
- VEC_safe_insert (tree, gc, *placement, 0, size);
+ vec_safe_insert (*placement, 0, size);
/* Do name-lookup to find the appropriate operator. */
fns = lookup_fnfields (elt_type, fnname, /*protect=*/2);
if (fns == NULL_TREE)
bool stable;
bool explicit_value_init_p = false;
- if (*init != NULL && VEC_empty (tree, *init))
+ if (*init != NULL && (*init)->is_empty ())
{
*init = NULL;
explicit_value_init_p = true;
else if (array_p)
{
tree vecinit = NULL_TREE;
- if (*init && VEC_length (tree, *init) == 1
- && BRACE_ENCLOSED_INITIALIZER_P (VEC_index (tree, *init, 0))
- && CONSTRUCTOR_IS_DIRECT_INIT (VEC_index (tree, *init, 0)))
+ if (vec_safe_length (*init) == 1
+ && BRACE_ENCLOSED_INITIALIZER_P ((**init)[0])
+ && CONSTRUCTOR_IS_DIRECT_INIT ((**init)[0]))
{
- vecinit = VEC_index (tree, *init, 0);
+ vecinit = (**init)[0];
if (CONSTRUCTOR_NELTS (vecinit) == 0)
/* List-value-initialization, leave it alone. */;
else
rather than just "new". This may change PLACEMENT and INIT. */
tree
-build_new (VEC(tree,gc) **placement, tree type, tree nelts,
- VEC(tree,gc) **init, int use_global_new, tsubst_flags_t complain)
+build_new (vec<tree, va_gc> **placement, tree type, tree nelts,
+ vec<tree, va_gc> **init, int use_global_new, tsubst_flags_t complain)
{
tree rval;
- VEC(tree,gc) *orig_placement = NULL;
+ vec<tree, va_gc> *orig_placement = NULL;
tree orig_nelts = NULL_TREE;
- VEC(tree,gc) *orig_init = NULL;
+ vec<tree, va_gc> *orig_init = NULL;
if (type == error_mark_node)
return error_mark_node;
- if (nelts == NULL_TREE && VEC_length (tree, *init) == 1
+ if (nelts == NULL_TREE && vec_safe_length (*init) == 1
/* Don't do auto deduction where it might affect mangling. */
&& (!processing_template_decl || at_function_scope_p ()))
{
tree auto_node = type_uses_auto (type);
if (auto_node)
{
- tree d_init = VEC_index (tree, *init, 0);
+ tree d_init = (**init)[0];
d_init = resolve_nondeduced_context (d_init);
type = do_auto_deduction (type, d_init, auto_node);
}
&& ((TREE_CODE (init) == CONSTRUCTOR
/* Don't do this if the CONSTRUCTOR might contain something
that might throw and require us to clean up. */
- && (VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (init))
+ && (vec_safe_is_empty (CONSTRUCTOR_ELTS (init))
|| ! TYPE_HAS_NONTRIVIAL_DESTRUCTOR (inner_elt_type)))
|| from_array))
{
initialization of any elements with constant initializers even if
some are non-constant. */
bool do_static_init = (DECL_P (obase) && TREE_STATIC (obase));
- VEC(constructor_elt,gc) *new_vec;
+ vec<constructor_elt, va_gc> *new_vec;
from_array = 0;
if (try_const)
- new_vec = VEC_alloc (constructor_elt, gc, CONSTRUCTOR_NELTS (init));
+ vec_alloc (new_vec, CONSTRUCTOR_NELTS (init));
else
new_vec = NULL;
else if (do_static_init && saw_const)
DECL_INITIAL (obase) = build_constructor (atype, new_vec);
else
- VEC_free (constructor_elt, gc, new_vec);
+ vec_free (new_vec);
}
/* Clear out INIT so that we don't get confused below. */
int i;
tree member;
tree expr;
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
/* Run destructors for all virtual baseclasses. */
if (CLASSTYPE_VBASECLASSES (current_class_type))
/* The CLASSTYPE_VBASECLASSES vector is in initialization
order, which is also the right order for pushing cleanups. */
for (vbases = CLASSTYPE_VBASECLASSES (current_class_type), i = 0;
- VEC_iterate (tree, vbases, i, base_binfo); i++)
+ vec_safe_iterate (vbases, i, &base_binfo); i++)
{
if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (BINFO_TYPE (base_binfo)))
{
typedef struct GTY(()) globals {
/* An array of the current substitution candidates, in the order
we've seen them. */
- VEC(tree,gc) *substitutions;
+ vec<tree, va_gc> *substitutions;
/* The entity that is being mangled. */
tree GTY ((skip)) entity;
tree el;
fprintf (stderr, " ++ substitutions ");
- FOR_EACH_VEC_ELT (tree, G.substitutions, i, el)
+ FOR_EACH_VEC_ELT (*G.substitutions, i, el)
{
const char *name = "???";
int i;
tree candidate;
- FOR_EACH_VEC_ELT (tree, G.substitutions, i, candidate)
+ FOR_EACH_VEC_SAFE_ELT (G.substitutions, i, candidate)
{
gcc_assert (!(DECL_P (node) && node == candidate));
gcc_assert (!(TYPE_P (node) && TYPE_P (candidate)
#endif /* ENABLE_CHECKING */
/* Put the decl onto the varray of substitution candidates. */
- VEC_safe_push (tree, gc, G.substitutions, node);
+ vec_safe_push (G.substitutions, node);
if (DEBUG_MANGLE)
dump_substitution_candidates ();
find_substitution (tree node)
{
int i;
- const int size = VEC_length (tree, G.substitutions);
+ const int size = vec_safe_length (G.substitutions);
tree decl;
tree type;
operation. */
for (i = 0; i < size; ++i)
{
- tree candidate = VEC_index (tree, G.substitutions, i);
+ tree candidate = (*G.substitutions)[i];
/* NODE is a matched to a candidate if it's the same decl node or
if it's the same type. */
if (decl == candidate
tags = TREE_VALUE (tags);
- VEC(tree,gc)* vec = make_tree_vector();
+ vec<tree, va_gc> *vec = make_tree_vector ();
for (tree t = tags; t; t = TREE_CHAIN (t))
{
tree str = TREE_VALUE (t);
- VEC_safe_push (tree, gc, vec, str);
+ vec_safe_push (vec, str);
}
- VEC_qsort (tree, vec, tree_string_cmp);
+ vec->qsort (tree_string_cmp);
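/* Sorting sketch: VEC_qsort becomes a member call taking the same
   qsort(3)-style comparator:

     vec->qsort (tree_string_cmp);   // was VEC_qsort (tree, vec, cmp)  */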
unsigned i; tree str;
- FOR_EACH_VEC_ELT (tree, vec, i, str)
+ FOR_EACH_VEC_ELT (*vec, i, str)
{
write_string ("B");
write_unsigned_number (TREE_STRING_LENGTH (str) - 1);
tree ctx = TYPE_CONTEXT (entity);
for (ix = 0; ; ix++)
{
- tree type = VEC_index (tree, local_classes, ix);
+ tree type = (*local_classes)[ix];
if (type == entity)
return discriminator;
if (TYPE_CONTEXT (type) == ctx
}
else if (code == CONSTRUCTOR)
{
- VEC(constructor_elt,gc)* elts = CONSTRUCTOR_ELTS (expr);
+ vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (expr);
unsigned i; tree val;
if (BRACE_ENCLOSED_INITIALIZER_P (expr))
G.entity);
/* Clear all the substitutions. */
- VEC_truncate (tree, G.substitutions, 0);
+ vec_safe_truncate (G.substitutions, 0);
/* Null-terminate the string. */
write_char ('\0');
{
gcc_obstack_init (&name_obstack);
name_base = obstack_alloc (&name_obstack, 0);
- G.substitutions = NULL;
+ vec_alloc (G.substitutions, 0);
/* Cache these identifiers for quick comparison when checking for
standard substitutions. */
int i;
tree binfo, base_binfo;
tree init;
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
/* Initialize all the base-classes with the parameter converted
to their type so that we get their copy constructor and not
deal with the binfo's directly as a direct base might be
inaccessible due to ambiguity. */
for (vbases = CLASSTYPE_VBASECLASSES (current_class_type), i = 0;
- VEC_iterate (tree, vbases, i, binfo); i++)
+ vec_safe_iterate (vbases, i, &binfo); i++)
{
member_init_list = add_one_base_init (binfo, parm, move_p, inh,
member_init_list);
BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
{
tree converted_parm;
- VEC(tree,gc) *parmvec;
+ vec<tree, va_gc> *parmvec;
/* We must convert PARM directly to the base class
explicitly since the base class may be ambiguous. */
tsubst_flags_t complain)
{
tree ob, fn, fns, binfo, rval;
- VEC(tree,gc) *args;
+ vec<tree, va_gc> *args;
if (TYPE_P (type))
binfo = TYPE_BINFO (type);
if (TREE_CODE (type) != REFERENCE_TYPE)
type = cp_build_reference_type (type, /*rval*/true);
tree arg = build_stub_object (type);
- VEC_safe_push (tree, gc, args, arg);
+ vec_safe_push (args, arg);
}
}
else
{
tree arg = build_stub_object (argtype);
- VEC_quick_push (tree, args, arg);
+ args->quick_push (arg);
}
}
{
tree binfo, base_binfo, scope, fnname, rval, argtype;
bool move_p, copy_arg_p, assign_p, expected_trivial, check_vdtor;
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
int i, quals, flags;
tsubst_flags_t complain;
bool ctor_p;
{
if (constexpr_p)
*constexpr_p = false;
- FOR_EACH_VEC_ELT (tree, vbases, i, base_binfo)
+ FOR_EACH_VEC_ELT (*vbases, i, base_binfo)
{
tree basetype = BINFO_TYPE (base_binfo);
if (copy_arg_p)
{
cp_class_binding cb = {cxx_binding_make (value, type), name};
cxx_binding *binding = cb.base;
- VEC_safe_push (cp_class_binding, gc, scope->class_shadowed, cb);
+ vec_safe_push (scope->class_shadowed, cb);
binding->scope = scope;
return binding;
}
&& (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
|| (TREE_CODE (decl) == FUNCTION_DECL
&& (!TREE_PUBLIC (decl) || DECL_DECLARED_INLINE_P (decl))))
- VEC_safe_push (tree, gc, b->static_decls, decl);
+ vec_safe_push (b->static_decls, decl);
}
}
case sk_namespace:
NAMESPACE_LEVEL (entity) = scope;
- scope->static_decls =
- VEC_alloc (tree, gc,
- DECL_NAME (entity) == std_identifier
- || DECL_NAME (entity) == global_scope_name
- ? 200 : 10);
+ vec_alloc (scope->static_decls,
+ (DECL_NAME (entity) == std_identifier
+ || DECL_NAME (entity) == global_scope_name) ? 200 : 10);
break;
default:
if (i)
fprintf (stderr, "\n");
}
- if (VEC_length (cp_class_binding, lvl->class_shadowed))
+ if (vec_safe_length (lvl->class_shadowed))
{
size_t i;
cp_class_binding *b;
fprintf (stderr, " class-shadowed:");
- FOR_EACH_VEC_ELT (cp_class_binding, lvl->class_shadowed, i, b)
+ FOR_EACH_VEC_ELT (*lvl->class_shadowed, i, b)
fprintf (stderr, " %s ", IDENTIFIER_POINTER (b->identifier));
fprintf (stderr, "\n");
}
/* Remove the bindings for all of the class-level declarations. */
if (level->class_shadowed)
{
- FOR_EACH_VEC_ELT (cp_class_binding, level->class_shadowed, i, cb)
+ FOR_EACH_VEC_ELT (*level->class_shadowed, i, cb)
{
IDENTIFIER_BINDING (cb->identifier) = cb->base->previous;
cxx_binding_free (cb->base);
{
tree result;
/* If we have been pushed into a different namespace, use it. */
- if (!VEC_empty (tree, decl_namespace_list))
- return VEC_last (tree, decl_namespace_list);
+ if (!vec_safe_is_empty (decl_namespace_list))
+ return decl_namespace_list->last ();
if (current_class_type)
result = decl_namespace_context (current_class_type);
{
if (TREE_CODE (decl) != NAMESPACE_DECL)
decl = decl_namespace_context (decl);
- VEC_safe_push (tree, gc, decl_namespace_list, ORIGINAL_NAMESPACE (decl));
+ vec_safe_push (decl_namespace_list, ORIGINAL_NAMESPACE (decl));
}
/* [namespace.memdef]/2 */
void
pop_decl_namespace (void)
{
- VEC_pop (tree, decl_namespace_list);
+ decl_namespace_list->pop ();
}
/* Return the namespace that is the common ancestor
void
suggest_alternatives_for (location_t location, tree name)
{
- VEC(tree,heap) *candidates = NULL;
- VEC(tree,heap) *namespaces_to_search = NULL;
+ vec<tree> candidates = vec<tree>();
+ vec<tree> namespaces_to_search = vec<tree>();
int max_to_search = PARAM_VALUE (CXX_MAX_NAMESPACES_FOR_DIAGNOSTIC_HELP);
int n_searched = 0;
tree t;
unsigned ix;
- VEC_safe_push (tree, heap, namespaces_to_search, global_namespace);
+ namespaces_to_search.safe_push (global_namespace);
- while (!VEC_empty (tree, namespaces_to_search)
+ while (!namespaces_to_search.is_empty ()
&& n_searched < max_to_search)
{
- tree scope = VEC_pop (tree, namespaces_to_search);
+ tree scope = namespaces_to_search.pop ();
struct scope_binding binding = EMPTY_SCOPE_BINDING;
cp_binding_level *level = NAMESPACE_LEVEL (scope);
n_searched++;
if (binding.value)
- VEC_safe_push (tree, heap, candidates, binding.value);
+ candidates.safe_push (binding.value);
/* Add child namespaces. */
for (t = level->namespaces; t; t = DECL_CHAIN (t))
- VEC_safe_push (tree, heap, namespaces_to_search, t);
+ namespaces_to_search.safe_push (t);
}
/* If we stopped before we could examine all namespaces, inform the
might be more candidates further down that we weren't able to
find. */
if (n_searched >= max_to_search
- && !VEC_empty (tree, namespaces_to_search))
+ && !namespaces_to_search.is_empty ())
inform (location,
"maximum limit of %d namespaces searched for %qE",
max_to_search, name);
- VEC_free (tree, heap, namespaces_to_search);
+ namespaces_to_search.release ();
/* Nothing useful to report. */
- if (VEC_empty (tree, candidates))
+ if (candidates.is_empty ())
return;
- inform_n (location, VEC_length (tree, candidates),
+ inform_n (location, candidates.length (),
"suggested alternative:",
"suggested alternatives:");
- FOR_EACH_VEC_ELT (tree, candidates, ix, t)
+ FOR_EACH_VEC_ELT (candidates, ix, t)
inform (location_of (t), " %qE", t);
- VEC_free (tree, heap, candidates);
+ candidates.release ();
}
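/* By-value sketch (hypothetical process): heap vectors are now held
   by value instead of by pointer, as in the function above; the
   members mirror the free functions, and the initial state is the
   empty vec<tree>():

     vec<tree> stack = vec<tree>();
     stack.safe_push (global_namespace);
     while (!stack.is_empty ())
       process (stack.pop ());
     stack.release ();               // returns the heap storage  */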
/* Unscoped lookup of a global: iterate over current namespaces,
/* Returns true iff VEC contains TARGET. */
static bool
-tree_vec_contains (VEC(tree,gc)* vec, tree target)
+tree_vec_contains (vec<tree, va_gc> *vec, tree target)
{
unsigned int i;
tree elt;
- FOR_EACH_VEC_ELT (tree,vec,i,elt)
+ FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
if (elt == target)
return true;
return false;
struct scope_binding *result, int flags)
{
/* Maintain a list of namespaces visited... */
- VEC(tree,gc) *seen = NULL;
- VEC(tree,gc) *seen_inline = NULL;
+ vec<tree, va_gc> *seen = NULL;
+ vec<tree, va_gc> *seen_inline = NULL;
/* ... and a list of namespace yet to see. */
- VEC(tree,gc) *todo = NULL;
- VEC(tree,gc) *todo_maybe = NULL;
- VEC(tree,gc) *todo_inline = NULL;
+ vec<tree, va_gc> *todo = NULL;
+ vec<tree, va_gc> *todo_maybe = NULL;
+ vec<tree, va_gc> *todo_inline = NULL;
tree usings;
timevar_start (TV_NAME_LOOKUP);
/* Look through namespace aliases. */
namespaces. For each used namespace, look through its inline
namespace set for any bindings and usings. If no bindings are
found, add any usings seen to the set of used namespaces. */
- VEC_safe_push (tree, gc, todo, scope);
+ vec_safe_push (todo, scope);
- while (VEC_length (tree, todo))
+ while (todo->length ())
{
bool found_here;
- scope = VEC_pop (tree, todo);
+ scope = todo->pop ();
if (tree_vec_contains (seen, scope))
continue;
- VEC_safe_push (tree, gc, seen, scope);
- VEC_safe_push (tree, gc, todo_inline, scope);
+ vec_safe_push (seen, scope);
+ vec_safe_push (todo_inline, scope);
found_here = false;
- while (VEC_length (tree, todo_inline))
+ while (todo_inline->length ())
{
cxx_binding *binding;
- scope = VEC_pop (tree, todo_inline);
+ scope = todo_inline->pop ();
if (tree_vec_contains (seen_inline, scope))
continue;
- VEC_safe_push (tree, gc, seen_inline, scope);
+ vec_safe_push (seen_inline, scope);
binding =
cp_binding_level_find_binding_for_name (NAMESPACE_LEVEL (scope), name);
if (!TREE_INDIRECT_USING (usings))
{
if (is_associated_namespace (scope, TREE_PURPOSE (usings)))
- VEC_safe_push (tree, gc, todo_inline, TREE_PURPOSE (usings));
+ vec_safe_push (todo_inline, TREE_PURPOSE (usings));
else
- VEC_safe_push (tree, gc, todo_maybe, TREE_PURPOSE (usings));
+ vec_safe_push (todo_maybe, TREE_PURPOSE (usings));
}
}
if (found_here)
- VEC_truncate (tree, todo_maybe, 0);
+ vec_safe_truncate (todo_maybe, 0);
else
- while (VEC_length (tree, todo_maybe))
- VEC_safe_push (tree, gc, todo, VEC_pop (tree, todo_maybe));
- }
- VEC_free (tree,gc,todo);
- VEC_free (tree,gc,todo_maybe);
- VEC_free (tree,gc,todo_inline);
- VEC_free (tree,gc,seen);
- VEC_free (tree,gc,seen_inline);
+ while (vec_safe_length (todo_maybe))
+ vec_safe_push (todo, todo_maybe->pop ());
+ }
+ vec_free (todo);
+ vec_free (todo_maybe);
+ vec_free (todo_inline);
+ vec_free (seen);
+ vec_free (seen_inline);
timevar_stop (TV_NAME_LOOKUP);
return result->value != error_mark_node;
}
}
tree
-lookup_function_nonclass (tree name, VEC(tree,gc) *args, bool block_p)
+lookup_function_nonclass (tree name, vec<tree, va_gc> *args, bool block_p)
{
return
lookup_arg_dependent (name,
struct arg_lookup
{
tree name;
- VEC(tree,gc) *args;
- VEC(tree,gc) *namespaces;
- VEC(tree,gc) *classes;
+ vec<tree, va_gc> *args;
+ vec<tree, va_gc> *namespaces;
+ vec<tree, va_gc> *classes;
tree functions;
struct pointer_set_t *fn_set;
};
static bool arg_assoc (struct arg_lookup*, tree);
static bool arg_assoc_args (struct arg_lookup*, tree);
-static bool arg_assoc_args_vec (struct arg_lookup*, VEC(tree,gc) *);
+static bool arg_assoc_args_vec (struct arg_lookup*, vec<tree, va_gc> *);
static bool arg_assoc_type (struct arg_lookup*, tree);
static bool add_function (struct arg_lookup *, tree);
static bool arg_assoc_namespace (struct arg_lookup *, tree);
bool
is_associated_namespace (tree current, tree scope)
{
- VEC(tree,gc) *seen = make_tree_vector ();
- VEC(tree,gc) *todo = make_tree_vector ();
+ vec<tree, va_gc> *seen = make_tree_vector ();
+ vec<tree, va_gc> *todo = make_tree_vector ();
tree t;
bool ret;
ret = true;
break;
}
- VEC_safe_push (tree, gc, seen, scope);
+ vec_safe_push (seen, scope);
for (t = DECL_NAMESPACE_ASSOCIATIONS (scope); t; t = TREE_CHAIN (t))
if (!vec_member (TREE_PURPOSE (t), seen))
- VEC_safe_push (tree, gc, todo, TREE_PURPOSE (t));
- if (!VEC_empty (tree, todo))
+ vec_safe_push (todo, TREE_PURPOSE (t));
+ if (!todo->is_empty ())
{
- scope = VEC_last (tree, todo);
- VEC_pop (tree, todo);
+ scope = todo->last ();
+ todo->pop ();
}
else
{
if (vec_member (scope, k->namespaces))
return false;
- VEC_safe_push (tree, gc, k->namespaces, scope);
+ vec_safe_push (k->namespaces, scope);
/* Check out our super-users. */
for (value = DECL_NAMESPACE_ASSOCIATIONS (scope); value;
if (vec_member (type, k->classes))
return false;
- VEC_safe_push (tree, gc, k->classes, type);
+ vec_safe_push (k->classes, type);
if (TYPE_CLASS_SCOPE_P (type)
&& arg_assoc_class_only (k, TYPE_CONTEXT (type)))
on error. */
static bool
-arg_assoc_args_vec (struct arg_lookup *k, VEC(tree,gc) *args)
+arg_assoc_args_vec (struct arg_lookup *k, vec<tree, va_gc> *args)
{
unsigned int ix;
tree arg;
- FOR_EACH_VEC_ELT (tree, args, ix, arg)
+ FOR_EACH_VEC_SAFE_ELT (args, ix, arg)
if (arg_assoc (k, arg))
return true;
return false;
are the functions found in normal lookup. */
static tree
-lookup_arg_dependent_1 (tree name, tree fns, VEC(tree,gc) *args,
+lookup_arg_dependent_1 (tree name, tree fns, vec<tree, va_gc> *args,
bool include_std)
{
struct arg_lookup k;
/* Wrapper for lookup_arg_dependent_1. */
tree
-lookup_arg_dependent (tree name, tree fns, VEC(tree,gc) *args,
+lookup_arg_dependent (tree name, tree fns, vec<tree, va_gc> *args,
bool include_std)
{
tree ret;
add_decl_expr (decl);
}
else
- VEC_safe_push (tree, gc, local_classes, type);
+ vec_safe_push (local_classes, type);
}
}
if (b->kind == sk_class
have enough space reserved. */
static void
-store_binding (tree id, VEC(cxx_saved_binding,gc) **old_bindings)
+store_binding (tree id, vec<cxx_saved_binding, va_gc> **old_bindings)
{
cxx_saved_binding saved;
saved.identifier = id;
saved.binding = IDENTIFIER_BINDING (id);
saved.real_type_value = REAL_IDENTIFIER_TYPE_VALUE (id);
- VEC_quick_push (cxx_saved_binding, *old_bindings, saved);
+ (*old_bindings)->quick_push (saved);
IDENTIFIER_BINDING (id) = NULL;
}
static void
-store_bindings (tree names, VEC(cxx_saved_binding,gc) **old_bindings)
+store_bindings (tree names, vec<cxx_saved_binding, va_gc> **old_bindings)
{
- static VEC(tree,heap) *bindings_need_stored = NULL;
+ static vec<tree> bindings_need_stored = vec<tree>();
tree t, id;
size_t i;
id = DECL_NAME (t);
if (store_binding_p (id))
- VEC_safe_push(tree, heap, bindings_need_stored, id);
+ bindings_need_stored.safe_push (id);
}
- if (!VEC_empty (tree, bindings_need_stored))
+ if (!bindings_need_stored.is_empty ())
{
- VEC_reserve_exact (cxx_saved_binding, gc, *old_bindings,
- VEC_length (tree, bindings_need_stored));
- for (i = 0; VEC_iterate(tree, bindings_need_stored, i, id); ++i)
+ vec_safe_reserve_exact (*old_bindings, bindings_need_stored.length ());
+ for (i = 0; bindings_need_stored.iterate (i, &id); ++i)
{
/* We can apparently have duplicates in NAMES. */
if (store_binding_p (id))
store_binding (id, old_bindings);
}
- VEC_truncate (tree, bindings_need_stored, 0);
+ bindings_need_stored.truncate (0);
}
timevar_cond_stop (TV_NAME_LOOKUP, subtime);
}
objects, rather than a TREE_LIST. */
static void
-store_class_bindings (VEC(cp_class_binding,gc) *names,
- VEC(cxx_saved_binding,gc) **old_bindings)
+store_class_bindings (vec<cp_class_binding, va_gc> *names,
+ vec<cxx_saved_binding, va_gc> **old_bindings)
{
- static VEC(tree,heap) *bindings_need_stored = NULL;
+ static vec<tree> bindings_need_stored = vec<tree>();
size_t i;
cp_class_binding *cb;
bool subtime = timevar_cond_start (TV_NAME_LOOKUP);
- for (i = 0; VEC_iterate(cp_class_binding, names, i, cb); ++i)
+ for (i = 0; vec_safe_iterate (names, i, &cb); ++i)
if (store_binding_p (cb->identifier))
- VEC_safe_push (tree, heap, bindings_need_stored, cb->identifier);
- if (!VEC_empty (tree, bindings_need_stored))
+ bindings_need_stored.safe_push (cb->identifier);
+ if (!bindings_need_stored.is_empty ())
{
tree id;
- VEC_reserve_exact (cxx_saved_binding, gc, *old_bindings,
- VEC_length (tree, bindings_need_stored));
- for (i = 0; VEC_iterate(tree, bindings_need_stored, i, id); ++i)
+ vec_safe_reserve_exact (*old_bindings, bindings_need_stored.length ());
+ for (i = 0; bindings_need_stored.iterate (i, &id); ++i)
store_binding (id, old_bindings);
- VEC_truncate (tree, bindings_need_stored, 0);
+ bindings_need_stored.truncate (0);
}
timevar_cond_stop (TV_NAME_LOOKUP, subtime);
}
SET_IDENTIFIER_TYPE_VALUE (TREE_PURPOSE (t), TREE_VALUE (t));
}
- FOR_EACH_VEC_ELT (cxx_saved_binding, s->old_bindings, i, sb)
+ FOR_EACH_VEC_SAFE_ELT (s->old_bindings, i, sb)
IDENTIFIER_MARKED (sb->identifier) = 0;
s->prev = scope_chain;
scope_chain = s;
current_function_decl = NULL_TREE;
- current_lang_base = VEC_alloc (tree, gc, 10);
+ vec_alloc (current_lang_base, 10);
current_lang_name = lang_name_cplusplus;
current_namespace = global_namespace;
push_class_stack ();
current_lang_base = 0;
scope_chain = s->prev;
- FOR_EACH_VEC_ELT (cxx_saved_binding, s->old_bindings, i, saved)
+ FOR_EACH_VEC_SAFE_ELT (s->old_bindings, i, saved)
{
tree id = saved->identifier;
tree real_type_value;
} cxx_saved_binding;
-DEF_VEC_O(cxx_saved_binding);
-DEF_VEC_ALLOC_O(cxx_saved_binding,gc);
extern tree identifier_type_value (tree);
extern void set_identifier_type_value (tree, tree);
tree identifier;
} cp_class_binding;
-DEF_VEC_O(cp_class_binding);
-DEF_VEC_ALLOC_O(cp_class_binding,gc);
typedef struct GTY(()) cp_label_binding {
/* The bound LABEL_DECL. */
tree prev_value;
} cp_label_binding;
-DEF_VEC_O(cp_label_binding);
-DEF_VEC_ALLOC_O(cp_label_binding,gc);
/* For each binding contour we allocate a binding_level structure
which records the names defined in that contour.
tree namespaces;
/* An array of static functions and variables (for namespaces only) */
- VEC(tree,gc) *static_decls;
+ vec<tree, va_gc> *static_decls;
/* A list of USING_DECL nodes. */
tree usings;
/* For the binding level corresponding to a class, the entities
declared in the class or its base classes. */
- VEC(cp_class_binding,gc) *class_shadowed;
+ vec<cp_class_binding, va_gc> *class_shadowed;
/* Similar to class_shadowed, but for IDENTIFIER_TYPE_VALUE, and
is used for all binding levels. The TREE_PURPOSE is the name of
/* Similar to class_shadowed, but for IDENTIFIER_LABEL_VALUE, and
used for all binding levels. */
- VEC(cp_label_binding,gc) *shadowed_labels;
+ vec<cp_label_binding, va_gc> *shadowed_labels;
/* For each level (except not the global one),
a chain of BLOCK nodes for all the levels
/* List of VAR_DECLS saved from a previous for statement.
These would be dead in ISO-conforming code, but might
be referenced in ARM-era code. */
- VEC(tree,gc) *dead_vars_from_for;
+ vec<tree, va_gc> *dead_vars_from_for;
/* STATEMENT_LIST for statements in this binding contour.
Only used at present for SK_CLEANUP temporary bindings. */
extern tree lookup_name_nonclass (tree);
extern tree lookup_name_innermost_nonclass_level (tree);
extern bool is_local_extern (tree);
-extern tree lookup_function_nonclass (tree, VEC(tree,gc) *, bool);
+extern tree lookup_function_nonclass (tree, vec<tree, va_gc> *, bool);
extern void push_local_binding (tree, tree, int);
extern bool pushdecl_class_level (tree);
extern tree pushdecl_namespace_level (tree, bool);
extern void do_local_using_decl (tree, tree, tree);
extern tree do_class_using_decl (tree, tree);
extern void do_using_directive (tree);
-extern tree lookup_arg_dependent (tree, tree, VEC(tree,gc) *, bool);
+extern tree lookup_arg_dependent (tree, tree, vec<tree, va_gc> *, bool);
extern bool is_associated_namespace (tree, tree);
extern void parse_using_directive (tree, tree);
extern tree innermost_non_namespace_value (tree);
highlighted by surrounding it in [[ ]]. */
static void
-cp_lexer_dump_tokens (FILE *file, VEC(cp_token,gc) *buffer,
+cp_lexer_dump_tokens (FILE *file, vec<cp_token, va_gc> *buffer,
cp_token *start_token, unsigned num,
cp_token *curr_token)
{
cp_token *token;
bool do_print;
- fprintf (file, "%u tokens\n", VEC_length (cp_token, buffer));
+ fprintf (file, "%u tokens\n", vec_safe_length (buffer));
if (buffer == NULL)
return;
if (num == 0)
- num = VEC_length (cp_token, buffer);
+ num = buffer->length ();
if (start_token == NULL)
- start_token = VEC_address (cp_token, buffer);
+ start_token = buffer->address ();
- if (start_token > VEC_address (cp_token, buffer))
+ if (start_token > buffer->address ())
{
- cp_lexer_print_token (file, &VEC_index (cp_token, buffer, 0));
+ cp_lexer_print_token (file, &(*buffer)[0]);
fprintf (file, " ... ");
}
do_print = false;
nprinted = 0;
- for (i = 0; VEC_iterate (cp_token, buffer, i, token) && nprinted < num; i++)
+ for (i = 0; buffer->iterate (i, &token) && nprinted < num; i++)
{
if (token == start_token)
do_print = true;
}
}
- if (i == num && i < VEC_length (cp_token, buffer))
+ if (i == num && i < buffer->length ())
{
fprintf (file, " ... ");
- cp_lexer_print_token (file, &VEC_last (cp_token, buffer));
+ cp_lexer_print_token (file, &buffer->last ());
}
fprintf (file, "\n");
/* Dump all tokens in BUFFER to stderr. */
void
-cp_lexer_debug_tokens (VEC(cp_token,gc) *buffer)
+cp_lexer_debug_tokens (vec<cp_token, va_gc> *buffer)
{
cp_lexer_dump_tokens (stderr, buffer, NULL, 0, NULL);
}
fprintf (file, "\tFunctions with default args:\n");
for (i = 0;
- VEC_iterate (cp_default_arg_entry, uf->funs_with_default_args, i,
- default_arg_fn);
+ vec_safe_iterate (uf->funs_with_default_args, i, &default_arg_fn);
i++)
{
fprintf (file, "\t\tClass type: ");
fprintf (file, "\n\tFunctions with definitions that require "
"post-processing\n\t\t");
- for (i = 0; VEC_iterate (tree, uf->funs_with_definitions, i, fn); i++)
+ for (i = 0; vec_safe_iterate (uf->funs_with_definitions, i, &fn); i++)
{
print_node_brief (file, "", fn, 0);
fprintf (file, " ");
fprintf (file, "\n\tNon-static data members with initializers that require "
"post-processing\n\t\t");
- for (i = 0; VEC_iterate (tree, uf->nsdmis, i, fn); i++)
+ for (i = 0; vec_safe_iterate (uf->nsdmis, i, &fn); i++)
{
print_node_brief (file, "", fn, 0);
fprintf (file, " ");
static void
cp_debug_print_unparsed_queues (FILE *file,
- VEC(cp_unparsed_functions_entry, gc) *s)
+ vec<cp_unparsed_functions_entry, va_gc> *s)
{
unsigned i;
cp_unparsed_functions_entry *uf;
fprintf (file, "Unparsed functions\n");
- for (i = 0; VEC_iterate (cp_unparsed_functions_entry, s, i, uf); i++)
+ for (i = 0; vec_safe_iterate (s, i, &uf); i++)
{
fprintf (file, "#%u:\n", i);
cp_debug_print_unparsed_function (file, uf);
file = stderr;
next_token = parser->lexer->next_token;
- first_token = VEC_address (cp_token, parser->lexer->buffer);
+ first_token = parser->lexer->buffer->address ();
start_token = (next_token > first_token + window_size / 2)
? next_token - window_size / 2
: first_token;
fprintf (file, "Parser state\n\n");
fprintf (file, "Number of tokens: %u\n",
- VEC_length (cp_token, parser->lexer->buffer));
+ vec_safe_length (parser->lexer->buffer));
cp_debug_print_tree_if_set (file, "Lookup scope", parser->scope);
cp_debug_print_tree_if_set (file, "Object scope",
parser->object_scope);
/* Initially we are not debugging. */
lexer->debugging_p = false;
- lexer->saved_tokens = VEC_alloc (cp_token_position, heap,
- CP_SAVED_TOKEN_STACK);
+ lexer->saved_tokens.create (CP_SAVED_TOKEN_STACK);
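/* Creation sketch: a by-value vec starts out empty; create (n)
   reserves N slots up front and release () frees them:

     vec<cp_token_position> saved;   // hypothetical local
     saved.create (CP_SAVED_TOKEN_STACK);
     ...
     saved.release ();  */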
/* Create the buffer. */
- lexer->buffer = VEC_alloc (cp_token, gc, CP_LEXER_BUFFER_SIZE);
+ vec_alloc (lexer->buffer, CP_LEXER_BUFFER_SIZE);
return lexer;
}
lexer = cp_lexer_alloc ();
/* Put the first token in the buffer. */
- VEC_quick_push (cp_token, lexer->buffer, token);
+ lexer->buffer->quick_push (token);
/* Get the remaining tokens from the preprocessor. */
while (token.type != CPP_EOF)
{
cp_lexer_get_preprocessor_token (lexer, &token);
- VEC_safe_push (cp_token, gc, lexer->buffer, token);
+ vec_safe_push (lexer->buffer, token);
}
- lexer->last_token = VEC_address (cp_token, lexer->buffer)
- + VEC_length (cp_token, lexer->buffer)
+ lexer->last_token = lexer->buffer->address ()
+ + lexer->buffer->length ()
- 1;
- lexer->next_token = VEC_length (cp_token, lexer->buffer)
- ? VEC_address (cp_token, lexer->buffer)
+ lexer->next_token = lexer->buffer->length ()
+ ? lexer->buffer->address ()
: &eof_token;
/* Subsequent preprocessor diagnostics should use compiler
lexer->next_token = first == last ? &eof_token : first;
lexer->last_token = last;
- lexer->saved_tokens = VEC_alloc (cp_token_position, heap,
- CP_SAVED_TOKEN_STACK);
+ lexer->saved_tokens.create (CP_SAVED_TOKEN_STACK);
/* Initially we are not debugging. */
lexer->debugging_p = false;
static void
cp_lexer_destroy (cp_lexer *lexer)
{
- VEC_free (cp_token, gc, lexer->buffer);
- VEC_free (cp_token_position, heap, lexer->saved_tokens);
+ vec_free (lexer->buffer);
+ lexer->saved_tokens.release ();
ggc_free (lexer);
}
static inline int
cp_lexer_saving_tokens (const cp_lexer* lexer)
{
- return VEC_length (cp_token_position, lexer->saved_tokens) != 0;
+ return lexer->saved_tokens.length () != 0;
}
/* Store the next token from the preprocessor in *TOKEN. Return true
if (cp_lexer_debugging_p (lexer))
fprintf (cp_lexer_debug_stream, "cp_lexer: saving tokens\n");
- VEC_safe_push (cp_token_position, heap,
- lexer->saved_tokens, lexer->next_token);
+ lexer->saved_tokens.safe_push (lexer->next_token);
}
/* Commit to the portion of the token stream most recently saved. */
if (cp_lexer_debugging_p (lexer))
fprintf (cp_lexer_debug_stream, "cp_lexer: committing tokens\n");
- VEC_pop (cp_token_position, lexer->saved_tokens);
+ lexer->saved_tokens.pop ();
}
/* Return all tokens saved since the last call to cp_lexer_save_tokens
if (cp_lexer_debugging_p (lexer))
fprintf (cp_lexer_debug_stream, "cp_lexer: restoring tokens\n");
- lexer->next_token = VEC_pop (cp_token_position, lexer->saved_tokens);
+ lexer->next_token = lexer->saved_tokens.pop ();
}
/* Print a representation of the TOKEN on the STREAM. */
/* Managing the unparsed function queues. */
#define unparsed_funs_with_default_args \
- VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues).funs_with_default_args
+ parser->unparsed_queues->last ().funs_with_default_args
#define unparsed_funs_with_definitions \
- VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues).funs_with_definitions
+ parser->unparsed_queues->last ().funs_with_definitions
#define unparsed_nsdmis \
- VEC_last (cp_unparsed_functions_entry, parser->unparsed_queues).nsdmis
+ parser->unparsed_queues->last ().nsdmis
static void
push_unparsed_function_queues (cp_parser *parser)
{
cp_unparsed_functions_entry e = {NULL, make_tree_vector (), NULL};
- VEC_safe_push (cp_unparsed_functions_entry, gc, parser->unparsed_queues, e);
+ vec_safe_push (parser->unparsed_queues, e);
}
static void
pop_unparsed_function_queues (cp_parser *parser)
{
release_tree_vector (unparsed_funs_with_definitions);
- VEC_pop (cp_unparsed_functions_entry, parser->unparsed_queues);
+ parser->unparsed_queues->pop ();
}
/* Prototypes. */
(cp_parser *, tree, bool);
static tree cp_parser_postfix_dot_deref_expression
(cp_parser *, enum cpp_ttype, tree, bool, cp_id_kind *, location_t);
-static VEC(tree,gc) *cp_parser_parenthesized_expression_list
+static vec<tree, va_gc> *cp_parser_parenthesized_expression_list
(cp_parser *, int, bool, bool, bool *);
/* Values for the second parameter of cp_parser_parenthesized_expression_list. */
enum { non_attr = 0, normal_attr = 1, id_attr = 2 };
(cp_token *);
static tree cp_parser_new_expression
(cp_parser *);
-static VEC(tree,gc) *cp_parser_new_placement
+static vec<tree, va_gc> *cp_parser_new_placement
(cp_parser *);
static tree cp_parser_new_type_id
(cp_parser *, tree *);
(cp_parser *);
static cp_declarator *cp_parser_direct_new_declarator
(cp_parser *);
-static VEC(tree,gc) *cp_parser_new_initializer
+static vec<tree, va_gc> *cp_parser_new_initializer
(cp_parser *);
static tree cp_parser_delete_expression
(cp_parser *);
/* Declarators [gram.dcl.decl] */
static tree cp_parser_init_declarator
- (cp_parser *, cp_decl_specifier_seq *, VEC (deferred_access_check,gc)*, bool, bool, int, bool *, tree *);
+ (cp_parser *, cp_decl_specifier_seq *, vec<deferred_access_check, va_gc> *, bool, bool, int, bool *, tree *);
static cp_declarator *cp_parser_declarator
(cp_parser *, cp_parser_declarator_kind, int *, bool *, bool);
static cp_declarator *cp_parser_direct_declarator
(cp_parser *, bool *);
static tree cp_parser_braced_list
(cp_parser*, bool*);
-static VEC(constructor_elt,gc) *cp_parser_initializer_list
+static vec<constructor_elt, va_gc> *cp_parser_initializer_list
(cp_parser *, bool *);
static bool cp_parser_ctor_initializer_opt_and_function_body
static void cp_parser_template_declaration_after_export
(cp_parser *, bool);
static void cp_parser_perform_template_parameter_access_checks
- (VEC (deferred_access_check,gc)*);
+ (vec<deferred_access_check, va_gc> *);
static tree cp_parser_single_declaration
- (cp_parser *, VEC (deferred_access_check,gc)*, bool, bool, bool *);
+ (cp_parser *, vec<deferred_access_check, va_gc> *, bool, bool, bool *);
static tree cp_parser_functional_cast
(cp_parser *, tree);
static tree cp_parser_save_member_function_body
/* Look up a literal operator with the name and the exact arguments. */
static tree
-lookup_literal_operator (tree name, VEC(tree,gc) *args)
+lookup_literal_operator (tree name, vec<tree, va_gc> *args)
{
tree decl, fns;
decl = lookup_name (name);
argtypes = TYPE_ARG_TYPES (TREE_TYPE (fn));
if (argtypes != NULL_TREE)
{
- for (ix = 0; ix < VEC_length (tree, args) && argtypes != NULL_TREE;
+ for (ix = 0; ix < vec_safe_length (args) && argtypes != NULL_TREE;
++ix, argtypes = TREE_CHAIN (argtypes))
{
tree targ = TREE_VALUE (argtypes);
- tree tparm = TREE_TYPE (VEC_index (tree, args, ix));
+ tree tparm = TREE_TYPE ((*args)[ix]);
bool ptr = TREE_CODE (targ) == POINTER_TYPE;
bool arr = TREE_CODE (tparm) == ARRAY_TYPE;
if ((ptr || arr || !same_type_p (targ, tparm))
found = false;
}
if (found
- && ix == VEC_length (tree, args)
+ && ix == vec_safe_length (args)
/* Maybe this should be sufficient_parms_p instead,
depending on how exactly user-defined literals should
work in the presence of default arguments on the literal
/* Build up a call to the user-defined operator */
/* Lookup the name we got back from the id-expression. */
- VEC(tree,gc) *args = make_tree_vector ();
- VEC_safe_push (tree, gc, args, value);
+ vec<tree, va_gc> *args = make_tree_vector ();
+ vec_safe_push (args, value);
decl = lookup_literal_operator (name, args);
if (!decl || decl == error_mark_node)
{
tree num_string = USERDEF_LITERAL_NUM_STRING (literal);
tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
tree decl, result;
- VEC(tree,gc) *args;
+ vec<tree, va_gc> *args;
/* Look for a literal operator taking the exact type of numeric argument
as the literal value. */
args = make_tree_vector ();
- VEC_safe_push (tree, gc, args, value);
+ vec_safe_push (args, value);
decl = lookup_literal_operator (name, args);
if (decl && decl != error_mark_node)
{
operator taking a const char* argument consisting of the number
in string format. */
args = make_tree_vector ();
- VEC_safe_push (tree, gc, args, num_string);
+ vec_safe_push (args, num_string);
decl = lookup_literal_operator (name, args);
if (decl && decl != error_mark_node)
{
/* Build up a call to the user-defined operator */
/* Lookup the name we got back from the id-expression. */
- VEC(tree,gc) *args = make_tree_vector ();
- VEC_safe_push (tree, gc, args, value);
- VEC_safe_push (tree, gc, args, build_int_cst (size_type_node, len));
+ vec<tree, va_gc> *args = make_tree_vector ();
+ vec_safe_push (args, value);
+ vec_safe_push (args, build_int_cst (size_type_node, len));
decl = lookup_name (name);
if (!decl || decl == error_mark_node)
{
case RID_BUILTIN_SHUFFLE:
{
- VEC(tree,gc)* vec;
+ vec<tree, va_gc> *vec;
unsigned int i;
tree p;
location_t loc = token->location;
if (vec == NULL)
return error_mark_node;
- FOR_EACH_VEC_ELT (tree, vec, i, p)
+ FOR_EACH_VEC_ELT (*vec, i, p)
mark_exp_read (p);
- if (VEC_length (tree, vec) == 2)
- return
- c_build_vec_perm_expr
- (loc, VEC_index (tree, vec, 0),
- NULL_TREE, VEC_index (tree, vec, 1));
-
- else if (VEC_length (tree, vec) == 3)
- return
- c_build_vec_perm_expr
- (loc, VEC_index (tree, vec, 0),
- VEC_index (tree, vec, 1),
- VEC_index (tree, vec, 2));
+ if (vec->length () == 2)
+ return c_build_vec_perm_expr (loc, (*vec)[0], NULL_TREE, (*vec)[1]);
+ else if (vec->length () == 3)
+ return c_build_vec_perm_expr (loc, (*vec)[0], (*vec)[1], (*vec)[2]);
else
{
error_at (loc, "wrong number of arguments to "
if (cp_parser_allow_gnu_extensions_p (parser)
&& cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
{
- VEC(constructor_elt,gc) *initializer_list = NULL;
+ vec<constructor_elt, va_gc> *initializer_list = NULL;
bool saved_in_type_id_in_expr_p;
cp_parser_parse_tentatively (parser);
bool is_builtin_constant_p;
bool saved_integral_constant_expression_p = false;
bool saved_non_integral_constant_expression_p = false;
- VEC(tree,gc) *args;
+ vec<tree, va_gc> *args;
is_member_access = false;
{
if (TREE_CODE (postfix_expression) == IDENTIFIER_NODE)
{
- if (!VEC_empty (tree, args))
+ if (!args->is_empty ())
{
koenig_p = true;
if (!any_type_dependent_arguments_p (args))
/* We do not perform argument-dependent lookup if
normal lookup finds a non-function, in accordance
with the expected resolution of DR 218. */
- else if (!VEC_empty (tree, args)
+ else if (!args->is_empty ()
&& is_overloaded_fn (postfix_expression))
{
tree fn = get_first_fn (postfix_expression);
NON_CONSTANT_P is non-NULL, *NON_CONSTANT_P indicates whether or
not all of the expressions in the list were constant. */
-static VEC(tree,gc) *
+static vec<tree, va_gc> *
cp_parser_parenthesized_expression_list (cp_parser* parser,
int is_attribute_list,
bool cast_p,
bool allow_expansion_p,
bool *non_constant_p)
{
- VEC(tree,gc) *expression_list;
+ vec<tree, va_gc> *expression_list;
bool fold_expr_p = is_attribute_list != non_attr;
tree identifier = NULL_TREE;
bool saved_greater_than_is_operator_p;
expressions to the list, so that we can still tell if
the correct form for a parenthesized expression-list
is found. That gives better errors. */
- VEC_safe_push (tree, gc, expression_list, expr);
+ vec_safe_push (expression_list, expr);
if (expr == error_mark_node)
goto skip_comma;
= saved_greater_than_is_operator_p;
if (identifier)
- VEC_safe_insert (tree, gc, expression_list, 0, identifier);
+ vec_safe_insert (expression_list, 0, identifier);
return expression_list;
}
cp_parser_new_expression (cp_parser* parser)
{
bool global_scope_p;
- VEC(tree,gc) *placement;
+ vec<tree, va_gc> *placement;
tree type;
- VEC(tree,gc) *initializer;
+ vec<tree, va_gc> *initializer;
tree nelts = NULL_TREE;
tree ret;
Returns the same representation as for an expression-list. */
-static VEC(tree,gc) *
+static vec<tree, va_gc> *
cp_parser_new_placement (cp_parser* parser)
{
- VEC(tree,gc) *expression_list;
+ vec<tree, va_gc> *expression_list;
/* Parse the expression-list. */
expression_list = (cp_parser_parenthesized_expression_list
Returns a representation of the expression-list. */
-static VEC(tree,gc) *
+static vec<tree, va_gc> *
cp_parser_new_initializer (cp_parser* parser)
{
- VEC(tree,gc) *expression_list;
+ vec<tree, va_gc> *expression_list;
if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
{
tree t;
int i;
} tree_int;
-DEF_VEC_O(tree_int);
-DEF_VEC_ALLOC_O(tree_int,gc);
-static GTY(()) VEC(tree_int,gc) *lambda_scope_stack;
+static GTY(()) vec<tree_int, va_gc> *lambda_scope_stack;
static void
start_lambda_scope (tree decl)
decl = current_function_decl;
ti.t = lambda_scope;
ti.i = lambda_count;
- VEC_safe_push (tree_int, gc, lambda_scope_stack, ti);
+ vec_safe_push (lambda_scope_stack, ti);
if (lambda_scope != decl)
{
/* Don't reset the count if we're still in the same function. */
static void
finish_lambda_scope (void)
{
- tree_int *p = &VEC_last (tree_int, lambda_scope_stack);
+ tree_int *p = &lambda_scope_stack->last ();
if (lambda_scope != p->t)
{
lambda_scope = p->t;
lambda_count = p->i;
}
- VEC_pop (tree_int, lambda_scope_stack);
+ lambda_scope_stack->pop ();
}
/* Parse a lambda expression.
else
{
/* Use global functions with ADL. */
- VEC(tree,gc) *vec;
+ vec<tree, va_gc> *vec;
vec = make_tree_vector ();
- VEC_safe_push (tree, gc, vec, range);
+ vec_safe_push (vec, range);
member_begin = perform_koenig_lookup (id_begin, vec,
/*include_std=*/true,
cp_parser_range_for_member_function (tree range, tree identifier)
{
tree member, res;
- VEC(tree,gc) *vec;
+ vec<tree, va_gc> *vec;
member = finish_class_member_access_expr (range, identifier,
false, tf_warning_or_error);
}
else
{
- VEC(tree,gc)* vec;
+ vec<tree, va_gc> *vec;
vec = cp_parser_parenthesized_expression_list (parser, non_attr,
/*cast_p=*/false,
/*allow_expansion_p=*/true,
tree template_id;
cp_token_position start_of_id = 0;
deferred_access_check *chk;
- VEC (deferred_access_check,gc) *access_check;
+ vec<deferred_access_check, va_gc> *access_check;
cp_token *next_token = NULL, *next_token_2 = NULL;
bool is_identifier;
access_check = check_value->checks;
if (access_check)
{
- FOR_EACH_VEC_ELT (deferred_access_check, access_check, i, chk)
+ FOR_EACH_VEC_ELT (*access_check, i, chk)
perform_or_defer_access_check (chk->binfo,
chk->decl,
chk->diag_decl,
static tree
cp_parser_init_declarator (cp_parser* parser,
cp_decl_specifier_seq *decl_specifiers,
- VEC (deferred_access_check,gc)* checks,
+ vec<deferred_access_check, va_gc> *checks,
bool function_definition_allowed_p,
bool member_p,
int declares_class_or_enum,
}
else if (token->type == CPP_OPEN_PAREN)
{
- VEC(tree,gc) *vec;
+ vec<tree, va_gc> *vec;
vec = cp_parser_parenthesized_expression_list (parser, non_attr,
/*cast_p=*/false,
/*allow_expansion_p=*/true,
identifier :
[ constant-expression ] =
- Returns a VEC of constructor_elt. The VALUE of each elt is an expression
+ Returns a vec of constructor_elt. The VALUE of each elt is an expression
for the initializer. If the INDEX of the elt is non-NULL, it is the
IDENTIFIER_NODE naming the field to initialize. NON_CONSTANT_P is
as for cp_parser_initializer. */
-static VEC(constructor_elt,gc) *
+static vec<constructor_elt, va_gc> *
cp_parser_initializer_list (cp_parser* parser, bool* non_constant_p)
{
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* Assume all of the expressions are constant. */
*non_constant_p = false;
};
*/
- FOR_EACH_VEC_ELT (cp_default_arg_entry, unparsed_funs_with_default_args,
- ix, e)
+ FOR_EACH_VEC_SAFE_ELT (unparsed_funs_with_default_args, ix, e)
{
decl = e->decl;
/* If there are default arguments that have not yet been processed,
/* Remove any template parameters from the symbol table. */
maybe_end_member_template_processing ();
}
- VEC_truncate (cp_default_arg_entry, unparsed_funs_with_default_args, 0);
+ vec_safe_truncate (unparsed_funs_with_default_args, 0);
/* Now parse any NSDMIs. */
save_ccp = current_class_ptr;
save_ccr = current_class_ref;
- FOR_EACH_VEC_ELT (tree, unparsed_nsdmis, ix, decl)
+ FOR_EACH_VEC_SAFE_ELT (unparsed_nsdmis, ix, decl)
{
if (class_type != DECL_CONTEXT (decl))
{
inject_this_parameter (class_type, TYPE_UNQUALIFIED);
cp_parser_late_parsing_nsdmi (parser, decl);
}
- VEC_truncate (tree, unparsed_nsdmis, 0);
+ vec_safe_truncate (unparsed_nsdmis, 0);
current_class_ptr = save_ccp;
current_class_ref = save_ccr;
if (pushed_scope)
pop_scope (pushed_scope);
/* Now parse the body of the functions. */
- FOR_EACH_VEC_ELT (tree, unparsed_funs_with_definitions, ix, decl)
+ FOR_EACH_VEC_SAFE_ELT (unparsed_funs_with_definitions, ix, decl)
cp_parser_late_parsing_for_member (parser, decl);
- VEC_truncate (tree, unparsed_funs_with_definitions, 0);
+ vec_safe_truncate (unparsed_funs_with_definitions, 0);
}
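A condensed sketch of the drain-and-reset idiom converted above ('queue' and 'process_decl' are placeholders): FOR_EACH_VEC_SAFE_ELT iterates zero times on a NULL vector, and vec_safe_truncate likewise tolerates NULL.

unsigned ix;
tree decl;
/* Old: FOR_EACH_VEC_ELT (tree, queue, ix, decl).  */
FOR_EACH_VEC_SAFE_ELT (queue, ix, decl)
  process_decl (decl);
/* Old: VEC_truncate (tree, queue, 0).  */
vec_safe_truncate (queue, 0);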
/* Put back any saved access checks. */
&& !DECL_C_BIT_FIELD (decl)
&& DECL_INITIAL (decl))
/* Add DECL to the queue of NSDMI to be parsed later. */
- VEC_safe_push (tree, gc, unparsed_nsdmis, decl);
+ vec_safe_push (unparsed_nsdmis, decl);
}
if (assume_semicolon)
/* If it's an `(', then parse the attribute arguments. */
if (token->type == CPP_OPEN_PAREN)
{
- VEC(tree,gc) *vec;
+ vec<tree, va_gc> *vec;
int attr_flag = (attribute_takes_identifier_p (identifier)
? id_attr : normal_attr);
vec = cp_parser_parenthesized_expression_list
return attribute;
{
- VEC(tree, gc) *vec;
+ vec<tree, va_gc> *vec;
int attr_flag = normal_attr;
if (attr_ns == get_identifier ("gnu")
cp_parser_template_declaration_after_export (cp_parser* parser, bool member_p)
{
tree decl = NULL_TREE;
- VEC (deferred_access_check,gc) *checks;
+ vec<deferred_access_check, va_gc> *checks;
tree parameter_list;
bool friend_p = false;
bool need_lang_pop;
if (member_p && decl
&& (TREE_CODE (decl) == FUNCTION_DECL
|| DECL_FUNCTION_TEMPLATE_P (decl)))
- VEC_safe_push (tree, gc, unparsed_funs_with_definitions, decl);
+ vec_safe_push (unparsed_funs_with_definitions, decl);
}
/* Perform the deferred access checks from a template-parameter-list.
get_deferred_access_checks. */
static void
-cp_parser_perform_template_parameter_access_checks (VEC (deferred_access_check,gc)* checks)
+cp_parser_perform_template_parameter_access_checks (vec<deferred_access_check, va_gc> *checks)
{
++processing_template_parmlist;
perform_access_checks (checks, tf_warning_or_error);
static tree
cp_parser_single_declaration (cp_parser* parser,
- VEC (deferred_access_check,gc)* checks,
+ vec<deferred_access_check, va_gc> *checks,
bool member_p,
bool explicit_specialization_p,
bool* friend_p)
static tree
cp_parser_functional_cast (cp_parser* parser, tree type)
{
- VEC(tree,gc) *vec;
+ vec<tree, va_gc> *vec;
tree expression_list;
tree cast;
bool nonconst_p;
DECL_INITIALIZED_IN_CLASS_P (fn) = 1;
/* Add FN to the queue of functions to be parsed later. */
- VEC_safe_push (tree, gc, unparsed_funs_with_definitions, fn);
+ vec_safe_push (unparsed_funs_with_definitions, fn);
return fn;
}
if (TREE_PURPOSE (probe))
{
cp_default_arg_entry entry = {current_class_type, decl};
- VEC_safe_push (cp_default_arg_entry, gc,
- unparsed_funs_with_default_args, entry);
+ vec_safe_push (unparsed_funs_with_default_args, entry);
break;
}
}
{
tree default_arg = TREE_PURPOSE (parm);
tree parsed_arg;
- VEC(tree,gc) *insts;
+ vec<tree, va_gc> *insts;
tree copy;
unsigned ix;
/* Update any instantiations we've already created. */
for (insts = DEFARG_INSTANTIATIONS (default_arg), ix = 0;
- VEC_iterate (tree, insts, ix, copy); ix++)
+ vec_safe_iterate (insts, ix, &copy); ix++)
TREE_PURPOSE (copy) = parsed_arg;
}
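Note the signature change hidden in this hunk: the iterate-style calls now return the element through a pointer out-parameter and yield false past the end. A sketch with placeholder names ('v' and 'use_element' are illustrative):

tree elt;
unsigned ix;
/* Old: for (ix = 0; VEC_iterate (tree, v, ix, elt); ix++).  */
for (ix = 0; vec_safe_iterate (v, ix, &elt); ix++)
  use_element (elt);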
int i;
struct tree_check *check_value;
deferred_access_check *chk;
- VEC (deferred_access_check,gc) *checks;
+ vec<deferred_access_check, va_gc> *checks;
/* Get the stored value. */
check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
checks = check_value->checks;
if (checks)
{
- FOR_EACH_VEC_ELT (deferred_access_check, checks, i, chk)
+ FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
perform_or_defer_access_check (chk->binfo,
chk->decl,
chk->diag_decl, tf_warning_or_error);
location_t loc_first;
bool collapse_err = false;
int i, collapse = 1, nbraces = 0;
- VEC(tree,gc) *for_block = make_tree_vector ();
+ vec<tree, va_gc> *for_block = make_tree_vector ();
for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
LOOKUP_ONLYCONVERTING);
if (CLASS_TYPE_P (TREE_TYPE (decl)))
{
- VEC_safe_push (tree, gc, for_block, this_pre_body);
+ vec_safe_push (for_block, this_pre_body);
init = NULL_TREE;
}
else
}
}
- while (!VEC_empty (tree, for_block))
- add_stmt (pop_stmt_list (VEC_pop (tree, for_block)));
+ while (!for_block->is_empty ())
+ add_stmt (pop_stmt_list (for_block->pop ()));
release_tree_vector (for_block);
return ret;
/* The value associated with the token. */
tree value;
/* The checks that have been associated with value. */
- VEC (deferred_access_check, gc)* checks;
+ vec<deferred_access_check, va_gc> *checks;
/* The token's qualifying scope (used when it is a
CPP_NESTED_NAME_SPECIFIER). */
tree qualifying_scope;
} GTY((desc ("(%1.type == CPP_TEMPLATE_ID) || (%1.type == CPP_NESTED_NAME_SPECIFIER)"))) u;
} cp_token;
-DEF_VEC_O (cp_token);
-DEF_VEC_ALLOC_O (cp_token,gc);
-DEF_VEC_ALLOC_O (cp_token,heap);
/* We use a stack of token pointers for saving token sets. */
typedef struct cp_token *cp_token_position;
-DEF_VEC_P (cp_token_position);
-DEF_VEC_ALLOC_P (cp_token_position,heap);
/* The cp_lexer structure represents the C++ lexer. It is responsible
for managing the token stream from the preprocessor and supplying
typedef struct GTY (()) cp_lexer {
/* The memory allocated for the buffer. NULL if this lexer does not
own the token buffer. */
- VEC(cp_token,gc) *buffer;
+ vec<cp_token, va_gc> *buffer;
/* A pointer just past the last available token. The tokens
in this lexer are [buffer, last_token). */
called. The top entry is the most recent position at which we
began saving tokens. If the stack is non-empty, we are saving
tokens. */
- VEC(cp_token_position,heap) *GTY ((skip)) saved_tokens;
+ vec<cp_token_position> GTY ((skip)) saved_tokens;
/* The next lexer in a linked list of lexers. */
struct cp_lexer *next;
bool in_pragma;
} cp_lexer;
-DEF_VEC_O (cp_lexer);
-DEF_VEC_ALLOC_O (cp_lexer,heap);
/* cp_token_cache is a range of tokens. There is no need to represent
allocate heap memory for it, since tokens are never removed from the
} cp_token_cache;
typedef cp_token_cache *cp_token_cache_ptr;
-DEF_VEC_P (cp_token_cache_ptr);
-DEF_VEC_ALLOC_P (cp_token_cache_ptr,gc);
struct cp_token_ident_d
{
tree decl;
} cp_default_arg_entry;
-DEF_VEC_O(cp_default_arg_entry);
-DEF_VEC_ALLOC_O(cp_default_arg_entry,gc);
/* An entry in a stack for member functions of local classes. */
typedef struct GTY(()) cp_unparsed_functions_entry_d {
/* Functions with default arguments that require post-processing.
Functions appear in this list in declaration order. */
- VEC(cp_default_arg_entry,gc) *funs_with_default_args;
+ vec<cp_default_arg_entry, va_gc> *funs_with_default_args;
/* Functions with definitions that require post-processing. Functions
appear in this list in declaration order. */
- VEC(tree,gc) *funs_with_definitions;
+ vec<tree, va_gc> *funs_with_definitions;
/* Non-static data members with initializers that require post-processing.
FIELD_DECLs appear in this list in declaration order. */
- VEC(tree,gc) *nsdmis;
+ vec<tree, va_gc> *nsdmis;
} cp_unparsed_functions_entry;
-DEF_VEC_O(cp_unparsed_functions_entry);
-DEF_VEC_ALLOC_O(cp_unparsed_functions_entry,gc);
/* The status of a tentative parse. */
/* A stack used for member functions of local classes. The lists
contained in an individual entry can only be processed once the
outermost class being defined is complete. */
- VEC(cp_unparsed_functions_entry,gc) *unparsed_queues;
+ vec<cp_unparsed_functions_entry, va_gc> *unparsed_queues;
/* The number of classes whose definitions are currently in
progress. */
} cp_parser;
/* In parser.c */
-extern void cp_lexer_debug_tokens (VEC(cp_token,gc) *);
+extern void cp_lexer_debug_tokens (vec<cp_token, va_gc> *);
extern void cp_debug_parser (FILE *, cp_parser *);
#endif /* GCC_CP_PARSER_H */
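The two declaration styles visible in this header, restated in a hedged sketch (the struct and field names are invented for illustration): GC-managed storage is held through a pointer and rooted for the collector, while heap-backed vectors with the default vl_ptr layout are embedded by value and must be skipped by gengtype.

struct GTY (()) demo_state
{
  /* GC space; va_gc's default layout is vl_embed, so the vector
     lives behind a pointer the collector can mark.  */
  vec<cp_token, va_gc> *buffer;
  /* Heap space; vl_ptr vectors are used by value, and the GC does
     not manage them.  */
  vec<cp_token_position> GTY ((skip)) saved;
};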
#include "toplev.h"
#include "timevar.h"
#include "tree-iterator.h"
-#include "vecprim.h"
/* The type of functions taking a tree, and some additional data, and
returning an int. */
static int template_header_count;
static GTY(()) tree saved_trees;
-static VEC(int,heap) *inline_parm_levels;
+static vec<int> inline_parm_levels;
static GTY(()) struct tinst_level *current_tinst_level;
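A by-value heap vector, as declared just above, in a minimal sketch (names are illustrative): the object itself is a small handle, so no heap allocation happens until the first safe_push.

static vec<int> levels;

static void
push_level (int n)
{
  /* Old: VEC_safe_push (int, heap, levels, n).  */
  levels.safe_push (n);
}

static int
pop_level (void)
{
  /* Old: VEC_pop (int, levels).  */
  return levels.is_empty () ? 0 : levels.pop ();
}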
the TEMPLATE_TYPE_IDX of the template parameter. Each element is a
TREE_LIST, whose TREE_VALUEs contain the canonical template
parameters of various types and levels. */
-static GTY(()) VEC(tree,gc) *canonical_template_parms;
+static GTY(()) vec<tree, va_gc> *canonical_template_parms;
#define UNIFY_ALLOW_NONE 0
#define UNIFY_ALLOW_MORE_CV_QUAL 1
/* Remember how many levels of template parameters we pushed so that
we can pop them later. */
- VEC_safe_push (int, heap, inline_parm_levels, levels);
+ inline_parm_levels.safe_push (levels);
}
/* Undo the effects of maybe_begin_member_template_processing. */
int i;
int last;
- if (VEC_length (int, inline_parm_levels) == 0)
+ if (inline_parm_levels.length () == 0)
return;
- last = VEC_pop (int, inline_parm_levels);
+ last = inline_parm_levels.pop ();
for (i = 0; i < last; ++i)
{
--processing_template_decl;
{
tree class_template;
tree class_specialization;
- VEC(tree,gc) *methods;
+ vec<tree, va_gc> *methods;
tree fns;
int idx;
/* Iterate through the methods with the indicated name, looking
for the one that has an instance of TMPL. */
methods = CLASSTYPE_METHOD_VEC (class_specialization);
- for (fns = VEC_index (tree, methods, idx); fns; fns = OVL_NEXT (fns))
+ for (fns = (*methods)[idx]; fns; fns = OVL_NEXT (fns))
{
tree fn = OVL_CURRENT (fns);
if (DECL_TEMPLATE_INFO (fn) && DECL_TI_TEMPLATE (fn) == tmpl
{
idx = lookup_fnfields_1 (ctype, name);
if (idx >= 0)
- fns = VEC_index (tree, CLASSTYPE_METHOD_VEC (ctype), idx);
+ fns = (*CLASSTYPE_METHOD_VEC (ctype))[idx];
}
else
{
- VEC(tree,gc) *methods;
+ vec<tree, va_gc> *methods;
tree ovl;
/* For a type-conversion operator, we cannot do a
methods = CLASSTYPE_METHOD_VEC (ctype);
if (methods)
for (idx = CLASSTYPE_FIRST_CONVERSION_SLOT;
- VEC_iterate (tree, methods, idx, ovl);
+ methods->iterate (idx, &ovl);
++idx)
{
if (!DECL_CONV_FN_P (OVL_CURRENT (ovl)))
tree list;
int idx = TEMPLATE_TYPE_IDX (type);
if (!canonical_template_parms)
- canonical_template_parms = VEC_alloc (tree, gc, idx+1);
+ vec_alloc (canonical_template_parms, idx+1);
- while (VEC_length (tree, canonical_template_parms) <= (unsigned)idx)
- VEC_safe_push (tree, gc, canonical_template_parms, NULL_TREE);
+ while (canonical_template_parms->length () <= (unsigned)idx)
+ vec_safe_push (canonical_template_parms, NULL_TREE);
- list = VEC_index (tree, canonical_template_parms, idx);
+ list = (*canonical_template_parms)[idx];
while (list && !comptypes (type, TREE_VALUE (list), COMPARE_STRUCTURAL))
list = TREE_CHAIN (list);
return TREE_VALUE (list);
else
{
- VEC_replace(tree, canonical_template_parms, idx,
- tree_cons (NULL_TREE, type,
- VEC_index (tree, canonical_template_parms, idx)));
+ (*canonical_template_parms)[idx]
+ = tree_cons (NULL_TREE, type,
+ (*canonical_template_parms)[idx]);
return type;
}
}
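The indexing changes in this hunk, restated as a sketch under placeholder names: vec_alloc replaces VEC_alloc, and operator[] on the dereferenced pointer replaces both VEC_index and VEC_replace.

static tree
record_at (int n, tree type)
{
  vec<tree, va_gc> *parms = NULL;
  /* Old: parms = VEC_alloc (tree, gc, n + 1).  */
  vec_alloc (parms, n + 1);
  while (parms->length () <= (unsigned) n)
    vec_safe_push (parms, NULL_TREE);
  /* Old: VEC_replace (tree, parms, n, ...), reading via VEC_index.  */
  (*parms)[n] = tree_cons (NULL_TREE, type, (*parms)[n]);
  return (*parms)[n];
}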
perform_typedefs_access_check (tree tmpl, tree targs)
{
location_t saved_location;
- int i;
+ unsigned i;
qualified_typedef_usage_t *iter;
if (!tmpl
return;
saved_location = input_location;
- FOR_EACH_VEC_ELT (qualified_typedef_usage_t,
- get_types_needing_access_check (tmpl),
- i, iter)
+ FOR_EACH_VEC_SAFE_ELT (get_types_needing_access_check (tmpl), i, iter)
{
tree type_decl = iter->typedef_decl;
tree type_scope = iter->context;
argument in a call of this function. */
remaining_arg_types =
tree_cons (default_arg, type, remaining_arg_types);
- VEC_safe_push (tree, gc, DEFARG_INSTANTIATIONS (default_arg),
- remaining_arg_types);
+ vec_safe_push (DEFARG_INSTANTIATIONS (default_arg), remaining_arg_types);
}
else
remaining_arg_types =
{
tree placement = RECUR (TREE_OPERAND (t, 0));
tree init = RECUR (TREE_OPERAND (t, 3));
- VEC(tree,gc) *placement_vec;
- VEC(tree,gc) *init_vec;
+ vec<tree, va_gc> *placement_vec;
+ vec<tree, va_gc> *init_vec;
tree ret;
if (placement == NULL_TREE)
{
placement_vec = make_tree_vector ();
for (; placement != NULL_TREE; placement = TREE_CHAIN (placement))
- VEC_safe_push (tree, gc, placement_vec, TREE_VALUE (placement));
+ vec_safe_push (placement_vec, TREE_VALUE (placement));
}
/* If there was an initializer in the original tree, but it
else
{
for (; init != NULL_TREE; init = TREE_CHAIN (init))
- VEC_safe_push (tree, gc, init_vec, TREE_VALUE (init));
+ vec_safe_push (init_vec, TREE_VALUE (init));
}
}
case CALL_EXPR:
{
tree function;
- VEC(tree,gc) *call_args;
+ vec<tree, va_gc> *call_args;
unsigned int nargs, i;
bool qualified_p;
bool koenig_p;
tree arg = CALL_EXPR_ARG (t, i);
if (!PACK_EXPANSION_P (arg))
- VEC_safe_push (tree, gc, call_args,
- RECUR (CALL_EXPR_ARG (t, i)));
+ vec_safe_push (call_args, RECUR (CALL_EXPR_ARG (t, i)));
else
{
/* Expand the pack expansion and push each entry onto
tree value = TREE_VEC_ELT (arg, j);
if (value != NULL_TREE)
value = convert_from_reference (value);
- VEC_safe_push (tree, gc, call_args, value);
+ vec_safe_push (call_args, value);
}
}
else
{
/* A partial substitution. Add one entry. */
- VEC_safe_push (tree, gc, call_args, arg);
+ vec_safe_push (call_args, arg);
}
}
}
case CONSTRUCTOR:
{
- VEC(constructor_elt,gc) *n;
+ vec<constructor_elt, va_gc> *n;
constructor_elt *ce;
unsigned HOST_WIDE_INT idx;
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
looked up by digest_init. */
process_index_p = !(type && MAYBE_CLASS_TYPE_P (type));
- n = VEC_copy (constructor_elt, gc, CONSTRUCTOR_ELTS (t));
- newlen = VEC_length (constructor_elt, n);
- FOR_EACH_VEC_ELT (constructor_elt, n, idx, ce)
+ n = vec_safe_copy (CONSTRUCTOR_ELTS (t));
+ newlen = vec_safe_length (n);
+ FOR_EACH_VEC_SAFE_ELT (n, idx, ce)
{
if (ce->index && process_index_p)
ce->index = RECUR (ce->index);
if (need_copy_p)
{
- VEC(constructor_elt,gc) *old_n = n;
+ vec<constructor_elt, va_gc> *old_n = n;
- n = VEC_alloc (constructor_elt, gc, newlen);
- FOR_EACH_VEC_ELT (constructor_elt, old_n, idx, ce)
+ vec_alloc (n, newlen);
+ FOR_EACH_VEC_ELT (*old_n, idx, ce)
{
if (TREE_CODE (ce->value) == TREE_VEC)
{
/* Returns TRUE if ARGS contains a type-dependent expression. */
bool
-any_type_dependent_arguments_p (const VEC(tree,gc) *args)
+any_type_dependent_arguments_p (const vec<tree, va_gc> *args)
{
unsigned int i;
tree arg;
- FOR_EACH_VEC_ELT (tree, args, i, arg)
+ FOR_EACH_VEC_SAFE_ELT (args, i, arg)
{
if (type_dependent_expression_p (arg))
return true;
This modifies ARGS in place. */
void
-make_args_non_dependent (VEC(tree,gc) *args)
+make_args_non_dependent (vec<tree, va_gc> *args)
{
unsigned int ix;
tree arg;
- FOR_EACH_VEC_ELT (tree, args, ix, arg)
+ FOR_EACH_VEC_SAFE_ELT (args, ix, arg)
{
tree newarg = build_non_dependent_expr (arg);
if (newarg != arg)
- VEC_replace (tree, args, ix, newarg);
+ (*args)[ix] = newarg;
}
}
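In-place element replacement, as performed by make_args_non_dependent above, sketched with a placeholder rewrite function: writing through operator[] replaces VEC_replace.

unsigned ix;
tree arg;
FOR_EACH_VEC_SAFE_ELT (args, ix, arg)
  {
    tree repl = transform (arg);   /* 'transform' stands in for any rewrite.  */
    if (repl != arg)
      (*args)[ix] = repl;          /* Old: VEC_replace (tree, args, ix, repl).  */
  }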
Those typedefs were added to T by the function
append_type_to_template_for_access_check. */
-VEC(qualified_typedef_usage_t,gc)*
+vec<qualified_typedef_usage_t, va_gc> *
get_types_needing_access_check (tree t)
{
tree ti;
- VEC(qualified_typedef_usage_t,gc) *result = NULL;
+ vec<qualified_typedef_usage_t, va_gc> *result = NULL;
if (!t || t == error_mark_node)
return NULL;
typedef_usage.context = scope;
typedef_usage.locus = location;
- VEC_safe_push (qualified_typedef_usage_t, gc,
- TI_TYPEDEFS_NEEDING_ACCESS_CHECKING (ti),
- typedef_usage);
+ vec_safe_push (TI_TYPEDEFS_NEEDING_ACCESS_CHECKING (ti), typedef_usage);
}
/* Append TYPE_DECL to the template TEMPL.
location_t location)
{
qualified_typedef_usage_t *iter;
- int i;
+ unsigned i;
gcc_assert (type_decl && (TREE_CODE (type_decl) == TYPE_DECL));
/* Make sure we don't append the type to the template twice. */
- FOR_EACH_VEC_ELT (qualified_typedef_usage_t,
- get_types_needing_access_check (templ),
- i, iter)
+ FOR_EACH_VEC_SAFE_ELT (get_types_needing_access_check (templ), i, iter)
if (iter->typedef_decl == type_decl && scope == iter->context)
return;
static char *afgets (FILE *);
static FILE *reopen_repo_file_for_write (void);
-static GTY(()) VEC(tree,gc) *pending_repo;
+static GTY(()) vec<tree, va_gc> *pending_repo;
static char *repo_name;
static const char *old_args, *old_dir, *old_main;
fprintf (repo_file, "\n");
}
- FOR_EACH_VEC_ELT_REVERSE (tree, pending_repo, ix, val)
+ FOR_EACH_VEC_SAFE_ELT_REVERSE (pending_repo, ix, val)
{
tree name = DECL_ASSEMBLER_NAME (val);
char type = IDENTIFIER_REPO_CHOSEN (name) ? 'C' : 'O';
if (!DECL_REPO_AVAILABLE_P (decl))
{
DECL_REPO_AVAILABLE_P (decl) = 1;
- VEC_safe_push (tree, gc, pending_repo, decl);
+ vec_safe_push (pending_repo, decl);
}
return IDENTIFIER_REPO_CHOSEN (DECL_ASSEMBLER_NAME (decl)) ? 1 : ret;
the type_info derived type. */
} tinfo_s;
-DEF_VEC_O(tinfo_s);
-DEF_VEC_ALLOC_O(tinfo_s,gc);
typedef enum tinfo_kind
{
} tinfo_kind;
/* A vector of all tinfo decls that haven't yet been emitted. */
-VEC(tree,gc) *unemitted_tinfo_decls;
+vec<tree, va_gc> *unemitted_tinfo_decls;
/* A vector of all type_info derived types we need. The first few are
fixed and created early. The remainder are for multiple inheritance
and are generated as needed. */
-static GTY (()) VEC(tinfo_s,gc) *tinfo_descs;
+static GTY (()) vec<tinfo_s, va_gc> *tinfo_descs;
static tree ifnonnull (tree, tree, tsubst_flags_t);
static tree tinfo_name (tree, bool);
= cp_build_qualified_type (type_info_type, TYPE_QUAL_CONST);
type_info_ptr_type = build_pointer_type (const_type_info_type_node);
- unemitted_tinfo_decls = VEC_alloc (tree, gc, 124);
+ vec_alloc (unemitted_tinfo_decls, 124);
create_tinfo_types ();
}
return false;
}
- pseudo_type_info
- = VEC_index (tinfo_s, tinfo_descs, TK_TYPE_INFO_TYPE).type;
+ pseudo_type_info = (*tinfo_descs)[TK_TYPE_INFO_TYPE].type;
type_info_type = TYPE_MAIN_VARIANT (const_type_info_type_node);
/* Make sure abi::__type_info_pseudo has the same alias set
if (!d)
{
int ix = get_pseudo_ti_index (type);
- tinfo_s *ti = &VEC_index (tinfo_s, tinfo_descs, ix);
+ tinfo_s *ti = &(*tinfo_descs)[ix];
d = build_lang_decl (VAR_DECL, name, ti->type);
SET_DECL_ASSEMBLER_NAME (d, name);
CLASSTYPE_TYPEINFO_VAR (TYPE_MAIN_VARIANT (type)) = d;
/* Add decl to the global array of tinfo decls. */
- VEC_safe_push (tree, gc, unemitted_tinfo_decls, d);
+ vec_safe_push (unemitted_tinfo_decls, d);
}
return d;
tree init;
tree name_decl;
tree vtable_ptr;
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
{
tree name_name, name_string;
ti->vtable = vtable_ptr;
}
- v = VEC_alloc (constructor_elt, gc, 2);
+ vec_alloc (v, 2);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, vtable_ptr);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
decay_conversion (name_decl, tf_warning_or_error));
tree to = TREE_TYPE (target);
int flags = qualifier_flags (to);
bool incomplete = target_incomplete_p (to);
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 3);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 3);
if (incomplete)
flags |= 8;
tree klass = TYPE_PTRMEM_CLASS_TYPE (target);
int flags = qualifier_flags (to);
bool incomplete = target_incomplete_p (to);
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 4);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 4);
if (incomplete)
flags |= 0x8;
tree init = tinfo_base_init (ti, target);
va_list extra_inits;
unsigned i;
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, n+1);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, n+1);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init);
va_start (extra_inits, n);
static tree
get_pseudo_ti_init (tree type, unsigned tk_index)
{
- tinfo_s *ti = &VEC_index (tinfo_s, tinfo_descs, tk_index);
+ tinfo_s *ti = &(*tinfo_descs)[tk_index];
gcc_assert (at_eof);
switch (tk_index)
tree tinfo = get_tinfo_ptr (BINFO_TYPE (base_binfo));
/* get_tinfo_ptr might have reallocated the tinfo_descs vector. */
- ti = &VEC_index (tinfo_s, tinfo_descs, tk_index);
+ ti = &(*tinfo_descs)[tk_index];
return class_initializer (ti, type, 1, tinfo);
}
| (CLASSTYPE_DIAMOND_SHAPED_P (type) << 1));
tree binfo = TYPE_BINFO (type);
int nbases = BINFO_N_BASE_BINFOS (binfo);
- VEC(tree,gc) *base_accesses = BINFO_BASE_ACCESSES (binfo);
+ vec<tree, va_gc> *base_accesses = BINFO_BASE_ACCESSES (binfo);
tree offset_type = integer_types[itk_long];
tree base_inits = NULL_TREE;
int ix;
- VEC(constructor_elt,gc) *init_vec = NULL;
+ vec<constructor_elt, va_gc> *init_vec = NULL;
constructor_elt *e;
gcc_assert (tk_index >= TK_FIXED);
- VEC_safe_grow (constructor_elt, gc, init_vec, nbases);
+ vec_safe_grow (init_vec, nbases);
/* Generate the base information initializer. */
for (ix = nbases; ix--;)
{
int flags = 0;
tree tinfo;
tree offset;
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
- if (VEC_index (tree, base_accesses, ix) == access_public_node)
+ if ((*base_accesses)[ix] == access_public_node)
flags |= 2;
tinfo = get_tinfo_ptr (BINFO_TYPE (base_binfo));
if (BINFO_VIRTUAL_P (base_binfo))
offset = fold_build2_loc (input_location,
BIT_IOR_EXPR, offset_type, offset,
build_int_cst (offset_type, flags));
- v = VEC_alloc (constructor_elt, gc, 2);
+ vec_alloc (v, 2);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, tinfo);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, offset);
base_init = build_constructor (init_list_type_node, v);
- e = &VEC_index (constructor_elt, init_vec, ix);
+ e = &(*init_vec)[ix];
e->index = NULL_TREE;
e->value = base_init;
}
base_inits = build_constructor (init_list_type_node, init_vec);
/* get_tinfo_ptr might have reallocated the tinfo_descs vector. */
- ti = &VEC_index (tinfo_s, tinfo_descs, tk_index);
+ ti = &(*tinfo_descs)[tk_index];
return class_initializer (ti, type, 3,
build_int_cst (NULL_TREE, hint),
build_int_cst (NULL_TREE, nbases),
/* First field is the pseudo type_info base class. */
fields = build_decl (input_location,
FIELD_DECL, NULL_TREE,
- VEC_index (tinfo_s, tinfo_descs,
- TK_TYPE_INFO_TYPE).type);
+ (*tinfo_descs)[TK_TYPE_INFO_TYPE].type);
/* Now add the derived fields. */
while ((field_decl = va_arg (ap, tree)))
finish_builtin_struct (pseudo_type, pseudo_name, fields, NULL_TREE);
CLASSTYPE_AS_BASE (pseudo_type) = pseudo_type;
- ti = &VEC_index (tinfo_s, tinfo_descs, tk);
+ ti = &(*tinfo_descs)[tk];
ti->type = cp_build_qualified_type (pseudo_type, TYPE_QUAL_CONST);
ti->name = get_identifier (real_name);
ti->vtable = NULL_TREE;
else
{
tree binfo = TYPE_BINFO (type);
- VEC(tree,gc) *base_accesses = BINFO_BASE_ACCESSES (binfo);
+ vec<tree, va_gc> *base_accesses = BINFO_BASE_ACCESSES (binfo);
tree base_binfo = BINFO_BASE_BINFO (binfo, 0);
int num_bases = BINFO_N_BASE_BINFOS (binfo);
if (num_bases == 1
- && VEC_index (tree, base_accesses, 0) == access_public_node
+ && (*base_accesses)[0] == access_public_node
&& !BINFO_VIRTUAL_P (base_binfo)
&& integer_zerop (BINFO_OFFSET (base_binfo)))
{
tree array_domain, base_array;
ix = TK_FIXED + num_bases;
- if (VEC_length (tinfo_s, tinfo_descs) <= ix)
+ if (vec_safe_length (tinfo_descs) <= ix)
{
/* too short, extend. */
- unsigned len = VEC_length (tinfo_s, tinfo_descs);
+ unsigned len = vec_safe_length (tinfo_descs);
- VEC_safe_grow (tinfo_s, gc, tinfo_descs, ix + 1);
- while (VEC_iterate (tinfo_s, tinfo_descs, len++, ti))
+ vec_safe_grow (tinfo_descs, ix + 1);
+ while (tinfo_descs->iterate (len++, &ti))
ti->type = ti->vtable = ti->name = NULL_TREE;
}
- else if (VEC_index (tinfo_s, tinfo_descs, ix).type)
+ else if ((*tinfo_descs)[ix].type)
/* already created. */
break;
array_domain = build_index_type (size_int (num_bases - 1));
else
array_domain = build_index_type (size_int (num_bases));
- base_array =
- build_array_type (VEC_index (tinfo_s, tinfo_descs,
- TK_BASE_TYPE).type,
- array_domain);
+ base_array = build_array_type ((*tinfo_descs)[TK_BASE_TYPE].type,
+ array_domain);
push_abi_namespace ();
create_pseudo_type_info
gcc_assert (!tinfo_descs);
- VEC_safe_grow (tinfo_s, gc, tinfo_descs, TK_FIXED);
+ vec_safe_grow (tinfo_descs, TK_FIXED);
push_abi_namespace ();
DECL_CHAIN (field) = fields;
fields = field;
- ti = &VEC_index (tinfo_s, tinfo_descs, TK_TYPE_INFO_TYPE);
+ ti = &(*tinfo_descs)[TK_TYPE_INFO_TYPE];
ti->type = make_class_type (RECORD_TYPE);
ti->vtable = NULL_TREE;
ti->name = NULL_TREE;
DECL_CHAIN (field) = fields;
fields = field;
- ti = &VEC_index (tinfo_s, tinfo_descs, TK_BASE_TYPE);
+ ti = &(*tinfo_descs)[TK_BASE_TYPE];
ti->type = make_class_type (RECORD_TYPE);
ti->vtable = NULL_TREE;
{
int i;
tree base_binfo;
- VEC(tree,gc) *accesses;
+ vec<tree, va_gc> *accesses;
/* Otherwise, scan our baseclasses, and pick the most favorable
access. */
accesses = BINFO_BASE_ACCESSES (binfo);
for (i = 0; BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
{
- tree base_access = VEC_index (tree, accesses, i);
+ tree base_access = (*accesses)[i];
access_kind base_access_now = BINFO_ACCESS (base_binfo);
if (base_access_now == ak_none || base_access_now == ak_private)
{
int i;
tree fn;
- VEC(tree,gc) *methods = CLASSTYPE_METHOD_VEC (class_type);
+ vec<tree, va_gc> *methods = CLASSTYPE_METHOD_VEC (class_type);
for (i = CLASSTYPE_FIRST_CONVERSION_SLOT;
- VEC_iterate (tree, methods, i, fn); ++i)
+ vec_safe_iterate (methods, i, &fn); ++i)
{
/* All the conversion operators come near the beginning of
the class. Therefore, if FN is not a conversion
static int
lookup_fnfields_idx_nolazy (tree type, tree name)
{
- VEC(tree,gc) *method_vec;
+ vec<tree, va_gc> *method_vec;
tree fn;
tree tmp;
size_t i;
/* Skip the conversion operators. */
for (i = CLASSTYPE_FIRST_CONVERSION_SLOT;
- VEC_iterate (tree, method_vec, i, fn);
+ vec_safe_iterate (method_vec, i, &fn);
++i)
if (!DECL_CONV_FN_P (OVL_CURRENT (fn)))
break;
int hi;
lo = i;
- hi = VEC_length (tree, method_vec);
+ hi = method_vec->length ();
while (lo < hi)
{
i = (lo + hi) / 2;
if (GATHER_STATISTICS)
n_outer_fields_searched++;
- tmp = VEC_index (tree, method_vec, i);
+ tmp = (*method_vec)[i];
tmp = DECL_NAME (OVL_CURRENT (tmp));
if (tmp > name)
hi = i;
}
}
else
- for (; VEC_iterate (tree, method_vec, i, fn); ++i)
+ for (; vec_safe_iterate (method_vec, i, &fn); ++i)
{
if (GATHER_STATISTICS)
n_outer_fields_searched++;
int ix = lookup_fnfields_1 (complete_type (type), name);
if (ix < 0)
return NULL_TREE;
- return VEC_index (tree, CLASSTYPE_METHOD_VEC (type), ix);
+ return (*CLASSTYPE_METHOD_VEC (type))[ix];
}
/* As above, but avoid lazily declaring functions. */
int ix = lookup_fnfields_idx_nolazy (complete_type (type), name);
if (ix < 0)
return NULL_TREE;
- return VEC_index (tree, CLASSTYPE_METHOD_VEC (type), ix);
+ return (*CLASSTYPE_METHOD_VEC (type))[ix];
}
/* Like lookup_fnfields_1, except that the name is extracted from
/* We are at the top of the hierarchy, and can use the
CLASSTYPE_VBASECLASSES list for unmarking the virtual
bases. */
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
unsigned ix;
tree base_binfo;
for (vbases = CLASSTYPE_VBASECLASSES (BINFO_TYPE (binfo)), ix = 0;
- VEC_iterate (tree, vbases, ix, base_binfo); ix++)
+ vec_safe_iterate (vbases, ix, &base_binfo); ix++)
BINFO_MARKED (base_binfo) = 0;
}
else
/* We are at the top of the hierarchy, and can use the
CLASSTYPE_VBASECLASSES list for unmarking the virtual
bases. */
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
unsigned ix;
tree base_binfo;
for (vbases = CLASSTYPE_VBASECLASSES (BINFO_TYPE (binfo)), ix = 0;
- VEC_iterate (tree, vbases, ix, base_binfo); ix++)
+ vec_safe_iterate (vbases, ix, &base_binfo); ix++)
BINFO_MARKED (base_binfo) = 0;
}
else
ix = lookup_fnfields_1 (type, DECL_NAME (fndecl));
if (ix >= 0)
{
- tree fns = VEC_index (tree, CLASSTYPE_METHOD_VEC (type), ix);
+ tree fns = (*CLASSTYPE_METHOD_VEC (type))[ix];
for (; fns; fns = OVL_NEXT (fns))
{
virtuals;
virtuals = TREE_CHAIN (virtuals))
if (DECL_PURE_VIRTUAL_P (BV_FN (virtuals)))
- VEC_safe_push (tree, gc, CLASSTYPE_PURE_VIRTUALS (type),
- BV_FN (virtuals));
+ vec_safe_push (CLASSTYPE_PURE_VIRTUALS (type), BV_FN (virtuals));
}
return NULL_TREE;
tree child_tpl_convs = NULL_TREE;
unsigned i;
tree base_binfo;
- VEC(tree,gc) *method_vec = CLASSTYPE_METHOD_VEC (BINFO_TYPE (binfo));
+ vec<tree, va_gc> *method_vec = CLASSTYPE_METHOD_VEC (BINFO_TYPE (binfo));
tree conv;
/* If we have no conversion operators, then don't look. */
/* First, locate the unhidden ones at this level. */
for (i = CLASSTYPE_FIRST_CONVERSION_SLOT;
- VEC_iterate (tree, method_vec, i, conv);
+ vec_safe_iterate (method_vec, i, &conv);
++i)
{
tree cur = OVL_CURRENT (conv);
{
unsigned ix;
tree binfo;
- VEC(tree,gc) *vbases;
+ vec<tree, va_gc> *vbases;
for (vbases = CLASSTYPE_VBASECLASSES (t), ix = 0;
- VEC_iterate (tree, vbases, ix, binfo); ix++)
+ vec_safe_iterate (vbases, ix, &binfo); ix++)
if (SAME_BINFO_TYPE_P (BINFO_TYPE (binfo), base))
return binfo;
return NULL;
2. When a declaration such as a type, or a variable, is encountered,
the function `perform_or_defer_access_check' is called. It
- maintains a VEC of all deferred checks.
+ maintains a vector of all deferred checks.
3. The global `current_class_type' or `current_function_decl' is then
set up by the parser. `enforce_access' relies on this information
4. Upon exiting the context mentioned in step 1,
`perform_deferred_access_checks' is called to check all declarations
- stored in the VEC. `pop_deferring_access_checks' is then
+ stored in the vector. `pop_deferring_access_checks' is then
called to restore the previous access checking mode.
In case of parsing error, we simply call `pop_deferring_access_checks'
without `perform_deferred_access_checks'. */
typedef struct GTY(()) deferred_access {
- /* A VEC representing name-lookups for which we have deferred
+ /* A vector representing name-lookups for which we have deferred
checking access controls. We cannot check the accessibility of
names used in a decl-specifier-seq until we know what is being
declared because code like:
A::B* A::f() { return 0; }
is valid, even though `A::B' is not generally accessible. */
- VEC (deferred_access_check,gc)* GTY(()) deferred_access_checks;
+ vec<deferred_access_check, va_gc> * GTY(()) deferred_access_checks;
/* The current mode of access checks. */
enum deferring_kind deferring_access_checks_kind;
} deferred_access;
-DEF_VEC_O (deferred_access);
-DEF_VEC_ALLOC_O (deferred_access,gc);
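The stack discipline described in the comment above, compressed into a sketch of the converted operations (mirroring the hunks that follow; the exact call sites differ):

deferred_access e = {NULL, dk_deferred};
vec_safe_push (deferred_access_stack, e);    /* Begin deferring.  */
/* ... checks accumulate in ...->last ().deferred_access_checks ... */
deferred_access_stack->last ().deferring_access_checks_kind = dk_no_deferred;
deferred_access_stack->pop ();               /* Restore the previous mode.  */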
/* Data for deferred access checking. */
-static GTY(()) VEC(deferred_access,gc) *deferred_access_stack;
+static GTY(()) vec<deferred_access, va_gc> *deferred_access_stack;
static GTY(()) unsigned deferred_access_no_check;
/* Save the current deferred access states and start deferred
else
{
deferred_access e = {NULL, deferring};
- VEC_safe_push (deferred_access, gc, deferred_access_stack, e);
+ vec_safe_push (deferred_access_stack, e);
}
}
resume_deferring_access_checks (void)
{
if (!deferred_access_no_check)
- VEC_last (deferred_access, deferred_access_stack)
- .deferring_access_checks_kind = dk_deferred;
+ deferred_access_stack->last ().deferring_access_checks_kind = dk_deferred;
}
/* Stop deferring access checks. */
stop_deferring_access_checks (void)
{
if (!deferred_access_no_check)
- VEC_last (deferred_access, deferred_access_stack)
- .deferring_access_checks_kind = dk_no_deferred;
+ deferred_access_stack->last ().deferring_access_checks_kind = dk_no_deferred;
}
/* Discard the current deferred access checks and restore the
if (deferred_access_no_check)
deferred_access_no_check--;
else
- VEC_pop (deferred_access, deferred_access_stack);
+ deferred_access_stack->pop ();
}
/* Returns the vector of deferred checks. Each element records the
binfo through which the access occurred and the declaration named.
*/
-VEC (deferred_access_check,gc)*
+vec<deferred_access_check, va_gc> *
get_deferred_access_checks (void)
{
if (deferred_access_no_check)
return NULL;
else
- return (VEC_last (deferred_access, deferred_access_stack)
- .deferred_access_checks);
+ return (deferred_access_stack->last ().deferred_access_checks);
}
/* Take current deferred checks and combine with the
deferred_access_no_check--;
else
{
- VEC (deferred_access_check,gc) *checks;
+ vec<deferred_access_check, va_gc> *checks;
deferred_access *ptr;
- checks = (VEC_last (deferred_access, deferred_access_stack)
- .deferred_access_checks);
+ checks = (deferred_access_stack->last ().deferred_access_checks);
- VEC_pop (deferred_access, deferred_access_stack);
- ptr = &VEC_last (deferred_access, deferred_access_stack);
+ deferred_access_stack->pop ();
+ ptr = &deferred_access_stack->last ();
if (ptr->deferring_access_checks_kind == dk_no_deferred)
{
/* Check access. */
int i, j;
deferred_access_check *chk, *probe;
- FOR_EACH_VEC_ELT (deferred_access_check, checks, i, chk)
+ FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
{
- FOR_EACH_VEC_ELT (deferred_access_check,
- ptr->deferred_access_checks, j, probe)
+ FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, j, probe)
{
if (probe->binfo == chk->binfo &&
probe->decl == chk->decl &&
goto found;
}
/* Insert into parent's checks. */
- VEC_safe_push (deferred_access_check, gc,
- ptr->deferred_access_checks, *chk);
+ vec_safe_push (ptr->deferred_access_checks, *chk);
found:;
}
}
otherwise FALSE. */
bool
-perform_access_checks (VEC (deferred_access_check,gc)* checks,
+perform_access_checks (vec<deferred_access_check, va_gc> *checks,
tsubst_flags_t complain)
{
int i;
if (!checks)
return true;
- FOR_EACH_VEC_ELT (deferred_access_check, checks, i, chk)
+ FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
{
input_location = chk->loc;
ok &= enforce_access (chk->binfo, chk->decl, chk->diag_decl, complain);
gcc_assert (TREE_CODE (binfo) == TREE_BINFO);
- ptr = &VEC_last (deferred_access, deferred_access_stack);
+ ptr = &deferred_access_stack->last ();
/* If we are not supposed to defer access checks, just check now. */
if (ptr->deferring_access_checks_kind == dk_no_deferred)
}
/* See if we are already going to perform this check. */
- FOR_EACH_VEC_ELT (deferred_access_check,
- ptr->deferred_access_checks, i, chk)
+ FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, i, chk)
{
if (chk->decl == decl && chk->binfo == binfo &&
chk->diag_decl == diag_decl)
}
/* If not, record the check. */
deferred_access_check new_access = {binfo, decl, diag_decl, input_location};
- VEC_safe_push (deferred_access_check, gc, ptr->deferred_access_checks,
- new_access);
+ vec_safe_push (ptr->deferred_access_checks, new_access);
return true;
}
/* Add T to the statement-tree. Non-side-effect statements need to be
recorded during statement expressions. */
- gcc_checking_assert (!VEC_empty (tree, stmt_list_stack));
+ gcc_checking_assert (!stmt_list_stack->is_empty ());
append_to_statement_list_force (t, &cur_stmt_list);
return t;
Returns the functions to be considered by overload resolution. */
tree
-perform_koenig_lookup (tree fn, VEC(tree,gc) *args, bool include_std,
+perform_koenig_lookup (tree fn, vec<tree, va_gc> *args, bool include_std,
tsubst_flags_t complain)
{
tree identifier = NULL_TREE;
Returns code for the call. */
tree
-finish_call_expr (tree fn, VEC(tree,gc) **args, bool disallow_virtual,
+finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual,
bool koenig_p, tsubst_flags_t complain)
{
tree result;
tree orig_fn;
- VEC(tree,gc) *orig_args = NULL;
+ vec<tree, va_gc> *orig_args = NULL;
if (fn == error_mark_node)
return error_mark_node;
if (!result)
{
if (warn_sizeof_pointer_memaccess
- && !VEC_empty(tree, *args)
+ && !vec_safe_is_empty (*args)
&& !processing_template_decl)
{
location_t sizeof_arg_loc[3];
sizeof_arg_loc[i] = UNKNOWN_LOCATION;
sizeof_arg[i] = NULL_TREE;
- if (i >= VEC_length (tree, *args))
+ if (i >= (*args)->length ())
continue;
- t = VEC_index (tree, *args, i);
+ t = (**args)[i];
if (TREE_CODE (t) != SIZEOF_EXPR)
continue;
if (SIZEOF_EXPR_TYPE_P (t))
}
else if (TREE_CODE (fn) == PSEUDO_DTOR_EXPR)
{
- if (!VEC_empty (tree, *args))
+ if (!vec_safe_is_empty (*args))
error ("arguments to destructor are not allowed");
/* Mark the pseudo-destructor call as having side-effects so
that we do not issue warnings about its use. */
tree
calculate_direct_bases (tree type)
{
- VEC(tree, gc) *vector = make_tree_vector();
+ vec<tree, va_gc> *vector = make_tree_vector();
tree bases_vec = NULL_TREE;
- VEC(tree, none) *base_binfos;
+ vec<tree, va_gc> *base_binfos;
tree binfo;
unsigned i;
base_binfos = BINFO_BASE_BINFOS (TYPE_BINFO (type));
/* Virtual bases are initialized first */
- for (i = 0; VEC_iterate (tree, base_binfos, i, binfo); i++)
+ for (i = 0; base_binfos->iterate (i, &binfo); i++)
{
if (BINFO_VIRTUAL_P (binfo))
{
- VEC_safe_push (tree, gc, vector, binfo);
+ vec_safe_push (vector, binfo);
}
}
/* Now non-virtuals */
- for (i = 0; VEC_iterate (tree, base_binfos, i, binfo); i++)
+ for (i = 0; base_binfos->iterate (i, &binfo); i++)
{
if (!BINFO_VIRTUAL_P (binfo))
{
- VEC_safe_push (tree, gc, vector, binfo);
+ vec_safe_push (vector, binfo);
}
}
- bases_vec = make_tree_vec (VEC_length (tree, vector));
+ bases_vec = make_tree_vec (vector->length ());
- for (i = 0; i < VEC_length (tree, vector); ++i)
+ for (i = 0; i < vector->length (); ++i)
{
- TREE_VEC_ELT (bases_vec, i) = BINFO_TYPE (VEC_index (tree, vector, i));
+ TREE_VEC_ELT (bases_vec, i) = BINFO_TYPE ((*vector)[i]);
}
return bases_vec;
}
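The member iterate used in calculate_direct_bases above folds the bounds check and the element fetch into one call; a sketch with placeholder names:

unsigned i;
tree binfo;
/* iterate () stores the element through its pointer argument and
   returns false once i reaches the length, ending the loop.  */
for (i = 0; base_binfos->iterate (i, &binfo); i++)
  examine (binfo);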
static tree
dfs_calculate_bases_post (tree binfo, void *data_)
{
- VEC(tree, gc) **data = (VEC(tree, gc) **) data_;
+ vec<tree, va_gc> **data = ((vec<tree, va_gc> **) data_);
if (!BINFO_VIRTUAL_P (binfo))
{
- VEC_safe_push (tree, gc, *data, BINFO_TYPE (binfo));
+ vec_safe_push (*data, BINFO_TYPE (binfo));
}
return NULL_TREE;
}
/* Calculates the morally non-virtual base classes of a class */
-static VEC(tree, gc) *
+static vec<tree, va_gc> *
calculate_bases_helper (tree type)
{
- VEC(tree, gc) *vector = make_tree_vector();
+ vec<tree, va_gc> *vector = make_tree_vector();
/* Now add non-virtual base classes in order of construction */
dfs_walk_all (TYPE_BINFO (type),
tree
calculate_bases (tree type)
{
- VEC(tree, gc) *vector = make_tree_vector();
+ vec<tree, va_gc> *vector = make_tree_vector();
tree bases_vec = NULL_TREE;
unsigned i;
- VEC(tree, gc) *vbases;
- VEC(tree, gc) *nonvbases;
+ vec<tree, va_gc> *vbases;
+ vec<tree, va_gc> *nonvbases;
tree binfo;
complete_type (type);
/* First go through virtual base classes */
for (vbases = CLASSTYPE_VBASECLASSES (type), i = 0;
- VEC_iterate (tree, vbases, i, binfo); i++)
+ vec_safe_iterate (vbases, i, &binfo); i++)
{
- VEC(tree, gc) *vbase_bases = calculate_bases_helper (BINFO_TYPE (binfo));
- VEC_safe_splice (tree, gc, vector, vbase_bases);
+ vec<tree, va_gc> *vbase_bases;
+ vbase_bases = calculate_bases_helper (BINFO_TYPE (binfo));
+ vec_safe_splice (vector, vbase_bases);
release_tree_vector (vbase_bases);
}
/* Now for the non-virtual bases */
nonvbases = calculate_bases_helper (type);
- VEC_safe_splice (tree, gc, vector, nonvbases);
+ vec_safe_splice (vector, nonvbases);
release_tree_vector (nonvbases);
/* Last element is entire class, so don't copy */
- bases_vec = make_tree_vec (VEC_length (tree, vector) - 1);
+ bases_vec = make_tree_vec (vector->length () - 1);
- for (i = 0; i < VEC_length (tree, vector) - 1; ++i)
+ for (i = 0; i < vector->length () - 1; ++i)
{
- TREE_VEC_ELT (bases_vec, i) = VEC_index (tree, vector, i);
+ TREE_VEC_ELT (bases_vec, i) = (*vector)[i];
}
release_tree_vector (vector);
return bases_vec;
finish_omp_barrier (void)
{
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
- VEC(tree,gc) *vec = make_tree_vector ();
+ vec<tree, va_gc> *vec = make_tree_vector ();
tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
release_tree_vector (vec);
finish_expr_stmt (stmt);
finish_omp_flush (void)
{
tree fn = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
- VEC(tree,gc) *vec = make_tree_vector ();
+ vec<tree, va_gc> *vec = make_tree_vector ();
tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
release_tree_vector (vec);
finish_expr_stmt (stmt);
finish_omp_taskwait (void)
{
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
- VEC(tree,gc) *vec = make_tree_vector ();
+ vec<tree, va_gc> *vec = make_tree_vector ();
tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
release_tree_vector (vec);
finish_expr_stmt (stmt);
finish_omp_taskyield (void)
{
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
- VEC(tree,gc) *vec = make_tree_vector ();
+ vec<tree, va_gc> *vec = make_tree_vector ();
tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error);
release_tree_vector (vec);
finish_expr_stmt (stmt);
ix = lookup_fnfields_1 (type, ansi_assopname (NOP_EXPR));
if (ix < 0)
return false;
- fns = VEC_index (tree, CLASSTYPE_METHOD_VEC (type), ix);
+ fns = (*CLASSTYPE_METHOD_VEC (type))[ix];
}
else if (TYPE_HAS_COPY_CTOR (type))
{
to the existing initialization pair INITS. */
static bool
-build_data_member_initialization (tree t, VEC(constructor_elt,gc) **vec)
+build_data_member_initialization (tree t, vec<constructor_elt, va_gc> **vec)
{
tree member, init;
if (TREE_CODE (t) == CLEANUP_POINT_EXPR)
return ok;
}
-/* VEC is a vector of constructor elements built up for the base and member
+/* V is a vector of constructor elements built up for the base and member
initializers of a constructor for TYPE. They need to be in increasing
offset order, which they might not be yet if TYPE has a primary base
which is not first in the base-clause. */
-static VEC(constructor_elt,gc) *
-sort_constexpr_mem_initializers (tree type, VEC(constructor_elt,gc) *vec)
+static vec<constructor_elt, va_gc> *
+sort_constexpr_mem_initializers (tree type, vec<constructor_elt, va_gc> *v)
{
tree pri = CLASSTYPE_PRIMARY_BINFO (type);
constructor_elt elt;
if (pri == NULL_TREE
|| pri == BINFO_BASE_BINFO (TYPE_BINFO (type), 0))
- return vec;
+ return v;
/* Find the element for the primary base and move it to the beginning of
the vec. */
- VEC(constructor_elt,gc) &v = *vec;
+ vec<constructor_elt, va_gc> &vref = *v;
pri = BINFO_TYPE (pri);
for (i = 1; ; ++i)
- if (TREE_TYPE (v[i].index) == pri)
+ if (TREE_TYPE (vref[i].index) == pri)
break;
- elt = v[i];
+ elt = vref[i];
for (; i > 0; --i)
- v[i] = v[i-1];
- v[0] = elt;
- return vec;
+ vref[i] = vref[i-1];
+ vref[0] = elt;
+ return v;
}
/* Build compile-time evaluable representations of the member-initializer list
static tree
build_constexpr_constructor_member_initializers (tree type, tree body)
{
- VEC(constructor_elt,gc) *vec = NULL;
+ vec<constructor_elt, va_gc> *vec = NULL;
bool ok = true;
if (TREE_CODE (body) == MUST_NOT_THROW_EXPR
|| TREE_CODE (body) == EH_SPEC_BLOCK)
gcc_assert (errorcount > 0);
if (ok)
{
- if (VEC_length (constructor_elt, vec) > 0)
+ if (vec_safe_length (vec) > 0)
{
/* In a delegating constructor, return the target. */
- constructor_elt *ce = &VEC_index (constructor_elt, vec, 0);
+ constructor_elt *ce = &(*vec)[0];
if (ce->index == current_class_ptr)
{
body = ce->value;
- VEC_free (constructor_elt, gc, vec);
+ vec_free (vec);
return body;
}
}
These do not need to be marked for PCH or GC. */
/* FIXME remember and print actual constant arguments. */
-static VEC(tree,heap) *call_stack = NULL;
+static vec<tree> call_stack = vec<tree>();
static int call_stack_tick;
static int last_cx_error_tick;
++call_stack_tick;
if (!EXPR_HAS_LOCATION (call))
SET_EXPR_LOCATION (call, input_location);
- VEC_safe_push (tree, heap, call_stack, call);
- if (VEC_length (tree, call_stack) > (unsigned) max_constexpr_depth)
+ call_stack.safe_push (call);
+ if (call_stack.length () > (unsigned) max_constexpr_depth)
return false;
return true;
}
pop_cx_call_context (void)
{
++call_stack_tick;
- VEC_pop (tree, call_stack);
+ call_stack.pop ();
}
-VEC(tree,heap) *
+vec<tree>
cx_error_context (void)
{
- VEC(tree,heap) *r = NULL;
+ vec<tree> r = vec<tree>();
if (call_stack_tick != last_cx_error_tick
- && !VEC_empty (tree, call_stack))
+ && !call_stack.is_empty ())
r = call_stack;
last_cx_error_tick = call_stack_tick;
return r;
}
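One caveat about the by-value vectors introduced here: a vl_ptr vec is a thin handle around a pointer to its storage, so the r = call_stack assignment above aliases the elements rather than duplicating them. A hedged sketch:

vec<tree> a = vec<tree>();
a.safe_push (error_mark_node);
vec<tree> b = a;    /* Shallow copy: both handles share one buffer.  */
b.pop ();           /* Observable through 'a' as well.  */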
i = tree_low_cst (index, 0);
if (TREE_CODE (ary) == CONSTRUCTOR)
- return VEC_index (constructor_elt, CONSTRUCTOR_ELTS (ary), i).value;
+ return (*CONSTRUCTOR_ELTS (ary))[i].value;
else if (elem_nchars == 1)
return build_int_cst (cv_unqualified (TREE_TYPE (TREE_TYPE (ary))),
TREE_STRING_POINTER (ary)[i]);
initialization of the field. */
static constructor_elt *
-base_field_constructor_elt (VEC(constructor_elt,gc) *v, tree ref)
+base_field_constructor_elt (vec<constructor_elt, va_gc> *v, tree ref)
{
tree aggr = TREE_OPERAND (ref, 0);
tree field = TREE_OPERAND (ref, 1);
v = CONSTRUCTOR_ELTS (base_ce->value);
}
- for (i = 0; VEC_iterate (constructor_elt, v, i, ce); ++i)
+ for (i = 0; vec_safe_iterate (v, i, &ce); ++i)
if (ce->index == field)
return ce;
bool allow_non_constant, bool addr,
bool *non_constant_p)
{
- VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (t);
- VEC(constructor_elt,gc) *n = VEC_alloc (constructor_elt, gc,
- VEC_length (constructor_elt, v));
+ vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (t);
+ vec<constructor_elt, va_gc> *n;
+ vec_alloc (n, vec_safe_length (v));
constructor_elt *ce;
HOST_WIDE_INT i;
bool changed = false;
gcc_assert (!BRACE_ENCLOSED_INITIALIZER_P (t));
- for (i = 0; VEC_iterate (constructor_elt, v, i, ce); ++i)
+ for (i = 0; vec_safe_iterate (v, i, &ce); ++i)
{
tree elt = cxx_eval_constant_expression (call, ce->value,
allow_non_constant, addr,
if (*non_constant_p || !changed)
{
fail:
- VEC_free (constructor_elt, gc, n);
+ vec_free (n);
return t;
}
t = build_constructor (TREE_TYPE (t), n);
{
tree elttype = TREE_TYPE (atype);
int max = tree_low_cst (array_type_nelts (atype), 0);
- VEC(constructor_elt,gc) *n = VEC_alloc (constructor_elt, gc, max + 1);
+ vec<constructor_elt, va_gc> *n;
+ vec_alloc (n, max + 1);
bool pre_init = false;
int i;
}
else if (!init)
{
- VEC(tree,gc) *argvec = make_tree_vector ();
+ vec<tree, va_gc> *argvec = make_tree_vector ();
init = build_special_member_call (NULL_TREE, complete_ctor_identifier,
&argvec, elttype, LOOKUP_NORMAL,
tf_warning_or_error);
else
{
/* Copying an element. */
- VEC(tree,gc) *argvec;
+ vec<tree, va_gc> *argvec;
gcc_assert (same_type_ignoring_top_level_qualifiers_p
(atype, TREE_TYPE (init)));
eltinit = cp_build_array_ref (input_location, init, idx,
if (!real_lvalue_p (init))
eltinit = move (eltinit);
argvec = make_tree_vector ();
- VEC_quick_push (tree, argvec, eltinit);
+ argvec->quick_push (eltinit);
eltinit = (build_special_member_call
(NULL_TREE, complete_ctor_identifier, &argvec,
elttype, LOOKUP_NORMAL, tf_warning_or_error));
}
fail:
- VEC_free (constructor_elt, gc, n);
+ vec_free (n);
return init;
}
case CONSTRUCTOR:
{
- VEC(constructor_elt, gc) *v = CONSTRUCTOR_ELTS (t);
+ vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (t);
constructor_elt *ce;
- for (i = 0; VEC_iterate (constructor_elt, v, i, ce); ++i)
+ for (i = 0; vec_safe_iterate (v, i, &ce); ++i)
if (!potential_constant_expression_1 (ce->value, want_rval, flags))
return false;
return true;
/* Build aggregate constructor call.
- cp_parser_braced_list
- cp_parser_functional_cast */
- VEC(constructor_elt,gc) *elts = NULL;
+ vec<constructor_elt, va_gc> *elts = NULL;
tree node, expr, type;
location_t saved_loc;
/* And put a DECL_EXPR in the STATEMENT_LIST for the same block. */
var = build_stmt (DECL_SOURCE_LOCATION (var), DECL_EXPR, var);
- stmt_list = VEC_index (tree, stmt_list_stack,
- VEC_length (tree, stmt_list_stack) - 1 - skip);
+ stmt_list = (*stmt_list_stack)[stmt_list_stack->length () - 1 - skip];
gcc_assert (stmt_list);
append_to_statement_list_force (var, &stmt_list);
}
insert_pending_capture_proxies (void)
{
tree lam;
- VEC(tree,gc) *proxies;
+ vec<tree, va_gc> *proxies;
unsigned i;
if (!current_function_decl || !LAMBDA_FUNCTION_P (current_function_decl))
lam = CLASSTYPE_LAMBDA_EXPR (DECL_CONTEXT (current_function_decl));
proxies = LAMBDA_EXPR_PENDING_PROXIES (lam);
- for (i = 0; i < VEC_length (tree, proxies); ++i)
+ for (i = 0; i < vec_safe_length (proxies); ++i)
{
- tree var = VEC_index (tree, proxies, i);
+ tree var = (*proxies)[i];
insert_capture_proxy (var);
}
release_tree_vector (LAMBDA_EXPR_PENDING_PROXIES (lam));
if (fn == current_function_decl)
insert_capture_proxy (var);
else
- VEC_safe_push (tree, gc, LAMBDA_EXPR_PENDING_PROXIES (lam), var);
+ vec_safe_push (LAMBDA_EXPR_PENDING_PROXIES (lam), var);
return var;
}
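
The split visible in these lambda hunks is deliberate: member functions such as `length`, `quick_push` and `operator[]` require the vector to exist, while the `vec_safe_*` free functions tolerate a null `vec<T, va_gc> *` and allocate on demand. A sketch of the distinction; `v` is hypothetical:

    vec<tree, va_gc> *v = NULL;

    unsigned n = vec_safe_length (v);   /* 0 for a null vector */
    vec_safe_push (v, NULL_TREE);       /* allocates V on first push */
    n = v->length ();                   /* member form: V must be non-null */
    tree first = (*v)[0];               /* was: VEC_index (tree, v, 0) */
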
tree callop = lambda_function (type);
tree rettype, name, fntype, fn, body, compound_stmt;
tree thistype, stattype, statfn, convfn, call, arg;
- VEC (tree, gc) *argvec;
+ vec<tree, va_gc> *argvec;
if (LAMBDA_EXPR_CAPTURE_LIST (CLASSTYPE_LAMBDA_EXPR (type)) != NULL_TREE)
return;
arg = build1 (NOP_EXPR, TREE_TYPE (DECL_ARGUMENTS (callop)),
null_pointer_node);
argvec = make_tree_vector ();
- VEC_quick_push (tree, argvec, arg);
+ argvec->quick_push (arg);
for (arg = DECL_ARGUMENTS (statfn); arg; arg = DECL_CHAIN (arg))
{
mark_exp_read (arg);
- VEC_safe_push (tree, gc, argvec, arg);
+ vec_safe_push (argvec, arg);
}
- call = build_call_a (callop, VEC_length (tree, argvec),
- VEC_address (tree, argvec));
+ call = build_call_a (callop, argvec->length (), argvec->address ());
CALL_FROM_THUNK_P (call) = 1;
if (MAYBE_CLASS_TYPE_P (TREE_TYPE (call)))
call = build_cplus_new (TREE_TYPE (call), call, tf_warning_or_error);
build_vec_init_elt (tree type, tree init, tsubst_flags_t complain)
{
tree inner_type = strip_array_types (type);
- VEC(tree,gc) *argvec;
+ vec<tree, va_gc> *argvec;
if (integer_zerop (array_type_nelts_total (type))
|| !CLASS_TYPE_P (inner_type))
tree dummy = build_dummy_object (inner_type);
if (!real_lvalue_p (init))
dummy = move (dummy);
- VEC_quick_push (tree, argvec, dummy);
+ argvec->quick_push (dummy);
}
init = build_special_member_call (NULL_TREE, complete_ctor_identifier,
&argvec, inner_type, LOOKUP_NORMAL,
case TREE_LIST:
{
- VEC(tree,gc) *vec = make_tree_vector ();
+ vec<tree, va_gc> *vec = make_tree_vector ();
bool changed = false;
tree it;
for (it = t; it; it = TREE_CHAIN (it))
{
tree val = strip_typedefs_expr (TREE_VALUE (t));
- VEC_safe_push (tree, gc, vec, val);
+ vec_safe_push (vec, val);
if (val != TREE_VALUE (t))
changed = true;
gcc_assert (TREE_PURPOSE (it) == NULL_TREE);
if (changed)
{
r = NULL_TREE;
- FOR_EACH_VEC_ELT_REVERSE (tree, vec, i, it)
+ FOR_EACH_VEC_ELT_REVERSE (*vec, i, it)
r = tree_cons (NULL_TREE, it, r);
}
else
case TREE_VEC:
{
bool changed = false;
- VEC(tree,gc)* vec = make_tree_vector ();
+ vec<tree, va_gc> *vec = make_tree_vector ();
n = TREE_VEC_LENGTH (t);
- VEC_reserve (tree, gc, vec, n);
+ vec_safe_reserve (vec, n);
for (i = 0; i < n; ++i)
{
tree op = strip_typedefs_expr (TREE_VEC_ELT (t, i));
- VEC_quick_push (tree, vec, op);
+ vec->quick_push (op);
if (op != TREE_VEC_ELT (t, i))
changed = true;
}
{
r = copy_node (t);
for (i = 0; i < n; ++i)
- TREE_VEC_ELT (r, i) = VEC_index (tree, vec, i);
+ TREE_VEC_ELT (r, i) = (*vec)[i];
}
else
r = t;
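
The TREE_VEC hunk above uses the reserve-then-quick_push idiom: `vec_safe_reserve` guarantees capacity up front, so the following `quick_push` calls skip the reallocation check. A minimal sketch with a hypothetical element count:

    vec<tree, va_gc> *vec = make_tree_vector ();
    int n = 4;                          /* hypothetical */

    vec_safe_reserve (vec, n);          /* was: VEC_reserve (tree, gc, vec, n) */
    for (int i = 0; i < n; ++i)
      vec->quick_push (NULL_TREE);      /* was: VEC_quick_push (tree, vec, ...) */
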
case CONSTRUCTOR:
{
bool changed = false;
- VEC(constructor_elt,gc) *vec
- = VEC_copy (constructor_elt, gc, CONSTRUCTOR_ELTS (t));
+ vec<constructor_elt, va_gc> *vec
+ = vec_safe_copy (CONSTRUCTOR_ELTS (t));
n = CONSTRUCTOR_NELTS (t);
type = strip_typedefs (TREE_TYPE (t));
for (i = 0; i < n; ++i)
{
- constructor_elt *e = &VEC_index (constructor_elt, vec, i);
+ constructor_elt *e = &(*vec)[i];
tree op = strip_typedefs_expr (e->value);
if (op != e->value)
{
if (!changed && type == TREE_TYPE (t))
{
- VEC_free (constructor_elt, gc, vec);
+ vec_free (vec);
return t;
}
else
{
/* Push it onto the list after any virtual bases it contains
will have been pushed. */
- VEC_quick_push (tree, CLASSTYPE_VBASECLASSES (t), new_binfo);
+ CLASSTYPE_VBASECLASSES (t)->quick_push (new_binfo);
BINFO_VIRTUAL_P (new_binfo) = 1;
BINFO_INHERITANCE_CHAIN (new_binfo) = TYPE_BINFO (t);
}
/* In an NSDMI build_base_path defers building conversions to virtual
bases, and we handle it here. */
tree basetype = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (*t)));
- VEC(tree,gc) *vbases = CLASSTYPE_VBASECLASSES (current_class_type);
+ vec<tree, va_gc> *vbases = CLASSTYPE_VBASECLASSES (current_class_type);
int i; tree binfo;
- FOR_EACH_VEC_ELT (tree, vbases, i, binfo)
+ FOR_EACH_VEC_SAFE_ELT (vbases, i, binfo)
if (BINFO_TYPE (binfo) == basetype)
break;
*t = build_base_path (PLUS_EXPR, TREE_OPERAND (*t, 0), binfo, true,
that has been built. */
tree
-build_min_non_dep_call_vec (tree non_dep, tree fn, VEC(tree,gc) *argvec)
+build_min_non_dep_call_vec (tree non_dep, tree fn, vec<tree, va_gc> *argvec)
{
tree t = build_nt_call_vec (fn, argvec);
if (REFERENCE_REF_P (non_dep))
unsigned i;
constructor_elt *ce;
bool good = true;
- VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (t);
- for (i = 0; VEC_iterate (constructor_elt, v, i, ce); ++i)
+ vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (t);
+ for (i = 0; vec_safe_iterate (v, i, &ce); ++i)
{
tree type = TREE_TYPE (ce->value);
tree subinit;
/* Don't fix same_body aliases. Although they don't have their own
CFG, they share it with what they alias to. */
if (!node || !node->alias
- || !VEC_length (ipa_ref_t, node->symbol.ref_list.references))
+ || !vec_safe_length (node->symbol.ref_list.references))
return true;
}
static void maybe_warn_about_returning_address_of_local (tree);
static tree lookup_destructor (tree, tree, tree);
static void warn_args_num (location_t, tree, bool);
-static int convert_arguments (tree, VEC(tree,gc) **, tree, int,
+static int convert_arguments (tree, vec<tree, va_gc> **, tree, int,
tsubst_flags_t);
/* Do `exp = require_complete_type (exp);' to make sure exp
/* Used by the C-common bits. */
tree
build_function_call_vec (location_t /*loc*/,
- tree function, VEC(tree,gc) *params,
- VEC(tree,gc) * /*origtypes*/)
+ tree function, vec<tree, va_gc> *params,
+ vec<tree, va_gc> * /*origtypes*/)
{
- VEC(tree,gc) *orig_params = params;
+ vec<tree, va_gc> *orig_params = params;
tree ret = cp_build_function_call_vec (function, &params,
tf_warning_or_error);
tree
cp_build_function_call (tree function, tree params, tsubst_flags_t complain)
{
- VEC(tree,gc) *vec;
+ vec<tree, va_gc> *vec;
tree ret;
vec = make_tree_vector ();
for (; params != NULL_TREE; params = TREE_CHAIN (params))
- VEC_safe_push (tree, gc, vec, TREE_VALUE (params));
+ vec_safe_push (vec, TREE_VALUE (params));
ret = cp_build_function_call_vec (function, &vec, complain);
release_tree_vector (vec);
return ret;
tree
cp_build_function_call_nary (tree function, tsubst_flags_t complain, ...)
{
- VEC(tree,gc) *vec;
+ vec<tree, va_gc> *vec;
va_list args;
tree ret, t;
vec = make_tree_vector ();
va_start (args, complain);
for (t = va_arg (args, tree); t != NULL_TREE; t = va_arg (args, tree))
- VEC_safe_push (tree, gc, vec, t);
+ vec_safe_push (vec, t);
va_end (args);
ret = cp_build_function_call_vec (function, &vec, complain);
release_tree_vector (vec);
PARAMS. */
tree
-cp_build_function_call_vec (tree function, VEC(tree,gc) **params,
+cp_build_function_call_vec (tree function, vec<tree, va_gc> **params,
tsubst_flags_t complain)
{
tree fntype, fndecl;
int nargs;
tree *argarray;
tree parm_types;
- VEC(tree,gc) *allocated = NULL;
+ vec<tree, va_gc> *allocated = NULL;
tree ret;
/* For Objective-C, convert any calls via a cast to OBJC_TYPE_REF
expressions, like those used for ObjC messenger dispatches. */
- if (params != NULL && !VEC_empty (tree, *params))
- function = objc_rewrite_function_call (function,
- VEC_index (tree, *params, 0));
+ if (params != NULL && !vec_safe_is_empty (*params))
+ function = objc_rewrite_function_call (function, (**params)[0]);
/* build_c_cast puts on a NOP_EXPR to make the result not an lvalue.
Strip such NOP_EXPRs, since FUNCTION is used in non-lvalue context. */
if (nargs < 0)
return error_mark_node;
- argarray = VEC_address (tree, *params);
+ argarray = (*params)->address ();
/* Check for errors in format strings and inappropriately
null parameters. */
default arguments, if such were specified. Do so here. */
static int
-convert_arguments (tree typelist, VEC(tree,gc) **values, tree fndecl,
+convert_arguments (tree typelist, vec<tree, va_gc> **values, tree fndecl,
int flags, tsubst_flags_t complain)
{
tree typetail;
flags |= LOOKUP_ONLYCONVERTING;
for (i = 0, typetail = typelist;
- i < VEC_length (tree, *values);
+ i < vec_safe_length (*values);
i++)
{
tree type = typetail ? TREE_VALUE (typetail) : 0;
- tree val = VEC_index (tree, *values, i);
+ tree val = (**values)[i];
if (val == error_mark_node || type == error_mark_node)
return -1;
if (parmval == error_mark_node)
return -1;
- VEC_replace (tree, *values, i, parmval);
+ (**values)[i] = parmval;
}
else
{
else
val = convert_arg_to_ellipsis (val, complain);
- VEC_replace (tree, *values, i, val);
+ (**values)[i] = val;
}
if (typetail)
if (parmval == error_mark_node)
return -1;
- VEC_safe_push (tree, gc, *values, parmval);
+ vec_safe_push (*values, parmval);
typetail = TREE_CHAIN (typetail);
/* ends with `...'. */
if (typetail == NULL_TREE)
/* Like build_x_compound_expr_from_list, but using a VEC. */
tree
-build_x_compound_expr_from_vec (VEC(tree,gc) *vec, const char *msg,
+build_x_compound_expr_from_vec (vec<tree, va_gc> *vec, const char *msg,
tsubst_flags_t complain)
{
- if (VEC_empty (tree, vec))
+ if (vec_safe_is_empty (vec))
return NULL_TREE;
- else if (VEC_length (tree, vec) == 1)
- return VEC_index (tree, vec, 0);
+ else if (vec->length () == 1)
+ return (*vec)[0];
else
{
tree expr;
return error_mark_node;
}
- expr = VEC_index (tree, vec, 0);
- for (ix = 1; VEC_iterate (tree, vec, ix, t); ++ix)
+ expr = (*vec)[0];
+ for (ix = 1; vec->iterate (ix, &t); ++ix)
expr = build_x_compound_expr (EXPR_LOCATION (t), expr,
t, complain);
/* Do the default thing. */;
else
{
- VEC(tree,gc) *rhs_vec = make_tree_vector_single (rhs);
+ vec<tree, va_gc> *rhs_vec = make_tree_vector_single (rhs);
result = build_special_member_call (lhs, complete_ctor_identifier,
&rhs_vec, lhstype, LOOKUP_NORMAL,
complain);
tree u = NULL_TREE;
tree delta_field;
tree pfn_field;
- VEC(constructor_elt, gc) *v;
+ vec<constructor_elt, va_gc> *v;
/* Pull the FIELD_DECLs out of the type. */
pfn_field = TYPE_FIELDS (type);
pfn = fold_convert (TREE_TYPE (pfn_field), pfn);
/* Finish creating the initializer. */
- v = VEC_alloc(constructor_elt, gc, 2);
+ vec_alloc (v, 2);
CONSTRUCTOR_APPEND_ELT(v, pfn_field, pfn);
CONSTRUCTOR_APPEND_ELT(v, delta_field, delta);
u = build_constructor (type, v);
int
abstract_virtuals_error_sfinae (tree decl, tree type, tsubst_flags_t complain)
{
- VEC(tree,gc) *pure;
+ vec<tree, va_gc> *pure;
/* This function applies only to classes. Any other entity can never
be abstract. */
error ("cannot allocate an object of abstract type %qT", type);
/* Only go through this once. */
- if (VEC_length (tree, pure))
+ if (pure->length ())
{
unsigned ix;
tree fn;
" because the following virtual functions are pure within %qT:",
type);
- FOR_EACH_VEC_ELT (tree, pure, ix, fn)
+ FOR_EACH_VEC_ELT (*pure, ix, fn)
if (! DECL_CLONED_FUNCTION_P (fn)
|| DECL_COMPLETE_DESTRUCTOR_P (fn))
inform (input_location, "\t%+#D", fn);
/* Now truncate the vector. This leaves it non-null, so we know
there are pure virtuals, but empty so we don't list them out
again. */
- VEC_truncate (tree, pure, 0);
+ pure->truncate (0);
}
else
inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
split_non_constant_init into process_init_constructor_array,
that is separating constants from non-constants while building
the vector. */
- VEC_ordered_remove (constructor_elt, CONSTRUCTOR_ELTS (init),
- idx);
+ CONSTRUCTOR_ELTS (init)->ordered_remove (idx);
--idx;
if (array_type_p)
for static variable. In that case, caller must emit the code. */
tree
-store_init_value (tree decl, tree init, VEC(tree,gc)** cleanups, int flags)
+store_init_value (tree decl, tree init, vec<tree, va_gc>** cleanups, int flags)
{
tree value, type;
int flags = 0;
bool unbounded = false;
constructor_elt *ce;
- VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (init);
+ vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (init);
gcc_assert (TREE_CODE (type) == ARRAY_TYPE
|| TREE_CODE (type) == VECTOR_TYPE);
len = TYPE_VECTOR_SUBPARTS (type);
/* There must not be more initializers than needed. */
- if (!unbounded && VEC_length (constructor_elt, v) > len)
+ if (!unbounded && vec_safe_length (v) > len)
{
if (complain & tf_error)
error ("too many initializers for %qT", type);
return PICFLAG_ERRONEOUS;
}
- FOR_EACH_VEC_ELT (constructor_elt, v, i, ce)
+ FOR_EACH_VEC_SAFE_ELT (v, i, ce)
{
if (ce->index)
{
process_init_constructor_record (tree type, tree init,
tsubst_flags_t complain)
{
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
int flags = 0;
tree field;
unsigned HOST_WIDE_INT idx = 0;
if (DECL_BIT_FIELD_TYPE (field))
type = DECL_BIT_FIELD_TYPE (field);
- if (idx < VEC_length (constructor_elt, CONSTRUCTOR_ELTS (init)))
+ if (idx < vec_safe_length (CONSTRUCTOR_ELTS (init)))
{
- constructor_elt *ce = &VEC_index (constructor_elt,
- CONSTRUCTOR_ELTS (init), idx);
+ constructor_elt *ce = &(*CONSTRUCTOR_ELTS (init))[idx];
if (ce->index)
{
/* We can have either a FIELD_DECL or an IDENTIFIER_NODE. The
CONSTRUCTOR_APPEND_ELT (v, field, next);
}
- if (idx < VEC_length (constructor_elt, CONSTRUCTOR_ELTS (init)))
+ if (idx < vec_safe_length (CONSTRUCTOR_ELTS (init)))
{
if (complain & tf_error)
error ("too many initializers for %qT", type);
int len;
/* If the initializer was empty, use default zero initialization. */
- if (VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (init)))
+ if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init)))
return 0;
- len = VEC_length (constructor_elt, CONSTRUCTOR_ELTS (init));
+ len = CONSTRUCTOR_ELTS (init)->length ();
if (len > 1)
{
if (!(complain & tf_error))
return PICFLAG_ERRONEOUS;
error ("too many initializers for %qT", type);
- VEC_block_remove (constructor_elt, CONSTRUCTOR_ELTS (init), 1, len-1);
+ CONSTRUCTOR_ELTS (init)->block_remove (1, len-1);
}
- ce = &VEC_index (constructor_elt, CONSTRUCTOR_ELTS (init), 0);
+ ce = &(*CONSTRUCTOR_ELTS (init))[0];
/* If this element specifies a field, initialize via that field. */
if (ce->index)
tree orig_expr = expr;
tree type = TREE_TYPE (expr);
tree last_rval = NULL_TREE;
- VEC(tree,gc) *types_memoized = NULL;
+ vec<tree, va_gc> *types_memoized = NULL;
if (type == error_mark_node)
return error_mark_node;
return error_mark_node;
}
- VEC_safe_push (tree, gc, types_memoized, TREE_TYPE (expr));
+ vec_safe_push (types_memoized, TREE_TYPE (expr));
last_rval = expr;
}
/* The type to which we are casting. */
tree type;
- VEC(tree,gc) *parmvec;
+ vec<tree, va_gc> *parmvec;
if (exp == error_mark_node || parms == error_mark_node)
return error_mark_node;
/* Call the constructor. */
parmvec = make_tree_vector ();
for (; parms != NULL_TREE; parms = TREE_CHAIN (parms))
- VEC_safe_push (tree, gc, parmvec, TREE_VALUE (parms));
+ vec_safe_push (parmvec, TREE_VALUE (parms));
exp = build_special_member_call (NULL_TREE, complete_ctor_identifier,
&parmvec, type, LOOKUP_NORMAL, complain);
release_tree_vector (parmvec);
};
typedef struct occr *occr_t;
-DEF_VEC_P (occr_t);
-DEF_VEC_ALLOC_P (occr_t, heap);
/* Hash table entry for assignment expressions. */
static unsigned const BITS_PER_BITPACK_WORD = HOST_BITS_PER_WIDE_INT;
typedef unsigned HOST_WIDE_INT bitpack_word_t;
-DEF_VEC_I(bitpack_word_t);
-DEF_VEC_ALLOC_I(bitpack_word_t, heap);
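
Deletions like these two pairs are the bulk of the cleanup: with vec<T> a real template, the per-type `DEF_VEC_*`/`DEF_VEC_ALLOC_*` instantiation boilerplate has no replacement at all. Before and after, as a sketch:

    /* Before: two macros per element type and allocation strategy.
         DEF_VEC_I (bitpack_word_t);
         DEF_VEC_ALLOC_I (bitpack_word_t, heap);
         VEC(bitpack_word_t,heap) *words;  */

    /* After: the template instantiates itself at the point of use.  */
    vec<bitpack_word_t> words;
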
struct bitpack_d
{
{
int i;
tree child;
- VEC(tree,gc) *accesses = BINFO_BASE_ACCESSES (binfo);
+ vec<tree, va_gc> *accesses = BINFO_BASE_ACCESSES (binfo);
if (use_gnu_debug_info_extensions)
{
}
for (i = 0; BINFO_BASE_ITERATE (binfo, i, child); i++)
{
- tree access = (accesses ? VEC_index (tree, accesses, i)
- : access_public_node);
+ tree access = (accesses ? (*accesses)[i] : access_public_node);
if (use_gnu_debug_info_extensions)
{
output_used_types_helper (void **slot, void *data)
{
tree type = (tree) *slot;
- VEC(tree, heap) **types_p = (VEC(tree, heap) **) data;
+ vec<tree> *types_p = (vec<tree> *) data;
if ((TREE_CODE (type) == RECORD_TYPE
|| TREE_CODE (type) == UNION_TYPE
&& TYPE_STUB_DECL (type)
&& DECL_P (TYPE_STUB_DECL (type))
&& ! DECL_IGNORED_P (TYPE_STUB_DECL (type)))
- VEC_quick_push (tree, *types_p, TYPE_STUB_DECL (type));
+ types_p->quick_push (TYPE_STUB_DECL (type));
else if (TYPE_NAME (type)
&& TREE_CODE (TYPE_NAME (type)) == TYPE_DECL)
- VEC_quick_push (tree, *types_p, TYPE_NAME (type));
+ types_p->quick_push (TYPE_NAME (type));
return 1;
}
{
if (cfun && cfun->used_types_hash)
{
- VEC(tree, heap) *types;
+ vec<tree> types;
int i;
tree type;
- types = VEC_alloc (tree, heap, htab_elements (cfun->used_types_hash));
+ types.create (htab_elements (cfun->used_types_hash));
htab_traverse (cfun->used_types_hash, output_used_types_helper, &types);
/* Sort by UID to prevent dependence on hash table ordering. */
- VEC_qsort (tree, types, output_types_sort);
+ types.qsort (output_types_sort);
- FOR_EACH_VEC_ELT (tree, types, i, type)
+ FOR_EACH_VEC_ELT (types, i, type)
debug_queue_symbol (type);
- VEC_free (tree, heap, types);
+ types.release ();
}
}
/* Instructions that have been marked but whose dependencies have not
yet been processed. */
-static VEC(rtx,heap) *worklist;
+static vec<rtx> worklist;
/* Bitmap of instructions marked as needed indexed by INSN_UID. */
static sbitmap marked;
if (!marked_insn_p (insn))
{
if (!fast)
- VEC_safe_push (rtx, heap, worklist, insn);
+ worklist.safe_push (insn);
bitmap_set_bit (marked, INSN_UID (insn));
if (dump_file)
fprintf (dump_file, " Adding insn %d to worklist\n", INSN_UID (insn));
prescan_insns_for_dce (false);
mark_artificial_uses ();
- while (VEC_length (rtx, worklist) > 0)
+ while (worklist.length () > 0)
{
- insn = VEC_pop (rtx, worklist);
+ insn = worklist.pop ();
mark_reg_dependencies (insn);
}
- VEC_free (rtx, heap, worklist);
+ worklist.release ();
if (MAY_HAVE_DEBUG_INSNS)
reset_unmarked_insns_debug_uses ();
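
The dce.c conversion above is the standard worklist drain; note that a heap `vec` is not garbage collected, so the explicit `release` call remains mandatory. A condensed sketch of the loop, reusing `mark_reg_dependencies` from this file:

    static vec<rtx> worklist;          /* was: static VEC(rtx,heap) *worklist; */

    static void
    drain_worklist (void)
    {
      while (worklist.length () > 0)   /* was: VEC_length (rtx, worklist) > 0 */
        {
          rtx insn = worklist.pop ();  /* was: VEC_pop (rtx, worklist) */
          mark_reg_dependencies (insn);
        }
      worklist.release ();             /* was: VEC_free (rtx, heap, worklist) */
    }
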
bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
int age = 0;
bool changed;
- VEC(int, heap) *last_visit_age = NULL;
+ vec<int> last_visit_age = vec<int>();
int prev_age;
basic_block bb;
int i;
- VEC_safe_grow_cleared (int, heap, last_visit_age, n_blocks);
+ last_visit_age.safe_grow_cleared (n_blocks);
/* Double-queueing. Worklist is for the current iteration,
and pending is for the next. */
bitmap_clear_bit (pending, index);
bb_index = blocks_in_postorder[index];
bb = BASIC_BLOCK (bb_index);
- prev_age = VEC_index (int, last_visit_age, index);
+ prev_age = last_visit_age[index];
if (dir == DF_FORWARD)
changed = df_worklist_propagate_forward (dataflow, bb_index,
bbindex_to_postorder,
bbindex_to_postorder,
pending, considered,
prev_age);
- VEC_replace (int, last_visit_age, index, ++age);
+ last_visit_age[index] = ++age;
if (changed)
bb->aux = (void *)(ptrdiff_t)age;
}
BITMAP_FREE (worklist);
BITMAP_FREE (pending);
- VEC_free (int, heap, last_visit_age);
+ last_visit_age.release ();
/* Dump statistics. */
if (dump_file)
#include "df.h"
#include "except.h"
#include "dce.h"
-#include "vecprim.h"
#include "valtrack.h"
#include "dumpfile.h"
#include "df.h"
#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
-DEF_VEC_P(df_ref);
-DEF_VEC_ALLOC_P_STACK(df_ref);
-
-#define VEC_df_ref_stack_alloc(alloc) VEC_stack_alloc (df_ref, alloc)
typedef struct df_mw_hardreg *df_mw_hardreg_ptr;
-DEF_VEC_P(df_mw_hardreg_ptr);
-DEF_VEC_ALLOC_P_STACK(df_mw_hardreg_ptr);
-
-#define VEC_df_mw_hardreg_ptr_stack_alloc(alloc) \
- VEC_stack_alloc (df_mw_hardreg_ptr, alloc)
#ifndef HAVE_epilogue
#define HAVE_epilogue 0
struct df_collection_rec
{
- VEC(df_ref,stack) *def_vec;
- VEC(df_ref,stack) *use_vec;
- VEC(df_ref,stack) *eq_use_vec;
- VEC(df_mw_hardreg_ptr,stack) *mw_vec;
+ vec<df_ref, va_stack> def_vec;
+ vec<df_ref, va_stack> use_vec;
+ vec<df_ref, va_stack> eq_use_vec;
+ vec<df_mw_hardreg_ptr, va_stack> mw_vec;
};
static df_ref df_null_ref_rec[1];
df_ref ref;
struct df_mw_hardreg *mw;
- FOR_EACH_VEC_ELT (df_ref, collection_rec->def_vec, ix, ref)
+ FOR_EACH_VEC_ELT (collection_rec->def_vec, ix, ref)
df_free_ref (ref);
- FOR_EACH_VEC_ELT (df_ref, collection_rec->use_vec, ix, ref)
+ FOR_EACH_VEC_ELT (collection_rec->use_vec, ix, ref)
df_free_ref (ref);
- FOR_EACH_VEC_ELT (df_ref, collection_rec->eq_use_vec, ix, ref)
+ FOR_EACH_VEC_ELT (collection_rec->eq_use_vec, ix, ref)
df_free_ref (ref);
- FOR_EACH_VEC_ELT (df_mw_hardreg_ptr, collection_rec->mw_vec, ix, mw)
+ FOR_EACH_VEC_ELT (collection_rec->mw_vec, ix, mw)
pool_free (problem_data->mw_reg_pool, mw);
- VEC_free (df_ref, stack, collection_rec->def_vec);
- VEC_free (df_ref, stack, collection_rec->use_vec);
- VEC_free (df_ref, stack, collection_rec->eq_use_vec);
- VEC_free (df_mw_hardreg_ptr, stack, collection_rec->mw_vec);
+ collection_rec->def_vec.release ();
+ collection_rec->use_vec.release ();
+ collection_rec->eq_use_vec.release ();
+ collection_rec->mw_vec.release ();
}
/* Rescan INSN. Return TRUE if the rescanning produced any changes. */
return false;
}
- collection_rec.def_vec = VEC_alloc (df_ref, stack, 128);
- collection_rec.use_vec = VEC_alloc (df_ref, stack, 32);
- collection_rec.eq_use_vec = VEC_alloc (df_ref, stack, 32);
- collection_rec.mw_vec = VEC_alloc (df_mw_hardreg_ptr, stack, 32);
+ vec_stack_alloc (df_ref, collection_rec.def_vec, 128);
+ vec_stack_alloc (df_ref, collection_rec.use_vec, 32);
+ vec_stack_alloc (df_ref, collection_rec.eq_use_vec, 32);
+ vec_stack_alloc (df_mw_hardreg_ptr, collection_rec.mw_vec, 32);
bitmap_clear_bit (&df->insns_to_delete, uid);
bitmap_clear_bit (&df->insns_to_rescan, uid);
if (!DEBUG_INSN_P (insn))
df_set_bb_dirty (bb);
- VEC_free (df_ref, stack, collection_rec.def_vec);
- VEC_free (df_ref, stack, collection_rec.use_vec);
- VEC_free (df_ref, stack, collection_rec.eq_use_vec);
- VEC_free (df_mw_hardreg_ptr, stack, collection_rec.mw_vec);
+ collection_rec.def_vec.release ();
+ collection_rec.use_vec.release ();
+ collection_rec.eq_use_vec.release ();
+ collection_rec.mw_vec.release ();
return true;
}
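
Stack vectors keep a distinct creation path after the rewrite: the object is declared by value with the va_stack strategy, its initial space comes from the `vec_stack_alloc` macro (still a macro because it takes the element type), and `release` hands the storage back. A minimal sketch; the pushed value is a placeholder:

    vec<df_ref, va_stack> refs;

    vec_stack_alloc (df_ref, refs, 32); /* was: refs = VEC_alloc (df_ref, stack, 32) */
    refs.safe_push (NULL);              /* placeholder element */
    refs.truncate (0);                  /* was: VEC_truncate (df_ref, stack, refs, 0) */
    refs.release ();                    /* was: VEC_free (df_ref, stack, refs) */
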
unsigned int mw_len;
memset (&collection_rec, 0, sizeof (struct df_collection_rec));
- collection_rec.eq_use_vec = VEC_alloc (df_ref, stack, 32);
- collection_rec.mw_vec = VEC_alloc (df_mw_hardreg_ptr, stack, 32);
+ vec_stack_alloc (df_ref, collection_rec.eq_use_vec, 32);
+ vec_stack_alloc (df_mw_hardreg_ptr, collection_rec.mw_vec, 32);
num_deleted = df_mw_hardreg_chain_delete_eq_uses (insn_info);
df_ref_chain_delete (insn_info->eq_uses);
/* Find some place to put any new mw_hardregs. */
df_canonize_collection_rec (&collection_rec);
- mw_len = VEC_length (df_mw_hardreg_ptr, collection_rec.mw_vec);
+ mw_len = collection_rec.mw_vec.length ();
if (mw_len)
{
unsigned int count = 0;
count + 1 + mw_len);
}
memcpy (&insn_info->mw_hardregs[count],
- VEC_address (df_mw_hardreg_ptr, collection_rec.mw_vec),
+ collection_rec.mw_vec.address (),
mw_len * sizeof (struct df_mw_hardreg *));
insn_info->mw_hardregs[count + mw_len] = NULL;
qsort (insn_info->mw_hardregs, count + mw_len,
insn_info->mw_hardregs
= XNEWVEC (struct df_mw_hardreg*, 1 + mw_len);
memcpy (insn_info->mw_hardregs,
- VEC_address (df_mw_hardreg_ptr, collection_rec.mw_vec),
+ collection_rec.mw_vec.address (),
mw_len * sizeof (struct df_mw_hardreg *));
insn_info->mw_hardregs[mw_len] = NULL;
}
}
/* Get rid of the mw_rec so that df_refs_add_to_chains will
ignore it. */
- VEC_free (df_mw_hardreg_ptr, stack, collection_rec.mw_vec);
+ collection_rec.mw_vec.release ();
df_refs_add_to_chains (&collection_rec, bb, insn);
- VEC_free (df_ref, stack, collection_rec.eq_use_vec);
+ collection_rec.eq_use_vec.release ();
}
else
df_insn_rescan (insn);
}
static void
-df_swap_refs (VEC(df_ref,stack) **ref_vec, int i, int j)
+df_swap_refs (vec<df_ref, va_stack> *ref_vec, int i, int j)
{
- df_ref tmp = VEC_index (df_ref, *ref_vec, i);
- VEC_replace (df_ref, *ref_vec, i, VEC_index (df_ref, *ref_vec, j));
- VEC_replace (df_ref, *ref_vec, j, tmp);
+ df_ref tmp = (*ref_vec)[i];
+ (*ref_vec)[i] = (*ref_vec)[j];
+ (*ref_vec)[j] = tmp;
}
/* Sort and compress a set of refs. */
static void
-df_sort_and_compress_refs (VEC(df_ref,stack) **ref_vec)
+df_sort_and_compress_refs (vec<df_ref, va_stack> *ref_vec)
{
unsigned int count;
unsigned int i;
unsigned int dist = 0;
- count = VEC_length (df_ref, *ref_vec);
+ count = ref_vec->length ();
/* If there are 1 or 0 elements, there is nothing to do. */
if (count < 2)
return;
else if (count == 2)
{
- df_ref r0 = VEC_index (df_ref, *ref_vec, 0);
- df_ref r1 = VEC_index (df_ref, *ref_vec, 1);
+ df_ref r0 = (*ref_vec)[0];
+ df_ref r1 = (*ref_vec)[1];
if (df_ref_compare (&r0, &r1) > 0)
df_swap_refs (ref_vec, 0, 1);
}
{
for (i = 0; i < count - 1; i++)
{
- df_ref r0 = VEC_index (df_ref, *ref_vec, i);
- df_ref r1 = VEC_index (df_ref, *ref_vec, i + 1);
+ df_ref r0 = (*ref_vec)[i];
+ df_ref r1 = (*ref_vec)[i + 1];
if (df_ref_compare (&r0, &r1) >= 0)
break;
}
of DF_REF_COMPARE. */
if (i == count - 1)
return;
- VEC_qsort (df_ref, *ref_vec, df_ref_compare);
+ ref_vec->qsort (df_ref_compare);
}
for (i=0; i<count-dist; i++)
{
/* Find the next ref that is not equal to the current ref. */
while (i + dist + 1 < count
- && df_ref_equal_p (VEC_index (df_ref, *ref_vec, i),
- VEC_index (df_ref, *ref_vec, i + dist + 1)))
+ && df_ref_equal_p ((*ref_vec)[i],
+ (*ref_vec)[i + dist + 1]))
{
- df_free_ref (VEC_index (df_ref, *ref_vec, i + dist + 1));
+ df_free_ref ((*ref_vec)[i + dist + 1]);
dist++;
}
/* Copy it down to the next position. */
if (dist && i + dist + 1 < count)
- VEC_replace (df_ref, *ref_vec, i + 1,
- VEC_index (df_ref, *ref_vec, i + dist + 1));
+ (*ref_vec)[i + 1] = (*ref_vec)[i + dist + 1];
}
count -= dist;
- VEC_truncate (df_ref, *ref_vec, count);
+ ref_vec->truncate (count);
}
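
`VEC_qsort (T, v, cmp)` becomes the member call `v.qsort (cmp)`; the comparator keeps the C qsort signature, which is why the code above takes the addresses of local copies when calling `df_ref_compare` directly. A minimal sketch reusing that comparator:

    static void
    sort_refs (vec<df_ref, va_stack> *ref_vec)
    {
      if (ref_vec->length () > 1)
        ref_vec->qsort (df_ref_compare);  /* was: VEC_qsort (df_ref, *ref_vec, ...) */
    }
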
/* Sort and compress a set of refs. */
static void
-df_sort_and_compress_mws (VEC(df_mw_hardreg_ptr,stack) **mw_vec)
+df_sort_and_compress_mws (vec<df_mw_hardreg_ptr, va_stack> *mw_vec)
{
unsigned int count;
struct df_scan_problem_data *problem_data
unsigned int i;
unsigned int dist = 0;
- count = VEC_length (df_mw_hardreg_ptr, *mw_vec);
+ count = mw_vec->length ();
if (count < 2)
return;
else if (count == 2)
{
- struct df_mw_hardreg *m0 = VEC_index (df_mw_hardreg_ptr, *mw_vec, 0);
- struct df_mw_hardreg *m1 = VEC_index (df_mw_hardreg_ptr, *mw_vec, 1);
+ struct df_mw_hardreg *m0 = (*mw_vec)[0];
+ struct df_mw_hardreg *m1 = (*mw_vec)[1];
if (df_mw_compare (&m0, &m1) > 0)
{
- struct df_mw_hardreg *tmp = VEC_index (df_mw_hardreg_ptr,
- *mw_vec, 0);
- VEC_replace (df_mw_hardreg_ptr, *mw_vec, 0,
- VEC_index (df_mw_hardreg_ptr, *mw_vec, 1));
- VEC_replace (df_mw_hardreg_ptr, *mw_vec, 1, tmp);
+ struct df_mw_hardreg *tmp = (*mw_vec)[0];
+ (*mw_vec)[0] = (*mw_vec)[1];
+ (*mw_vec)[1] = tmp;
}
}
else
- VEC_qsort (df_mw_hardreg_ptr, *mw_vec, df_mw_compare);
+ mw_vec->qsort (df_mw_compare);
for (i=0; i<count-dist; i++)
{
/* Find the next ref that is not equal to the current ref. */
while (i + dist + 1 < count
- && df_mw_equal_p (VEC_index (df_mw_hardreg_ptr, *mw_vec, i),
- VEC_index (df_mw_hardreg_ptr, *mw_vec,
- i + dist + 1)))
+ && df_mw_equal_p ((*mw_vec)[i], (*mw_vec)[i + dist + 1]))
{
pool_free (problem_data->mw_reg_pool,
- VEC_index (df_mw_hardreg_ptr, *mw_vec, i + dist + 1));
+ (*mw_vec)[i + dist + 1]);
dist++;
}
/* Copy it down to the next position. */
if (dist && i + dist + 1 < count)
- VEC_replace (df_mw_hardreg_ptr, *mw_vec, i + 1,
- VEC_index (df_mw_hardreg_ptr, *mw_vec, i + dist + 1));
+ (*mw_vec)[i + 1] = (*mw_vec)[i + dist + 1];
}
count -= dist;
- VEC_truncate (df_mw_hardreg_ptr, *mw_vec, count);
+ mw_vec->truncate (count);
}
static df_ref *
df_install_refs (basic_block bb,
- VEC(df_ref,stack)* old_vec,
+ vec<df_ref, va_stack> old_vec,
struct df_reg_info **reg_info,
struct df_ref_info *ref_info,
bool is_notes)
{
unsigned int count;
- count = VEC_length (df_ref, old_vec);
+ count = old_vec.length ();
if (count)
{
df_ref *new_vec = XNEWVEC (df_ref, count + 1);
if (add_to_table && df->analyze_subset)
add_to_table = bitmap_bit_p (df->blocks_to_analyze, bb->index);
- FOR_EACH_VEC_ELT (df_ref, old_vec, ix, this_ref)
+ FOR_EACH_VEC_ELT (old_vec, ix, this_ref)
{
new_vec[ix] = this_ref;
df_install_ref (this_ref, reg_info[DF_REF_REGNO (this_ref)],
insn. */
static struct df_mw_hardreg **
-df_install_mws (VEC(df_mw_hardreg_ptr,stack) *old_vec)
+df_install_mws (vec<df_mw_hardreg_ptr, va_stack> old_vec)
{
unsigned int count;
- count = VEC_length (df_mw_hardreg_ptr, old_vec);
+ count = old_vec.length ();
if (count)
{
struct df_mw_hardreg **new_vec
= XNEWVEC (struct df_mw_hardreg*, count + 1);
- memcpy (new_vec, VEC_address (df_mw_hardreg_ptr, old_vec),
+ memcpy (new_vec, old_vec.address (),
sizeof (struct df_mw_hardreg*) * count);
new_vec[count] = NULL;
return new_vec;
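
`df_install_refs` and `df_install_mws` now take their vectors by value; with the vl_ptr layout a `vec` object is essentially one pointer, so the copy is cheap, and `exists ()` replaces the old null-pointer test. A sketch of the calling convention; `count_refs` is hypothetical:

    static unsigned
    count_refs (vec<df_ref, va_stack> old_vec)  /* by value: copies one pointer */
    {
      if (!old_vec.exists ())                   /* was: old_vec == NULL */
        return 0;
      return old_vec.length ();
    }
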
/* If there is a vector in the collection rec, add it to the
insn. A null rec is a signal that the caller will handle the
chain specially. */
- if (collection_rec->def_vec)
+ if (collection_rec->def_vec.exists ())
{
df_scan_free_ref_vec (insn_rec->defs);
insn_rec->defs
df->def_regs,
&df->def_info, false);
}
- if (collection_rec->use_vec)
+ if (collection_rec->use_vec.exists ())
{
df_scan_free_ref_vec (insn_rec->uses);
insn_rec->uses
df->use_regs,
&df->use_info, false);
}
- if (collection_rec->eq_use_vec)
+ if (collection_rec->eq_use_vec.exists ())
{
df_scan_free_ref_vec (insn_rec->eq_uses);
insn_rec->eq_uses
df->eq_use_regs,
&df->use_info, true);
}
- if (collection_rec->mw_vec)
+ if (collection_rec->mw_vec.exists ())
{
df_scan_free_mws_vec (insn_rec->mw_hardregs);
insn_rec->mw_hardregs
if (collection_rec)
{
if (DF_REF_REG_DEF_P (this_ref))
- VEC_safe_push (df_ref, stack, collection_rec->def_vec, this_ref);
+ collection_rec->def_vec.safe_push (this_ref);
else if (DF_REF_FLAGS (this_ref) & DF_REF_IN_NOTE)
- VEC_safe_push (df_ref, stack, collection_rec->eq_use_vec, this_ref);
+ collection_rec->eq_use_vec.safe_push (this_ref);
else
- VEC_safe_push (df_ref, stack, collection_rec->use_vec, this_ref);
+ collection_rec->use_vec.safe_push (this_ref);
}
else
df_install_ref_incremental (this_ref);
hardreg->start_regno = regno;
hardreg->end_regno = endregno - 1;
hardreg->mw_order = df->ref_order++;
- VEC_safe_push (df_mw_hardreg_ptr, stack, collection_rec->mw_vec,
- hardreg);
+ collection_rec->mw_vec.safe_push (hardreg);
}
for (i = regno; i < endregno; i++)
unsigned int ix;
df_ref ref;
- FOR_EACH_VEC_ELT (df_ref, collection_rec->def_vec, ix, ref)
+ FOR_EACH_VEC_ELT (collection_rec->def_vec, ix, ref)
{
if (DF_REF_FLAGS_IS_SET (ref, DF_REF_CONDITIONAL))
{
bool is_cond_exec = (GET_CODE (PATTERN (insn_info->insn)) == COND_EXEC);
/* Clear out the collection record. */
- VEC_truncate (df_ref, collection_rec->def_vec, 0);
- VEC_truncate (df_ref, collection_rec->use_vec, 0);
- VEC_truncate (df_ref, collection_rec->eq_use_vec, 0);
- VEC_truncate (df_mw_hardreg_ptr, collection_rec->mw_vec, 0);
+ collection_rec->def_vec.truncate (0);
+ collection_rec->use_vec.truncate (0);
+ collection_rec->eq_use_vec.truncate (0);
+ collection_rec->mw_vec.truncate (0);
/* Process REG_EQUIV/REG_EQUAL notes. */
for (note = REG_NOTES (insn_info->insn); note;
static void
df_bb_refs_collect (struct df_collection_rec *collection_rec, basic_block bb)
{
- VEC_truncate (df_ref, collection_rec->def_vec, 0);
- VEC_truncate (df_ref, collection_rec->use_vec, 0);
- VEC_truncate (df_ref, collection_rec->eq_use_vec, 0);
- VEC_truncate (df_mw_hardreg_ptr, collection_rec->mw_vec, 0);
+ collection_rec->def_vec.truncate (0);
+ collection_rec->use_vec.truncate (0);
+ collection_rec->eq_use_vec.truncate (0);
+ collection_rec->mw_vec.truncate (0);
if (bb->index == ENTRY_BLOCK)
{
return;
df_grow_bb_info (df_scan);
- collection_rec.def_vec = VEC_alloc (df_ref, stack, 128);
- collection_rec.use_vec = VEC_alloc (df_ref, stack, 32);
- collection_rec.eq_use_vec = VEC_alloc (df_ref, stack, 32);
- collection_rec.mw_vec = VEC_alloc (df_mw_hardreg_ptr, stack, 32);
+ vec_stack_alloc (df_ref, collection_rec.def_vec, 128);
+ vec_stack_alloc (df_ref, collection_rec.use_vec, 32);
+ vec_stack_alloc (df_ref, collection_rec.eq_use_vec, 32);
+ vec_stack_alloc (df_mw_hardreg_ptr, collection_rec.mw_vec, 32);
if (scan_insns)
/* Scan the block an insn at a time from beginning to end. */
df_bb_refs_collect (&collection_rec, bb);
df_refs_add_to_chains (&collection_rec, bb, NULL);
- VEC_free (df_ref, stack, collection_rec.def_vec);
- VEC_free (df_ref, stack, collection_rec.use_vec);
- VEC_free (df_ref, stack, collection_rec.eq_use_vec);
- VEC_free (df_mw_hardreg_ptr, stack, collection_rec.mw_vec);
+ collection_rec.def_vec.release ();
+ collection_rec.use_vec.release ();
+ collection_rec.eq_use_vec.release ();
+ collection_rec.mw_vec.release ();
/* Now that the block has been processed, set the block as dirty so
LR and LIVE will get it processed. */
{
struct df_collection_rec collection_rec;
memset (&collection_rec, 0, sizeof (struct df_collection_rec));
- collection_rec.def_vec = VEC_alloc (df_ref, stack, FIRST_PSEUDO_REGISTER);
+ vec_stack_alloc (df_ref, collection_rec.def_vec, FIRST_PSEUDO_REGISTER);
df_entry_block_defs_collect (&collection_rec, entry_block_defs);
/* Process bb_refs chain */
df_refs_add_to_chains (&collection_rec, BASIC_BLOCK (ENTRY_BLOCK), NULL);
- VEC_free (df_ref, stack, collection_rec.def_vec);
+ collection_rec.def_vec.release ();
}
{
struct df_collection_rec collection_rec;
memset (&collection_rec, 0, sizeof (struct df_collection_rec));
- collection_rec.use_vec = VEC_alloc (df_ref, stack, FIRST_PSEUDO_REGISTER);
-
+ vec_stack_alloc (df_ref, collection_rec.use_vec, FIRST_PSEUDO_REGISTER);
df_exit_block_uses_collect (&collection_rec, exit_block_uses);
/* Process bb_refs chain */
df_refs_add_to_chains (&collection_rec, BASIC_BLOCK (EXIT_BLOCK), NULL);
- VEC_free (df_ref, stack, collection_rec.use_vec);
+ collection_rec.use_vec.release ();
}
df_reg_chain_mark (refs, regno, is_def, is_eq_use)
df_reg_chain_verify_unmarked (refs)
- df_refs_verify (VEC(stack,df_ref)*, ref*, bool)
+ df_refs_verify (vec<df_ref, va_stack>, ref*, bool)
df_mws_verify (mw*, mw*, bool)
df_insn_refs_verify (collection_rec, bb, insn, bool)
df_bb_refs_verify (bb, refs, bool)
/* Verify that NEW_REC and OLD_REC have exactly the same members. */
static bool
-df_refs_verify (VEC(df_ref,stack) *new_rec, df_ref *old_rec,
+df_refs_verify (vec<df_ref, va_stack> new_rec, df_ref *old_rec,
bool abort_if_fail)
{
unsigned int ix;
df_ref new_ref;
- FOR_EACH_VEC_ELT (df_ref, new_rec, ix, new_ref)
+ FOR_EACH_VEC_ELT (new_rec, ix, new_ref)
{
if (*old_rec == NULL || !df_ref_equal_p (new_ref, *old_rec))
{
/* Verify that NEW_REC and OLD_REC have exactly the same members. */
static bool
-df_mws_verify (VEC(df_mw_hardreg_ptr,stack) *new_rec,
+df_mws_verify (vec<df_mw_hardreg_ptr, va_stack> new_rec,
struct df_mw_hardreg **old_rec,
bool abort_if_fail)
{
unsigned int ix;
struct df_mw_hardreg *new_reg;
- FOR_EACH_VEC_ELT (df_mw_hardreg_ptr, new_rec, ix, new_reg)
+ FOR_EACH_VEC_ELT (new_rec, ix, new_reg)
{
if (*old_rec == NULL || !df_mw_equal_p (new_reg, *old_rec))
{
struct df_collection_rec collection_rec;
memset (&collection_rec, 0, sizeof (struct df_collection_rec));
- collection_rec.def_vec = VEC_alloc (df_ref, stack, 128);
- collection_rec.use_vec = VEC_alloc (df_ref, stack, 32);
- collection_rec.eq_use_vec = VEC_alloc (df_ref, stack, 32);
- collection_rec.mw_vec = VEC_alloc (df_mw_hardreg_ptr, stack, 32);
+ vec_stack_alloc (df_ref, collection_rec.def_vec, 128);
+ vec_stack_alloc (df_ref, collection_rec.use_vec, 32);
+ vec_stack_alloc (df_ref, collection_rec.eq_use_vec, 32);
+ vec_stack_alloc (df_mw_hardreg_ptr, collection_rec.mw_vec, 32);
gcc_assert (bb_info);
#include "diagnostic-core.h"
#include "et-forest.h"
#include "timevar.h"
-#include "vecprim.h"
#include "pointer-set.h"
#include "graphds.h"
#include "bitmap.h"
/* Returns the list of basic blocks immediately dominated by BB, in the
direction DIR. */
-VEC (basic_block, heap) *
+vec<basic_block>
get_dominated_by (enum cdi_direction dir, basic_block bb)
{
unsigned int dir_index = dom_convert_dir_to_idx (dir);
struct et_node *node = bb->dom[dir_index], *son = node->son, *ason;
- VEC (basic_block, heap) *bbs = NULL;
+ vec<basic_block> bbs = vec<basic_block>();
gcc_checking_assert (dom_computed[dir_index]);
if (!son)
- return NULL;
+ return vec<basic_block>();
- VEC_safe_push (basic_block, heap, bbs, (basic_block) son->data);
+ bbs.safe_push ((basic_block) son->data);
for (ason = son->right; ason != son; ason = ason->right)
- VEC_safe_push (basic_block, heap, bbs, (basic_block) ason->data);
+ bbs.safe_push ((basic_block) ason->data);
return bbs;
}
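
Functions that returned `VEC(basic_block,heap) *` now return the `vec` by value; the empty result is spelled `vec<basic_block>()` rather than NULL, and the caller still owns and must release the storage. A usage sketch against `get_dominated_by` as converted above; `visit_dominated` is hypothetical:

    static void
    visit_dominated (basic_block bb)
    {
      vec<basic_block> bbs = get_dominated_by (CDI_DOMINATORS, bb);
      unsigned i;
      basic_block son;

      FOR_EACH_VEC_ELT (bbs, i, son)
        ;                    /* visit each immediately dominated block */
      bbs.release ();        /* was: VEC_free (basic_block, heap, bbs) */
    }
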
direction DIR) by some block between N_REGION ones stored in REGION,
except for blocks in the REGION itself. */
-VEC (basic_block, heap) *
+vec<basic_block>
get_dominated_by_region (enum cdi_direction dir, basic_block *region,
unsigned n_region)
{
unsigned i;
basic_block dom;
- VEC (basic_block, heap) *doms = NULL;
+ vec<basic_block> doms = vec<basic_block>();
for (i = 0; i < n_region; i++)
region[i]->flags |= BB_DUPLICATED;
dom;
dom = next_dom_son (dir, dom))
if (!(dom->flags & BB_DUPLICATED))
- VEC_safe_push (basic_block, heap, doms, dom);
+ doms.safe_push (dom);
for (i = 0; i < n_region; i++)
region[i]->flags &= ~BB_DUPLICATED;
produce a vector containing all dominated blocks. The vector will be sorted
in preorder. */
-VEC (basic_block, heap) *
+vec<basic_block>
get_dominated_to_depth (enum cdi_direction dir, basic_block bb, int depth)
{
- VEC(basic_block, heap) *bbs = NULL;
+ vec<basic_block> bbs = vec<basic_block>();
unsigned i;
unsigned next_level_start;
i = 0;
- VEC_safe_push (basic_block, heap, bbs, bb);
- next_level_start = 1; /* = VEC_length (basic_block, bbs); */
+ bbs.safe_push (bb);
+ next_level_start = 1; /* = bbs.length (); */
do
{
basic_block son;
- bb = VEC_index (basic_block, bbs, i++);
+ bb = bbs[i++];
for (son = first_dom_son (dir, bb);
son;
son = next_dom_son (dir, son))
- VEC_safe_push (basic_block, heap, bbs, son);
+ bbs.safe_push (son);
if (i == next_level_start && --depth)
- next_level_start = VEC_length (basic_block, bbs);
+ next_level_start = bbs.length ();
}
while (i < next_level_start);
/* Returns the list of basic blocks including BB dominated by BB, in the
direction DIR. The vector will be sorted in preorder. */
-VEC (basic_block, heap) *
+vec<basic_block>
get_all_dominated_blocks (enum cdi_direction dir, basic_block bb)
{
return get_dominated_to_depth (dir, bb, 0);
from BBS. */
static void
-prune_bbs_to_update_dominators (VEC (basic_block, heap) *bbs,
+prune_bbs_to_update_dominators (vec<basic_block> bbs,
bool conservative)
{
unsigned i;
edge_iterator ei;
edge e;
- for (i = 0; VEC_iterate (basic_block, bbs, i, bb);)
+ for (i = 0; bbs.iterate (i, &bb);)
{
if (bb == ENTRY_BLOCK_PTR)
goto succeed;
continue;
succeed:
- VEC_unordered_remove (basic_block, bbs, i);
+ bbs.unordered_remove (i);
}
}
blocks. */
static void
-determine_dominators_for_sons (struct graph *g, VEC (basic_block, heap) *bbs,
+determine_dominators_for_sons (struct graph *g, vec<basic_block> bbs,
int y, int *son, int *brother)
{
bitmap gprime;
int i, a, nc;
- VEC (int, heap) **sccs;
+ vec<int> *sccs;
basic_block bb, dom, ybb;
unsigned si;
edge e;
if (son[y] == -1)
return;
- if (y == (int) VEC_length (basic_block, bbs))
+ if (y == (int) bbs.length ())
ybb = ENTRY_BLOCK_PTR;
else
- ybb = VEC_index (basic_block, bbs, y);
+ ybb = bbs[y];
if (brother[son[y]] == -1)
{
/* Handle the common case Y has just one son specially. */
- bb = VEC_index (basic_block, bbs, son[y]);
+ bb = bbs[son[y]];
set_immediate_dominator (CDI_DOMINATORS, bb,
recompute_dominator (CDI_DOMINATORS, bb));
identify_vertices (g, y, son[y]);
nc = graphds_scc (g, gprime);
BITMAP_FREE (gprime);
- sccs = XCNEWVEC (VEC (int, heap) *, nc);
+ /* ??? Needed to work around the pre-processor confusion with
+ using a multi-argument template type as macro argument. */
+ typedef vec<int> vec_int_heap;
+ sccs = XCNEWVEC (vec_int_heap, nc);
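
The typedef above is a workaround worth copying at similar call sites: the preprocessor splits a template argument list at its comma, so a two-parameter spelling inside a macro argument is parsed as two arguments. A sketch of the hazard, assuming libiberty's XCNEWVEC; `sccs2` is hypothetical:

    /* Would not preprocess as intended: the comma inside <> is taken
       as a macro argument separator.
         sccs = XCNEWVEC (vec<int, va_heap>, nc);  */
    typedef vec<int> vec_int_heap;                    /* comma-free alias */
    vec_int_heap *sccs2 = XCNEWVEC (vec_int_heap, nc);
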
for (a = son[y]; a != -1; a = brother[a])
- VEC_safe_push (int, heap, sccs[g->vertices[a].component], a);
+ sccs[g->vertices[a].component].safe_push (a);
for (i = nc - 1; i >= 0; i--)
{
dom = NULL;
- FOR_EACH_VEC_ELT (int, sccs[i], si, a)
+ FOR_EACH_VEC_ELT (sccs[i], si, a)
{
- bb = VEC_index (basic_block, bbs, a);
+ bb = bbs[a];
FOR_EACH_EDGE (e, ei, bb->preds)
{
if (root_of_dom_tree (CDI_DOMINATORS, e->src) != ybb)
}
gcc_assert (dom != NULL);
- FOR_EACH_VEC_ELT (int, sccs[i], si, a)
+ FOR_EACH_VEC_ELT (sccs[i], si, a)
{
- bb = VEC_index (basic_block, bbs, a);
+ bb = bbs[a];
set_immediate_dominator (CDI_DOMINATORS, bb, dom);
}
}
for (i = 0; i < nc; i++)
- VEC_free (int, heap, sccs[i]);
+ sccs[i].release ();
free (sccs);
for (a = son[y]; a != -1; a = brother[a])
a block of BBS in the current dominance tree dominate it. */
void
-iterate_fix_dominators (enum cdi_direction dir, VEC (basic_block, heap) *bbs,
+iterate_fix_dominators (enum cdi_direction dir, vec<basic_block> bbs,
bool conservative)
{
unsigned i;
conservatively correct, setting the dominators using the
heuristics in prune_bbs_to_update_dominators could
create cycles in the dominance "tree", and cause ICE. */
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
set_immediate_dominator (CDI_DOMINATORS, bb, NULL);
}
prune_bbs_to_update_dominators (bbs, conservative);
- n = VEC_length (basic_block, bbs);
+ n = bbs.length ();
if (n == 0)
return;
if (n == 1)
{
- bb = VEC_index (basic_block, bbs, 0);
+ bb = bbs[0];
set_immediate_dominator (CDI_DOMINATORS, bb,
recompute_dominator (CDI_DOMINATORS, bb));
return;
/* Construct the graph G. */
map = pointer_map_create ();
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
{
/* If the dominance tree is conservatively correct, split it now. */
if (conservative)
g = new_graph (n + 1);
for (y = 0; y < g->n_vertices; y++)
g->vertices[y].data = BITMAP_ALLOC (NULL);
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
{
FOR_EACH_EDGE (e, ei, bb->preds)
{
/* First get some local data, reusing any local data
pointer we may have saved. */
- if (VEC_length (void_p, walk_data->free_block_data) > 0)
+ if (walk_data->free_block_data.length () > 0)
{
- bd = VEC_pop (void_p, walk_data->free_block_data);
+ bd = walk_data->free_block_data.pop ();
recycled = 1;
}
else
}
/* Push the local data into the local data stack. */
- VEC_safe_push (void_p, heap, walk_data->block_data_stack, bd);
+ walk_data->block_data_stack.safe_push (bd);
/* Call the initializer. */
walk_data->initialize_block_local_data (walk_data, bb,
if (walk_data->initialize_block_local_data)
{
/* And finally pop the record off the block local data stack. */
- bd = VEC_pop (void_p, walk_data->block_data_stack);
+ bd = walk_data->block_data_stack.pop ();
/* And save the block data so that we can re-use it. */
- VEC_safe_push (void_p, heap, walk_data->free_block_data, bd);
+ walk_data->free_block_data.safe_push (bd);
}
}
if (sp)
void
init_walk_dominator_tree (struct dom_walk_data *walk_data)
{
- walk_data->free_block_data = NULL;
- walk_data->block_data_stack = NULL;
+ walk_data->free_block_data.create (0);
+ walk_data->block_data_stack.create (0);
}
void
{
if (walk_data->initialize_block_local_data)
{
- while (VEC_length (void_p, walk_data->free_block_data) > 0)
- free (VEC_pop (void_p, walk_data->free_block_data));
+ while (walk_data->free_block_data.length () > 0)
+ free (walk_data->free_block_data.pop ());
}
- VEC_free (void_p, heap, walk_data->free_block_data);
- VEC_free (void_p, heap, walk_data->block_data_stack);
+ walk_data->free_block_data.release ();
+ walk_data->block_data_stack.release ();
}
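
Embedding a by-value `vec` in a long-lived struct replaces the old `= NULL` initialization with an explicit `create`/`release` pair; `create (0)` yields a valid empty vector without allocating. A condensed sketch of the domwalk lifecycle above; `walk_state` is hypothetical:

    struct walk_state
    {
      vec<void_p> data_stack;      /* was: VEC(void_p,heap) *data_stack; */
    };

    static void
    init_and_fini (struct walk_state *w)
    {
      w->data_stack.create (0);    /* was: w->data_stack = NULL; */
      w->data_stack.release ();    /* was: VEC_free (void_p, heap, w->data_stack) */
    }
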
<http://www.gnu.org/licenses/>. */
typedef void *void_p;
-DEF_VEC_P(void_p);
-DEF_VEC_ALLOC_P(void_p,heap);
/* This is the main data structure for the dominator walker. It provides
the callback hooks as well as a convenient place to hang block local
/* Stack of any data we need to keep on a per-block basis.
If you have no local data, then BLOCK_DATA_STACK will be NULL. */
- VEC(void_p,heap) *block_data_stack;
+ vec<void_p> block_data_stack;
/* Size of the block local data. If this is zero, then it is assumed
you have no local data and thus no BLOCK_DATA_STACK as well. */
information/data outside domwalk.c. */
/* Stack of available block local structures. */
- VEC(void_p,heap) *free_block_data;
+ vec<void_p> free_block_data;
};
void walk_dominator_tree (struct dom_walk_data *, basic_block);
/* Index into the rtx_group_vec. */
static int rtx_group_next_id;
-DEF_VEC_P(group_info_t);
-DEF_VEC_ALLOC_P(group_info_t,heap);
-static VEC(group_info_t,heap) *rtx_group_vec;
+static vec<group_info_t> rtx_group_vec;
/* This structure holds the set of changes that are being deferred
gi->offset_map_size_p = 0;
gi->offset_map_n = NULL;
gi->offset_map_p = NULL;
- VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
+ rtx_group_vec.safe_push (gi);
}
return clear_alias_group;
}
gi->offset_map_size_p = 0;
gi->offset_map_n = NULL;
gi->offset_map_p = NULL;
- VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
+ rtx_group_vec.safe_push (gi);
}
return gi;
frame pointer we can do global analysis. */
group_info_t group
- = VEC_index (group_info_t, rtx_group_vec, group_id);
+ = rtx_group_vec[group_id];
tree expr = MEM_EXPR (mem);
store_info = (store_info_t) pool_alloc (rtx_store_info_pool);
else
{
group_info_t group
- = VEC_index (group_info_t, rtx_group_vec, group_id);
+ = rtx_group_vec[group_id];
mem_addr = group->canon_base_addr;
}
if (offset)
else
{
group_info_t group
- = VEC_index (group_info_t, rtx_group_vec, group_id);
+ = rtx_group_vec[group_id];
mem_addr = group->canon_base_addr;
}
if (offset)
store_info = store_info->next;
if (store_info->group_id >= 0
- && VEC_index (group_info_t, rtx_group_vec,
- store_info->group_id)->frame_related)
+ && rtx_group_vec[store_info->group_id]->frame_related)
remove_store = true;
}
if (store_info->group_id >= 0)
{
group_info_t group
- = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
+ = rtx_group_vec[store_info->group_id];
if (group->frame_related && !i_ptr->cannot_delete)
delete_dead_store_insn (i_ptr);
}
unsigned int i;
group_info_t group;
- FOR_EACH_VEC_ELT (group_info_t, rtx_group_vec, i, group)
+ FOR_EACH_VEC_ELT (rtx_group_vec, i, group)
{
/* For all non stack related bases, we only consider a store to
be deletable if there are two or more stores for that
/* Position 0 is unused because 0 is used in the maps to mean
unused. */
current_position = 1;
- FOR_EACH_VEC_ELT (group_info_t, rtx_group_vec, i, group)
+ FOR_EACH_VEC_ELT (rtx_group_vec, i, group)
{
bitmap_iterator bi;
unsigned int j;
{
HOST_WIDE_INT i;
group_info_t group_info
- = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
+ = rtx_group_vec[store_info->group_id];
if (group_info->process_globally)
for (i = store_info->begin; i < store_info->end; i++)
{
/* If this insn reads the frame, kill all the frame related stores. */
if (insn_info->frame_read)
{
- FOR_EACH_VEC_ELT (group_info_t, rtx_group_vec, i, group)
+ FOR_EACH_VEC_ELT (rtx_group_vec, i, group)
if (group->process_globally && group->frame_related)
{
if (kill)
if (kill)
bitmap_ior_into (kill, kill_on_calls);
bitmap_and_compl_into (gen, kill_on_calls);
- FOR_EACH_VEC_ELT (group_info_t, rtx_group_vec, i, group)
+ FOR_EACH_VEC_ELT (rtx_group_vec, i, group)
if (group->process_globally && !group->frame_related)
{
if (kill)
}
while (read_info)
{
- FOR_EACH_VEC_ELT (group_info_t, rtx_group_vec, i, group)
+ FOR_EACH_VEC_ELT (rtx_group_vec, i, group)
{
if (group->process_globally)
{
unsigned int i;
group_info_t group;
- FOR_EACH_VEC_ELT (group_info_t, rtx_group_vec, i, group)
+ FOR_EACH_VEC_ELT (rtx_group_vec, i, group)
{
if (group->process_globally && group->frame_related)
bitmap_ior_into (bb_info->gen, group->group_kill);
group_info_t group;
all_ones = BITMAP_ALLOC (&dse_bitmap_obstack);
- FOR_EACH_VEC_ELT (group_info_t, rtx_group_vec, j, group)
+ FOR_EACH_VEC_ELT (rtx_group_vec, j, group)
bitmap_ior_into (all_ones, group->group_kill);
}
if (!bb_info->out)
{
HOST_WIDE_INT i;
group_info_t group_info
- = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
+ = rtx_group_vec[store_info->group_id];
for (i = store_info->begin; i < store_info->end; i++)
{
end_alias_analysis ();
free (bb_table);
rtx_group_table.dispose ();
- VEC_free (group_info_t, heap, rtx_group_vec);
+ rtx_group_vec.release ();
BITMAP_FREE (all_blocks);
BITMAP_FREE (scratch);
rtx saved_in_reg;
} reg_saved_in_data;
-DEF_VEC_O (reg_saved_in_data);
-DEF_VEC_ALLOC_O (reg_saved_in_data, heap);
/* Since we no longer have a proper CFG, we're going to create a facsimile
of one on the fly while processing the frame-related insns.
implemented as a flat array because it normally contains zero or 1
entry, depending on the target. IA-64 is the big spender here, using
a maximum of 5 entries. */
- VEC(reg_saved_in_data, heap) *regs_saved_in_regs;
+ vec<reg_saved_in_data> regs_saved_in_regs;
/* An identifier for this trace. Used only for debugging dumps. */
unsigned id;
bool args_size_undefined;
} dw_trace_info;
-DEF_VEC_O (dw_trace_info);
-DEF_VEC_ALLOC_O (dw_trace_info, heap);
typedef dw_trace_info *dw_trace_info_ref;
-DEF_VEC_P (dw_trace_info_ref);
-DEF_VEC_ALLOC_P (dw_trace_info_ref, heap);
/* The variables making up the pseudo-cfg, as described above. */
-static VEC (dw_trace_info, heap) *trace_info;
-static VEC (dw_trace_info_ref, heap) *trace_work_list;
+static vec<dw_trace_info> trace_info;
+static vec<dw_trace_info_ref> trace_work_list;
static htab_t trace_index;
/* A vector of call frame insns for the CIE. */
HOST_WIDE_INT cfa_offset;
} queued_reg_save;
-DEF_VEC_O (queued_reg_save);
-DEF_VEC_ALLOC_O (queued_reg_save, heap);
-static VEC(queued_reg_save, heap) *queued_reg_saves;
+static vec<queued_reg_save> queued_reg_saves;
/* True if any CFI directives were emitted at the current insn. */
static bool any_cfis_emitted;
dw_cfi_row *dst = ggc_alloc_dw_cfi_row ();
*dst = *src;
- dst->reg_save = VEC_copy (dw_cfi_ref, gc, src->reg_save);
+ dst->reg_save = vec_safe_copy (src->reg_save);
return dst;
}
}
if (add_cfi_vec != NULL)
- VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
+ vec_safe_push (*add_cfi_vec, cfi);
}
static void
static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
- if (VEC_length (dw_cfi_ref, row->reg_save) <= column)
- VEC_safe_grow_cleared (dw_cfi_ref, gc, row->reg_save, column + 1);
- VEC_replace (dw_cfi_ref, row->reg_save, column, cfi);
+ if (vec_safe_length (row->reg_save) <= column)
+ vec_safe_grow_cleared (row->reg_save, column + 1);
+ (*row->reg_save)[column] = cfi;
}
/* This function fills in a dw_cfa_location structure from a dwarf location
else if (!cfa_equal_p (&a->cfa, &b->cfa))
return false;
- n_a = VEC_length (dw_cfi_ref, a->reg_save);
- n_b = VEC_length (dw_cfi_ref, b->reg_save);
+ n_a = vec_safe_length (a->reg_save);
+ n_b = vec_safe_length (b->reg_save);
n_max = MAX (n_a, n_b);
for (i = 0; i < n_max; ++i)
dw_cfi_ref r_a = NULL, r_b = NULL;
if (i < n_a)
- r_a = VEC_index (dw_cfi_ref, a->reg_save, i);
+ r_a = (*a->reg_save)[i];
if (i < n_b)
- r_b = VEC_index (dw_cfi_ref, b->reg_save, i);
+ r_b = (*b->reg_save)[i];
if (!cfi_equal_p (r_a, r_b))
return false;
reg_saved_in_data *elt;
size_t i;
- FOR_EACH_VEC_ELT (reg_saved_in_data, cur_trace->regs_saved_in_regs, i, elt)
+ FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
if (compare_reg_or_pc (elt->orig_reg, src))
{
if (dest == NULL)
- VEC_unordered_remove (reg_saved_in_data,
- cur_trace->regs_saved_in_regs, i);
+ cur_trace->regs_saved_in_regs.unordered_remove (i);
else
elt->saved_in_reg = dest;
return;
return;
reg_saved_in_data e = {src, dest};
- VEC_safe_push (reg_saved_in_data, heap, cur_trace->regs_saved_in_regs, e);
+ cur_trace->regs_saved_in_regs.safe_push (e);
}
/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
/* Duplicates waste space, but it's also necessary to remove them
for correctness, since the queue gets output in reverse order. */
- FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
+ FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
if (compare_reg_or_pc (q->reg, reg))
{
*q = e;
return;
}
- VEC_safe_push (queued_reg_save, heap, queued_reg_saves, e);
+ queued_reg_saves.safe_push (e);
}
/* Output all the entries in QUEUED_REG_SAVES. */
queued_reg_save *q;
size_t i;
- FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
+ FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
{
unsigned int reg, sreg;
reg_save (reg, sreg, q->cfa_offset);
}
- VEC_truncate (queued_reg_save, queued_reg_saves, 0);
+ queued_reg_saves.truncate (0);
}
/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
queued_reg_save *q;
size_t iq;
- FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, iq, q)
+ FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
{
size_t ir;
reg_saved_in_data *rir;
if (modified_in_p (q->reg, insn))
return true;
- FOR_EACH_VEC_ELT (reg_saved_in_data,
- cur_trace->regs_saved_in_regs, ir, rir)
+ FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
if (compare_reg_or_pc (q->reg, rir->orig_reg)
&& modified_in_p (rir->saved_in_reg, insn))
return true;
reg_saved_in_data *rir;
size_t i;
- FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
+ FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
if (q->saved_reg && regn == REGNO (q->saved_reg))
return q->reg;
- FOR_EACH_VEC_ELT (reg_saved_in_data, cur_trace->regs_saved_in_regs, i, rir)
+ FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
if (regn == REGNO (rir->saved_in_reg))
return rir->orig_reg;
add_cfi (cfi);
}
- n_old = VEC_length (dw_cfi_ref, old_row->reg_save);
- n_new = VEC_length (dw_cfi_ref, new_row->reg_save);
+ n_old = vec_safe_length (old_row->reg_save);
+ n_new = vec_safe_length (new_row->reg_save);
n_max = MAX (n_old, n_new);
for (i = 0; i < n_max; ++i)
dw_cfi_ref r_old = NULL, r_new = NULL;
if (i < n_old)
- r_old = VEC_index (dw_cfi_ref, old_row->reg_save, i);
+ r_old = (*old_row->reg_save)[i];
if (i < n_new)
- r_new = VEC_index (dw_cfi_ref, new_row->reg_save, i);
+ r_new = (*new_row->reg_save)[i];
if (r_old == r_new)
;
if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
{
- fde->dw_fde_switch_cfi_index
- = VEC_length (dw_cfi_ref, fde->dw_fde_cfi);
+ fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
/* Don't attempt to advance_loc4 between labels
in different sections. */
first = true;
xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
: DW_CFA_advance_loc4);
xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
- VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
+ vec_safe_push (fde->dw_fde_cfi, xcfi);
tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
NOTE_LABEL_NUMBER (tmp) = num;
do
{
if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
- VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi,
- NOTE_CFI (insn));
+ vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
insn = NEXT_INSN (insn);
}
while (insn != next);
ti->cfa_store = cur_trace->cfa_store;
ti->cfa_temp = cur_trace->cfa_temp;
- ti->regs_saved_in_regs = VEC_copy (reg_saved_in_data, heap,
- cur_trace->regs_saved_in_regs);
+ ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();
- VEC_safe_push (dw_trace_info_ref, heap, trace_work_list, ti);
+ trace_work_list.safe_push (ti);
if (dump_file)
fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
if (BARRIER_P (insn))
{
/* Don't bother saving the unneeded queued registers at all. */
- VEC_truncate (queued_reg_save, queued_reg_saves, 0);
+ queued_reg_saves.truncate (0);
break;
}
if (save_point_p (insn))
add_cfi_insn = NULL;
restore_args_size = cur_trace->end_true_args_size;
cur_cfa = &cur_row->cfa;
- save_row_reg_save
- = VEC_copy (dw_cfi_ref, gc, cur_row->reg_save);
+ save_row_reg_save = vec_safe_copy (cur_row->reg_save);
scan_insn_after (elt);
/* ??? Should we instead save the entire row state? */
- gcc_assert (!VEC_length (queued_reg_save, queued_reg_saves));
+ gcc_assert (!queued_reg_saves.length ());
create_trace_edges (control);
{
dw_trace_info *ti;
- gcc_checking_assert (queued_reg_saves == NULL);
- gcc_checking_assert (trace_work_list == NULL);
+ gcc_checking_assert (!queued_reg_saves.exists ());
+ gcc_checking_assert (!trace_work_list.exists ());
/* Always begin at the entry trace. */
- ti = &VEC_index (dw_trace_info, trace_info, 0);
+ ti = &trace_info[0];
scan_trace (ti);
- while (!VEC_empty (dw_trace_info_ref, trace_work_list))
+ while (!trace_work_list.is_empty ())
{
- ti = VEC_pop (dw_trace_info_ref, trace_work_list);
+ ti = trace_work_list.pop ();
scan_trace (ti);
}
- VEC_free (queued_reg_save, heap, queued_reg_saves);
- VEC_free (dw_trace_info_ref, heap, trace_work_list);
+ queued_reg_saves.release ();
+ trace_work_list.release ();
}
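The exists ()/release () pair above replaces NULL-pointer checks and VEC_free for heap vectors, which are now small by-value objects. A hedged sketch of the lifecycle, with process () and the element type as stand-ins:

  vec<dw_trace_info_ref> work = vec<dw_trace_info_ref> ();
  gcc_checking_assert (!work.exists ()); /* zeroed state: no storage yet */
  work.create (16);                      /* allocate; exists () is now true */
  work.safe_push (ti);                   /* grows on demand */
  while (!work.is_empty ())
    process (work.pop ());
  work.release ();                       /* free storage, back to empty state */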
/* Return the insn before the first NOTE_INSN_CFI after START. */
static void
connect_traces (void)
{
- unsigned i, n = VEC_length (dw_trace_info, trace_info);
+ unsigned i, n = trace_info.length ();
dw_trace_info *prev_ti, *ti;
/* ??? Ideally, we should have both queued and processed every trace.
/* Remove all unprocessed traces from the list. */
for (i = n - 1; i > 0; --i)
{
- ti = &VEC_index (dw_trace_info, trace_info, i);
+ ti = &trace_info[i];
if (ti->beg_row == NULL)
{
- VEC_ordered_remove (dw_trace_info, trace_info, i);
+ trace_info.ordered_remove (i);
n -= 1;
}
else
/* Work from the end back to the beginning. This lets us easily insert
remember/restore_state notes in the correct order wrt other notes. */
- prev_ti = &VEC_index (dw_trace_info, trace_info, n - 1);
+ prev_ti = &trace_info[n - 1];
for (i = n - 1; i > 0; --i)
{
dw_cfi_row *old_row;
ti = prev_ti;
- prev_ti = &VEC_index (dw_trace_info, trace_info, i - 1);
+ prev_ti = &trace_info[i - 1];
add_cfi_insn = ti->head;
}
/* Connect args_size between traces that have can_throw_internal insns. */
- if (cfun->eh->lp_array != NULL)
+ if (cfun->eh->lp_array)
{
HOST_WIDE_INT prev_args_size = 0;
for (i = 0; i < n; ++i)
{
- ti = &VEC_index (dw_trace_info, trace_info, i);
+ ti = &trace_info[i];
if (ti->switch_sections)
prev_args_size = 0;
/* The first trace begins at the start of the function,
and begins with the CIE row state. */
- trace_info = VEC_alloc (dw_trace_info, heap, 16);
+ trace_info.create (16);
memset (&ti, 0, sizeof (ti));
ti.head = get_insns ();
ti.beg_row = cie_cfi_row;
ti.cfa_store = cie_cfi_row->cfa;
ti.cfa_temp.reg = INVALID_REGNUM;
- VEC_quick_push (dw_trace_info, trace_info, ti);
+ trace_info.quick_push (ti);
if (cie_return_save)
- VEC_safe_push (reg_saved_in_data, heap,
- ti.regs_saved_in_regs, *cie_return_save);
+ ti.regs_saved_in_regs.safe_push (*cie_return_save);
/* Walk all the insns, collecting start of trace locations. */
saw_barrier = false;
memset (&ti, 0, sizeof (ti));
ti.head = insn;
ti.switch_sections = switch_sections;
- ti.id = VEC_length (dw_trace_info, trace_info) - 1;
- VEC_safe_push (dw_trace_info, heap, trace_info, ti);
+ ti.id = trace_info.length () - 1;
+ trace_info.safe_push (ti);
saw_barrier = false;
switch_sections = false;
/* Create the trace index after we've finished building trace_info,
avoiding stale pointer problems due to reallocation. */
- trace_index = htab_create (VEC_length (dw_trace_info, trace_info),
+ trace_index = htab_create (trace_info.length (),
dw_trace_info_hash, dw_trace_info_eq, NULL);
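The stale-pointer caveat in the comment above carries over to the new API unchanged: safe_push may reallocate the backing store and move every element. An illustrative fragment:

  dw_trace_info *p = &trace_info[0];
  trace_info.safe_push (ti);   /* may reallocate and move the whole array */
  /* p may now dangle; take element addresses only after growth is done.  */
  p = &trace_info[0];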
dw_trace_info *tp;
- FOR_EACH_VEC_ELT (dw_trace_info, trace_info, i, tp)
+ FOR_EACH_VEC_ELT (trace_info, i, tp)
{
void **slot;
the DW_CFA_offset against the return column, not the intermediate
save register. Save the contents of regs_saved_in_regs so that
we can re-initialize it at the start of each function. */
- switch (VEC_length (reg_saved_in_data, cie_trace.regs_saved_in_regs))
+ switch (cie_trace.regs_saved_in_regs.length ())
{
case 0:
break;
case 1:
cie_return_save = ggc_alloc_reg_saved_in_data ();
- *cie_return_save = VEC_index (reg_saved_in_data,
- cie_trace.regs_saved_in_regs, 0);
- VEC_free (reg_saved_in_data, heap, cie_trace.regs_saved_in_regs);
+ *cie_return_save = cie_trace.regs_saved_in_regs[0];
+ cie_trace.regs_saved_in_regs.release ();
break;
default:
gcc_unreachable ();
size_t i;
dw_trace_info *ti;
- FOR_EACH_VEC_ELT (dw_trace_info, trace_info, i, ti)
- VEC_free (reg_saved_in_data, heap, ti->regs_saved_in_regs);
+ FOR_EACH_VEC_ELT (trace_info, i, ti)
+ ti->regs_saved_in_regs.release ();
}
- VEC_free (dw_trace_info, heap, trace_info);
+ trace_info.release ();
htab_delete (trace_index);
trace_index = NULL;
}
output_cfi_directive (f, cfi);
- FOR_EACH_VEC_ELT (dw_cfi_ref, row->reg_save, i, cfi)
+ FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
if (cfi)
output_cfi_directive (f, cfi);
}
/* Array of RTXes referenced by the debugging information, which therefore
must be kept around forever. */
-static GTY(()) VEC(rtx,gc) *used_rtx_array;
+static GTY(()) vec<rtx, va_gc> *used_rtx_array;
/* A pointer to the base of a list of incomplete types which might be
completed at some later time. incomplete_types_list needs to be a
- VEC(tree,gc) because we want to tell the garbage collector about
+ vec<tree, va_gc> * because we want to tell the garbage collector about
it. */
-static GTY(()) VEC(tree,gc) *incomplete_types;
+static GTY(()) vec<tree, va_gc> *incomplete_types;
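GC-owned vectors keep the pointer-to-vector shape so gengtype can mark them, and the vec_safe_* wrappers make the NULL (never-allocated) case behave as a valid empty vector. A sketch under assumed names:

  static GTY (()) vec<tree, va_gc> *types;

  /* vec_safe_push allocates on first use, so no vec_alloc call is
     needed before the first push; vec_safe_length (NULL) is 0.  */
  vec_safe_push (types, some_type);     /* some_type: hypothetical tree */
  unsigned n = vec_safe_length (types);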
/* A pointer to the base of a table of references to declaration
scopes. This table is a display which tracks the nesting
of declaration scopes at the current scope and containing
scopes. This table is used to find the proper place to
define type declaration DIE's. */
-static GTY(()) VEC(tree,gc) *decl_scope_table;
+static GTY(()) vec<tree, va_gc> *decl_scope_table;
/* Pointers to various DWARF2 sections. */
static GTY(()) section *debug_info_section;
#define DWARF_CIE_ID DW_CIE_ID
#endif
-DEF_VEC_P (dw_fde_ref);
-DEF_VEC_ALLOC_P (dw_fde_ref, gc);
/* A vector for a table that contains frame description
information for each routine. */
#define NOT_INDEXED (-1U)
#define NO_INDEX_ASSIGNED (-2U)
-static GTY(()) VEC(dw_fde_ref, gc) *fde_vec;
+static GTY(()) vec<dw_fde_ref, va_gc> *fde_vec;
struct GTY(()) indirect_string_node {
const char *str;
size_t from, until, i;
from = 0;
- until = VEC_length (dw_cfi_ref, fde->dw_fde_cfi);
+ until = vec_safe_length (fde->dw_fde_cfi);
if (fde->dw_fde_second_begin == NULL)
;
from = fde->dw_fde_switch_cfi_index;
for (i = from; i < until; i++)
- output_cfi (VEC_index (dw_cfi_ref, fde->dw_fde_cfi, i), fde, for_eh);
+ output_cfi ((*fde->dw_fde_cfi)[i], fde, for_eh);
}
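Because a GC vector is reached through a pointer, element access spells out the dereference: (*v)[i] replaces VEC_index, and vec_safe_length replaces VEC_length with NULL tolerated. The shape of the idiom, using the names from the loop above:

  vec<dw_cfi_ref, va_gc> *v = fde->dw_fde_cfi;   /* may be NULL */
  for (unsigned i = 0; i < vec_safe_length (v); i++)
    output_cfi ((*v)[i], fde, for_eh);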
/* If we are to emit a ref/link from function bodies to their frame tables,
int dw_cie_version;
/* Don't emit a CIE if there won't be any FDEs. */
- if (fde_vec == NULL)
+ if (!fde_vec)
return;
/* Nothing to do if the assembler's doing it all. */
{
bool any_eh_needed = false;
- FOR_EACH_VEC_ELT (dw_fde_ref, fde_vec, i, fde)
+ FOR_EACH_VEC_ELT (*fde_vec, i, fde)
{
if (fde->uses_eh_lsda)
any_eh_needed = any_lsda_needed = true;
eh_data_format_name (fde_encoding));
}
- FOR_EACH_VEC_ELT (dw_cfi_ref, cie_cfi_vec, i, cfi)
+ FOR_EACH_VEC_ELT (*cie_cfi_vec, i, cfi)
output_cfi (cfi, NULL, for_eh);
/* Pad the CIE out to an address sized boundary. */
ASM_OUTPUT_LABEL (asm_out_file, l2);
/* Loop through all of the FDE's. */
- FOR_EACH_VEC_ELT (dw_fde_ref, fde_vec, i, fde)
+ FOR_EACH_VEC_ELT (*fde_vec, i, fde)
{
unsigned int k;
fde = ggc_alloc_cleared_dw_fde_node ();
fde->decl = current_function_decl;
fde->funcdef_number = current_function_funcdef_no;
- fde->fde_index = VEC_length (dw_fde_ref, fde_vec);
+ fde->fde_index = vec_safe_length (fde_vec);
fde->all_throwers_are_sibcalls = crtl->all_throwers_are_sibcalls;
fde->uses_eh_lsda = crtl->uses_eh_lsda;
fde->nothrow = crtl->nothrow;
/* Record the FDE associated with this function. */
cfun->fde = fde;
- VEC_safe_push (dw_fde_ref, gc, fde_vec, fde);
+ vec_safe_push (fde_vec, fde);
return fde;
}
dw_die_ref die;
} deferred_locations;
-DEF_VEC_O(deferred_locations);
-DEF_VEC_ALLOC_O(deferred_locations,gc);
-static GTY(()) VEC(deferred_locations, gc) *deferred_locations_list;
+static GTY(()) vec<deferred_locations, va_gc> *deferred_locations_list;
-DEF_VEC_P(dw_die_ref);
-DEF_VEC_ALLOC_P(dw_die_ref,heap);
/* Describe an entry into the .debug_addr section. */
unsigned int val;
} dw_line_info_entry;
-DEF_VEC_O(dw_line_info_entry);
-DEF_VEC_ALLOC_O(dw_line_info_entry, gc);
typedef struct GTY(()) dw_line_info_table_struct {
/* The label that marks the end of this section. */
bool is_stmt;
bool in_use;
- VEC(dw_line_info_entry, gc) *entries;
+ vec<dw_line_info_entry, va_gc> *entries;
} dw_line_info_table;
typedef dw_line_info_table *dw_line_info_table_p;
-DEF_VEC_P(dw_line_info_table_p);
-DEF_VEC_ALLOC_P(dw_line_info_table_p, gc);
/* Each DIE attribute has a field specifying the attribute kind,
a link to the next attribute in the chain, and an attribute value.
}
dw_attr_node;
-DEF_VEC_O(dw_attr_node);
-DEF_VEC_ALLOC_O(dw_attr_node,gc);
/* The Debugging Information Entry (DIE) structure. DIEs form a tree.
The children of each node form a circular list linked by
comdat_type_node_ref GTY ((tag ("1"))) die_type_node;
}
GTY ((desc ("%0.comdat_type_p"))) die_id;
- VEC(dw_attr_node,gc) * die_attr;
+ vec<dw_attr_node, va_gc> *die_attr;
dw_die_ref die_parent;
dw_die_ref die_child;
dw_die_ref die_sib;
}
pubname_entry;
-DEF_VEC_O(pubname_entry);
-DEF_VEC_ALLOC_O(pubname_entry, gc);
struct GTY(()) dw_ranges_struct {
/* If this is positive, it's a block number, otherwise it's a
}
macinfo_entry;
-DEF_VEC_O(macinfo_entry);
-DEF_VEC_ALLOC_O(macinfo_entry, gc);
struct GTY(()) dw_ranges_by_label_struct {
const char *begin;
tree arg;
} die_arg_entry;
-DEF_VEC_O(die_arg_entry);
-DEF_VEC_ALLOC_O(die_arg_entry,gc);
/* Node of the variable location list. */
struct GTY ((chain_next ("%h.next"))) var_loc_node {
/* Vector mapping block numbers to DW_TAG_{lexical_block,inlined_subroutine}
DIEs. */
-static VEC (dw_die_ref, heap) *block_map;
+static vec<dw_die_ref> block_map;
/* A cached location list. */
struct GTY (()) cached_dw_loc_list_def {
static GTY(()) dw_line_info_table *cold_text_section_line_info;
/* The set of all non-default tables of line number info. */
-static GTY(()) VEC (dw_line_info_table_p, gc) *separate_line_info;
+static GTY(()) vec<dw_line_info_table_p, va_gc> *separate_line_info;
/* A flag to tell pubnames/types export if there is an info section to
refer to. */
/* A pointer to the base of a table that contains a list of publicly
accessible names. */
-static GTY (()) VEC (pubname_entry, gc) * pubname_table;
+static GTY (()) vec<pubname_entry, va_gc> *pubname_table;
/* A pointer to the base of a table that contains a list of publicly
accessible types. */
-static GTY (()) VEC (pubname_entry, gc) * pubtype_table;
+static GTY (()) vec<pubname_entry, va_gc> *pubtype_table;
/* A pointer to the base of a table that contains a list of macro
defines/undefines (and file start/end markers). */
-static GTY (()) VEC (macinfo_entry, gc) * macinfo_table;
+static GTY (()) vec<macinfo_entry, va_gc> *macinfo_table;
/* True if .debug_macinfo or .debug_macros section is going to be
emitted. */
#define have_macinfo \
(debug_info_level >= DINFO_LEVEL_VERBOSE \
- && !VEC_empty (macinfo_entry, macinfo_table))
+ && !macinfo_table->is_empty ())
/* Array of dies for which we should generate .debug_ranges info. */
static GTY ((length ("ranges_table_allocated"))) dw_ranges_ref ranges_table;
/* Cached result of previous call to lookup_filename. */
static GTY(()) struct dwarf_file_data * file_table_last_lookup;
-static GTY(()) VEC(die_arg_entry,gc) *tmpl_value_parm_die_table;
+static GTY(()) vec<die_arg_entry, va_gc> *tmpl_value_parm_die_table;
/* Instances of generic types for which we need to generate debug
info that describe their generic parameters and arguments. That
generation needs to happen once all types are properly laid out so
we do it at the end of compilation. */
-static GTY(()) VEC(tree,gc) *generic_type_instances;
+static GTY(()) vec<tree, va_gc> *generic_type_instances;
/* Offset from the "steady-state frame pointer" to the frame base,
within the current function. */
static HOST_WIDE_INT frame_pointer_fb_offset;
static bool frame_pointer_fb_offset_valid;
-static VEC (dw_die_ref, heap) *base_types;
+static vec<dw_die_ref> base_types;
/* Forward declarations for functions defined in this file. */
static void mark_dies (dw_die_ref);
static void unmark_dies (dw_die_ref);
static void unmark_all_dies (dw_die_ref);
-static unsigned long size_of_pubnames (VEC (pubname_entry,gc) *);
+static unsigned long size_of_pubnames (vec<pubname_entry, va_gc> *);
static unsigned long size_of_aranges (void);
static enum dwarf_form value_format (dw_attr_ref);
static void output_value_format (dw_attr_ref);
static void add_enumerator_pubname (const char *, dw_die_ref);
static void add_pubname_string (const char *, dw_die_ref);
static void add_pubtype (tree, dw_die_ref);
-static void output_pubnames (VEC (pubname_entry,gc) *);
+static void output_pubnames (vec<pubname_entry, va_gc> *);
static void output_aranges (unsigned long);
static unsigned int add_ranges_num (int);
static unsigned int add_ranges (const_tree);
if (die == NULL)
return;
- if (die->die_attr == NULL)
- die->die_attr = VEC_alloc (dw_attr_node, gc, 1);
- VEC_safe_push (dw_attr_node, gc, die->die_attr, *attr);
+ vec_safe_reserve (die->die_attr, 1);
+ vec_safe_push (die->die_attr, *attr);
}
static inline enum dw_val_class
if (! die)
return NULL;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
if (a->dw_attr == attr_kind)
return a;
else if (a->dw_attr == DW_AT_specification
if (! die)
return;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
if (a->dw_attr == attr_kind)
{
if (AT_class (a) == dw_val_class_str)
if (a->dw_attr_val.v.val_str->refcount)
a->dw_attr_val.v.val_str->refcount--;
- /* VEC_ordered_remove should help reduce the number of abbrevs
+ /* vec::ordered_remove should help reduce the number of abbrevs
that are needed. */
- VEC_ordered_remove (dw_attr_node, die->die_attr, ix);
+ die->die_attr->ordered_remove (ix);
return;
}
}
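ordered_remove shifts the tail down to preserve element order (O(n)); its sibling unordered_remove plugs the hole with the last element (O(1)) when order is irrelevant. A tiny illustration with throwaway values:

  vec<int> v = vec<int> ();
  v.safe_push (1); v.safe_push (2); v.safe_push (3);
  v.ordered_remove (0);    /* v is now {2, 3}: order preserved */
  v.unordered_remove (0);  /* v is now {3}: last element swapped in */
  v.release ();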
fprintf (outfile, "\n");
}
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
{
print_spaces (outfile);
fprintf (outfile, " %s: ", dwarf_attr_name (a->dw_attr));
CHECKSUM (die->die_tag);
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
attr_checksum (a, ctx, mark);
FOR_EACH_CHILD (die, c, die_checksum (c, ctx, mark));
dw_attr_ref a;
unsigned ix;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
{
switch (a->dw_attr)
{
if (die1->die_tag != die2->die_tag)
return 0;
- if (VEC_length (dw_attr_node, die1->die_attr)
- != VEC_length (dw_attr_node, die2->die_attr))
+ if (vec_safe_length (die1->die_attr) != vec_safe_length (die2->die_attr))
return 0;
- FOR_EACH_VEC_ELT (dw_attr_node, die1->die_attr, ix, a1)
- if (!same_attr_p (a1, &VEC_index (dw_attr_node, die2->die_attr, ix), mark))
+ FOR_EACH_VEC_SAFE_ELT (die1->die_attr, ix, a1)
+ if (!same_attr_p (a1, &(*die2->die_attr)[ix], mark))
return 0;
c1 = die1->die_child;
dw_attr_ref a;
unsigned ix;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
if (a->dw_attr == DW_AT_declaration)
return 1;
clone = ggc_alloc_cleared_die_node ();
clone->die_tag = die->die_tag;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
add_dwarf_attr (clone, a);
return clone;
clone = ggc_alloc_cleared_die_node ();
clone->die_tag = die->die_tag;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
{
/* We don't want to copy over all attributes.
For example we don't want DW_AT_byte_size because otherwise we will no
remove_AT (die, DW_AT_specification);
- FOR_EACH_VEC_ELT (dw_attr_node, decl->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (decl->die_attr, ix, a)
{
if (a->dw_attr != DW_AT_name
&& a->dw_attr != DW_AT_declaration
dw_attr_ref a;
unsigned ix;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
{
if (AT_class (a) == dw_val_class_die_ref)
{
dw_attr_ref a;
unsigned ix;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
if (AT_class (a) == dw_val_class_loc_list)
output_loc_list (AT_loc_list (a));
/* Scan the DIE references, and remember any that refer to DIEs from
other CUs (i.e. those which are not marked). */
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
if (AT_class (a) == dw_val_class_die_ref
&& (c = AT_ref (a))->die_mark == 0
&& is_type_die (c))
/* Scan the DIE references, and replace any that refer to
DIEs from other CUs (i.e. those which are not marked) with
the local stubs we built in optimize_external_refs. */
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
if (AT_class (a) == dw_val_class_die_ref
&& (c = AT_ref (a))->die_mark == 0)
{
if ((abbrev->die_child != NULL) != (die->die_child != NULL))
continue;
- if (VEC_length (dw_attr_node, abbrev->die_attr)
- != VEC_length (dw_attr_node, die->die_attr))
+ if (vec_safe_length (abbrev->die_attr) != vec_safe_length (die->die_attr))
continue;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, die_a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, die_a)
{
- abbrev_a = &VEC_index (dw_attr_node, abbrev->die_attr, ix);
+ abbrev_a = &(*abbrev->die_attr)[ix];
if ((abbrev_a->dw_attr != die_a->dw_attr)
|| (value_format (abbrev_a) != value_format (die_a)))
{
enum dwarf_form form;
size += size_of_uleb128 (die->die_abbrev);
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
{
switch (AT_class (a))
{
#endif
die_offset += size_of_die (comp_unit_die ());
- for (i = 0; VEC_iterate (dw_die_ref, base_types, i, base_type); i++)
+ for (i = 0; base_types.iterate (i, &base_type); i++)
{
#if ENABLE_ASSERT_CHECKING
gcc_assert (base_type->die_offset == 0
FOR_EACH_CHILD (die, c, unmark_all_dies (c));
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
if (AT_class (a) == dw_val_class_die_ref)
unmark_all_dies (AT_ref (a));
}
generated for the compilation unit. */
static unsigned long
-size_of_pubnames (VEC (pubname_entry, gc) * names)
+size_of_pubnames (vec<pubname_entry, va_gc> *names)
{
unsigned long size;
unsigned i;
pubname_ref p;
size = DWARF_PUBNAMES_HEADER_SIZE;
- FOR_EACH_VEC_ELT (pubname_entry, names, i, p)
+ FOR_EACH_VEC_ELT (*names, i, p)
if (names != pubtype_table
|| p->die->die_offset != 0
|| !flag_eliminate_unused_debug_types)
unsigned fde_idx;
dw_fde_ref fde;
- FOR_EACH_VEC_ELT (dw_fde_ref, fde_vec, fde_idx, fde)
+ FOR_EACH_VEC_ELT (*fde_vec, fde_idx, fde)
{
if (!fde->in_std_section)
size += 2 * DWARF2_ADDR_SIZE;
else
dw2_asm_output_data (1, DW_children_no, "DW_children_no");
- for (ix = 0; VEC_iterate (dw_attr_node, abbrev->die_attr, ix, a_attr);
- ix++)
+ for (ix = 0; vec_safe_iterate (abbrev->die_attr, ix, &a_attr); ix++)
{
dw2_asm_output_data_uleb128 (a_attr->dw_attr, "(%s)",
dwarf_attr_name (a_attr->dw_attr));
(unsigned long)die->die_offset,
dwarf_tag_name (die->die_tag));
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
{
const char *name = dwarf_attr_name (a->dw_attr);
e.die = die;
e.name = xstrdup (str);
- VEC_safe_push (pubname_entry, gc, pubname_table, e);
+ vec_safe_push (pubname_table, e);
}
static void
gcc_assert (scope_name);
e.name = concat (scope_name, get_AT_string (die, DW_AT_name), NULL);
e.die = die;
- VEC_safe_push (pubname_entry, gc, pubname_table, e);
+ vec_safe_push (pubname_table, e);
}
/* Add a new entry to .debug_pubtypes if appropriate. */
{
e.die = die;
e.name = concat (scope_name, name, NULL);
- VEC_safe_push (pubname_entry, gc, pubtype_table, e);
+ vec_safe_push (pubtype_table, e);
}
/* Although it might be more consistent to add the pubinfo for the
visible names; or the public types table used to find type definitions. */
static void
-output_pubnames (VEC (pubname_entry, gc) * names)
+output_pubnames (vec<pubname_entry, va_gc> *names)
{
unsigned i;
unsigned long pubnames_length = size_of_pubnames (names);
dw2_asm_output_data (DWARF_OFFSET_SIZE, next_die_offset,
"Compilation Unit Length");
- FOR_EACH_VEC_ELT (pubname_entry, names, i, pub)
+ FOR_EACH_VEC_ELT (*names, i, pub)
{
/* Enumerator names are part of the pubname table, but the parent
DW_TAG_enumeration_type die may have been pruned. Don't output
unsigned fde_idx;
dw_fde_ref fde;
- FOR_EACH_VEC_ELT (dw_fde_ref, fde_vec, fde_idx, fde)
+ FOR_EACH_VEC_ELT (*fde_vec, fde_idx, fde)
{
if (DECL_IGNORED_P (fde->decl))
continue;
dw_line_info_entry *ent;
size_t i;
- FOR_EACH_VEC_ELT (dw_line_info_entry, table->entries, i, ent)
+ FOR_EACH_VEC_SAFE_ELT (table->entries, i, ent)
{
switch (ent->opcode)
{
dw_line_info_table *table;
size_t i;
- FOR_EACH_VEC_ELT (dw_line_info_table_p, separate_line_info, i, table)
+ FOR_EACH_VEC_ELT (*separate_line_info, i, table)
if (table->in_use)
{
output_one_line_info_table (table);
symref:
mem_loc_result = new_addr_loc_descr (rtl, dtprel_false);
- VEC_safe_push (rtx, gc, used_rtx_array, rtl);
+ vec_safe_push (used_rtx_array, rtl);
break;
case CONCAT:
{
loc_result = new_addr_loc_descr (rtl, dtprel_false);
add_loc_descr (&loc_result, new_loc_descr (DW_OP_stack_value, 0, 0));
- VEC_safe_push (rtx, gc, used_rtx_array, rtl);
+ vec_safe_push (used_rtx_array, rtl);
}
break;
loc_result = new_addr_loc_descr (rtl, dtprel_false);
add_loc_descr (&loc_result, new_loc_descr (DW_OP_stack_value, 0, 0));
add_AT_loc (die, DW_AT_location, loc_result);
- VEC_safe_push (rtx, gc, used_rtx_array, rtl);
+ vec_safe_push (used_rtx_array, rtl);
return true;
}
return false;
case CONSTRUCTOR:
if (TREE_CONSTANT (init))
{
- VEC(constructor_elt,gc) *elts = CONSTRUCTOR_ELTS (init);
+ vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (init);
bool constant_p = true;
tree value;
unsigned HOST_WIDE_INT ix;
deferred_locations entry;
entry.variable = variable;
entry.die = die;
- VEC_safe_push (deferred_locations, gc, deferred_locations_list, entry);
+ vec_safe_push (deferred_locations_list, entry);
}
/* Helper function for tree_add_const_value_attribute. Natively encode
min_index = tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0);
memset (array, '\0', size);
- FOR_EACH_VEC_ELT (constructor_elt, CONSTRUCTOR_ELTS (init), cnt, ce)
+ FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (init), cnt, ce)
{
tree val = ce->value;
tree index = ce->index;
if (TREE_CODE (type) == RECORD_TYPE)
field = TYPE_FIELDS (type);
- FOR_EACH_VEC_ELT (constructor_elt, CONSTRUCTOR_ELTS (init), cnt, ce)
+ FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (init), cnt, ce)
{
tree val = ce->value;
int pos, fieldsize;
/* ??? Bald assumption that the CIE opcode list does not contain
advance opcodes. */
- FOR_EACH_VEC_ELT (dw_cfi_ref, cie_cfi_vec, ix, cfi)
+ FOR_EACH_VEC_ELT (*cie_cfi_vec, ix, cfi)
lookup_cfa_1 (cfi, &next_cfa, &remember);
last_cfa = next_cfa;
start_label = last_label = fde->dw_fde_second_begin;
}
- FOR_EACH_VEC_ELT (dw_cfi_ref, fde->dw_fde_cfi, ix, cfi)
+ FOR_EACH_VEC_SAFE_ELT (fde->dw_fde_cfi, ix, cfi)
{
switch (cfi->dw_cfi_opc)
{
{
add_AT_addr (die, DW_AT_VMS_rtnbeg_pd_address,
XEXP (DECL_RTL (decl), 0), false);
- VEC_safe_push (rtx, gc, used_rtx_array, XEXP (DECL_RTL (decl), 0));
+ vec_safe_push (used_rtx_array, XEXP (DECL_RTL (decl), 0));
}
#endif /* VMS_DEBUGGING_INFO */
}
static void
push_decl_scope (tree scope)
{
- VEC_safe_push (tree, gc, decl_scope_table, scope);
+ vec_safe_push (decl_scope_table, scope);
}
/* Pop a declaration scope. */
static inline void
pop_decl_scope (void)
{
- VEC_pop (tree, decl_scope_table);
+ decl_scope_table->pop ();
}
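Note the asymmetry these two helpers expose: vec_safe_push accepts a still-NULL GC vector and allocates it, while a member call such as pop () goes through the pointer and requires the vector to exist already. Sketch:

  vec<tree, va_gc> *scopes = NULL;
  vec_safe_push (scopes, scope);  /* allocates on first use; scope assumed */
  tree top = scopes->pop ();      /* would crash if scopes were still NULL */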
/* walk_tree helper function for uses_local_type, below. */
{
int i;
- for (i = VEC_length (tree, incomplete_types) - 1; i >= 0; i--)
- if (should_emit_struct_debug (VEC_index (tree, incomplete_types, i),
- DINFO_USAGE_DIR_USE))
- gen_type_die (VEC_index (tree, incomplete_types, i), comp_unit_die ());
+ for (i = vec_safe_length (incomplete_types) - 1; i >= 0; i--)
+ if (should_emit_struct_debug ((*incomplete_types)[i], DINFO_USAGE_DIR_USE))
+ gen_type_die ((*incomplete_types)[i], comp_unit_die ());
}
/* Determine what tag to use for a record type. */
&& block != DECL_INITIAL (decl)
&& TREE_CODE (block) == BLOCK)
{
- if (VEC_length (dw_die_ref, block_map) > BLOCK_NUMBER (block))
- stmt_die = VEC_index (dw_die_ref, block_map, BLOCK_NUMBER (block));
+ if (block_map.length () > BLOCK_NUMBER (block))
+ stmt_die = block_map[BLOCK_NUMBER (block)];
if (stmt_die)
break;
block = BLOCK_SUPERCONTEXT (block);
if (call_arg_locations)
{
- if (VEC_length (dw_die_ref, block_map) <= BLOCK_NUMBER (stmt))
- VEC_safe_grow_cleared (dw_die_ref, heap, block_map,
- BLOCK_NUMBER (stmt) + 1);
- VEC_replace (dw_die_ref, block_map, BLOCK_NUMBER (stmt), stmt_die);
+ if (block_map.length () <= BLOCK_NUMBER (stmt))
+ block_map.safe_grow_cleared (BLOCK_NUMBER (stmt) + 1);
+ block_map[BLOCK_NUMBER (stmt)] = stmt_die;
}
if (! BLOCK_ABSTRACT (stmt) && TREE_ASM_WRITTEN (stmt))
if (call_arg_locations)
{
- if (VEC_length (dw_die_ref, block_map) <= BLOCK_NUMBER (stmt))
- VEC_safe_grow_cleared (dw_die_ref, heap, block_map,
- BLOCK_NUMBER (stmt) + 1);
- VEC_replace (dw_die_ref, block_map, BLOCK_NUMBER (stmt), subr_die);
+ if (block_map.length () <= BLOCK_NUMBER (stmt))
+ block_map.safe_grow_cleared (BLOCK_NUMBER (stmt) + 1);
+ block_map[BLOCK_NUMBER (stmt)] = subr_die;
}
add_abstract_origin_attribute (subr_die, decl);
if (TREE_ASM_WRITTEN (stmt))
}
typedef const char *dchar_p; /* For the switches vec below. */
-DEF_VEC_P(dchar_p);
-DEF_VEC_ALLOC_P(dchar_p,heap);
static char *producer_string;
gen_producer_string (void)
{
size_t j;
- VEC(dchar_p, heap) *switches = NULL;
+ vec<dchar_p> switches = vec<dchar_p>();
const char *language_string = lang_hooks.name;
char *producer, *tail;
const char *p;
default:
break;
}
- VEC_safe_push (dchar_p, heap, switches,
- save_decoded_options[j].orig_option_with_args_text);
+ switches.safe_push (save_decoded_options[j].orig_option_with_args_text);
len += strlen (save_decoded_options[j].orig_option_with_args_text) + 1;
break;
}
sprintf (tail, "%s %s", language_string, version_string);
tail += plen;
- FOR_EACH_VEC_ELT (dchar_p, switches, j, p)
+ FOR_EACH_VEC_ELT (switches, j, p)
{
len = strlen (p);
*tail = ' ';
}
*tail = '\0';
- VEC_free (dchar_p, heap, switches);
+ switches.release ();
return producer;
}
tree t;
const char *common_lang = NULL;
- FOR_EACH_VEC_ELT (tree, all_translation_units, i, t)
+ FOR_EACH_VEC_SAFE_ELT (all_translation_units, i, t)
{
if (!TRANSLATION_UNIT_LANGUAGE (t))
continue;
else if (strcmp (common_lang, TRANSLATION_UNIT_LANGUAGE (t)) == 0)
;
else if (strncmp (common_lang, "GNU C", 5) == 0
&& strncmp (TRANSLATION_UNIT_LANGUAGE (t), "GNU C", 5) == 0)
/* Mixing C and C++ is ok, use C++ in that case. */
common_lang = "GNU C++";
else
/* First output info about the base classes. */
if (binfo)
{
- VEC(tree,gc) *accesses = BINFO_BASE_ACCESSES (binfo);
+ vec<tree, va_gc> *accesses = BINFO_BASE_ACCESSES (binfo);
int i;
tree base;
for (i = 0; BINFO_BASE_ITERATE (binfo, i, base); i++)
gen_inheritance_die (base,
- (accesses ? VEC_index (tree, accesses, i)
- : access_public_node), context_die);
+ (accesses ? (*accesses)[i] : access_public_node),
+ context_die);
}
/* Now output info about the data members and type members. */
/* We don't need to do this for function-local types. */
if (TYPE_STUB_DECL (type)
&& ! decl_function_context (TYPE_STUB_DECL (type)))
- VEC_safe_push (tree, gc, incomplete_types, type);
+ vec_safe_push (incomplete_types, type);
}
if (get_AT (type_die, DW_AT_name))
call_arg_loc_last = NULL;
call_site_count = -1;
tail_call_site_count = -1;
- VEC_free (dw_die_ref, heap, block_map);
+ block_map.release ();
htab_empty (decl_loc_table);
htab_empty (cached_dw_loc_list_table);
}
return;
if (!tmpl_value_parm_die_table)
- tmpl_value_parm_die_table
- = VEC_alloc (die_arg_entry, gc, 32);
+ vec_alloc (tmpl_value_parm_die_table, 32);
entry.die = die;
entry.arg = arg;
- VEC_safe_push (die_arg_entry, gc,
- tmpl_value_parm_die_table,
- entry);
+ vec_safe_push (tmpl_value_parm_die_table, entry);
}
/* Return TRUE if T is an instance of generic type, FALSE
if (!generic_type_p (t))
return;
- if (generic_type_instances == NULL)
- generic_type_instances = VEC_alloc (tree, gc, 256);
+ if (!generic_type_instances)
+ vec_alloc (generic_type_instances, 256);
- VEC_safe_push (tree, gc, generic_type_instances, t);
+ vec_safe_push (generic_type_instances, t);
}
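The lazy vec_alloc pattern above reserves the expected capacity in a single allocation; relying on vec_safe_push alone would also work, but would grow geometrically from a small initial size. A sketch with an assumed table and element t:

  static GTY (()) vec<tree, va_gc> *instances;

  if (!instances)
    vec_alloc (instances, 256);   /* one 256-slot allocation up front */
  vec_safe_push (instances, t);   /* no reallocation until 256 is exceeded */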
/* Add a DW_AT_const_value attribute to DIEs that were scheduled
unsigned i;
die_arg_entry *e;
- FOR_EACH_VEC_ELT (die_arg_entry, tmpl_value_parm_die_table, i, e)
+ FOR_EACH_VEC_ELT (*tmpl_value_parm_die_table, i, e)
tree_add_const_value_attribute (e->die, e->arg);
}
}
unsigned i;
tree t;
- if (generic_type_instances == NULL)
+ if (!generic_type_instances)
return;
- FOR_EACH_VEC_ELT (tree, generic_type_instances, i, t)
+ FOR_EACH_VEC_ELT (*generic_type_instances, i, t)
gen_generic_params_dies (t);
}
table = new_line_info_table ();
table->end_label = end_label;
- VEC_safe_push (dw_line_info_table_p, gc, separate_line_info, table);
+ vec_safe_push (separate_line_info, table);
}
if (DWARF2_ASM_LINE_DEBUG_INFO)
dw_line_info_entry e;
e.opcode = opcode;
e.val = val;
- VEC_safe_push (dw_line_info_entry, gc, table->entries, e);
+ vec_safe_push (table->entries, e);
}
/* Output a label to mark the beginning of a source code line entry
e.code = DW_MACINFO_start_file;
e.lineno = lineno;
e.info = ggc_strdup (filename);
- VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
+ vec_safe_push (macinfo_table, e);
}
}
e.code = DW_MACINFO_end_file;
e.lineno = lineno;
e.info = NULL;
- VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
+ vec_safe_push (macinfo_table, e);
}
}
macinfo_entry e;
/* Insert a dummy first entry to be able to optimize the whole
predefined macro block using DW_MACRO_GNU_transparent_include. */
- if (VEC_empty (macinfo_entry, macinfo_table) && lineno <= 1)
+ if (macinfo_table->is_empty () && lineno <= 1)
{
e.code = 0;
e.lineno = 0;
e.info = NULL;
- VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
+ vec_safe_push (macinfo_table, e);
}
e.code = DW_MACINFO_define;
e.lineno = lineno;
e.info = ggc_strdup (buffer);
- VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
+ vec_safe_push (macinfo_table, e);
}
}
macinfo_entry e;
/* Insert a dummy first entry to be able to optimize the whole
predefined macro block using DW_MACRO_GNU_transparent_include. */
- if (VEC_empty (macinfo_entry, macinfo_table) && lineno <= 1)
+ if (macinfo_table->is_empty () && lineno <= 1)
{
e.code = 0;
e.lineno = 0;
e.info = NULL;
- VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
+ vec_safe_push (macinfo_table, e);
}
e.code = DW_MACINFO_undef;
e.lineno = lineno;
e.info = ggc_strdup (buffer);
- VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
+ vec_safe_push (macinfo_table, e);
}
}
If the define/undef entry should be emitted normally, return 0. */
static unsigned
-optimize_macinfo_range (unsigned int idx, VEC (macinfo_entry, gc) *files,
+optimize_macinfo_range (unsigned int idx, vec<macinfo_entry, va_gc> *files,
htab_t *macinfo_htab)
{
macinfo_entry *first, *second, *cur, *inc;
unsigned int i, count, encoded_filename_len, linebuf_len;
void **slot;
- first = &VEC_index (macinfo_entry, macinfo_table, idx);
- second = &VEC_index (macinfo_entry, macinfo_table, idx + 1);
+ first = &(*macinfo_table)[idx];
+ second = &(*macinfo_table)[idx + 1];
/* Optimize only if there are at least two consecutive define/undef ops,
and either all of them are before first DW_MACINFO_start_file
in some included header file. */
if (second->code != DW_MACINFO_define && second->code != DW_MACINFO_undef)
return 0;
- if (VEC_empty (macinfo_entry, files))
+ if (vec_safe_is_empty (files))
{
if (first->lineno > 1 || second->lineno > 1)
return 0;
with first and at the same time compute md5 checksum of their
codes, linenumbers and strings. */
md5_init_ctx (&ctx);
- for (i = idx; VEC_iterate (macinfo_entry, macinfo_table, i, cur); i++)
+ for (i = idx; macinfo_table->iterate (i, &cur); i++)
if (cur->code != DW_MACINFO_define && cur->code != DW_MACINFO_undef)
break;
- else if (VEC_empty (macinfo_entry, files) && cur->lineno > 1)
+ else if (vec_safe_is_empty (files) && cur->lineno > 1)
break;
else
{
/* From the containing include filename (if any) pick up just
usable characters from its basename. */
- if (VEC_empty (macinfo_entry, files))
+ if (vec_safe_is_empty (files))
base = "";
else
- base = lbasename (VEC_last (macinfo_entry, files).info);
+ base = lbasename (files->last ().info);
for (encoded_filename_len = 0, i = 0; base[i]; i++)
if (ISIDNUM (base[i]) || base[i] == '.')
encoded_filename_len++;
/* Construct a macinfo_entry for DW_MACRO_GNU_transparent_include
in the empty vector entry before the first define/undef. */
- inc = &VEC_index (macinfo_entry, macinfo_table, idx - 1);
+ inc = &(*macinfo_table)[idx - 1];
inc->code = DW_MACRO_GNU_transparent_include;
inc->lineno = 0;
inc->info = ggc_strdup (grp_name);
output_macinfo_op (inc);
/* And clear all macinfo_entry in the range to avoid emitting them
in the second pass. */
- for (i = idx;
- VEC_iterate (macinfo_entry, macinfo_table, i, cur)
- && i < idx + count;
- i++)
+ for (i = idx; macinfo_table->iterate (i, &cur) && i < idx + count; i++)
{
cur->code = 0;
cur->info = NULL;
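vec::iterate is the building block under these loops: it returns true and fills its out-parameter while the index is in range, so it can serve directly as a for-loop condition. Minimal shape, with handle () as a stand-in:

  macinfo_entry *cur;
  unsigned i;
  for (i = 0; macinfo_table->iterate (i, &cur); i++)
    handle (cur);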
unsigned i;
macinfo_entry *ref;
- for (i = 0; VEC_iterate (macinfo_entry, macinfo_table, i, ref); i++)
+ for (i = 0; macinfo_table && macinfo_table->iterate (i, &ref); i++)
{
switch (ref->code)
{
output_macinfo (void)
{
unsigned i;
- unsigned long length = VEC_length (macinfo_entry, macinfo_table);
+ unsigned long length = vec_safe_length (macinfo_table);
macinfo_entry *ref;
- VEC (macinfo_entry, gc) *files = NULL;
+ vec<macinfo_entry, va_gc> *files = NULL;
htab_t macinfo_htab = NULL;
if (! length)
DW_MACRO_GNU_transparent_include op is emitted and kept in
the vector before the first define/undef in the range and the
whole range of define/undef ops is not emitted and kept. */
- for (i = 0; VEC_iterate (macinfo_entry, macinfo_table, i, ref); i++)
+ for (i = 0; macinfo_table->iterate (i, &ref); i++)
{
switch (ref->code)
{
case DW_MACINFO_start_file:
- VEC_safe_push (macinfo_entry, gc, files, *ref);
+ vec_safe_push (files, *ref);
break;
case DW_MACINFO_end_file:
- if (!VEC_empty (macinfo_entry, files))
- VEC_pop (macinfo_entry, files);
+ if (!vec_safe_is_empty (files))
+ files->pop ();
break;
case DW_MACINFO_define:
case DW_MACINFO_undef:
if (!dwarf_strict
&& HAVE_COMDAT_GROUP
- && VEC_length (macinfo_entry, files) != 1
+ && vec_safe_length (files) != 1
&& i > 0
&& i + 1 < length
- && VEC_index (macinfo_entry, macinfo_table, i - 1).code == 0)
+ && (*macinfo_table)[i - 1].code == 0)
{
unsigned count = optimize_macinfo_range (i, files, &macinfo_htab);
if (count)
DW_MACRO_GNU_transparent_include entries terminate the
current chain and switch to a new comdat .debug_macinfo
section and emit the define/undef entries within it. */
- for (i = 0; VEC_iterate (macinfo_entry, macinfo_table, i, ref); i++)
+ for (i = 0; macinfo_table->iterate (i, &ref); i++)
switch (ref->code)
{
case 0:
cached_dw_loc_list_table_eq, NULL);
/* Allocate the initial hunk of the decl_scope_table. */
- decl_scope_table = VEC_alloc (tree, gc, 256);
+ vec_alloc (decl_scope_table, 256);
/* Allocate the initial hunk of the abbrev_die_table. */
abbrev_die_table = ggc_alloc_cleared_vec_dw_die_ref
abbrev_die_table_in_use = 1;
/* Allocate the pubtypes and pubnames vectors. */
- pubname_table = VEC_alloc (pubname_entry, gc, 32);
- pubtype_table = VEC_alloc (pubname_entry, gc, 32);
+ vec_alloc (pubname_table, 32);
+ vec_alloc (pubtype_table, 32);
- incomplete_types = VEC_alloc (tree, gc, 64);
+ vec_alloc (incomplete_types, 64);
- used_rtx_array = VEC_alloc (rtx, gc, 32);
+ vec_alloc (used_rtx_array, 32);
if (!dwarf_split_debug_info)
{
ASM_GENERATE_INTERNAL_LABEL (loc_section_label, DEBUG_LOC_SECTION_LABEL, 0);
if (debug_info_level >= DINFO_LEVEL_VERBOSE)
- macinfo_table = VEC_alloc (macinfo_entry, gc, 64);
+ vec_alloc (macinfo_table, 64);
switch_to_section (text_section);
ASM_OUTPUT_LABEL (asm_out_file, text_section_label);
dw_attr_ref a;
unsigned ix;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
{
if (a->dw_attr_val.val_class == dw_val_class_die_ref)
{
dw_attr_ref a;
unsigned ix;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
if (AT_class (a) == dw_val_class_str)
{
struct indirect_string_node *s = a->dw_attr_val.v.val_str;
are unusual in that they are pubnames that are the children of pubtypes.
They should only be marked via their parent DW_TAG_enumeration_type die,
not as roots in themselves. */
- FOR_EACH_VEC_ELT (pubname_entry, pubname_table, i, pub)
+ FOR_EACH_VEC_ELT (*pubname_table, i, pub)
if (pub->die->die_tag != DW_TAG_enumerator)
prune_unused_types_mark (pub->die, 1);
- for (i = 0; VEC_iterate (dw_die_ref, base_types, i, base_type); i++)
+ for (i = 0; base_types.iterate (i, &base_type); i++)
prune_unused_types_mark (base_type, 1);
if (debug_str_hash)
static inline void
move_linkage_attr (dw_die_ref die)
{
- unsigned ix = VEC_length (dw_attr_node, die->die_attr);
- dw_attr_node linkage = VEC_index (dw_attr_node, die->die_attr, ix - 1);
+ unsigned ix = vec_safe_length (die->die_attr);
+ dw_attr_node linkage = (*die->die_attr)[ix - 1];
gcc_assert (linkage.dw_attr == DW_AT_linkage_name
|| linkage.dw_attr == DW_AT_MIPS_linkage_name);
while (--ix > 0)
{
- dw_attr_node *prev = &VEC_index (dw_attr_node, die->die_attr, ix - 1);
+ dw_attr_node *prev = &(*die->die_attr)[ix - 1];
if (prev->dw_attr == DW_AT_decl_line || prev->dw_attr == DW_AT_name)
break;
}
- if (ix != VEC_length (dw_attr_node, die->die_attr) - 1)
+ if (ix != vec_safe_length (die->die_attr) - 1)
{
- VEC_pop (dw_attr_node, die->die_attr);
- VEC_quick_insert (dw_attr_node, die->die_attr, ix, linkage);
+ die->die_attr->pop ();
+ die->die_attr->quick_insert (ix, linkage);
}
}
base_type->die_mark++;
else
{
- VEC_safe_push (dw_die_ref, heap, base_types, base_type);
+ base_types.safe_push (base_type);
base_type->die_mark = 1;
}
}
unsigned int i;
dw_die_ref base_type, die, c;
- if (VEC_empty (dw_die_ref, base_types))
+ if (base_types.is_empty ())
return;
/* Sort by decreasing usage count, they will be added again in that
order later on. */
- VEC_qsort (dw_die_ref, base_types, base_type_cmp);
+ base_types.qsort (base_type_cmp);
die = comp_unit_die ();
c = die->die_child;
do
while (c != die->die_child);
gcc_assert (die->die_child);
c = die->die_child;
- for (i = 0; VEC_iterate (dw_die_ref, base_types, i, base_type); i++)
+ for (i = 0; base_types.iterate (i, &base_type); i++)
{
base_type->die_mark = 0;
base_type->die_sib = c->die_sib;
if (!rtl || !MEM_P (rtl))
return 1;
rtl = XEXP (rtl, 0);
- VEC_safe_push (rtx, gc, used_rtx_array, rtl);
+ vec_safe_push (used_rtx_array, rtl);
*addr = rtl;
return 0;
}
dw_loc_list_ref *curr, *start, loc;
unsigned ix;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
switch (AT_class (a))
{
case dw_val_class_loc_list:
unsigned ix;
void **slot;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
if (AT_class (a) == dw_val_class_loc_list)
{
dw_loc_list_ref list = AT_loc_list (a);
dw_attr_ref a;
unsigned ix;
- FOR_EACH_VEC_ELT (dw_attr_node, die->die_attr, ix, a)
+ FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a)
if (AT_class (a) == dw_val_class_loc_list)
{
dw_loc_list_ref list = AT_loc_list (a);
add_comp_dir_attribute (comp_unit_die ());
}
- for (i = 0; i < VEC_length (deferred_locations, deferred_locations_list); i++)
- {
- add_location_or_const_value_attribute (
- VEC_index (deferred_locations, deferred_locations_list, i).die,
- VEC_index (deferred_locations, deferred_locations_list, i).variable,
- false,
- DW_AT_location);
- }
+ if (deferred_locations_list)
+ for (i = 0; i < deferred_locations_list->length (); i++)
+ {
+ add_location_or_const_value_attribute (
+ (*deferred_locations_list)[i].die,
+ (*deferred_locations_list)[i].variable,
+ false,
+ DW_AT_location);
+ }
/* Traverse the limbo die list, and add parent/child links. The only
dies without parents that should be here are concrete instances of
add_ranges_by_labels (main_comp_unit_die, cold_text_section_label,
cold_end_label, &range_list_added, true);
- FOR_EACH_VEC_ELT (dw_fde_ref, fde_vec, fde_idx, fde)
+ FOR_EACH_VEC_ELT (*fde_vec, fde_idx, fde)
{
if (DECL_IGNORED_P (fde->decl))
continue;
}
dw_cfi_node;
-DEF_VEC_P (dw_cfi_ref);
-DEF_VEC_ALLOC_P (dw_cfi_ref, heap);
-DEF_VEC_ALLOC_P (dw_cfi_ref, gc);
-typedef VEC(dw_cfi_ref, gc) *cfi_vec;
+typedef vec<dw_cfi_ref, va_gc> *cfi_vec;
typedef struct dw_fde_struct *dw_fde_ref;
#include "flags.h"
#include "function.h"
#include "expr.h"
-#include "vecprim.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "hashtab.h"
static int collect_one_action_chain (htab_t, eh_region);
static int add_call_site (rtx, int, int);
-static void push_uleb128 (VEC (uchar, gc) **, unsigned int);
-static void push_sleb128 (VEC (uchar, gc) **, int);
+static void push_uleb128 (vec<uchar, va_gc> **, unsigned int);
+static void push_sleb128 (vec<uchar, va_gc> **, int);
#ifndef HAVE_AS_LEB128
static int dw2_size_of_call_site_table (int);
static int sjlj_size_of_call_site_table (void);
cfun->eh = ggc_alloc_cleared_eh_status ();
/* Make sure zero'th entries are used. */
- VEC_safe_push (eh_region, gc, cfun->eh->region_array, NULL);
- VEC_safe_push (eh_landing_pad, gc, cfun->eh->lp_array, NULL);
+ vec_safe_push (cfun->eh->region_array, (eh_region)0);
+ vec_safe_push (cfun->eh->lp_array, (eh_landing_pad)0);
}
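Index 0 of region_array and lp_array is deliberately burned so that a region or landing-pad number of zero can mean "none"; walks over these arrays therefore start at 1, as the later loops in this patch do:

  eh_region r;
  int i;
  for (i = 1; vec_safe_iterate (cfun->eh->region_array, i, &r); ++i)
    if (r)
      visit (r);   /* visit () is a hypothetical callback */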
\f
/* Routines to generate the exception tree somewhat directly.
cfun->eh->region_tree = new_eh;
}
- new_eh->index = VEC_length (eh_region, cfun->eh->region_array);
- VEC_safe_push (eh_region, gc, cfun->eh->region_array, new_eh);
+ new_eh->index = vec_safe_length (cfun->eh->region_array);
+ vec_safe_push (cfun->eh->region_array, new_eh);
/* Copy the language's notion of whether to use __cxa_end_cleanup. */
if (targetm.arm_eabi_unwinder && lang_hooks.eh_use_cxa_end_cleanup)
lp->next_lp = region->landing_pads;
lp->region = region;
- lp->index = VEC_length (eh_landing_pad, cfun->eh->lp_array);
+ lp->index = vec_safe_length (cfun->eh->lp_array);
region->landing_pads = lp;
- VEC_safe_push (eh_landing_pad, gc, cfun->eh->lp_array, lp);
+ vec_safe_push (cfun->eh->lp_array, lp);
return lp;
}
eh_region
get_eh_region_from_number_fn (struct function *ifun, int i)
{
- return VEC_index (eh_region, ifun->eh->region_array, i);
+ return (*ifun->eh->region_array)[i];
}
eh_region
eh_landing_pad
get_eh_landing_pad_from_number_fn (struct function *ifun, int i)
{
- return VEC_index (eh_landing_pad, ifun->eh->lp_array, i);
+ return (*ifun->eh->lp_array)[i];
}
eh_landing_pad
get_eh_region_from_lp_number_fn (struct function *ifun, int i)
{
if (i < 0)
- return VEC_index (eh_region, ifun->eh->region_array, -i);
+ return (*ifun->eh->region_array)[-i];
else if (i == 0)
return NULL;
else
{
eh_landing_pad lp;
- lp = VEC_index (eh_landing_pad, ifun->eh->lp_array, i);
+ lp = (*ifun->eh->lp_array)[i];
return lp->region;
}
}
gcc_assert (ifun->eh->region_array);
gcc_assert (ifun->eh->region_tree);
- b_outer = sbitmap_alloc (VEC_length (eh_region, ifun->eh->region_array));
+ b_outer = sbitmap_alloc (ifun->eh->region_array->length ());
bitmap_clear (b_outer);
do
n = XNEW (struct ttypes_filter);
n->t = type;
- n->filter = VEC_length (tree, cfun->eh->ttype_data) + 1;
+ n->filter = vec_safe_length (cfun->eh->ttype_data) + 1;
*slot = n;
- VEC_safe_push (tree, gc, cfun->eh->ttype_data, type);
+ vec_safe_push (cfun->eh->ttype_data, type);
}
return n->filter;
int len;
if (targetm.arm_eabi_unwinder)
- len = VEC_length (tree, cfun->eh->ehspec_data.arm_eabi);
+ len = vec_safe_length (cfun->eh->ehspec_data.arm_eabi);
else
- len = VEC_length (uchar, cfun->eh->ehspec_data.other);
+ len = vec_safe_length (cfun->eh->ehspec_data.other);
/* Filter value is a -1 based byte index into a uleb128 buffer. */
for (; list ; list = TREE_CHAIN (list))
{
if (targetm.arm_eabi_unwinder)
- VEC_safe_push (tree, gc, cfun->eh->ehspec_data.arm_eabi,
- TREE_VALUE (list));
+ vec_safe_push (cfun->eh->ehspec_data.arm_eabi, TREE_VALUE (list));
else
{
/* Look up each type in the list and encode its filter
}
}
if (targetm.arm_eabi_unwinder)
- VEC_safe_push (tree, gc, cfun->eh->ehspec_data.arm_eabi, NULL_TREE);
+ vec_safe_push (cfun->eh->ehspec_data.arm_eabi, NULL_TREE);
else
- VEC_safe_push (uchar, gc, cfun->eh->ehspec_data.other, 0);
+ vec_safe_push (cfun->eh->ehspec_data.other, (uchar)0);
}
return n->filter;
eh_region r;
eh_catch c;
- cfun->eh->ttype_data = VEC_alloc (tree, gc, 16);
+ vec_alloc (cfun->eh->ttype_data, 16);
if (targetm.arm_eabi_unwinder)
- cfun->eh->ehspec_data.arm_eabi = VEC_alloc (tree, gc, 64);
+ vec_alloc (cfun->eh->ehspec_data.arm_eabi, 64);
else
- cfun->eh->ehspec_data.other = VEC_alloc (uchar, gc, 64);
+ vec_alloc (cfun->eh->ehspec_data.other, 64);
ttypes = htab_create (31, ttypes_filter_hash, ttypes_filter_eq, free);
ehspec = htab_create (31, ehspec_filter_hash, ehspec_filter_eq, free);
- for (i = 1; VEC_iterate (eh_region, cfun->eh->region_array, i, r); ++i)
+ for (i = 1; vec_safe_iterate (cfun->eh->region_array, i, &r); ++i)
{
if (r == NULL)
continue;
if (flag_reorder_blocks_and_partition)
e_flags |= EDGE_PRESERVE;
- for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
{
basic_block bb;
rtx seq;
}
\f
-static VEC (int, heap) *sjlj_lp_call_site_index;
+static vec<int> sjlj_lp_call_site_index;
/* Process all active landing pads. Assign each one a compact dispatch
index, and a call-site index. */
int i, disp_index;
eh_landing_pad lp;
- crtl->eh.action_record_data = VEC_alloc (uchar, gc, 64);
+ vec_alloc (crtl->eh.action_record_data, 64);
ar_hash = htab_create (31, action_record_hash, action_record_eq, free);
disp_index = 0;
call_site_base = 1;
- for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
if (lp && lp->post_landing_pad)
{
int action, call_site;
/* Otherwise, look it up in the table. */
else
call_site = add_call_site (GEN_INT (disp_index), action, 0);
- VEC_replace (int, sjlj_lp_call_site_index, i, call_site);
+ sjlj_lp_call_site_index[i] = call_site;
disp_index++;
}
if (nothrow)
continue;
if (lp)
- this_call_site = VEC_index (int, sjlj_lp_call_site_index, lp->index);
+ this_call_site = sjlj_lp_call_site_index[lp->index];
else if (r == NULL)
{
/* Calls (and trapping insns) without notes are outside any
eh_region r;
edge e;
int i, disp_index;
- VEC(tree, heap) *dispatch_labels = NULL;
+ vec<tree> dispatch_labels = vec<tree>();
fc = crtl->eh.sjlj_fc;
/* If there's exactly one call site in the function, don't bother
generating a switch statement. */
if (num_dispatch > 1)
- dispatch_labels = VEC_alloc (tree, heap, num_dispatch);
+ dispatch_labels.create (num_dispatch);
- for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
if (lp && lp->post_landing_pad)
{
rtx seq2, label;
t_label = create_artificial_label (UNKNOWN_LOCATION);
t = build_int_cst (integer_type_node, disp_index);
case_elt = build_case_label (t, NULL, t_label);
- VEC_quick_push (tree, dispatch_labels, case_elt);
+ dispatch_labels.quick_push (case_elt);
label = label_rtx (t_label);
}
else
{
int num_dispatch;
- num_dispatch = VEC_length (eh_landing_pad, cfun->eh->lp_array);
+ num_dispatch = vec_safe_length (cfun->eh->lp_array);
if (num_dispatch == 0)
return;
- VEC_safe_grow (int, heap, sjlj_lp_call_site_index, num_dispatch);
+ sjlj_lp_call_site_index.safe_grow_cleared (num_dispatch);
num_dispatch = sjlj_assign_call_site_values ();
if (num_dispatch > 0)
sjlj_emit_function_exit ();
}
- VEC_free (int, heap, sjlj_lp_call_site_index);
+ sjlj_lp_call_site_index.release ();
}
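safe_grow_cleared plus plain indexed assignment replaces the old VEC_safe_grow/VEC_replace pair for this index table; the _cleared variant also zeroes the new slots, so unassigned entries read as zero. A sketch with assumed names from the surrounding code:

  vec<int> site_index = vec<int> ();
  site_index.safe_grow_cleared (num_dispatch); /* new elements zero-filled */
  site_index[lp->index] = call_site;           /* operator[] replaces VEC_replace */
  site_index.release ();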
/* After initial rtl generation, call back to finish generating
if (lp->post_landing_pad)
EH_LANDING_PAD_NR (lp->post_landing_pad) = 0;
- VEC_replace (eh_landing_pad, cfun->eh->lp_array, lp->index, NULL);
+ (*cfun->eh->lp_array)[lp->index] = NULL;
}
/* Splice REGION from the region tree. */
{
if (lp->post_landing_pad)
EH_LANDING_PAD_NR (lp->post_landing_pad) = 0;
- VEC_replace (eh_landing_pad, cfun->eh->lp_array, lp->index, NULL);
+ (*cfun->eh->lp_array)[lp->index] = NULL;
}
outer = region->outer;
}
*pp = region->next_peer;
- VEC_replace (eh_region, cfun->eh->region_array, region->index, NULL);
+ (*cfun->eh->region_array)[region->index] = NULL;
}
/* Invokes CALLBACK for every exception handler landing pad label.
eh_landing_pad lp;
int i;
- for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
{
if (lp)
{
}
if (lp_nr < 0)
- r = VEC_index (eh_region, cfun->eh->region_array, -lp_nr);
+ r = (*cfun->eh->region_array)[-lp_nr];
else
{
- lp = VEC_index (eh_landing_pad, cfun->eh->lp_array, lp_nr);
+ lp = (*cfun->eh->lp_array)[lp_nr];
r = lp->region;
}
gcc_assert (host_integerp (region_nr_t, 0));
region_nr = tree_low_cst (region_nr_t, 0);
- region = VEC_index (eh_region, cfun->eh->region_array, region_nr);
+ region = (*cfun->eh->region_array)[region_nr];
/* ??? We shouldn't have been able to delete a eh region without
deleting all the code that depended on it. */
if ((new_ar = *slot) == NULL)
{
new_ar = XNEW (struct action_record);
- new_ar->offset = VEC_length (uchar, crtl->eh.action_record_data) + 1;
+ new_ar->offset = crtl->eh.action_record_data->length () + 1;
new_ar->filter = filter;
new_ar->next = next;
*slot = new_ar;
push_sleb128 (&crtl->eh.action_record_data, filter);
if (next)
- next -= VEC_length (uchar, crtl->eh.action_record_data) + 1;
+ next -= crtl->eh.action_record_data->length () + 1;
push_sleb128 (&crtl->eh.action_record_data, next);
}
record->landing_pad = landing_pad;
record->action = action;
- VEC_safe_push (call_site_record, gc,
- crtl->eh.call_site_record_v[section], record);
+ vec_safe_push (crtl->eh.call_site_record_v[section], record);
- return call_site_base + VEC_length (call_site_record,
- crtl->eh.call_site_record_v[section]) - 1;
+ return call_site_base + crtl->eh.call_site_record_v[section]->length () - 1;
}
/* Turn REG_EH_REGION notes back into NOTE_INSN_EH_REGION notes.
rtx last_no_action_insn_before_switch = NULL_RTX;
int saved_call_site_base = call_site_base;
- crtl->eh.action_record_data = VEC_alloc (uchar, gc, 64);
+ vec_alloc (crtl->eh.action_record_data, 64);
ar_hash = htab_create (31, action_record_hash, action_record_eq, free);
opening a new one afterwards. */
else if (last_action != -3)
last_landing_pad = pc_rtx;
- call_site_base += VEC_length (call_site_record,
- crtl->eh.call_site_record_v[cur_sec]);
+ if (crtl->eh.call_site_record_v[cur_sec])
+ call_site_base += crtl->eh.call_site_record_v[cur_sec]->length ();
cur_sec++;
gcc_assert (crtl->eh.call_site_record_v[cur_sec] == NULL);
- crtl->eh.call_site_record_v[cur_sec]
- = VEC_alloc (call_site_record, gc, 10);
+ vec_alloc (crtl->eh.call_site_record_v[cur_sec], 10);
}
if (last_action >= -1 && ! first_no_action_insn)
};
\f
static void
-push_uleb128 (VEC (uchar, gc) **data_area, unsigned int value)
+push_uleb128 (vec<uchar, va_gc> **data_area, unsigned int value)
{
do
{
value >>= 7;
if (value)
byte |= 0x80;
- VEC_safe_push (uchar, gc, *data_area, byte);
+ vec_safe_push (*data_area, byte);
}
while (value);
}
static void
-push_sleb128 (VEC (uchar, gc) **data_area, int value)
+push_sleb128 (vec<uchar, va_gc> **data_area, int value)
{
unsigned char byte;
int more;
|| (value == -1 && (byte & 0x40) != 0));
if (more)
byte |= 0x80;
- VEC_safe_push (uchar, gc, *data_area, byte);
+ vec_safe_push (*data_area, byte);
}
while (more);
}
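For reference, LEB128 packs seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. A worked example using the classic DWARF value:

  /* 624485 == 0x98765 encodes as 0xe5 0x8e 0x26.  */
  vec<uchar, va_gc> *area = NULL;
  push_uleb128 (&area, 624485);
  gcc_assert (area->length () == 3
              && (*area)[0] == 0xe5
              && (*area)[1] == 0x8e
              && (*area)[2] == 0x26);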
static int
dw2_size_of_call_site_table (int section)
{
- int n = VEC_length (call_site_record, crtl->eh.call_site_record_v[section]);
+ int n = vec_safe_length (crtl->eh.call_site_record_v[section]);
int size = n * (4 + 4 + 4);
int i;
for (i = 0; i < n; ++i)
{
struct call_site_record_d *cs =
- VEC_index (call_site_record, crtl->eh.call_site_record_v[section], i);
+ (*crtl->eh.call_site_record_v[section])[i];
size += size_of_uleb128 (cs->action);
}
static int
sjlj_size_of_call_site_table (void)
{
- int n = VEC_length (call_site_record, crtl->eh.call_site_record_v[0]);
+ int n = vec_safe_length (crtl->eh.call_site_record_v[0]);
int size = 0;
int i;
for (i = 0; i < n; ++i)
{
struct call_site_record_d *cs =
- VEC_index (call_site_record, crtl->eh.call_site_record_v[0], i);
+ (*crtl->eh.call_site_record_v[0])[i];
size += size_of_uleb128 (INTVAL (cs->landing_pad));
size += size_of_uleb128 (cs->action);
}
static void
dw2_output_call_site_table (int cs_format, int section)
{
- int n = VEC_length (call_site_record, crtl->eh.call_site_record_v[section]);
+ int n = vec_safe_length (crtl->eh.call_site_record_v[section]);
int i;
const char *begin;
for (i = 0; i < n; ++i)
{
- struct call_site_record_d *cs =
- VEC_index (call_site_record, crtl->eh.call_site_record_v[section], i);
+ struct call_site_record_d *cs = (*crtl->eh.call_site_record_v[section])[i];
char reg_start_lab[32];
char reg_end_lab[32];
char landing_pad_lab[32];
static void
sjlj_output_call_site_table (void)
{
- int n = VEC_length (call_site_record, crtl->eh.call_site_record_v[0]);
+ int n = vec_safe_length (crtl->eh.call_site_record_v[0]);
int i;
for (i = 0; i < n; ++i)
{
- struct call_site_record_d *cs =
- VEC_index (call_site_record, crtl->eh.call_site_record_v[0], i);
+ struct call_site_record_d *cs = (*crtl->eh.call_site_record_v[0])[i];
dw2_asm_output_data_uleb128 (INTVAL (cs->landing_pad),
"region %d landing pad", i);
int have_tt_data;
int tt_format_size = 0;
- have_tt_data = (VEC_length (tree, cfun->eh->ttype_data)
+ have_tt_data = (vec_safe_length (cfun->eh->ttype_data)
|| (targetm.arm_eabi_unwinder
- ? VEC_length (tree, cfun->eh->ehspec_data.arm_eabi)
- : VEC_length (uchar, cfun->eh->ehspec_data.other)));
+ ? vec_safe_length (cfun->eh->ehspec_data.arm_eabi)
+ : vec_safe_length (cfun->eh->ehspec_data.other)));
/* Indicate the format of the @TType entries. */
if (! have_tt_data)
before_disp = 1 + 1;
after_disp = (1 + size_of_uleb128 (call_site_len)
+ call_site_len
- + VEC_length (uchar, crtl->eh.action_record_data)
- + (VEC_length (tree, cfun->eh->ttype_data)
+ + vec_safe_length (crtl->eh.action_record_data)
+ + (vec_safe_length (cfun->eh->ttype_data)
* tt_format_size));
disp = after_disp;
/* ??? Decode and interpret the data for flag_debug_asm. */
{
uchar uc;
- FOR_EACH_VEC_ELT (uchar, crtl->eh.action_record_data, i, uc)
+ FOR_EACH_VEC_ELT (*crtl->eh.action_record_data, i, uc)
dw2_asm_output_data (1, uc, i ? NULL : "Action record table");
}
if (have_tt_data)
assemble_align (tt_format_size * BITS_PER_UNIT);
- i = VEC_length (tree, cfun->eh->ttype_data);
+ i = vec_safe_length (cfun->eh->ttype_data);
while (i-- > 0)
{
- tree type = VEC_index (tree, cfun->eh->ttype_data, i);
+ tree type = (*cfun->eh->ttype_data)[i];
output_ttype (type, tt_format, tt_format_size);
}
{
tree type;
for (i = 0;
- VEC_iterate (tree, cfun->eh->ehspec_data.arm_eabi, i, type); ++i)
+ vec_safe_iterate (cfun->eh->ehspec_data.arm_eabi, i, &type); ++i)
output_ttype (type, tt_format, tt_format_size);
}
else
{
uchar uc;
for (i = 0;
- VEC_iterate (uchar, cfun->eh->ehspec_data.other, i, uc); ++i)
+ vec_safe_iterate (cfun->eh->ehspec_data.other, i, &uc); ++i)
dw2_asm_output_data (1, uc,
i ? NULL : "Exception specification table");
}
targetm.asm_out.emit_except_table_label (asm_out_file);
output_one_function_exception_table (0);
- if (crtl->eh.call_site_record_v[1] != NULL)
+ if (crtl->eh.call_site_record_v[1])
output_one_function_exception_table (1);
switch_to_section (current_function_section ());
return;
count_r = 0;
- for (i = 1; VEC_iterate (eh_region, fun->eh->region_array, i, r); ++i)
+ for (i = 1; vec_safe_iterate (fun->eh->region_array, i, &r); ++i)
if (r)
{
if (r->index == i)
}
count_lp = 0;
- for (i = 1; VEC_iterate (eh_landing_pad, fun->eh->lp_array, i, lp); ++i)
+ for (i = 1; vec_safe_iterate (fun->eh->lp_array, i, &lp); ++i)
if (lp)
{
if (lp->index == i)
r = fun->eh->region_tree;
while (1)
{
- if (VEC_index (eh_region, fun->eh->region_array, r->index) != r)
+ if ((*fun->eh->region_array)[r->index] != r)
{
error ("region_array is corrupted for region %i", r->index);
err = true;
for (lp = r->landing_pads; lp ; lp = lp->next_lp)
{
- if (VEC_index (eh_landing_pad, fun->eh->lp_array, lp->index) != lp)
+ if ((*fun->eh->lp_array)[lp->index] != lp)
{
error ("lp_array is corrupted for lp %i", lp->index);
err = true;
#endif
#include "hashtab.h"
-#include "vecprim.h"
-#include "vecir.h"
struct function;
struct eh_region_d;
typedef struct eh_catch_d *eh_catch;
typedef struct eh_region_d *eh_region;
-DEF_VEC_P(eh_region);
-DEF_VEC_ALLOC_P(eh_region, gc);
-DEF_VEC_ALLOC_P(eh_region, heap);
-DEF_VEC_P(eh_landing_pad);
-DEF_VEC_ALLOC_P(eh_landing_pad, gc);
/* The exception status for each function. */
eh_region region_tree;
/* The same information as an indexable array. */
- VEC(eh_region,gc) *region_array;
+ vec<eh_region, va_gc> *region_array;
/* The landing pads as an indexable array. */
- VEC(eh_landing_pad,gc) *lp_array;
+ vec<eh_landing_pad, va_gc> *lp_array;
/* At the gimple level, a mapping from gimple statement to landing pad
or must-not-throw region. See record_stmt_eh_region. */
/* All of the runtime type data used by the function. These objects
are emitted to the lang-specific-data-area for the function. */
- VEC(tree,gc) *ttype_data;
+ vec<tree, va_gc> *ttype_data;
/* The table of all action chains. These encode the eh_region tree in
a compact form for use by the runtime, and the table is also emitted
to the lang-specific-data-area. Note that the ARM EABI uses a
different format for the encoding than all other ports. */
union eh_status_u {
- VEC(tree,gc) * GTY((tag ("1"))) arm_eabi;
- VEC(uchar,gc) * GTY((tag ("0"))) other;
+ vec<tree, va_gc> *GTY((tag ("1"))) arm_eabi;
+ vec<uchar, va_gc> *GTY((tag ("0"))) other;
} GTY ((desc ("targetm.arm_eabi_unwinder"))) ehspec_data;
};
register whose mode size isn't equal to SIZE since
clear_storage can't handle this case. */
else if (size > 0
- && (((int)VEC_length (constructor_elt, CONSTRUCTOR_ELTS (exp))
+ && (((int)vec_safe_length (CONSTRUCTOR_ELTS (exp))
!= fields_length (type))
|| mostly_zeros_p (exp))
&& (!REG_P (target)
/* Store each element of the constructor into the corresponding
element of TARGET, determined by counting the elements. */
for (idx = 0, i = 0;
- VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce);
+ vec_safe_iterate (CONSTRUCTOR_ELTS (exp), idx, &ce);
idx++, i += bitsize / elt_size)
{
HOST_WIDE_INT eltpos;
constructor_elt *ce;
unsigned HOST_WIDE_INT idx;
- FOR_EACH_VEC_ELT (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce)
+ FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (exp), idx, ce)
if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
|| !safe_from_p (x, ce->value, 0))
return 0;
}
if (!tmp)
{
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
unsigned i;
- v = VEC_alloc (constructor_elt, gc, VECTOR_CST_NELTS (exp));
+ vec_alloc (v, VECTOR_CST_NELTS (exp));
for (i = 0; i < VECTOR_CST_NELTS (exp); ++i)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, VECTOR_CST_ELT (exp, i));
tmp = build_constructor (type, v);
extern void expand_case (gimple);
/* Like expand_case but special-case for SJLJ exception dispatching. */
-extern void expand_sjlj_dispatch_table (rtx, VEC(tree,heap) *);
+extern void expand_sjlj_dispatch_table (rtx, vec<tree>);
#endif /* GCC_EXPR_H */
#include "cgraph.h"
#include "coverage.h"
#include "df.h"
-#include "vecprim.h"
#include "ggc.h"
#include "cfgloop.h"
#include "params.h"
static int *insn_lengths;
-VEC(int,heap) *insn_addresses_;
+vec<int> insn_addresses_;
/* Max uid for which the above arrays are valid. */
static int insn_lengths_max_uid;
{
constructor_elt *elt;
- FOR_EACH_VEC_ELT (constructor_elt, CONSTRUCTOR_ELTS (arg), i, elt)
+ FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (arg), i, elt)
if (i >= nelts || TREE_CODE (TREE_TYPE (elt->value)) == VECTOR_TYPE)
return false;
else
if (need_ctor)
{
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, nelts);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, nelts);
for (i = 0; i < nelts; i++)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[2 * nelts + i]);
return build_constructor (type, v);
}
else
{
- VEC(constructor_elt, gc) *vals;
+ vec<constructor_elt, va_gc> *vals;
unsigned i;
if (CONSTRUCTOR_NELTS (arg0) == 0)
- return build_constructor (type, NULL);
+ return build_constructor (type,
+ NULL);
if (TREE_CODE (TREE_TYPE (CONSTRUCTOR_ELT (arg0,
0)->value))
!= VECTOR_TYPE)
{
- vals = VEC_alloc (constructor_elt, gc, n);
+ vec_alloc (vals, n);
for (i = 0;
i < n && idx + i < CONSTRUCTOR_NELTS (arg0);
++i)
&& TREE_CODE (op0) == CONSTRUCTOR
&& ! type_contains_placeholder_p (TREE_TYPE (op0)))
{
- VEC(constructor_elt,gc) *elts = CONSTRUCTOR_ELTS (op0);
- unsigned HOST_WIDE_INT end = VEC_length (constructor_elt, elts);
+ vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (op0);
+ unsigned HOST_WIDE_INT end = vec_safe_length (elts);
unsigned HOST_WIDE_INT begin = 0;
/* Find a matching index by means of a binary search. */
while (begin != end)
{
unsigned HOST_WIDE_INT middle = (begin + end) / 2;
- tree index = VEC_index (constructor_elt, elts, middle).index;
+ tree index = (*elts)[middle].index;
if (TREE_CODE (index) == INTEGER_CST
&& tree_int_cst_lt (index, op1))
&& tree_int_cst_lt (op1, TREE_OPERAND (index, 0)))
end = middle;
else
- return VEC_index (constructor_elt, elts, middle).value;
+ return (*elts)[middle].value;
}
}
+2012-11-17 Diego Novillo <dnovillo@google.com>
+
+ Adjust for new vec API (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
+
+ * frontend-passes.c: Use new vec API in vec.h.
+ * trans-array.c: Likewise.
+ * trans-common.c: Likewise.
+ * trans-decl.c: Likewise.
+ * trans-expr.c: Likewise.
+ * trans-intrinsic.c: Likewise.
+ * trans-openmp.c: Likewise.
+ * trans-stmt.c: Likewise.
+ * trans-types.c: Likewise.
+ * trans.h: Likewise.
+
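For reference, the mechanical mapping applied across these Fortran files looks like this (an illustrative sketch, not text from the patch; tmp stands for any tree):

/* Before: allocation strategy named in every macro call.  */
VEC(tree,gc) *args = NULL;
VEC_safe_push (tree, gc, args, tmp);

/* After: the allocator is a template parameter, and free functions
   such as vec_safe_push tolerate a NULL vector pointer.  */
vec<tree, va_gc> *args = NULL;
vec_safe_push (args, tmp);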
2012-11-17 Jakub Jelinek <jakub@redhat.com>
PR fortran/55341
* resolve.c (resolve_typebound_intrinsic_op): Only add typebound
operators to the operator list in the namespace of the derived type.
+
2012-11-12 Jan Hubicka <jh@suse.cz>
* f95-lang.c (ATTR_NOTHROW_LEAF_MALLOC_LIST): New macro.
static bool optimize_trim (gfc_expr *);
static bool optimize_lexical_comparison (gfc_expr *);
static void optimize_minmaxloc (gfc_expr **);
-static bool empty_string (gfc_expr *e);
+static bool is_empty_string (gfc_expr *e);
/* How deep we are inside an argument list. */
remove_trim (rhs);
/* Replace a = ' ' by a = '' to optimize away a memcpy. */
- if (empty_string(rhs))
+ if (is_empty_string (rhs))
rhs->value.character.length = 0;
}
/* Return true if a constant string contains only blanks. */
static bool
-empty_string (gfc_expr *e)
+is_empty_string (gfc_expr *e)
{
int i;
&& (op == INTRINSIC_EQ || op == INTRINSIC_NE))
{
bool empty_op1, empty_op2;
- empty_op1 = empty_string (op1);
- empty_op2 = empty_string (op2);
+ empty_op1 = is_empty_string (op1);
+ empty_op2 = is_empty_string (op2);
if (empty_op1 || empty_op2)
{
else
{
/* Collect multiple scalar constants into a constructor. */
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
tree init;
tree bound;
tree tmptype;
gfc_array_spec as;
gfc_se se;
int i;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* First traverse the constructor list, converting the constants
to tree to build an initializer. */
HOST_WIDE_INT hi;
unsigned HOST_WIDE_INT lo;
tree index, range;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
if (expr->expr_type == EXPR_VARIABLE
&& expr->symtree->n.sym->attr.flavor == FL_PARAMETER
tree tmp, field;
tree init;
unsigned char *data, *chk;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
tree type = unsigned_char_type_node;
int i;
if (is_init)
{
tree ctor, tmp;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
if (field != NULL_TREE && field_init != NULL_TREE)
CONSTRUCTOR_APPEND_ELT (v, field, field_init);
}
}
- gcc_assert (!VEC_empty (constructor_elt, v));
+ gcc_assert (!v->is_empty ());
ctor = build_constructor (union_type, v);
TREE_CONSTANT (ctor) = 1;
TREE_STATIC (ctor) = 1;
gfc_save_backend_locus (&old_loc);
for (el = ns->entries; el; el = el->next)
{
- VEC(tree,gc) *args = NULL;
- VEC(tree,gc) *string_args = NULL;
+ vec<tree, va_gc> *args = NULL;
+ vec<tree, va_gc> *string_args = NULL;
thunk_sym = el->sym;
/* Pass extra parameter identifying this entry point. */
tmp = build_int_cst (gfc_array_index_type, el->id);
- VEC_safe_push (tree, gc, args, tmp);
+ vec_safe_push (args, tmp);
if (thunk_sym->attr.function)
{
if (gfc_return_by_reference (ns->proc_name))
{
tree ref = DECL_ARGUMENTS (current_function_decl);
- VEC_safe_push (tree, gc, args, ref);
+ vec_safe_push (args, ref);
if (ns->proc_name->ts.type == BT_CHARACTER)
- VEC_safe_push (tree, gc, args, DECL_CHAIN (ref));
+ vec_safe_push (args, DECL_CHAIN (ref));
}
}
{
/* Pass the argument. */
DECL_ARTIFICIAL (thunk_formal->sym->backend_decl) = 1;
- VEC_safe_push (tree, gc, args, thunk_formal->sym->backend_decl);
+ vec_safe_push (args, thunk_formal->sym->backend_decl);
if (formal->sym->ts.type == BT_CHARACTER)
{
tmp = thunk_formal->sym->ts.u.cl->backend_decl;
- VEC_safe_push (tree, gc, string_args, tmp);
+ vec_safe_push (string_args, tmp);
}
}
else
{
/* Pass NULL for a missing argument. */
- VEC_safe_push (tree, gc, args, null_pointer_node);
+ vec_safe_push (args, null_pointer_node);
if (formal->sym->ts.type == BT_CHARACTER)
{
tmp = build_int_cst (gfc_charlen_type_node, 0);
- VEC_safe_push (tree, gc, string_args, tmp);
+ vec_safe_push (string_args, tmp);
}
}
}
/* Call the master function. */
- VEC_safe_splice (tree, gc, args, string_args);
+ vec_safe_splice (args, string_args);
tmp = ns->proc_name->backend_decl;
tmp = build_call_expr_loc_vec (input_location, tmp, args);
if (ns->proc_name->attr.mixed_entry_master)
build_library_function_decl_1 (tree name, const char *spec,
tree rettype, int nargs, va_list p)
{
- VEC(tree,gc) *arglist;
+ vec<tree, va_gc> *arglist;
tree fntype;
tree fndecl;
int n;
gcc_assert (current_function_decl == NULL_TREE);
/* Create a list of the argument types. */
- arglist = VEC_alloc (tree, gc, abs (nargs));
+ vec_alloc (arglist, abs (nargs));
for (n = abs (nargs); n > 0; n--)
{
tree argtype = va_arg (p, tree);
- VEC_quick_push (tree, arglist, argtype);
+ arglist->quick_push (argtype);
}
/* Build the function type and decl. */
language standard parameters. */
{
tree array_type, array, var;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* Passing a new option to the library requires four modifications:
+ add it to the tree_cons list below
tree to_data;
tree to_ref;
tree from_ref;
- VEC(tree,gc) *args;
+ vec<tree, va_gc> *args;
tree tmp;
tree index;
stmtblock_t loopbody;
if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (from_data)))
{
from_ref = gfc_get_class_array_ref (index, from);
- VEC_safe_push (tree, gc, args, from_ref);
+ vec_safe_push (args, from_ref);
}
else
- VEC_safe_push (tree, gc, args, from_data);
+ vec_safe_push (args, from_data);
to_ref = gfc_get_class_array_ref (index, to);
- VEC_safe_push (tree, gc, args, to_ref);
+ vec_safe_push (args, to_ref);
tmp = build_call_vec (fcn_type, fcn, args);
else
{
gcc_assert (!GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (from_data)));
- VEC_safe_push (tree, gc, args, from_data);
- VEC_safe_push (tree, gc, args, to_data);
+ vec_safe_push (args, from_data);
+ vec_safe_push (args, to_data);
tmp = build_call_vec (fcn_type, fcn, args);
}
int
gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
gfc_actual_arglist * args, gfc_expr * expr,
- VEC(tree,gc) *append_args)
+ vec<tree, va_gc> *append_args)
{
gfc_interface_mapping mapping;
- VEC(tree,gc) *arglist;
- VEC(tree,gc) *retargs;
+ vec<tree, va_gc> *arglist;
+ vec<tree, va_gc> *retargs;
tree tmp;
tree fntype;
gfc_se parmse;
tree var;
tree len;
tree base_object;
- VEC(tree,gc) *stringargs;
+ vec<tree, va_gc> *stringargs;
tree result = NULL;
gfc_formal_arglist *formal;
gfc_actual_arglist *arg;
/* Character strings are passed as two parameters, a length and a
pointer - except for Bind(c) which only passes the pointer. */
if (parmse.string_length != NULL_TREE && !sym->attr.is_bind_c)
- VEC_safe_push (tree, gc, stringargs, parmse.string_length);
+ vec_safe_push (stringargs, parmse.string_length);
/* For descriptorless coarrays and assumed-shape coarray dummies, we
pass the token and the offset as additional arguments. */
&& e == NULL)
{
/* Token and offset. */
- VEC_safe_push (tree, gc, stringargs, null_pointer_node);
- VEC_safe_push (tree, gc, stringargs,
- build_int_cst (gfc_array_index_type, 0));
+ vec_safe_push (stringargs, null_pointer_node);
+ vec_safe_push (stringargs, build_int_cst (gfc_array_index_type, 0));
gcc_assert (fsym->attr.optional);
}
else if (fsym && fsym->attr.codimension
tmp = GFC_TYPE_ARRAY_CAF_TOKEN (caf_type);
}
- VEC_safe_push (tree, gc, stringargs, tmp);
+ vec_safe_push (stringargs, tmp);
if (GFC_DESCRIPTOR_TYPE_P (caf_type)
&& GFC_TYPE_ARRAY_AKIND (caf_type) == GFC_ARRAY_ALLOCATABLE)
offset = fold_build2_loc (input_location, PLUS_EXPR,
gfc_array_index_type, offset, tmp);
- VEC_safe_push (tree, gc, stringargs, offset);
+ vec_safe_push (stringargs, offset);
}
- VEC_safe_push (tree, gc, arglist, parmse.expr);
+ vec_safe_push (arglist, parmse.expr);
}
gfc_finish_interface_mapping (&mapping, &se->pre, &se->post);
if (ts.deferred)
cl.backend_decl = gfc_create_var (gfc_charlen_type_node, "slen");
else if (!sym->attr.dummy)
- cl.backend_decl = VEC_index (tree, stringargs, 0);
+ cl.backend_decl = (*stringargs)[0];
else
{
formal = sym->ns->proc_name->formal;
else
result = build_fold_indirect_ref_loc (input_location,
se->expr);
- VEC_safe_push (tree, gc, retargs, se->expr);
+ vec_safe_push (retargs, se->expr);
}
else if (comp && comp->attr.dimension)
{
/* Pass the temporary as the first argument. */
result = info->descriptor;
tmp = gfc_build_addr_expr (NULL_TREE, result);
- VEC_safe_push (tree, gc, retargs, tmp);
+ vec_safe_push (retargs, tmp);
}
else if (!comp && sym->result->attr.dimension)
{
/* Pass the temporary as the first argument. */
result = info->descriptor;
tmp = gfc_build_addr_expr (NULL_TREE, result);
- VEC_safe_push (tree, gc, retargs, tmp);
+ vec_safe_push (retargs, tmp);
}
else if (ts.type == BT_CHARACTER)
{
else
var = gfc_conv_string_tmp (se, type, len);
- VEC_safe_push (tree, gc, retargs, var);
+ vec_safe_push (retargs, var);
}
else
{
type = gfc_get_complex_type (ts.kind);
var = gfc_build_addr_expr (NULL_TREE, gfc_create_var (type, "cmplx"));
- VEC_safe_push (tree, gc, retargs, var);
+ vec_safe_push (retargs, var);
}
/* Add the string length to the argument list. */
if (TREE_CODE (tmp) != VAR_DECL)
tmp = gfc_evaluate_now (len, &se->pre);
tmp = gfc_build_addr_expr (NULL_TREE, tmp);
- VEC_safe_push (tree, gc, retargs, tmp);
+ vec_safe_push (retargs, tmp);
}
else if (ts.type == BT_CHARACTER)
- VEC_safe_push (tree, gc, retargs, len);
+ vec_safe_push (retargs, len);
}
gfc_free_interface_mapping (&mapping);
/* We need to glom RETARGS + ARGLIST + STRINGARGS + APPEND_ARGS. */
- arglen = (VEC_length (tree, arglist)
- + VEC_length (tree, stringargs) + VEC_length (tree, append_args));
- VEC_reserve_exact (tree, gc, retargs, arglen);
+ arglen = (vec_safe_length (arglist) + vec_safe_length (stringargs)
+ + vec_safe_length (append_args));
+ vec_safe_reserve (retargs, arglen);
/* Add the return arguments. */
- VEC_splice (tree, retargs, arglist);
+ retargs->splice (arglist);
/* Add the hidden string length parameters to the arguments. */
- VEC_splice (tree, retargs, stringargs);
+ retargs->splice (stringargs);
/* We may want to append extra arguments here. This is used e.g. for
calls to libgfortran_matmul_??, which need extra information. */
- if (!VEC_empty (tree, append_args))
- VEC_splice (tree, retargs, append_args);
+ if (!vec_safe_is_empty (append_args))
+ retargs->splice (append_args);
arglist = retargs;
/* Generate the actual call. */
if (!sym)
sym = expr->symtree->n.sym;
- gfc_conv_procedure_call (se, sym, expr->value.function.actual, expr, NULL);
+ gfc_conv_procedure_call (se, sym, expr->value.function.actual, expr,
+ NULL);
}
tree val;
tree type;
tree tmp;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
gcc_assert (se->ss == NULL);
gcc_assert (expr->expr_type == EXPR_STRUCTURE);
a = {} instead. */
if (!POINTER_TYPE_P (TREE_TYPE (dest)))
return build2_loc (input_location, MODIFY_EXPR, void_type_node,
- dest, build_constructor (TREE_TYPE (dest), NULL));
+ dest, build_constructor (TREE_TYPE (dest),
+ NULL));
/* Convert arguments to the correct types. */
dest = fold_convert (pvoid_type_node, dest);
gfc_get_intrinsic_lib_fndecl (gfc_intrinsic_map_t * m, gfc_expr * expr)
{
tree type;
- VEC(tree,gc) *argtypes;
+ vec<tree, va_gc> *argtypes;
tree fndecl;
gfc_actual_arglist *actual;
tree *pdecl;
for (actual = expr->value.function.actual; actual; actual = actual->next)
{
type = gfc_typenode_for_spec (&actual->expr->ts);
- VEC_safe_push (tree, gc, argtypes, type);
+ vec_safe_push (argtypes, type);
}
type = build_function_type_vec (gfc_typenode_for_spec (ts), argtypes);
fndecl = build_decl (input_location,
gfc_conv_intrinsic_funcall (gfc_se * se, gfc_expr * expr)
{
gfc_symbol *sym;
- VEC(tree,gc) *append_args;
+ vec<tree, va_gc> *append_args;
gcc_assert (!se->ss || se->ss->info->expr == expr);
gemm_fndecl = gfor_fndecl_zgemm;
}
- append_args = VEC_alloc (tree, gc, 3);
- VEC_quick_push (tree, append_args, build_int_cst (cint, 1));
- VEC_quick_push (tree, append_args,
- build_int_cst (cint, gfc_option.blas_matmul_limit));
- VEC_quick_push (tree, append_args,
- gfc_build_addr_expr (NULL_TREE, gemm_fndecl));
+ vec_alloc (append_args, 3);
+ append_args->quick_push (build_int_cst (cint, 1));
+ append_args->quick_push (build_int_cst (cint,
+ gfc_option.blas_matmul_limit));
+ append_args->quick_push (gfc_build_addr_expr (NULL_TREE,
+ gemm_fndecl));
}
else
{
- append_args = VEC_alloc (tree, gc, 3);
- VEC_quick_push (tree, append_args, build_int_cst (cint, 0));
- VEC_quick_push (tree, append_args, build_int_cst (cint, 0));
- VEC_quick_push (tree, append_args, null_pointer_node);
+ vec_alloc (append_args, 3);
+ append_args->quick_push (build_int_cst (cint, 0));
+ append_args->quick_push (build_int_cst (cint, 0));
+ append_args->quick_push (null_pointer_node);
}
}
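Note the pairing of vec_alloc with quick_push in this hunk: quick_push does not grow the vector, it asserts that reserved capacity is available, so the count passed to vec_alloc must cover every subsequent push. A sketch of the invariant (t1 and t2 are hypothetical trees):

vec<tree, va_gc> *v;
vec_alloc (v, 2);      /* Reserve exactly two slots.  */
v->quick_push (t1);    /* No reallocation; capacity was reserved above.  */
v->quick_push (t2);
/* A third quick_push would trip the checking assertion; callers
   that need to grow use vec_safe_push instead.  */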
unsigned cur_pos;
gfc_actual_arglist* arg;
gfc_symbol* sym;
- VEC(tree,gc) *append_args;
+ vec<tree, va_gc> *append_args;
/* Find the two arguments given as position. */
cur_pos = 0;
tree dummy;
dummy = build_int_cst (gfc_charlen_type_node, 0);
- append_args = VEC_alloc (tree, gc, 1);
- VEC_quick_push (tree, append_args, dummy);
+ vec_alloc (append_args, 1);
+ append_args->quick_push (dummy);
}
/* Build the call itself. */
gfc_actual_arglist *actual;
tree type;
gfc_se argse;
- VEC(tree,gc) *args = NULL;
+ vec<tree, va_gc> *args = NULL;
for (actual = expr->value.function.actual; actual; actual = actual->next)
{
gfc_add_block_to_block (&se->pre, &argse.pre);
gfc_add_block_to_block (&se->post, &argse.post);
- VEC_safe_push (tree, gc, args, argse.expr);
+ vec_safe_push (args, argse.expr);
}
/* Convert it to the required type. */
tree init;
} dovar_init;
-DEF_VEC_O(dovar_init);
-DEF_VEC_ALLOC_O(dovar_init,heap);
static tree
gfc_trans_omp_do (gfc_code *code, stmtblock_t *pblock,
stmtblock_t body;
gfc_omp_clauses *clauses = code->ext.omp_clauses;
int i, collapse = clauses->collapse;
- VEC(dovar_init,heap) *inits = NULL;
+ vec<dovar_init> inits = vec<dovar_init>();
dovar_init *di;
unsigned ix;
tmp = fold_build2_loc (input_location, MULT_EXPR, type, count, step);
tmp = fold_build2_loc (input_location, PLUS_EXPR, type, from, tmp);
dovar_init e = {dovar, tmp};
- VEC_safe_push (dovar_init, heap, inits, e);
+ inits.safe_push (e);
}
if (!dovar_found)
gfc_start_block (&body);
- FOR_EACH_VEC_ELT (dovar_init, inits, ix, di)
+ FOR_EACH_VEC_ELT (inits, ix, di)
gfc_add_modify (&body, di->var, di->init);
- VEC_free (dovar_init, heap, inits);
+ inits.release ();
/* Cycle statement is implemented with a goto. Exit statement must not be
present for this loop. */
/* Add the subroutine call to the block. */
gfc_conv_procedure_call (&loopse, code->resolved_sym,
- code->ext.actual, code->expr1, NULL);
+ code->ext.actual, code->expr1,
+ NULL);
if (mask && count1)
{
gfc_code *c;
gfc_se se, expr1se;
int n, k;
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
tree pchartype = gfc_get_pchar_type (code->expr1->ts.kind);
/* Generate the structure describing the branches. */
for (d = cp; d; d = d->right)
{
- VEC(constructor_elt,gc) *node = NULL;
+ vec<constructor_elt, va_gc> *node = NULL;
gfc_init_se (&se, NULL);
gfc_get_function_type (gfc_symbol * sym)
{
tree type;
- VEC(tree,gc) *typelist;
+ vec<tree, va_gc> *typelist;
gfc_formal_arglist *f;
gfc_symbol *arg;
int alternate_return;
if (sym->attr.entry_master)
/* Additional parameter for selecting an entry point. */
- VEC_safe_push (tree, gc, typelist, gfc_array_index_type);
+ vec_safe_push (typelist, gfc_array_index_type);
if (sym->result)
arg = sym->result;
|| arg->ts.type == BT_CHARACTER)
type = build_reference_type (type);
- VEC_safe_push (tree, gc, typelist, type);
+ vec_safe_push (typelist, type);
if (arg->ts.type == BT_CHARACTER)
{
if (!arg->ts.deferred)
/* Transfer by value. */
- VEC_safe_push (tree, gc, typelist, gfc_charlen_type_node);
+ vec_safe_push (typelist, gfc_charlen_type_node);
else
/* Deferred character lengths are transferred by reference
so that the value can be returned. */
- VEC_safe_push (tree, gc, typelist,
- build_pointer_type (gfc_charlen_type_node));
+ vec_safe_push (typelist, build_pointer_type (gfc_charlen_type_node));
}
}
used without an explicit interface, and cannot be passed as
actual parameters for a dummy procedure. */
- VEC_safe_push (tree, gc, typelist, type);
+ vec_safe_push (typelist, type);
}
else
{
so that the value can be returned. */
type = build_pointer_type (gfc_charlen_type_node);
- VEC_safe_push (tree, gc, typelist, type);
+ vec_safe_push (typelist, type);
}
}
- if (!VEC_empty (tree, typelist)
+ if (!vec_safe_is_empty (typelist)
|| sym->attr.is_main_program
|| sym->attr.if_source != IFSRC_UNKNOWN)
is_varargs = false;
/* Used to call ordinary functions/subroutines
and procedure pointer components. */
int gfc_conv_procedure_call (gfc_se *, gfc_symbol *, gfc_actual_arglist *,
- gfc_expr *, VEC(tree,gc) *);
+ gfc_expr *, vec<tree, va_gc> *);
void gfc_conv_subref_array_arg (gfc_se *, gfc_expr *, int, sym_intent, bool);
#include "tree-pass.h"
#include "predict.h"
#include "df.h"
-#include "vecprim.h"
#include "params.h"
#include "bb-reorder.h"
\f
htab_t types_used_by_vars_hash = NULL;
-VEC(tree,gc) *types_used_by_cur_var_decl;
+vec<tree, va_gc> *types_used_by_cur_var_decl;
/* Forward declarations. */
static struct temp_slot *find_temp_slot_from_address (rtx);
static void pad_to_arg_alignment (struct args_size *, int, struct args_size *);
static void pad_below (struct args_size *, enum machine_mode, tree);
-static void reorder_blocks_1 (rtx, tree, VEC(tree,heap) **);
+static void reorder_blocks_1 (rtx, tree, vec<tree> *);
static int all_blocks (tree, tree *);
static tree *get_block_vector (tree, int *);
extern tree debug_find_var_in_block_tree (tree, tree);
typedef struct function *function_p;
-DEF_VEC_P(function_p);
-DEF_VEC_ALLOC_P(function_p,heap);
-static VEC(function_p,heap) *function_context_stack;
+static vec<function_p> function_context_stack;
/* Save the current context for compilation of a nested function.
This is called from language-specific code. */
if (cfun == 0)
allocate_struct_function (NULL, false);
- VEC_safe_push (function_p, heap, function_context_stack, cfun);
+ function_context_stack.safe_push (cfun);
set_cfun (NULL);
}
void
pop_function_context (void)
{
- struct function *p = VEC_pop (function_p, function_context_stack);
+ struct function *p = function_context_stack.pop ();
set_cfun (p);
current_function_decl = p->decl;
static struct temp_slot **
temp_slots_at_level (int level)
{
- if (level >= (int) VEC_length (temp_slot_p, used_temp_slots))
- VEC_safe_grow_cleared (temp_slot_p, gc, used_temp_slots, level + 1);
+ if (level >= (int) vec_safe_length (used_temp_slots))
+ vec_safe_grow_cleared (used_temp_slots, level + 1);
- return &(VEC_address (temp_slot_p, used_temp_slots)[level]);
+ return &(*used_temp_slots)[level];
}
/* Returns the maximal temporary slot level. */
if (!used_temp_slots)
return -1;
- return VEC_length (temp_slot_p, used_temp_slots) - 1;
+ return used_temp_slots->length () - 1;
}
/* Moves temporary slot TEMP to LEVEL. */
{
/* We have not allocated any temporaries yet. */
avail_temp_slots = 0;
- used_temp_slots = 0;
+ vec_alloc (used_temp_slots, 0);
temp_slot_level = 0;
n_temp_slots_in_use = 0;
FOR_EACH_LOCAL_DECL (cfun, ix, decl)
if (DECL_RTL_SET_P (decl))
instantiate_decl_rtl (DECL_RTL (decl));
- VEC_free (tree, gc, cfun->local_decls);
+ vec_free (cfun->local_decls);
}
/* Pass through the INSNS of function FNDECL and convert virtual register
needed, else the old list. */
static void
-split_complex_args (VEC(tree, heap) **args)
+split_complex_args (vec<tree> *args)
{
unsigned i;
tree p;
- FOR_EACH_VEC_ELT (tree, *args, i, p)
+ FOR_EACH_VEC_ELT (*args, i, p)
{
tree type = TREE_TYPE (p);
if (TREE_CODE (type) == COMPLEX_TYPE
DECL_IGNORED_P (p) = addressable;
TREE_ADDRESSABLE (p) = 0;
layout_decl (p, 0);
- VEC_replace (tree, *args, i, p);
+ (*args)[i] = p;
/* Build a second synthetic decl. */
decl = build_decl (EXPR_LOCATION (p),
DECL_ARTIFICIAL (decl) = addressable;
DECL_IGNORED_P (decl) = addressable;
layout_decl (decl, 0);
- VEC_safe_insert (tree, heap, *args, ++i, decl);
+ args->safe_insert (++i, decl);
}
}
}
the hidden struct return argument, and (abi willing) complex args.
Return the new parameter list. */
-static VEC(tree, heap) *
+static vec<tree>
assign_parms_augmented_arg_list (struct assign_parm_data_all *all)
{
tree fndecl = current_function_decl;
tree fntype = TREE_TYPE (fndecl);
- VEC(tree, heap) *fnargs = NULL;
+ vec<tree> fnargs = vec<tree>();
tree arg;
for (arg = DECL_ARGUMENTS (fndecl); arg; arg = DECL_CHAIN (arg))
- VEC_safe_push (tree, heap, fnargs, arg);
+ fnargs.safe_push (arg);
all->orig_fnargs = DECL_ARGUMENTS (fndecl);
DECL_CHAIN (decl) = all->orig_fnargs;
all->orig_fnargs = decl;
- VEC_safe_insert (tree, heap, fnargs, 0, decl);
+ fnargs.safe_insert (0, decl);
all->function_result_decl = decl;
}
static void
assign_parms_unsplit_complex (struct assign_parm_data_all *all,
- VEC(tree, heap) *fnargs)
+ vec<tree> fnargs)
{
tree parm;
tree orig_fnargs = all->orig_fnargs;
rtx tmp, real, imag;
enum machine_mode inner = GET_MODE_INNER (DECL_MODE (parm));
- real = DECL_RTL (VEC_index (tree, fnargs, i));
- imag = DECL_RTL (VEC_index (tree, fnargs, i + 1));
+ real = DECL_RTL (fnargs[i]);
+ imag = DECL_RTL (fnargs[i + 1]);
if (inner != GET_MODE (real))
{
real = gen_lowpart_SUBREG (inner, real);
tmp = gen_rtx_CONCAT (DECL_MODE (parm), real, imag);
SET_DECL_RTL (parm, tmp);
- real = DECL_INCOMING_RTL (VEC_index (tree, fnargs, i));
- imag = DECL_INCOMING_RTL (VEC_index (tree, fnargs, i + 1));
+ real = DECL_INCOMING_RTL (fnargs[i]);
+ imag = DECL_INCOMING_RTL (fnargs[i + 1]);
if (inner != GET_MODE (real))
{
real = gen_lowpart_SUBREG (inner, real);
{
struct assign_parm_data_all all;
tree parm;
- VEC(tree, heap) *fnargs;
+ vec<tree> fnargs;
unsigned i;
crtl->args.internal_arg_pointer
assign_parms_initialize_all (&all);
fnargs = assign_parms_augmented_arg_list (&all);
- FOR_EACH_VEC_ELT (tree, fnargs, i, parm)
+ FOR_EACH_VEC_ELT (fnargs, i, parm)
{
struct assign_parm_data_one data;
if (targetm.calls.split_complex_arg)
assign_parms_unsplit_complex (&all, fnargs);
- VEC_free (tree, heap, fnargs);
+ fnargs.release ();
/* Output all parameter conversion instructions (possibly including calls)
now that all parameters have been copied out of hard registers. */
struct assign_parm_data_all all;
tree parm;
gimple_seq stmts = NULL;
- VEC(tree, heap) *fnargs;
+ vec<tree> fnargs;
unsigned i;
assign_parms_initialize_all (&all);
fnargs = assign_parms_augmented_arg_list (&all);
- FOR_EACH_VEC_ELT (tree, fnargs, i, parm)
+ FOR_EACH_VEC_ELT (fnargs, i, parm)
{
struct assign_parm_data_one data;
}
}
- VEC_free (tree, heap, fnargs);
+ fnargs.release ();
return stmts;
}
reorder_blocks (void)
{
tree block = DECL_INITIAL (current_function_decl);
- VEC(tree,heap) *block_stack;
+ vec<tree> block_stack;
if (block == NULL_TREE)
return;
- block_stack = VEC_alloc (tree, heap, 10);
+ block_stack.create (10);
/* Reset the TREE_ASM_WRITTEN bit for all blocks. */
clear_block_marks (block);
reorder_blocks_1 (get_insns (), block, &block_stack);
BLOCK_SUBBLOCKS (block) = blocks_nreverse_all (BLOCK_SUBBLOCKS (block));
- VEC_free (tree, heap, block_stack);
+ block_stack.release ();
}
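The reorder_blocks hunk shows the heap-vector lifecycle under the new API. A vec<T> with the default va_heap allocator is a plain-old-data wrapper around one pointer, which is why the fnargs parameters above can be passed by value; the cost is that there is no destructor, so create and release remain explicit. A sketch, assuming a hypothetical tree b:

vec<tree> stack = vec<tree>();   /* Empty; nothing allocated yet.  */
stack.create (10);               /* Reserve an initial capacity.  */
stack.safe_push (b);             /* Grows on demand.  */
tree top = stack.pop ();
stack.release ();                /* Manual free: no RAII.  */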
/* Helper function for reorder_blocks. Reset TREE_ASM_WRITTEN. */
}
static void
-reorder_blocks_1 (rtx insns, tree current_block, VEC(tree,heap) **p_block_stack)
+reorder_blocks_1 (rtx insns, tree current_block, vec<tree> *p_block_stack)
{
rtx insn;
tree prev_beg = NULL_TREE, prev_end = NULL_TREE;
|| BLOCK_FRAGMENT_ORIGIN (BLOCK_SUPERCONTEXT
(origin))
== current_block);
- if (VEC_empty (tree, *p_block_stack))
+ if (p_block_stack->is_empty ())
super = current_block;
else
{
- super = VEC_last (tree, *p_block_stack);
+ super = p_block_stack->last ();
gcc_assert (super == current_block
|| BLOCK_FRAGMENT_ORIGIN (super)
== current_block);
BLOCK_SUBBLOCKS (current_block) = block;
current_block = origin;
}
- VEC_safe_push (tree, heap, *p_block_stack, block);
+ p_block_stack->safe_push (block);
}
else if (NOTE_KIND (insn) == NOTE_INSN_BLOCK_END)
{
- NOTE_BLOCK (insn) = VEC_pop (tree, *p_block_stack);
+ NOTE_BLOCK (insn) = p_block_stack->pop ();
current_block = BLOCK_SUPERCONTEXT (current_block);
if (BLOCK_FRAGMENT_ORIGIN (current_block))
current_block = BLOCK_FRAGMENT_ORIGIN (current_block);
/* Initialized with NOGC, making this poisonous to the garbage collector. */
-static VEC(function_p,heap) *cfun_stack;
+static vec<function_p> cfun_stack;
/* Push the current cfun onto the stack, and set cfun to new_cfun. Also set
current_function_decl accordingly. */
{
gcc_assert ((!cfun && !current_function_decl)
|| (cfun && current_function_decl == cfun->decl));
- VEC_safe_push (function_p, heap, cfun_stack, cfun);
+ cfun_stack.safe_push (cfun);
current_function_decl = new_cfun ? new_cfun->decl : NULL_TREE;
set_cfun (new_cfun);
}
void
pop_cfun (void)
{
- struct function *new_cfun = VEC_pop (function_p, cfun_stack);
+ struct function *new_cfun = cfun_stack.pop ();
/* When in_dummy_function, we do have a cfun but current_function_decl is
NULL. We also allow pushing NULL cfun and subsequently changing
current_function_decl to something else and have both restored by
gcc_assert (in_dummy_function
|| (!cfun && !current_function_decl)
|| (cfun && current_function_decl == cfun->decl));
- VEC_safe_push (function_p, heap, cfun_stack, cfun);
+ cfun_stack.safe_push (cfun);
current_function_decl = fndecl;
allocate_struct_function (fndecl, false);
}
/* LAST_BB is a block that exits and is empty of active instructions.
Examine its predecessors for jumps that can be converted to
(conditional) returns. */
-static VEC (edge, heap) *
+static vec<edge>
convert_jumps_to_returns (basic_block last_bb, bool simple_p,
- VEC (edge, heap) *unconverted ATTRIBUTE_UNUSED)
+ vec<edge> unconverted ATTRIBUTE_UNUSED)
{
int i;
basic_block bb;
rtx label;
edge_iterator ei;
edge e;
- VEC(basic_block,heap) *src_bbs;
+ vec<basic_block> src_bbs;
- src_bbs = VEC_alloc (basic_block, heap, EDGE_COUNT (last_bb->preds));
+ src_bbs.create (EDGE_COUNT (last_bb->preds));
FOR_EACH_EDGE (e, ei, last_bb->preds)
if (e->src != ENTRY_BLOCK_PTR)
- VEC_quick_push (basic_block, src_bbs, e->src);
+ src_bbs.quick_push (e->src);
label = BB_HEAD (last_bb);
- FOR_EACH_VEC_ELT (basic_block, src_bbs, i, bb)
+ FOR_EACH_VEC_ELT (src_bbs, i, bb)
{
rtx jump = BB_END (bb);
if (dump_file)
fprintf (dump_file,
"Failed to redirect bb %d branch.\n", bb->index);
- VEC_safe_push (edge, heap, unconverted, e);
+ unconverted.safe_push (e);
}
#endif
continue;
if (dump_file)
fprintf (dump_file,
"Failed to redirect bb %d branch.\n", bb->index);
- VEC_safe_push (edge, heap, unconverted, e);
+ unconverted.safe_push (e);
}
#endif
continue;
redirect_edge_succ (e, EXIT_BLOCK_PTR);
e->flags &= ~EDGE_CROSSING;
}
- VEC_free (basic_block, heap, src_bbs);
+ src_bbs.release ();
return unconverted;
}
{
bool inserted;
#ifdef HAVE_simple_return
- VEC (edge, heap) *unconverted_simple_returns = NULL;
+ vec<edge> unconverted_simple_returns = vec<edge>();
bool nonempty_prologue;
bitmap_head bb_flags;
unsigned max_grow_size;
HARD_REG_SET prologue_clobbered, prologue_used, live_on_edge;
struct hard_reg_set_container set_up_by_prologue;
rtx p_insn;
- VEC(basic_block, heap) *vec;
+ vec<basic_block> vec;
basic_block bb;
bitmap_head bb_antic_flags;
bitmap_head bb_on_list;
/* Find the set of basic blocks that require a stack frame,
and blocks that are too big to be duplicated. */
- vec = VEC_alloc (basic_block, heap, n_basic_blocks);
+ vec.create (n_basic_blocks);
CLEAR_HARD_REG_SET (set_up_by_prologue.set);
add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
if (bb == entry_edge->dest)
goto fail_shrinkwrap;
bitmap_set_bit (&bb_flags, bb->index);
- VEC_quick_push (basic_block, vec, bb);
+ vec.quick_push (bb);
break;
}
else if (size <= max_grow_size)
/* For every basic block that needs a prologue, mark all blocks
reachable from it, so as to ensure they are also seen as
requiring a prologue. */
- while (!VEC_empty (basic_block, vec))
+ while (!vec.is_empty ())
{
- basic_block tmp_bb = VEC_pop (basic_block, vec);
+ basic_block tmp_bb = vec.pop ();
FOR_EACH_EDGE (e, ei, tmp_bb->succs)
if (e->dest != EXIT_BLOCK_PTR
&& bitmap_set_bit (&bb_flags, e->dest->index))
- VEC_quick_push (basic_block, vec, e->dest);
+ vec.quick_push (e->dest);
}
/* Find the set of basic blocks that need no prologue, have a
single successor, can be duplicated, meet a max size
requirement, and go to the exit via like blocks. */
- VEC_quick_push (basic_block, vec, EXIT_BLOCK_PTR);
- while (!VEC_empty (basic_block, vec))
+ vec.quick_push (EXIT_BLOCK_PTR);
+ while (!vec.is_empty ())
{
- basic_block tmp_bb = VEC_pop (basic_block, vec);
+ basic_block tmp_bb = vec.pop ();
FOR_EACH_EDGE (e, ei, tmp_bb->preds)
if (single_succ_p (e->src)
&& !bitmap_bit_p (&bb_flags, pe->src->index))
break;
if (pe == NULL && bitmap_set_bit (&bb_tail, e->src->index))
- VEC_quick_push (basic_block, vec, e->src);
+ vec.quick_push (e->src);
}
}
FOR_EACH_EDGE (e, ei, bb->preds)
if (!bitmap_bit_p (&bb_antic_flags, e->src->index)
&& bitmap_set_bit (&bb_on_list, e->src->index))
- VEC_quick_push (basic_block, vec, e->src);
+ vec.quick_push (e->src);
}
- while (!VEC_empty (basic_block, vec))
+ while (!vec.is_empty ())
{
- basic_block tmp_bb = VEC_pop (basic_block, vec);
+ basic_block tmp_bb = vec.pop ();
bool all_set = true;
bitmap_clear_bit (&bb_on_list, tmp_bb->index);
FOR_EACH_EDGE (e, ei, tmp_bb->preds)
if (!bitmap_bit_p (&bb_antic_flags, e->src->index)
&& bitmap_set_bit (&bb_on_list, e->src->index))
- VEC_quick_push (basic_block, vec, e->src);
+ vec.quick_push (e->src);
}
}
/* Find exactly one edge that leads to a block in ANTIC from
some_no_pro = true;
}
if (some_pro && some_no_pro)
- VEC_quick_push (basic_block, vec, bb);
+ vec.quick_push (bb);
else
bitmap_clear_bit (&bb_tail, bb->index);
}
/* Find the head of each tail. */
- while (!VEC_empty (basic_block, vec))
+ while (!vec.is_empty ())
{
- basic_block tbb = VEC_pop (basic_block, vec);
+ basic_block tbb = vec.pop ();
if (!bitmap_bit_p (&bb_tail, tbb->index))
continue;
bitmap_clear (&bb_tail);
bitmap_clear (&bb_antic_flags);
bitmap_clear (&bb_on_list);
- VEC_free (basic_block, heap, vec);
+ vec.release ();
}
#endif
if (LABEL_P (BB_HEAD (last_bb))
&& !active_insn_between (BB_HEAD (last_bb), BB_END (last_bb)))
- convert_jumps_to_returns (last_bb, false, NULL);
+ convert_jumps_to_returns (last_bb, false, vec<edge>());
if (EDGE_COUNT (last_bb->preds) != 0
&& single_succ_p (last_bb))
convert to conditional simple_returns, but couldn't for some
reason, create a block to hold a simple_return insn and redirect
those remaining edges. */
- if (!VEC_empty (edge, unconverted_simple_returns))
+ if (!unconverted_simple_returns.is_empty ())
{
basic_block simple_return_block_hot = NULL;
basic_block simple_return_block_cold = NULL;
pending_edge_cold = e;
}
- FOR_EACH_VEC_ELT (edge, unconverted_simple_returns, i, e)
+ FOR_EACH_VEC_ELT (unconverted_simple_returns, i, e)
{
basic_block *pdest_bb;
edge pending;
}
redirect_edge_and_branch_force (e, *pdest_bb);
}
- VEC_free (edge, heap, unconverted_simple_returns);
+ unconverted_simple_returns.release ();
}
if (entry_edge != orig_entry_edge)
if (cfun)
used_types_insert_helper (t, cfun);
else
- /* So this might be a type referenced by a global variable.
- Record that type so that we can later decide to emit its debug
- information. */
- VEC_safe_push (tree, gc, types_used_by_cur_var_decl, t);
+ {
+ /* So this might be a type referenced by a global variable.
+ Record that type so that we can later decide to emit its
+ debug information. */
+ vec_safe_push (types_used_by_cur_var_decl, t);
+ }
}
}
#include "hashtab.h"
#include "vec.h"
-#include "vecprim.h"
-#include "vecir.h"
#include "machmode.h"
#include "tm.h" /* For CUMULATIVE_ARGS. */
#include "hard-reg-set.h" /* For HARD_REG_SET in struct rtl_data. */
};
typedef struct call_site_record_d *call_site_record;
-DEF_VEC_P(call_site_record);
-DEF_VEC_ALLOC_P(call_site_record, gc);
/* RTL representation of exception handling. */
struct GTY(()) rtl_eh {
rtx sjlj_fc;
rtx sjlj_exit_after;
- VEC(uchar,gc) *action_record_data;
+ vec<uchar, va_gc> *action_record_data;
- VEC(call_site_record,gc) *call_site_record_v[2];
+ vec<call_site_record, va_gc> *call_site_record_v[2];
};
#define pending_stack_adjust (crtl->expr.x_pending_stack_adjust)
struct call_site_record_d;
struct dw_fde_struct;
-DEF_VEC_P(temp_slot_p);
-DEF_VEC_ALLOC_P(temp_slot_p,gc);
struct ipa_opt_pass_d;
typedef struct ipa_opt_pass_d *ipa_opt_pass;
-DEF_VEC_P(ipa_opt_pass);
-DEF_VEC_ALLOC_P(ipa_opt_pass,heap);
struct GTY(()) varasm_status {
/* If we're using a per-function constant pool, this is it. */
rtx x_parm_birth_insn;
/* List of all used temporaries allocated, by level. */
- VEC(temp_slot_p,gc) *x_used_temp_slots;
+ vec<temp_slot_p, va_gc> *x_used_temp_slots;
/* List of available temp slots. */
struct temp_slot *x_avail_temp_slots;
tree nonlocal_goto_save_area;
/* Vector of function local variables, functions, types and constants. */
- VEC(tree,gc) *local_decls;
+ vec<tree, va_gc> *local_decls;
/* For md files. */
static inline void
add_local_decl (struct function *fun, tree d)
{
- VEC_safe_push (tree, gc, fun->local_decls, d);
+ vec_safe_push (fun->local_decls, d);
}
#define FOR_EACH_LOCAL_DECL(FUN, I, D) \
- FOR_EACH_VEC_ELT_REVERSE (tree, (FUN)->local_decls, I, D)
+ FOR_EACH_VEC_SAFE_ELT_REVERSE ((FUN)->local_decls, I, D)
/* If va_list_[gf]pr_size is set to this, it means we don't know how
many units need to be saved. */
/* During parsing of a global variable, this vector contains the types
referenced by the global variable. */
-extern GTY(()) VEC(tree,gc) *types_used_by_cur_var_decl;
+extern GTY(()) vec<tree, va_gc> *types_used_by_cur_var_decl;
/* cfun shouldn't be set directly; use one of these functions instead. */
extern void set_cfun (struct function *new_cfun);
static int num_changes;
-DEF_VEC_P(df_ref);
-DEF_VEC_ALLOC_P(df_ref,heap);
-static VEC(df_ref,heap) *use_def_ref;
-static VEC(df_ref,heap) *reg_defs;
-static VEC(df_ref,heap) *reg_defs_stack;
+static vec<df_ref> use_def_ref;
+static vec<df_ref> reg_defs;
+static vec<df_ref> reg_defs_stack;
/* The MD bitmaps are trimmed to include only live registers to cut
memory usage on testcases like insn-recog.c. Track live registers
static inline df_ref
get_def_for_use (df_ref use)
{
- return VEC_index (df_ref, use_def_ref, DF_REF_ID (use));
+ return use_def_ref[DF_REF_ID (use)];
}
df_ref def;
while ((def = *def_rec++) != NULL)
{
- df_ref curr_def = VEC_index (df_ref, reg_defs, DF_REF_REGNO (def));
+ df_ref curr_def = reg_defs[DF_REF_REGNO (def)];
unsigned int dregno;
if ((DF_REF_FLAGS (def) & DF_REF_AT_TOP) != top_flag)
dregno = DF_REF_REGNO (def);
if (curr_def)
- VEC_safe_push (df_ref, heap, reg_defs_stack, curr_def);
+ reg_defs_stack.safe_push (curr_def);
else
{
/* Do not store anything if "transitioning" from NULL to NULL. But
if (DF_REF_FLAGS (def) & DF_MD_GEN_FLAGS)
;
else
- VEC_safe_push (df_ref, heap, reg_defs_stack, def);
+ reg_defs_stack.safe_push (def);
}
if (DF_REF_FLAGS (def) & DF_MD_GEN_FLAGS)
{
bitmap_set_bit (local_md, dregno);
- VEC_replace (df_ref, reg_defs, dregno, NULL);
+ reg_defs[dregno] = NULL;
}
else
{
bitmap_clear_bit (local_md, dregno);
- VEC_replace (df_ref, reg_defs, dregno, def);
+ reg_defs[dregno] = def;
}
}
}
if ((DF_REF_FLAGS (use) & DF_REF_AT_TOP) == top_flag)
{
unsigned int uregno = DF_REF_REGNO (use);
- if (VEC_index (df_ref, reg_defs, uregno)
+ if (reg_defs[uregno]
&& !bitmap_bit_p (local_md, uregno)
&& bitmap_bit_p (local_lr, uregno))
- VEC_replace (df_ref, use_def_ref, DF_REF_ID (use),
- VEC_index (df_ref, reg_defs, uregno));
+ use_def_ref[DF_REF_ID (use)] = reg_defs[uregno];
}
}
bitmap_copy (local_lr, &lr_bb_info->in);
/* Push a marker for the leave_block callback. */
- VEC_safe_push (df_ref, heap, reg_defs_stack, NULL);
+ reg_defs_stack.safe_push (NULL);
process_uses (df_get_artificial_uses (bb_index), DF_REF_AT_TOP);
process_defs (df_get_artificial_defs (bb_index), DF_REF_AT_TOP);
basic_block bb ATTRIBUTE_UNUSED)
{
df_ref saved_def;
- while ((saved_def = VEC_pop (df_ref, reg_defs_stack)) != NULL)
+ while ((saved_def = reg_defs_stack.pop ()) != NULL)
{
unsigned int dregno = DF_REF_REGNO (saved_def);
/* See also process_defs. */
- if (saved_def == VEC_index (df_ref, reg_defs, dregno))
- VEC_replace (df_ref, reg_defs, dregno, NULL);
+ if (saved_def == reg_defs[dregno])
+ reg_defs[dregno] = NULL;
else
- VEC_replace (df_ref, reg_defs, dregno, saved_def);
+ reg_defs[dregno] = saved_def;
}
}
df_analyze ();
df_maybe_reorganize_use_refs (DF_REF_ORDER_BY_INSN_WITH_NOTES);
- use_def_ref = VEC_alloc (df_ref, heap, DF_USES_TABLE_SIZE ());
- VEC_safe_grow_cleared (df_ref, heap, use_def_ref, DF_USES_TABLE_SIZE ());
+ use_def_ref.create (DF_USES_TABLE_SIZE ());
+ use_def_ref.safe_grow_cleared (DF_USES_TABLE_SIZE ());
- reg_defs = VEC_alloc (df_ref, heap, max_reg_num ());
- VEC_safe_grow_cleared (df_ref, heap, reg_defs, max_reg_num ());
+ reg_defs.create (max_reg_num ());
+ reg_defs.safe_grow_cleared (max_reg_num ());
- reg_defs_stack = VEC_alloc (df_ref, heap, n_basic_blocks * 10);
+ reg_defs_stack.create (n_basic_blocks * 10);
local_md = BITMAP_ALLOC (NULL);
local_lr = BITMAP_ALLOC (NULL);
BITMAP_FREE (local_lr);
BITMAP_FREE (local_md);
- VEC_free (df_ref, heap, reg_defs);
- VEC_free (df_ref, heap, reg_defs_stack);
+ reg_defs.release ();
+ reg_defs_stack.release ();
}
\f
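The fwprop.c tables above lean on vec<T>::safe_grow_cleared zero-filling every slot it adds, so plain operator[] can replace the old VEC_replace/VEC_index pairs immediately after the grow. A sketch under that assumption (some_def is a hypothetical df_ref):

static vec<df_ref> table;
table.create (32);              /* Initial capacity.  */
table.safe_grow_cleared (64);   /* Slots [0, 64) now exist, zeroed.  */
table[10] = some_def;           /* Was VEC_replace (df_ref, table, 10, ...).  */
df_ref d = table[10];           /* Was VEC_index (df_ref, table, 10).  */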
int regno = DF_REF_REGNO (use);
/* Set up the use-def chain. */
- if (DF_REF_ID (use) >= (int) VEC_length (df_ref, use_def_ref))
- VEC_safe_grow_cleared (df_ref, heap, use_def_ref,
- DF_REF_ID (use) + 1);
+ if (DF_REF_ID (use) >= (int) use_def_ref.length ())
+ use_def_ref.safe_grow_cleared (DF_REF_ID (use) + 1);
#ifdef ENABLE_CHECKING
gcc_assert (sparseset_bit_p (active_defs_check, regno));
#endif
- VEC_replace (df_ref, use_def_ref, DF_REF_ID (use), active_defs[regno]);
+ use_def_ref[DF_REF_ID (use)] = active_defs[regno];
}
}
{
loop_optimizer_finalize ();
- VEC_free (df_ref, heap, use_def_ref);
+ use_def_ref.release ();
free (active_defs);
#ifdef ENABLE_CHECKING
sparseset_free (active_defs_check);
static const int n_default_compilers = ARRAY_SIZE (default_compilers) - 1;
typedef char *char_p; /* For DEF_VEC_P. */
-DEF_VEC_P(char_p);
-DEF_VEC_ALLOC_P(char_p,heap);
/* A vector of options to give to the linker.
These options are accumulated by %x,
and substituted into the linker command with %X. */
-static VEC(char_p,heap) *linker_options;
+static vec<char_p> linker_options;
/* A vector of options to give to the assembler.
These options are accumulated by -Wa,
and substituted into the assembler command with %Y. */
-static VEC(char_p,heap) *assembler_options;
+static vec<char_p> assembler_options;
/* A vector of options to give to the preprocessor.
These options are accumulated by -Wp,
and substituted into the preprocessor command with %Z. */
-static VEC(char_p,heap) *preprocessor_options;
+static vec<char_p> preprocessor_options;
\f
static char *
skip_whitespace (char *p)
/* Accumulate a command (program name and args), and run it. */
typedef const char *const_char_p; /* For DEF_VEC_P. */
-DEF_VEC_P(const_char_p);
-DEF_VEC_ALLOC_P(const_char_p,heap);
/* Vector of pointers to arguments in the current line of specifications. */
-static VEC(const_char_p,heap) *argbuf;
+static vec<const_char_p> argbuf;
/* Position in the argbuf vector containing the name of the output file
(the value associated with the "-o" flag). */
static void
alloc_args (void)
{
- argbuf = VEC_alloc (const_char_p, heap, 10);
+ argbuf.create (10);
}
/* Clear out the vector of arguments (after a command is executed). */
static void
clear_args (void)
{
- VEC_truncate (const_char_p, argbuf, 0);
+ argbuf.truncate (0);
}
/* Add one argument to the vector at the end.
static void
store_arg (const char *arg, int delete_always, int delete_failure)
{
- VEC_safe_push (const_char_p, heap, argbuf, arg);
+ argbuf.safe_push (arg);
if (strcmp (arg, "-o") == 0)
- have_o_argbuf_index = VEC_length (const_char_p, argbuf);
+ have_o_argbuf_index = argbuf.length ();
if (delete_always || delete_failure)
{
const char *p;
if (wrapper_string)
{
string = find_a_file (&exec_prefixes,
- VEC_index (const_char_p, argbuf, 0), X_OK, false);
+ argbuf[0], X_OK, false);
if (string)
- VEC_replace (const_char_p, argbuf, 0, string);
+ argbuf[0] = string;
insert_wrapper (wrapper_string);
}
/* Count # of piped commands. */
- for (n_commands = 1, i = 0; VEC_iterate (const_char_p, argbuf, i, arg); i++)
+ for (n_commands = 1, i = 0; argbuf.iterate (i, &arg); i++)
if (strcmp (arg, "|") == 0)
n_commands++;
and record info about each one.
Also search for the programs that are to be run. */
- VEC_safe_push (const_char_p, heap, argbuf, 0);
+ argbuf.safe_push (0);
- commands[0].prog = VEC_index (const_char_p, argbuf, 0); /* first command. */
- commands[0].argv = VEC_address (const_char_p, argbuf);
+ commands[0].prog = argbuf[0]; /* first command. */
+ commands[0].argv = argbuf.address ();
if (!wrapper_string)
{
commands[0].argv[0] = (string) ? string : commands[0].argv[0];
}
- for (n_commands = 1, i = 0; VEC_iterate (const_char_p, argbuf, i, arg); i++)
+ for (n_commands = 1, i = 0; argbuf.iterate (i, &arg); i++)
if (arg && strcmp (arg, "|") == 0)
{ /* each command. */
#if defined (__MSDOS__) || defined (OS2) || defined (VMS)
fatal_error ("-pipe not supported");
#endif
- VEC_replace (const_char_p, argbuf, i, 0); /* Termination of
+ argbuf[i] = 0; /* Termination of
command args. */
- commands[n_commands].prog = VEC_index (const_char_p, argbuf, i + 1);
+ commands[n_commands].prog = argbuf[i + 1];
commands[n_commands].argv
- = &(VEC_address (const_char_p, argbuf))[i + 1];
+ = &(argbuf.address ())[i + 1];
string = find_a_file (&exec_prefixes, commands[n_commands].prog,
X_OK, false);
if (string)
static void
add_preprocessor_option (const char *option, int len)
{
- VEC_safe_push (char_p, heap, preprocessor_options,
- save_string (option, len));
+ preprocessor_options.safe_push (save_string (option, len));
}
static void
add_assembler_option (const char *option, int len)
{
- VEC_safe_push (char_p, heap, assembler_options, save_string (option, len));
+ assembler_options.safe_push (save_string (option, len));
}
static void
add_linker_option (const char *option, int len)
{
- VEC_safe_push (char_p, heap, linker_options, save_string (option, len));
+ linker_options.safe_push (save_string (option, len));
}
\f
/* Allocate space for an input file in infiles. */
int i;
char *buf = xstrdup (wrapper);
char *p = buf;
- unsigned int old_length = VEC_length (const_char_p, argbuf);
+ unsigned int old_length = argbuf.length ();
do
{
}
while ((p = strchr (p, ',')) != NULL);
- VEC_safe_grow (const_char_p, heap, argbuf, old_length + n);
- memmove (VEC_address (const_char_p, argbuf) + n,
- VEC_address (const_char_p, argbuf),
+ argbuf.safe_grow (old_length + n);
+ memmove (argbuf.address () + n,
+ argbuf.address (),
old_length * sizeof (const_char_p));
i = 0;
*p = 0;
p++;
}
- VEC_replace (const_char_p, argbuf, i, p);
+ argbuf[i] = p;
i++;
}
while ((p = strchr (p, ',')) != NULL);
If -pipe, this forces out the last command if it ended in `|'. */
if (value == 0)
{
- if (VEC_length (const_char_p, argbuf) > 0
- && !strcmp (VEC_last (const_char_p, argbuf), "|"))
- VEC_pop (const_char_p, argbuf);
+ if (argbuf.length () > 0
+ && !strcmp (argbuf.last (), "|"))
+ argbuf.pop ();
set_collect_gcc_options ();
- if (VEC_length (const_char_p, argbuf) > 0)
+ if (argbuf.length () > 0)
value = execute ();
}
if ((switches[i].live_cond & SWITCH_IGNORE))
switches[i].live_cond |= SWITCH_IGNORE_PERMANENTLY;
- if (VEC_length (const_char_p, argbuf) > 0)
+ if (argbuf.length () > 0)
{
const char **argbuf_copy;
struct cl_decoded_option *decoded_options;
/* Create a copy of argbuf with a dummy argv[0] entry for
decode_cmdline_options_to_array. */
argbuf_copy = XNEWVEC (const char *,
- VEC_length (const_char_p, argbuf) + 1);
+ argbuf.length () + 1);
argbuf_copy[0] = "";
- memcpy (argbuf_copy + 1, VEC_address (const_char_p, argbuf),
- VEC_length (const_char_p, argbuf) * sizeof (const char *));
+ memcpy (argbuf_copy + 1, argbuf.address (),
+ argbuf.length () * sizeof (const char *));
- decode_cmdline_options_to_array (VEC_length (const_char_p, argbuf) + 1,
+ decode_cmdline_options_to_array (argbuf.length () + 1,
argbuf_copy,
CL_DRIVER, &decoded_options,
&decoded_options_count);
/* Process each member of VEC as a spec. */
static void
-do_specs_vec (VEC(char_p,heap) *vec)
+do_specs_vec (vec<char_p> vec)
{
unsigned ix;
char *opt;
- FOR_EACH_VEC_ELT (char_p, vec, ix, opt)
+ FOR_EACH_VEC_ELT (vec, ix, opt)
{
do_spec_1 (opt, 1, NULL);
/* Make each accumulated option a separate argument. */
case '\n':
end_going_arg ();
- if (VEC_length (const_char_p, argbuf) > 0
- && !strcmp (VEC_last (const_char_p, argbuf), "|"))
+ if (argbuf.length () > 0
+ && !strcmp (argbuf.last (), "|"))
{
/* A `|' before the newline means use a pipe here,
but only if -pipe was specified.
break;
}
else
- VEC_pop (const_char_p, argbuf);
+ argbuf.pop ();
}
set_collect_gcc_options ();
- if (VEC_length (const_char_p, argbuf) > 0)
+ if (argbuf.length () > 0)
{
value = execute ();
if (value)
case 'W':
{
- unsigned int cur_index = VEC_length (const_char_p, argbuf);
+ unsigned int cur_index = argbuf.length ();
/* Handle the {...} following the %W. */
if (*p != '{')
fatal_error ("spec %qs has invalid %<%%W%c%>", spec, *p);
end_going_arg ();
/* If any args were output, mark the last one for deletion
on failure. */
- if (VEC_length (const_char_p, argbuf) != cur_index)
- record_temp_file (VEC_last (const_char_p, argbuf), 0, 1);
+ if (argbuf.length () != cur_index)
+ record_temp_file (argbuf.last (), 0, 1);
break;
}
string = save_string (p1 + 1, p - p1 - 2);
/* See if we already recorded this option. */
- FOR_EACH_VEC_ELT (char_p, linker_options, ix, opt)
+ FOR_EACH_VEC_ELT (linker_options, ix, opt)
if (! strcmp (string, opt))
{
free (string);
const char *funcval;
/* Saved spec processing context. */
- VEC(const_char_p,heap) *save_argbuf;
+ vec<const_char_p> save_argbuf;
int save_arg_going;
int save_delete_this_arg;
/* argbuf_index is an index for the next argument to be inserted, and
so contains the count of the args already inserted. */
- funcval = (*sf->func) (VEC_length (const_char_p, argbuf),
- VEC_address (const_char_p, argbuf));
+ funcval = (*sf->func) (argbuf.length (),
+ argbuf.address ());
/* Pop the spec processing context. */
- VEC_free (const_char_p, heap, argbuf);
+ argbuf.release ();
argbuf = save_argbuf;
arg_going = save_arg_going;
&& !no_sysroot_suffix
&& do_spec_2 (sysroot_suffix_spec) == 0)
{
- if (VEC_length (const_char_p, argbuf) > 1)
+ if (argbuf.length () > 1)
error ("spec failure: more than one arg to SYSROOT_SUFFIX_SPEC");
- else if (VEC_length (const_char_p, argbuf) == 1)
- target_sysroot_suffix = xstrdup (VEC_last (const_char_p, argbuf));
+ else if (argbuf.length () == 1)
+ target_sysroot_suffix = xstrdup (argbuf.last ());
}
#ifdef HAVE_LD_SYSROOT
&& !no_sysroot_suffix
&& do_spec_2 (sysroot_hdrs_suffix_spec) == 0)
{
- if (VEC_length (const_char_p, argbuf) > 1)
+ if (argbuf.length () > 1)
error ("spec failure: more than one arg to SYSROOT_HEADERS_SUFFIX_SPEC");
- else if (VEC_length (const_char_p, argbuf) == 1)
- target_sysroot_hdrs_suffix = xstrdup (VEC_last (const_char_p, argbuf));
+ else if (argbuf.length () == 1)
+ target_sysroot_hdrs_suffix = xstrdup (argbuf.last ());
}
/* Look for startfiles in the standard places. */
{
const char *arg;
int ndx;
- FOR_EACH_VEC_ELT (const_char_p, argbuf, ndx, arg)
+ FOR_EACH_VEC_ELT (argbuf, ndx, arg)
add_sysrooted_prefix (&startfile_prefixes, arg, "BINUTILS",
PREFIX_PRIORITY_LAST, 0, 1);
}
do_spec_2 ("%{fdump-final-insns=*:%*}");
do_spec_1 (" ", 0, NULL);
- if (VEC_length (const_char_p, argbuf) > 0
- && strcmp (argv[VEC_length (const_char_p, argbuf) - 1], "."))
+ if (argbuf.length () > 0
+ && strcmp (argv[argbuf.length () - 1], "."))
{
if (!compare_debug)
return NULL;
- name = xstrdup (argv[VEC_length (const_char_p, argbuf) - 1]);
+ name = xstrdup (argv[argbuf.length () - 1]);
ret = NULL;
}
else
{
const char *ext = NULL;
- if (VEC_length (const_char_p, argbuf) > 0)
+ if (argbuf.length () > 0)
{
do_spec_2 ("%{o*:%*}%{!o:%{!S:%b%O}%{S:%b.s}}");
ext = ".gkd";
do_spec_1 (" ", 0, NULL);
- gcc_assert (VEC_length (const_char_p, argbuf) > 0);
+ gcc_assert (argbuf.length () > 0);
- name = concat (VEC_last (const_char_p, argbuf), ext, NULL);
+ name = concat (argbuf.last (), ext, NULL);
ret = concat ("-fdump-final-insns=", name, NULL);
}
do_spec_2 ("%{c|S:%{o*:%*}}");
do_spec_1 (" ", 0, NULL);
- if (VEC_length (const_char_p, argbuf) > 0)
+ if (argbuf.length () > 0)
debug_auxbase_opt = concat ("-auxbase-strip ",
- VEC_last (const_char_p, argbuf),
+ argbuf.last (),
NULL);
else
debug_auxbase_opt = NULL;
};
typedef struct occr *occr_t;
-DEF_VEC_P (occr_t);
-DEF_VEC_ALLOC_P (occr_t, heap);
/* Expression hash tables.
Each hash table is an array of buckets.
/* Array, indexed by basic block number for a list of insns which modify
memory within that block. */
-static VEC (rtx,heap) **modify_mem_list;
+static vec<rtx> *modify_mem_list;
static bitmap modify_mem_list_set;
typedef struct modify_pair_s
rtx dest_addr; /* The canonical address of `dest'. */
} modify_pair;
-DEF_VEC_O(modify_pair);
-DEF_VEC_ALLOC_O(modify_pair,heap);
/* This array parallels modify_mem_list, except that it stores MEMs
being set and their canonicalized memory addresses. */
-static VEC (modify_pair,heap) **canon_modify_mem_list;
+static vec<modify_pair> *canon_modify_mem_list;
/* Bitmap indexed by block numbers to record which blocks contain
function calls. */
reg_set_bitmap = ALLOC_REG_SET (NULL);
/* Allocate array to keep a list of insns which modify memory in each
- basic block. */
- modify_mem_list = GCNEWVEC (VEC (rtx,heap) *, last_basic_block);
- canon_modify_mem_list = GCNEWVEC (VEC (modify_pair,heap) *,
- last_basic_block);
+ basic block. The two typedefs are needed to work around the
+ pre-processor limitation with template types in macro arguments. */
+ typedef vec<rtx> vec_rtx_heap;
+ typedef vec<modify_pair> vec_modify_pair_heap;
+ modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block);
+ canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap, last_basic_block);
modify_mem_list_set = BITMAP_ALLOC (NULL);
blocks_with_calls = BITMAP_ALLOC (NULL);
}
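The typedef workaround above exists because the C preprocessor splits macro arguments at every top-level comma, including the comma inside a template-id such as vec<tree, va_gc>. A minimal illustration of the preprocessing problem only (MY_ALLOC is a hypothetical macro, not part of GCC):

#define MY_ALLOC(T, N) ((T *) xcalloc ((N), sizeof (T)))

/* Broken: the macro sees three arguments, "vec<rtx", " va_heap>"
   and "n", and fails to expand as intended.  */
/* vec<rtx, va_heap> *p = MY_ALLOC (vec<rtx, va_heap>, n); */

/* Working: the typedef hides the comma from the preprocessor.  */
typedef vec<rtx, va_heap> vec_rtx_heap;
vec_rtx_heap *p = MY_ALLOC (vec_rtx_heap, n);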
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
int avail_p)
{
- VEC (rtx,heap) *list = modify_mem_list[bb->index];
+ vec<rtx> list = modify_mem_list[bb->index];
rtx setter;
unsigned ix;
if (MEM_READONLY_P (x))
return 0;
- FOR_EACH_VEC_ELT_REVERSE (rtx, list, ix, setter)
+ FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
{
struct mem_conflict_info mci;
pair.dest = dest;
pair.dest_addr = dest_addr;
- VEC_safe_push (modify_pair, heap, canon_modify_mem_list[bb], pair);
+ canon_modify_mem_list[bb].safe_push (pair);
}
/* Record memory modification information for INSN. We do not actually care
/* load_killed_in_block_p will handle the case of calls clobbering
everything. */
- VEC_safe_push (rtx, heap, modify_mem_list[bb], insn);
+ modify_mem_list[bb].safe_push (insn);
bitmap_set_bit (modify_mem_list_set, bb);
if (CALL_P (insn))
EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
{
- VEC_free (rtx, heap, modify_mem_list[i]);
- VEC_free (modify_pair, heap, canon_modify_mem_list[i]);
+ modify_mem_list[i].release ();
+ canon_modify_mem_list[i].release ();
}
bitmap_clear (modify_mem_list_set);
bitmap_clear (blocks_with_calls);
blocks_with_calls,
0, bb_index, bi)
{
- VEC (modify_pair,heap) *list
+ vec<modify_pair> list
= canon_modify_mem_list[bb_index];
modify_pair *pair;
unsigned ix;
- FOR_EACH_VEC_ELT_REVERSE (modify_pair, list, ix, pair)
+ FOR_EACH_VEC_ELT_REVERSE (list, ix, pair)
{
rtx dest = pair->dest;
rtx dest_addr = pair->dest_addr;
hoist_code (void)
{
basic_block bb, dominated;
- VEC (basic_block, heap) *dom_tree_walk;
+ vec<basic_block> dom_tree_walk;
unsigned int dom_tree_walk_index;
- VEC (basic_block, heap) *domby;
+ vec<basic_block> domby;
unsigned int i, j, k;
struct expr **index_map;
struct expr *expr;
/* Walk over each basic block looking for potentially hoistable
     expressions; nothing gets hoisted from the entry block. */
- FOR_EACH_VEC_ELT (basic_block, dom_tree_walk, dom_tree_walk_index, bb)
+ FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
{
domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
- if (VEC_length (basic_block, domby) == 0)
+ if (domby.length () == 0)
continue;
/* Examine each expression that is very busy at the exit of this
/* Number of occurrences of EXPR that can be hoisted to BB. */
int hoistable = 0;
/* Occurrences reachable from BB. */
- VEC (occr_t, heap) *occrs_to_hoist = NULL;
+ vec<occr_t> occrs_to_hoist = vec<occr_t>();
/* We want to insert the expression into BB only once, so
note when we've inserted it. */
int insn_inserted_p;
/* We've found a potentially hoistable expression, now
we look at every block BB dominates to see if it
computes the expression. */
- FOR_EACH_VEC_ELT (basic_block, domby, j, dominated)
+ FOR_EACH_VEC_ELT (domby, j, dominated)
{
int max_distance;
hoisted_bbs, occr->insn))
{
hoistable++;
- VEC_safe_push (occr_t, heap,
- occrs_to_hoist, occr);
+ occrs_to_hoist.safe_push (occr);
bitmap_set_bit (from_bbs, dominated->index);
}
}
to nullify any benefit we get from code hoisting. */
if (hoistable > 1 && dbg_cnt (hoist_insn))
{
- /* If (hoistable != VEC_length), then there is
+ /* If (hoistable != vec::length), then there is
an occurrence of EXPR in BB itself. Don't waste
time looking for LCA in this case. */
- if ((unsigned) hoistable
- == VEC_length (occr_t, occrs_to_hoist))
+ if ((unsigned) hoistable == occrs_to_hoist.length ())
{
basic_block lca;
if (lca != bb)
/* Punt, it's better to hoist these occurrences to
LCA. */
- VEC_free (occr_t, heap, occrs_to_hoist);
+ occrs_to_hoist.release ();
}
}
else
 /* Punt, no point hoisting a single occurrence. */
- VEC_free (occr_t, heap, occrs_to_hoist);
+ occrs_to_hoist.release ();
if (flag_ira_hoist_pressure
- && !VEC_empty (occr_t, occrs_to_hoist))
+ && !occrs_to_hoist.is_empty ())
{
/* Increase register pressure of basic blocks to which
expr is hoisted because of extended live range of
/* Walk through occurrences of I'th expressions we want
to hoist to BB and make the transformations. */
- FOR_EACH_VEC_ELT (occr_t, occrs_to_hoist, j, occr)
+ FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
{
rtx insn;
rtx set;
}
}
- VEC_free (occr_t, heap, occrs_to_hoist);
+ occrs_to_hoist.release ();
bitmap_clear (from_bbs);
}
}
- VEC_free (basic_block, heap, domby);
+ domby.release ();
}
- VEC_free (basic_block, heap, dom_tree_walk);
+ dom_tree_walk.release ();
BITMAP_FREE (from_bbs);
if (flag_ira_hoist_pressure)
BITMAP_FREE (hoisted_bbs);
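The iteration macros changed shape in the same way; a minimal sketch assuming vec.h ('do_something' is hypothetical). The vector object now supplies the element type, so the TYPE argument disappears.

    vec<int> v;
    v.create (4);
    v.safe_push (1);
    v.safe_push (2);
    unsigned ix;
    int x;
    FOR_EACH_VEC_ELT (v, ix, x)          /* was FOR_EACH_VEC_ELT (int, v, ix, x) */
      do_something (x);
    FOR_EACH_VEC_ELT_REVERSE (v, ix, x)  /* same change for the reverse walk */
      do_something (x);
    v.release ();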
static void gen_attr (rtx);
-static VEC (rtx, heap) *const_attrs, *reservations;
+static vec<rtx> const_attrs, reservations;
static void
int is_const = GET_CODE (XEXP (attr, 2)) == CONST;
if (is_const)
- VEC_safe_push (rtx, heap, const_attrs, attr);
+ const_attrs.safe_push (attr);
printf ("#define HAVE_ATTR_%s 1\n", XSTR (attr, 0));
if (strcmp (XSTR (exp, 0), "alternative") == 0)
return false;
- FOR_EACH_VEC_ELT (rtx, const_attrs, i, attr)
+ FOR_EACH_VEC_ELT (const_attrs, i, attr)
if (strcmp (XSTR (attr, 0), XSTR (exp, 0)) == 0)
{
unsigned int j;
rtx resv;
- FOR_EACH_VEC_ELT (rtx, reservations, j, resv)
+ FOR_EACH_VEC_ELT (reservations, j, resv)
if (! check_tune_attr (XSTR (attr, 0), XEXP (resv, 2)))
return false;
return true;
else if (GET_CODE (desc) == DEFINE_INSN_RESERVATION)
{
num_insn_reservations++;
- VEC_safe_push (rtx, heap, reservations, desc);
+ reservations.safe_push (desc);
}
}
if (num_insn_reservations > 0)
{
bool has_tune_attr
- = find_tune_attr (XEXP (VEC_index (rtx, reservations, 0), 2));
+ = find_tune_attr (XEXP (reservations[0], 2));
/* Output interface for pipeline hazards recognition based on
 DFA (deterministic finite state automata). */
printf ("\n/* DFA based pipeline interface. */");
#include "errors.h"
#include "read-md.h"
#include "gensupport.h"
-#include "vecprim.h"
#include "fnmatch.h"
#define DEBUG 0
\f
/* Declare vector types for various data structures: */
-DEF_VEC_P(alt_state_t);
-DEF_VEC_ALLOC_P(alt_state_t, heap);
-DEF_VEC_P(ainsn_t);
-DEF_VEC_ALLOC_P(ainsn_t, heap);
-DEF_VEC_P(state_t);
-DEF_VEC_ALLOC_P(state_t, heap);
-DEF_VEC_P(decl_t);
-DEF_VEC_ALLOC_P(decl_t, heap);
-DEF_VEC_P(reserv_sets_t);
-DEF_VEC_ALLOC_P(reserv_sets_t, heap);
-
-DEF_VEC_I(vect_el_t);
-DEF_VEC_ALLOC_I(vect_el_t, heap);
-typedef VEC(vect_el_t, heap) *vla_hwint_t;
+
+typedef vec<vect_el_t> vla_hwint_t;
\f
/* Forward declarations of functions used before their definitions, only. */
static regexp_t gen_regexp_sequence (const char *);
/* Pointers to all declarations during IR generation are stored in the
following. */
-static VEC(decl_t, heap) *decls;
+static vec<decl_t> decls;
/* Given a pointer to a (char *) and a separator, return an alloc'ed
string containing the next separated element, taking parentheses
DECL_UNIT (decl)->query_p = 0;
DECL_UNIT (decl)->min_occ_cycle_num = -1;
DECL_UNIT (decl)->in_set_p = 0;
- VEC_safe_push (decl_t, heap, decls, decl);
+ decls.safe_push (decl);
}
}
DECL_UNIT (decl)->name = check_name (str_cpu_units [i], decl->pos);
DECL_UNIT (decl)->automaton_name = XSTR (def, 1);
DECL_UNIT (decl)->query_p = 1;
- VEC_safe_push (decl_t, heap, decls, decl);
+ decls.safe_push (decl);
}
}
DECL_BYPASS (decl)->out_pattern = out_patterns[i];
DECL_BYPASS (decl)->in_pattern = in_patterns[j];
DECL_BYPASS (decl)->bypass_guard_name = XSTR (def, 3);
- VEC_safe_push (decl_t, heap, decls, decl);
+ decls.safe_push (decl);
}
}
else
DECL_EXCL (decl)->names [i]
= second_str_cpu_units [i - first_vect_length];
- VEC_safe_push (decl_t, heap, decls, decl);
+ decls.safe_push (decl);
}
/* Process a PRESENCE_SET, a FINAL_PRESENCE_SET, an ABSENCE_SET,
DECL_ABSENCE (decl)->patterns_num = patterns_length;
DECL_ABSENCE (decl)->final_p = final_p;
}
- VEC_safe_push (decl_t, heap, decls, decl);
+ decls.safe_push (decl);
}
/* Process a PRESENCE_SET.
decl->mode = dm_automaton;
decl->pos = 0;
DECL_AUTOMATON (decl)->name = check_name (str_automata [i], decl->pos);
- VEC_safe_push (decl_t, heap, decls, decl);
+ decls.safe_push (decl);
}
}
decl->pos = 0;
DECL_RESERV (decl)->name = check_name (XSTR (def, 0), decl->pos);
DECL_RESERV (decl)->regexp = gen_regexp (XSTR (def, 1));
- VEC_safe_push (decl_t, heap, decls, decl);
+ decls.safe_push (decl);
}
/* Process a DEFINE_INSN_RESERVATION.
DECL_INSN_RESERV (decl)->default_latency = XINT (def, 1);
DECL_INSN_RESERV (decl)->condexp = XEXP (def, 2);
DECL_INSN_RESERV (decl)->regexp = gen_regexp (XSTR (def, 3));
- VEC_safe_push (decl_t, heap, decls, decl);
+ decls.safe_push (decl);
}
\f
uniq_sort_alt_states (alt_state_t alt_states_list)
{
alt_state_t curr_alt_state;
- VEC(alt_state_t, heap) *alt_states;
+ vec<alt_state_t> alt_states;
size_t i;
size_t prev_unique_state_ind;
alt_state_t result;
if (alt_states_list->next_alt_state == 0)
return alt_states_list;
- alt_states = VEC_alloc (alt_state_t, heap, 150);
+ alt_states.create (150);
for (curr_alt_state = alt_states_list;
curr_alt_state != NULL;
curr_alt_state = curr_alt_state->next_alt_state)
- VEC_safe_push (alt_state_t, heap, alt_states, curr_alt_state);
+ alt_states.safe_push (curr_alt_state);
- VEC_qsort (alt_state_t, alt_states, alt_state_cmp);
+ alt_states.qsort (alt_state_cmp);
prev_unique_state_ind = 0;
- for (i = 1; i < VEC_length (alt_state_t, alt_states); i++)
- if (VEC_index (alt_state_t, alt_states, prev_unique_state_ind)->state
- != VEC_index (alt_state_t, alt_states, i)->state)
+ for (i = 1; i < alt_states.length (); i++)
+ if (alt_states[prev_unique_state_ind]->state != alt_states[i]->state)
{
prev_unique_state_ind++;
- VEC_replace (alt_state_t, alt_states, prev_unique_state_ind,
- VEC_index (alt_state_t, alt_states, i));
+ alt_states[prev_unique_state_ind] = alt_states[i];
}
- VEC_truncate (alt_state_t, alt_states, prev_unique_state_ind + 1);
+ alt_states.truncate (prev_unique_state_ind + 1);
- for (i = 1; i < VEC_length (alt_state_t, alt_states); i++)
- VEC_index (alt_state_t, alt_states, i-1)->next_sorted_alt_state
- = VEC_index (alt_state_t, alt_states, i);
- VEC_last (alt_state_t, alt_states)->next_sorted_alt_state = 0;
+ for (i = 1; i < alt_states.length (); i++)
+ alt_states[i-1]->next_sorted_alt_state
+ = alt_states[i];
+ alt_states.last ()->next_sorted_alt_state = 0;
- result = VEC_index (alt_state_t, alt_states, 0);
+ result = alt_states[0];
- VEC_free (alt_state_t, heap, alt_states);
+ alt_states.release ();
return result;
}
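The sort-and-compact idiom above, reduced to a sketch over vec<int> (vec.h assumed; 'cmp_int' is a local stand-in for alt_state_cmp, and v is assumed non-empty, matching the early return above):

    static int
    cmp_int (const void *a, const void *b)
    {
      return *(const int *) a - *(const int *) b;
    }

    v.qsort (cmp_int);          /* was VEC_qsort (T, v, cmp) */
    size_t uniq = 0;
    for (size_t i = 1; i < v.length (); i++)
      if (v[uniq] != v[i])      /* operator[] replaces VEC_index */
        v[++uniq] = v[i];       /* plain assignment replaces VEC_replace */
    v.truncate (uniq + 1);      /* was VEC_truncate (T, v, n) */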
static int annotation_message_reported_p;
/* The vector contains all decls which are automata. */
-static VEC(decl_t, heap) *automaton_decls;
+static vec<decl_t> automaton_decls;
/* The following structure describes usage of a unit in a reservation. */
struct unit_usage
};
typedef struct unit_usage *unit_usage_t;
-DEF_VEC_P(unit_usage_t);
-DEF_VEC_ALLOC_P(unit_usage_t, heap);
/* Obstack for unit_usage structures. */
static struct obstack unit_usages;
 alternative with a given number are referred to through the element
 with index equal to the cycle * number of all alternatives in the
regexp + the alternative number. */
-static VEC(unit_usage_t, heap) *cycle_alt_unit_usages;
+static vec<unit_usage_t> cycle_alt_unit_usages;
/* The following function creates the structure unit_usage for UNIT on
CYCLE in REGEXP alternative with ALT_NUM. The structure is made
unit_decl = REGEXP_UNIT (unit)->unit_decl;
length = (cycle + 1) * REGEXP_ONEOF (regexp)->regexps_num;
- while (VEC_length (unit_usage_t, cycle_alt_unit_usages) < length)
- VEC_safe_push (unit_usage_t, heap, cycle_alt_unit_usages,
- (unit_usage_t) NULL);
+ while (cycle_alt_unit_usages.length () < length)
+ cycle_alt_unit_usages.safe_push (NULL);
index = cycle * REGEXP_ONEOF (regexp)->regexps_num + alt_num;
prev = NULL;
- for (curr = VEC_index (unit_usage_t, cycle_alt_unit_usages, index);
+ for (curr = cycle_alt_unit_usages[index];
curr != NULL;
prev = curr, curr = curr->next)
if (curr->unit_decl >= unit_decl)
unit_decl->last_distribution_check_cycle = -1; /* undefined */
unit_usage_ptr->next = curr;
if (prev == NULL)
- VEC_replace (unit_usage_t, cycle_alt_unit_usages, index, unit_usage_ptr);
+ cycle_alt_unit_usages[index] = unit_usage_ptr;
else
prev->next = unit_usage_ptr;
}
unit_usage_t list1, list2;
for (i = 0;
- i < (int) VEC_length (unit_usage_t, cycle_alt_unit_usages);
+ i < (int) cycle_alt_unit_usages.length ();
i += n_alts)
{
- for (list1 = VEC_index (unit_usage_t, cycle_alt_unit_usages, i + alt1),
- list2 = VEC_index (unit_usage_t, cycle_alt_unit_usages, i + alt2);;
+ for (list1 = cycle_alt_unit_usages[i + alt1],
+ list2 = cycle_alt_unit_usages[i + alt2];;
list1 = list1->next, list2 = list2->next)
{
while (list1 != NULL
return true;
}
-DEF_VEC_I(int);
-DEF_VEC_ALLOC_I(int, heap);
/* The function processes given REGEXP to find units with the wrong
distribution. */
bool annotation_reservation_message_reported_p;
regexp_t seq, allof, unit;
struct unit_usage *unit_usage_ptr;
- VEC(int, heap) *marked;
+ vec<int> marked;
if (regexp == NULL || regexp->mode != rm_oneof)
return;
/* Store all unit usages in the regexp: */
obstack_init (&unit_usages);
- cycle_alt_unit_usages = VEC_alloc (unit_usage_t, heap, 10);
+ cycle_alt_unit_usages.create (10);
for (i = REGEXP_ONEOF (regexp)->regexps_num - 1; i >= 0; i--)
{
}
}
/* Check distribution: */
- for (i = 0; i < (int) VEC_length (unit_usage_t, cycle_alt_unit_usages); i++)
- for (unit_usage_ptr = VEC_index (unit_usage_t, cycle_alt_unit_usages, i);
+ for (i = 0; i < (int) cycle_alt_unit_usages.length (); i++)
+ for (unit_usage_ptr = cycle_alt_unit_usages[i];
unit_usage_ptr != NULL;
unit_usage_ptr = unit_usage_ptr->next)
unit_usage_ptr->unit_decl->last_distribution_check_cycle = -1;
n_alts = REGEXP_ONEOF (regexp)->regexps_num;
- marked = VEC_alloc (int, heap, n_alts);
+ marked.create (n_alts);
for (i = 0; i < n_alts; i++)
- VEC_safe_push (int, heap, marked, 0);
+ marked.safe_push (0);
annotation_reservation_message_reported_p = false;
- for (i = 0; i < (int) VEC_length (unit_usage_t, cycle_alt_unit_usages); i++)
+ for (i = 0; i < (int) cycle_alt_unit_usages.length (); i++)
{
cycle = i / n_alts;
start = cycle * n_alts;
- for (unit_usage_ptr = VEC_index (unit_usage_t, cycle_alt_unit_usages, i);
+ for (unit_usage_ptr = cycle_alt_unit_usages[i];
unit_usage_ptr != NULL;
unit_usage_ptr = unit_usage_ptr->next)
{
continue;
unit_usage_ptr->unit_decl->last_distribution_check_cycle = cycle;
for (alt = 0; alt < n_alts; alt++)
- if (! unit_present_on_list_p (VEC_index (unit_usage_t,
- cycle_alt_unit_usages,
- start + alt),
+ if (! unit_present_on_list_p (cycle_alt_unit_usages[start + alt],
unit_usage_ptr->unit_decl))
break;
if (alt >= n_alts)
continue;
- memset (VEC_address (int, marked), 0, n_alts * sizeof (int));
+ memset (marked.address (), 0, n_alts * sizeof (int));
for (alt = 0; alt < n_alts; alt++)
{
- if (! unit_present_on_list_p (VEC_index (unit_usage_t,
- cycle_alt_unit_usages,
- start + alt),
+ if (! unit_present_on_list_p (cycle_alt_unit_usages[start + alt],
unit_usage_ptr->unit_decl))
continue;
for (j = 0;
- j < (int) VEC_length (unit_usage_t, cycle_alt_unit_usages);
+ j < (int) cycle_alt_unit_usages.length ();
j++)
{
alt2 = j % n_alts;
if (! unit_present_on_list_p
- (VEC_index (unit_usage_t, cycle_alt_unit_usages,
- start + alt2),
+ (cycle_alt_unit_usages[start + alt2],
unit_usage_ptr->unit_decl)
&& equal_alternatives_p (alt, alt2, n_alts,
unit_usage_ptr
->unit_decl->automaton_decl))
{
- VEC_replace (int, marked, alt, 1);
- VEC_replace (int, marked, alt2, 1);
+ marked[alt] = 1;
+ marked[alt2] = 1;
}
}
}
- for (alt = 0; alt < n_alts && VEC_index (int, marked, alt); alt++)
+ for (alt = 0; alt < n_alts && marked[alt]; alt++)
;
if (alt < n_alts && 0)
{
}
}
}
- VEC_free (int, heap, marked);
- VEC_free (unit_usage_t, heap, cycle_alt_unit_usages);
+ marked.release ();
+ cycle_alt_unit_usages.release ();
obstack_free (&unit_usages, NULL);
}
if (progress_flag)
fprintf (stderr, "Check unit distributions to automata...");
- automaton_decls = NULL;
+ automaton_decls.create (0);
for (i = 0; i < description->decls_num; i++)
{
decl = description->decls [i];
if (decl->mode == dm_automaton)
- VEC_safe_push (decl_t, heap, automaton_decls, decl);
+ automaton_decls.safe_push (decl);
}
- if (VEC_length (decl_t, automaton_decls) > 1)
+ if (automaton_decls.length () > 1)
{
annotation_message_reported_p = FALSE;
for (i = 0; i < description->decls_num; i++)
DECL_INSN_RESERV (decl)->transformed_regexp);
}
}
- VEC_free (decl_t, heap, automaton_decls);
+ automaton_decls.release ();
if (progress_flag)
fprintf (stderr, "done\n");
}
{
ainsn_t curr_ainsn;
size_t i;
- VEC(ainsn_t, heap) *last_insns = VEC_alloc (ainsn_t, heap, 150);
+ vec<ainsn_t> last_insns;
+ last_insns.create (150);
for (curr_ainsn = automaton->ainsn_list;
curr_ainsn != NULL;
}
else
{
- for (i = 0; i < VEC_length (ainsn_t, last_insns); i++)
+ for (i = 0; i < last_insns.length (); i++)
if (alt_states_eq
(curr_ainsn->sorted_alt_states,
- VEC_index (ainsn_t, last_insns, i)->sorted_alt_states))
+ last_insns[i]->sorted_alt_states))
break;
curr_ainsn->next_same_reservs_insn = NULL;
- if (i < VEC_length (ainsn_t, last_insns))
+ if (i < last_insns.length ())
{
curr_ainsn->first_insn_with_same_reservs = 0;
- VEC_index (ainsn_t, last_insns, i)->next_same_reservs_insn
- = curr_ainsn;
- VEC_replace (ainsn_t, last_insns, i, curr_ainsn);
+ last_insns[i]->next_same_reservs_insn = curr_ainsn;
+ last_insns[i] = curr_ainsn;
}
else
{
- VEC_safe_push (ainsn_t, heap, last_insns, curr_ainsn);
+ last_insns.safe_push (curr_ainsn);
curr_ainsn->first_insn_with_same_reservs = 1;
}
}
- VEC_free (ainsn_t, heap, last_insns);
+ last_insns.release ();
}
/* Forming unit reservations which can affect creating the automaton
state_t state;
state_t start_state;
state_t state2;
- VEC(state_t, heap) *state_stack = VEC_alloc(state_t, heap, 150);
+ vec<state_t> state_stack;
+ state_stack.create (150);
int states_n;
reserv_sets_t reservs_matter = form_reservs_matter (automaton);
start_state = insert_state (get_free_state (1, automaton));
automaton->start_state = start_state;
start_state->it_was_placed_in_stack_for_NDFA_forming = 1;
- VEC_safe_push (state_t, heap, state_stack, start_state);
+ state_stack.safe_push (start_state);
states_n = 1;
- while (VEC_length (state_t, state_stack) != 0)
+ while (state_stack.length () != 0)
{
- state = VEC_pop (state_t, state_stack);
+ state = state_stack.pop ();
for (ainsn = automaton->ainsn_list;
ainsn != NULL;
ainsn = ainsn->next_ainsn)
{
state2->it_was_placed_in_stack_for_NDFA_forming
= 1;
- VEC_safe_push (state_t, heap, state_stack, state2);
+ state_stack.safe_push (state2);
states_n++;
if (progress_flag && states_n % 100 == 0)
fprintf (stderr, ".");
if (!state2->it_was_placed_in_stack_for_NDFA_forming)
{
state2->it_was_placed_in_stack_for_NDFA_forming = 1;
- VEC_safe_push (state_t, heap, state_stack, state2);
+ state_stack.safe_push (state2);
states_n++;
if (progress_flag && states_n % 100 == 0)
fprintf (stderr, ".");
}
add_arc (state, state2, automaton->advance_ainsn);
}
- VEC_free (state_t, heap, state_stack);
+ state_stack.release ();
}
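The NDFA construction above is a plain worklist; stripped to its vec operations it looks like this (vec.h assumed; 'seed' and the loop body are placeholders):

    vec<int> stack;
    stack.create (150);
    stack.safe_push (seed);
    while (stack.length () != 0)
      {
        int s = stack.pop ();   /* was VEC_pop (state_t, stack) */
        /* ... process s, possibly safe_push ()ing new work ... */
      }
    stack.release ();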
/* Form lists of all arcs of STATE marked by the same ainsn. */
static int
create_composed_state (state_t original_state, arc_t arcs_marked_by_insn,
- VEC(state_t, heap) **state_stack)
+ vec<state_t> *state_stack)
{
state_t state;
alt_state_t alt_state, curr_alt_state;
if (!state->it_was_placed_in_stack_for_DFA_forming)
{
state->it_was_placed_in_stack_for_DFA_forming = 1;
- VEC_safe_push (state_t, heap, *state_stack, state);
+ state_stack->safe_push (state);
}
return new_state_p;
}
state_t start_state;
state_t state;
decl_t decl;
- VEC(state_t, heap) *state_stack;
+ vec<state_t> state_stack;
int i;
int states_n;
- state_stack = VEC_alloc (state_t, heap, 0);
+ state_stack.create (0);
/* Create the start state (empty state). */
start_state = automaton->start_state;
start_state->it_was_placed_in_stack_for_DFA_forming = 1;
- VEC_safe_push (state_t, heap, state_stack, start_state);
+ state_stack.safe_push (start_state);
states_n = 1;
- while (VEC_length (state_t, state_stack) != 0)
+ while (state_stack.length () != 0)
{
- state = VEC_pop (state_t, state_stack);
+ state = state_stack.pop ();
form_arcs_marked_by_insn (state);
for (i = 0; i < description->decls_num; i++)
{
if (!state2->it_was_placed_in_stack_for_DFA_forming)
{
state2->it_was_placed_in_stack_for_DFA_forming = 1;
- VEC_safe_push (state_t, heap, state_stack, state2);
+ state_stack.safe_push (state2);
}
add_arc (state, state2, automaton->collapse_ainsn);
}
add_arc (state, state, automaton->collapse_ainsn);
}
}
- VEC_free (state_t, heap, state_stack);
+ state_stack.release ();
}
/* The following variable value is current number (1, 2, ...) of passing
/* The following vla is used for storing pointers to all achieved
states. */
-static VEC(state_t, heap) *all_achieved_states;
+static vec<state_t> all_achieved_states;
/* This function is called by function pass_states to add an achieved
STATE. */
static void
add_achieved_state (state_t state)
{
- VEC_safe_push (state_t, heap, all_achieved_states, state);
+ all_achieved_states.safe_push (state);
}
/* The function sets up equivalence numbers of insns which mark all
}
/* The function makes initial partition of STATES on equivalent
- classes and saves it into *CLASSES. This function requires the input
+ classes and saves it into CLASSES. This function requires the input
to be sorted via compare_states_for_equiv(). */
static int
-init_equiv_class (VEC(state_t, heap) *states, VEC (state_t, heap) **classes)
+init_equiv_class (vec<state_t> states, vec<state_t> *classes)
{
size_t i;
state_t prev = 0;
int class_num = 1;
- *classes = VEC_alloc (state_t, heap, 150);
- for (i = 0; i < VEC_length (state_t, states); i++)
+ classes->create (150);
+ for (i = 0; i < states.length (); i++)
{
- state_t state = VEC_index (state_t, states, i);
+ state_t state = states[i];
if (prev)
{
if (compare_states_for_equiv (&prev, &state) != 0)
{
- VEC_safe_push (state_t, heap, *classes, prev);
+ classes->safe_push (prev);
class_num++;
prev = NULL;
}
prev = state;
}
if (prev)
- VEC_safe_push (state_t, heap, *classes, prev);
+ classes->safe_push (prev);
return class_num;
}
/* The function copies pointers to equivalent states from vla FROM
into vla TO. */
static void
-copy_equiv_class (VEC(state_t, heap) **to, VEC(state_t, heap) *from)
+copy_equiv_class (vec<state_t> *to, vec<state_t> from)
{
- VEC_free (state_t, heap, *to);
- *to = VEC_copy (state_t, heap, from);
+ to->release ();
+ *to = from.copy ();
}
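One semantic point worth flagging (our reading of the new API, not spelled out in the hunk): a vec<T> with the default vl_ptr layout is a one-word handle around the heap buffer, so assigning or passing it by value copies the handle, not the elements. copy () is what performs a deep copy, which is why copy_equiv_class releases the destination's old buffer first. Assuming 'original' is a live vec<int>:

    vec<int> a = original.copy ();  /* deep copy: fresh storage (was VEC_copy) */
    vec<int> b = original;          /* shallow: b and original share one buffer */
    b.release ();                   /* frees the shared buffer; original dangles */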
/* The function processes equivalence class given by its first state,
partitioned, the function returns nonzero value. */
static int
partition_equiv_class (state_t first_state, int odd_iteration_flag,
- VEC(state_t, heap) **next_iteration_classes,
+ vec<state_t> *next_iteration_classes,
int *new_equiv_class_num_ptr)
{
state_t new_equiv_class;
clear_arc_insns_equiv_num (first_state);
}
if (new_equiv_class != NULL)
- VEC_safe_push (state_t, heap, *next_iteration_classes, new_equiv_class);
+ next_iteration_classes->safe_push (new_equiv_class);
first_state = new_equiv_class;
}
return partition_p;
/* The function finds equivalent states of AUTOMATON. */
static void
-evaluate_equiv_classes (automaton_t automaton,
- VEC(state_t, heap) **equiv_classes)
+evaluate_equiv_classes (automaton_t automaton, vec<state_t> *equiv_classes)
{
int new_equiv_class_num;
int odd_iteration_flag;
int finish_flag;
- VEC (state_t, heap) *next_iteration_classes;
+ vec<state_t> next_iteration_classes;
size_t i;
- all_achieved_states = VEC_alloc (state_t, heap, 1500);
+ all_achieved_states.create (1500);
pass_states (automaton, add_achieved_state);
pass_states (automaton, cache_presence);
- VEC_qsort (state_t, all_achieved_states, compare_states_for_equiv);
+ all_achieved_states.qsort (compare_states_for_equiv);
odd_iteration_flag = 0;
new_equiv_class_num = init_equiv_class (all_achieved_states,
copy_equiv_class (equiv_classes, next_iteration_classes);
/* Transfer equiv numbers for the next iteration. */
- for (i = 0; i < VEC_length (state_t, all_achieved_states); i++)
+ for (i = 0; i < all_achieved_states.length (); i++)
if (odd_iteration_flag)
- VEC_index (state_t, all_achieved_states, i)->equiv_class_num_2
- = VEC_index (state_t, all_achieved_states, i)->equiv_class_num_1;
+ all_achieved_states[i]->equiv_class_num_2
+ = all_achieved_states[i]->equiv_class_num_1;
else
- VEC_index (state_t, all_achieved_states, i)->equiv_class_num_1
- = VEC_index (state_t, all_achieved_states, i)->equiv_class_num_2;
+ all_achieved_states[i]->equiv_class_num_1
+ = all_achieved_states[i]->equiv_class_num_2;
- for (i = 0; i < VEC_length (state_t, *equiv_classes); i++)
- if (partition_equiv_class (VEC_index (state_t, *equiv_classes, i),
+ for (i = 0; i < equiv_classes->length (); i++)
+ if (partition_equiv_class ((*equiv_classes)[i],
odd_iteration_flag,
&next_iteration_classes,
&new_equiv_class_num))
finish_flag = 0;
}
while (!finish_flag);
- VEC_free (state_t, heap, next_iteration_classes);
- VEC_free (state_t, heap, all_achieved_states);
+ next_iteration_classes.release ();
+ all_achieved_states.release ();
}
/* The function merges equivalent states of AUTOMATON. */
static void
-merge_states (automaton_t automaton, VEC(state_t, heap) *equiv_classes)
+merge_states (automaton_t automaton, vec<state_t> equiv_classes)
{
state_t curr_state;
state_t new_state;
/* Create states corresponding to equivalence classes containing two
or more states. */
- for (i = 0; i < VEC_length (state_t, equiv_classes); i++)
+ for (i = 0; i < equiv_classes.length (); i++)
{
- curr_state = VEC_index (state_t, equiv_classes, i);
+ curr_state = equiv_classes[i];
if (curr_state->next_equiv_class_state != NULL)
{
 /* There is more than one state in the equivalence class. */
curr_state->equiv_class_state = curr_state;
}
- for (i = 0; i < VEC_length (state_t, equiv_classes); i++)
+ for (i = 0; i < equiv_classes.length (); i++)
{
- curr_state = VEC_index (state_t, equiv_classes, i);
+ curr_state = equiv_classes[i];
if (curr_state->next_equiv_class_state != NULL)
{
first_class_state = curr_state;
static void
minimize_DFA (automaton_t automaton)
{
- VEC(state_t, heap) *equiv_classes = 0;
+ vec<state_t> equiv_classes = vec<state_t>();
evaluate_equiv_classes (automaton, &equiv_classes);
merge_states (automaton, equiv_classes);
pass_states (automaton, set_new_cycle_flags);
- VEC_free (state_t, heap, equiv_classes);
+ equiv_classes.release ();
}
/* Values of two variables are counted number of states and arcs in an
output_vect (vla_hwint_t vect)
{
int els_on_line;
- size_t vect_length = VEC_length (vect_el_t, vect);
+ size_t vect_length = vect.length ();
size_t i;
els_on_line = 1;
else
for (i = 0; i < vect_length; i++)
{
- fprintf (output_file, "%5ld", (long) VEC_index (vect_el_t, vect, i));
+ fprintf (output_file, "%5ld", (long) vect[i]);
if (els_on_line == 10)
{
els_on_line = 0;
int insn_value;
vla_hwint_t translate_vect;
- translate_vect = VEC_alloc (vect_el_t, heap, description->insns_num);
+ translate_vect.create (description->insns_num);
for (insn_value = 0; insn_value < description->insns_num; insn_value++)
/* Undefined value */
- VEC_quick_push (vect_el_t, translate_vect,
- automaton->insn_equiv_classes_num);
+ translate_vect.quick_push (automaton->insn_equiv_classes_num);
for (ainsn = automaton->ainsn_list; ainsn != NULL; ainsn = ainsn->next_ainsn)
- VEC_replace (vect_el_t, translate_vect,
- ainsn->insn_reserv_decl->insn_num,
- ainsn->insn_equiv_class_num);
+ translate_vect[ainsn->insn_reserv_decl->insn_num] =
+ ainsn->insn_equiv_class_num;
fprintf (output_file,
"/* Vector translating external insn codes to internal ones.*/\n");
fprintf (output_file, "[] ATTRIBUTE_UNUSED = {\n");
output_vect (translate_vect);
fprintf (output_file, "};\n\n");
- VEC_free (vect_el_t, heap, translate_vect);
+ translate_vect.release ();
}
/* The value in a table state x ainsn -> something which represents
{
if (no_comb_flag)
return false;
- return (2 * VEC_length (vect_el_t, tab->full_vect)
- > 5 * VEC_length (vect_el_t, tab->comb_vect));
+ return (2 * tab->full_vect.length () > 5 * tab->comb_vect.length ());
}
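The rewritten test preserves the old threshold; spelled out as a worked example:

    /* 2 * full > 5 * comb  <=>  comb < 0.4 * full.  With
       full_vect.length () == 1000, a 350-entry comb vector selects the
       comb/check representation (2000 > 1750); a 400-entry one does not
       (2000 > 2000 is false) and the full vector is used instead.  */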
/* The following function creates new table for AUTOMATON. */
tab = XCREATENODE (struct state_ainsn_table);
tab->automaton = automaton;
- tab->comb_vect = VEC_alloc (vect_el_t, heap, 10000);
- tab->check_vect = VEC_alloc (vect_el_t, heap, 10000);
+ tab->comb_vect.create (10000);
+ tab->check_vect.create (10000);
- tab->base_vect = 0;
- VEC_safe_grow (vect_el_t, heap, tab->base_vect,
- automaton->achieved_states_num);
+ tab->base_vect.create (0);
+ tab->base_vect.safe_grow (automaton->achieved_states_num);
full_vect_length = (automaton->insn_equiv_classes_num
* automaton->achieved_states_num);
- tab->full_vect = VEC_alloc (vect_el_t, heap, full_vect_length);
+ tab->full_vect.create (full_vect_length);
for (i = 0; i < full_vect_length; i++)
- VEC_quick_push (vect_el_t, tab->full_vect, undefined_vect_el_value);
+ tab->full_vect.quick_push (undefined_vect_el_value);
tab->min_base_vect_el_value = 0;
tab->max_base_vect_el_value = 0;
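Note that safe_grow, unlike safe_grow_cleared, extends the length without initializing the new elements, so every base_vect slot must be written before it is read. A minimal sketch, vec.h assumed:

    vec<int> v;
    v.create (0);
    v.safe_grow (8);         /* length () == 8, contents indeterminate */
    for (unsigned i = 0; i < v.length (); i++)
      v[i] = 0;              /* explicit initialization */
    v.release ();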
int i;
unsigned long vect_mask, comb_vect_mask;
- vect_length = VEC_length (vect_el_t, vect);
+ vect_length = vect.length ();
gcc_assert (vect_length);
- gcc_assert (VEC_last (vect_el_t, vect) != undefined_vect_el_value);
+ gcc_assert (vect.last () != undefined_vect_el_value);
real_vect_length = tab->automaton->insn_equiv_classes_num;
/* Form full vector in the table: */
{
size_t full_base = tab->automaton->insn_equiv_classes_num * vect_num;
- if (VEC_length (vect_el_t, tab->full_vect) < full_base + vect_length)
- VEC_safe_grow (vect_el_t, heap, tab->full_vect,
- full_base + vect_length);
+ if (tab->full_vect.length () < full_base + vect_length)
+ tab->full_vect.safe_grow (full_base + vect_length);
for (i = 0; i < vect_length; i++)
- VEC_replace (vect_el_t, tab->full_vect, full_base + i,
- VEC_index (vect_el_t, vect, i));
+ tab->full_vect[full_base + i] = vect[i];
}
/* The comb_vect min/max values are also used for the full vector, so
compute them now. */
for (vect_index = 0; vect_index < vect_length; vect_index++)
- if (VEC_index (vect_el_t, vect, vect_index) != undefined_vect_el_value)
+ if (vect[vect_index] != undefined_vect_el_value)
{
- vect_el_t x = VEC_index (vect_el_t, vect, vect_index);
+ vect_el_t x = vect[vect_index];
gcc_assert (x >= 0);
if (tab->max_comb_vect_el_value < x)
tab->max_comb_vect_el_value = x;
return;
/* Form comb vector in the table: */
- gcc_assert (VEC_length (vect_el_t, tab->comb_vect)
- == VEC_length (vect_el_t, tab->check_vect));
+ gcc_assert (tab->comb_vect.length () == tab->check_vect.length ());
- comb_vect_els_num = VEC_length (vect_el_t, tab->comb_vect);
+ comb_vect_els_num = tab->comb_vect.length ();
for (first_unempty_vect_index = 0;
first_unempty_vect_index < vect_length;
first_unempty_vect_index++)
- if (VEC_index (vect_el_t, vect, first_unempty_vect_index)
+ if (vect[first_unempty_vect_index]
!= undefined_vect_el_value)
break;
vect_index < vect_length
&& vect_index + comb_vect_index < comb_vect_els_num;
vect_index++)
- if (VEC_index (vect_el_t, vect, vect_index)
+ if (vect[vect_index]
!= undefined_vect_el_value
- && (VEC_index (vect_el_t, tab->comb_vect,
- vect_index + comb_vect_index)
+ && (tab->comb_vect[vect_index + comb_vect_index]
!= undefined_vect_el_value))
break;
if (vect_index >= vect_length
vect_index++)
{
vect_mask = vect_mask << 1;
- if (VEC_index (vect_el_t, vect, vect_index) != undefined_vect_el_value)
+ if (vect[vect_index] != undefined_vect_el_value)
vect_mask |= 1;
}
{
comb_vect_mask <<= 1;
if (vect_index + comb_vect_index < comb_vect_els_num
- && VEC_index (vect_el_t, tab->comb_vect, vect_index + comb_vect_index)
+ && tab->comb_vect[vect_index + comb_vect_index]
!= undefined_vect_el_value)
comb_vect_mask |= 1;
}
comb_vect_index++, i++)
{
comb_vect_mask = (comb_vect_mask << 1) | 1;
- comb_vect_mask ^= (VEC_index (vect_el_t, tab->comb_vect, i)
+ comb_vect_mask ^= (tab->comb_vect[i]
== undefined_vect_el_value);
if ((vect_mask & comb_vect_mask) == 0)
goto found;
no_state_value = tab->automaton->achieved_states_num;
while (additional_els_num > 0)
{
- VEC_safe_push (vect_el_t, heap, tab->comb_vect, vect_el);
- VEC_safe_push (vect_el_t, heap, tab->check_vect, no_state_value);
+ tab->comb_vect.safe_push (vect_el);
+ tab->check_vect.safe_push (no_state_value);
additional_els_num--;
}
- gcc_assert (VEC_length (vect_el_t, tab->comb_vect)
+ gcc_assert (tab->comb_vect.length ()
>= comb_vect_index + real_vect_length);
/* Fill comb and check vectors. */
for (vect_index = 0; vect_index < vect_length; vect_index++)
- if (VEC_index (vect_el_t, vect, vect_index) != undefined_vect_el_value)
+ if (vect[vect_index] != undefined_vect_el_value)
{
- vect_el_t x = VEC_index (vect_el_t, vect, vect_index);
- gcc_assert (VEC_index (vect_el_t, tab->comb_vect,
- comb_vect_index + vect_index)
+ vect_el_t x = vect[vect_index];
+ gcc_assert (tab->comb_vect[comb_vect_index + vect_index]
== undefined_vect_el_value);
gcc_assert (x >= 0);
- VEC_replace (vect_el_t, tab->comb_vect,
- comb_vect_index + vect_index, x);
- VEC_replace (vect_el_t, tab->check_vect,
- comb_vect_index + vect_index, vect_num);
+ tab->comb_vect[comb_vect_index + vect_index] = x;
+ tab->check_vect[comb_vect_index + vect_index] = vect_num;
}
if (tab->max_comb_vect_el_value < undefined_vect_el_value)
tab->max_comb_vect_el_value = undefined_vect_el_value;
if (tab->min_base_vect_el_value > comb_vect_index)
tab->min_base_vect_el_value = comb_vect_index;
- VEC_replace (vect_el_t, tab->base_vect, vect_num, comb_vect_index);
+ tab->base_vect[vect_num] = comb_vect_index;
}
/* Return number of out arcs of STATE. */
/* The function adds element EL_VALUE to vector VECT for a table state
x AINSN. */
static void
-add_vect_el (vla_hwint_t *vect, ainsn_t ainsn, int el_value)
+add_vect_el (vla_hwint_t &vect, ainsn_t ainsn, int el_value)
{
int equiv_class_num;
int vect_index;
gcc_assert (ainsn);
equiv_class_num = ainsn->insn_equiv_class_num;
- for (vect_index = VEC_length (vect_el_t, *vect);
+ for (vect_index = vect.length ();
vect_index <= equiv_class_num;
vect_index++)
- VEC_safe_push (vect_el_t, heap, *vect, undefined_vect_el_value);
- VEC_replace (vect_el_t, *vect, equiv_class_num, el_value);
+ vect.safe_push (undefined_vect_el_value);
+ vect[equiv_class_num] = el_value;
}
/* This is for forming vector of states of an automaton. */
-static VEC(state_t, heap) *output_states_vect;
+static vec<state_t> output_states_vect;
/* The function is called by function pass_states. The function adds
STATE to `output_states_vect'. */
static void
add_states_vect_el (state_t state)
{
- VEC_safe_push (state_t, heap, output_states_vect, state);
+ output_states_vect.safe_push (state);
}
/* Form and output vectors (comb, check, base or full vector)
{
size_t i;
arc_t arc;
- vla_hwint_t transition_vect = 0;
+ vla_hwint_t transition_vect = vla_hwint_t();
undefined_vect_el_value = automaton->achieved_states_num;
automaton->trans_table = create_state_ainsn_table (automaton);
/* Create vect of pointers to states ordered by num of transitions
from the state (state with the maximum num is the first). */
- output_states_vect = 0;
+ output_states_vect.create (0);
pass_states (automaton, add_states_vect_el);
- VEC_qsort (state_t, output_states_vect, compare_transition_els_num);
+ output_states_vect.qsort (compare_transition_els_num);
- for (i = 0; i < VEC_length (state_t, output_states_vect); i++)
+ for (i = 0; i < output_states_vect.length (); i++)
{
- VEC_truncate (vect_el_t, transition_vect, 0);
- for (arc = first_out_arc (VEC_index (state_t, output_states_vect, i));
+ transition_vect.truncate (0);
+ for (arc = first_out_arc (output_states_vect[i]);
arc != NULL;
arc = next_out_arc (arc))
{
gcc_assert (arc->insn);
if (arc->insn->first_ainsn_with_given_equivalence_num)
- add_vect_el (&transition_vect, arc->insn,
+ add_vect_el (transition_vect, arc->insn,
arc->to_state->order_state_num);
}
add_vect (automaton->trans_table,
- VEC_index (state_t, output_states_vect, i)->order_state_num,
+ output_states_vect[i]->order_state_num,
transition_vect);
}
output_state_ainsn_table
output_trans_full_vect_name, output_trans_comb_vect_name,
output_trans_check_vect_name, output_trans_base_vect_name);
- VEC_free (state_t, heap, output_states_vect);
- VEC_free (vect_el_t, heap, transition_vect);
+ output_states_vect.release ();
+ transition_vect.release ();
}
/* Form and output vectors representing minimal issue delay table of
/* Create vect of pointers to states ordered by num of transitions
from the state (state with the maximum num is the first). */
- output_states_vect = 0;
+ output_states_vect.create (0);
pass_states (automaton, add_states_vect_el);
- min_issue_delay_len = (VEC_length (state_t, output_states_vect)
+ min_issue_delay_len = (output_states_vect.length ()
* automaton->insn_equiv_classes_num);
- min_issue_delay_vect = VEC_alloc (vect_el_t, heap, min_issue_delay_len);
+ min_issue_delay_vect.create (min_issue_delay_len);
for (i = 0; i < min_issue_delay_len; i++)
- VEC_quick_push (vect_el_t, min_issue_delay_vect, -1);
+ min_issue_delay_vect.quick_push (-1);
automaton->max_min_delay = 0;
changed = 0;
- for (state_no = 0; state_no < VEC_length (state_t, output_states_vect);
+ for (state_no = 0; state_no < output_states_vect.length ();
state_no++)
{
- state_t s = VEC_index (state_t, output_states_vect, state_no);
+ state_t s = output_states_vect[state_no];
arc_t arc;
for (arc = first_out_arc (s); arc; arc = next_out_arc (arc))
* automaton->insn_equiv_classes_num
+ arc->insn->insn_equiv_class_num;
- if (VEC_index (vect_el_t, min_issue_delay_vect, asn))
+ if (min_issue_delay_vect[asn])
{
- VEC_replace (vect_el_t, min_issue_delay_vect, asn,
- (vect_el_t) 0);
+ min_issue_delay_vect[asn] = (vect_el_t) 0;
changed = 1;
}
n1 = arc->to_state->order_state_num
* automaton->insn_equiv_classes_num
+ k;
- delay0 = VEC_index (vect_el_t, min_issue_delay_vect, n0);
- delay1 = VEC_index (vect_el_t, min_issue_delay_vect, n1);
+ delay0 = min_issue_delay_vect[n0];
+ delay1 = min_issue_delay_vect[n1];
if (delay1 != -1)
{
if (arc->insn->insn_reserv_decl
delay1++;
if (delay1 < delay0 || delay0 == -1)
{
- VEC_replace (vect_el_t, min_issue_delay_vect, n0, delay1);
+ min_issue_delay_vect[n0] = delay1;
changed = 1;
}
}
for (ainsn = automaton->ainsn_list; ainsn; ainsn = ainsn->next_ainsn)
if (ainsn->first_ainsn_with_given_equivalence_num)
{
- for (i = 0; i < VEC_length (state_t, output_states_vect); i++)
+ for (i = 0; i < output_states_vect.length (); i++)
{
- state_t s = VEC_index (state_t, output_states_vect, i);
+ state_t s = output_states_vect[i];
size_t np = s->order_state_num
* automaton->insn_equiv_classes_num
+ ainsn->insn_equiv_class_num;
- vect_el_t x = VEC_index (vect_el_t, min_issue_delay_vect, np);
+ vect_el_t x = min_issue_delay_vect[np];
if (automaton->max_min_delay < x)
automaton->max_min_delay = x;
if (x == -1)
- VEC_replace (vect_el_t, min_issue_delay_vect, np,
- (vect_el_t) 0);
+ min_issue_delay_vect[np] = (vect_el_t) 0;
}
}
automaton->min_issue_delay_table_compression_factor = cfactor;
compressed_min_issue_delay_len = (min_issue_delay_len+cfactor-1) / cfactor;
- compressed_min_issue_delay_vect
- = VEC_alloc (vect_el_t, heap, compressed_min_issue_delay_len);
+ compressed_min_issue_delay_vect.create (compressed_min_issue_delay_len);
for (i = 0; i < compressed_min_issue_delay_len; i++)
- VEC_quick_push (vect_el_t, compressed_min_issue_delay_vect, 0);
+ compressed_min_issue_delay_vect.quick_push (0);
for (i = 0; i < min_issue_delay_len; i++)
{
size_t ci = i / cfactor;
- vect_el_t x = VEC_index (vect_el_t, min_issue_delay_vect, i);
- vect_el_t cx = VEC_index (vect_el_t, compressed_min_issue_delay_vect, ci);
+ vect_el_t x = min_issue_delay_vect[i];
+ vect_el_t cx = compressed_min_issue_delay_vect[ci];
cx |= x << (8 - (i % cfactor + 1) * (8 / cfactor));
- VEC_replace (vect_el_t, compressed_min_issue_delay_vect, ci, cx);
+ compressed_min_issue_delay_vect[ci] = cx;
}
output_vect (compressed_min_issue_delay_vect);
fprintf (output_file, "};\n\n");
- VEC_free (state_t, heap, output_states_vect);
- VEC_free (vect_el_t, heap, min_issue_delay_vect);
- VEC_free (vect_el_t, heap, compressed_min_issue_delay_vect);
+ output_states_vect.release ();
+ min_issue_delay_vect.release ();
+ compressed_min_issue_delay_vect.release ();
}
/* Form and output vector representing the locked states of
{
size_t i;
arc_t arc;
- vla_hwint_t dead_lock_vect = 0;
+ vla_hwint_t dead_lock_vect = vla_hwint_t();
/* Create vect of pointers to states ordered by num of
transitions from the state (state with the maximum num is the
first). */
automaton->locked_states = 0;
- output_states_vect = 0;
+ output_states_vect.create (0);
pass_states (automaton, add_states_vect_el);
- VEC_safe_grow (vect_el_t, heap, dead_lock_vect,
- VEC_length (state_t, output_states_vect));
- for (i = 0; i < VEC_length (state_t, output_states_vect); i++)
+ dead_lock_vect.safe_grow (output_states_vect.length ());
+ for (i = 0; i < output_states_vect.length (); i++)
{
- state_t s = VEC_index (state_t, output_states_vect, i);
+ state_t s = output_states_vect[i];
arc = first_out_arc (s);
gcc_assert (arc);
if (next_out_arc (arc) == NULL
&& (arc->insn->insn_reserv_decl
== DECL_INSN_RESERV (advance_cycle_insn_decl)))
{
- VEC_replace (vect_el_t, dead_lock_vect, s->order_state_num, 1);
+ dead_lock_vect[s->order_state_num] = 1;
automaton->locked_states++;
}
else
- VEC_replace (vect_el_t, dead_lock_vect, s->order_state_num,
- (vect_el_t) 0);
+ dead_lock_vect[s->order_state_num] = (vect_el_t) 0;
}
if (automaton->locked_states == 0)
return;
fprintf (output_file, "[] = {\n");
output_vect (dead_lock_vect);
fprintf (output_file, "};\n\n");
- VEC_free (state_t, heap, output_states_vect);
- VEC_free (vect_el_t, heap, dead_lock_vect);
+ output_states_vect.release ();
+ dead_lock_vect.release ();
}
/* Form and output vector representing reserved units of the states of
static void
output_reserved_units_table (automaton_t automaton)
{
- vla_hwint_t reserved_units_table = 0;
+ vla_hwint_t reserved_units_table = vla_hwint_t();
int state_byte_size;
int reserved_units_size;
size_t n;
return;
/* Create vect of pointers to states. */
- output_states_vect = 0;
+ output_states_vect.create (0);
pass_states (automaton, add_states_vect_el);
/* Create vector. */
state_byte_size = (description->query_units_num + 7) / 8;
- reserved_units_size = (VEC_length (state_t, output_states_vect)
+ reserved_units_size = (output_states_vect.length ()
* state_byte_size);
- reserved_units_table = VEC_alloc (vect_el_t, heap, reserved_units_size);
+ reserved_units_table.create (reserved_units_size);
for (i = 0; i < reserved_units_size; i++)
- VEC_quick_push (vect_el_t, reserved_units_table, 0);
- for (n = 0; n < VEC_length (state_t, output_states_vect); n++)
+ reserved_units_table.quick_push (0);
+ for (n = 0; n < output_states_vect.length (); n++)
{
- state_t s = VEC_index (state_t, output_states_vect, n);
+ state_t s = output_states_vect[n];
for (i = 0; i < description->units_num; i++)
if (units_array [i]->query_p
&& first_cycle_unit_presence (s, i))
{
int ri = (s->order_state_num * state_byte_size
+ units_array [i]->query_num / 8);
- vect_el_t x = VEC_index (vect_el_t, reserved_units_table, ri);
+ vect_el_t x = reserved_units_table[ri];
x += 1 << (units_array [i]->query_num % 8);
- VEC_replace (vect_el_t, reserved_units_table, ri, x);
+ reserved_units_table[ri] = x;
}
}
fprintf (output_file, "\n#if %s\n", CPU_UNITS_QUERY_MACRO_NAME);
fprintf (output_file, "};\n#endif /* #if %s */\n\n",
CPU_UNITS_QUERY_MACRO_NAME);
- VEC_free (state_t, heap, output_states_vect);
- VEC_free (vect_el_t, heap, reserved_units_table);
+ output_states_vect.release ();
+ reserved_units_table.release ();
}
/* The function outputs all tables representing DFA(s) used for fast
/* The following variable is used for forming array of all possible cpu unit
reservations described by the current DFA state. */
-static VEC(reserv_sets_t, heap) *state_reservs;
+static vec<reserv_sets_t> state_reservs;
/* The function forms `state_reservs' for STATE. */
static void
curr_alt_state = curr_alt_state->next_sorted_alt_state)
add_state_reservs (curr_alt_state->state);
else
- VEC_safe_push (reserv_sets_t, heap, state_reservs, state->reservs);
+ state_reservs.safe_push (state->reservs);
}
/* The function outputs readable representation of all out arcs of
{
size_t i, j;
- for (i = 1, j = 0; i < VEC_length (reserv_sets_t, state_reservs); i++)
- if (reserv_sets_cmp (VEC_index (reserv_sets_t, state_reservs, j),
- VEC_index (reserv_sets_t, state_reservs, i)))
+ for (i = 1, j = 0; i < state_reservs.length (); i++)
+ if (reserv_sets_cmp (state_reservs[j], state_reservs[i]))
{
j++;
- VEC_replace (reserv_sets_t, state_reservs, j,
- VEC_index (reserv_sets_t, state_reservs, i));
+ state_reservs[j] = state_reservs[i];
}
- VEC_truncate (reserv_sets_t, state_reservs, j + 1);
+ state_reservs.truncate (j + 1);
}
/* The following function outputs a readable representation of DFA(s)
{
size_t i;
- state_reservs = 0;
+ state_reservs.create (0);
fprintf (output_description_file, " State #%d", state->order_state_num);
fprintf (output_description_file,
state->new_cycle_p ? " (new cycle)\n" : "\n");
add_state_reservs (state);
- VEC_qsort (reserv_sets_t, state_reservs, state_reservs_cmp);
+ state_reservs.qsort (state_reservs_cmp);
remove_state_duplicate_reservs ();
- for (i = 0; i < VEC_length (reserv_sets_t, state_reservs); i++)
+ for (i = 0; i < state_reservs.length (); i++)
{
fprintf (output_description_file, " ");
- output_reserv_sets (output_description_file,
- VEC_index (reserv_sets_t, state_reservs, i));
+ output_reserv_sets (output_description_file, state_reservs[i]);
fprintf (output_description_file, "\n");
}
fprintf (output_description_file, "\n");
output_state_arcs (state);
- VEC_free (reserv_sets_t, heap, state_reservs);
+ state_reservs.release ();
}
/* The following function outputs a readable representation of
#ifndef NDEBUG
fprintf
(f, "%5ld transition comb vector els, %5ld trans table els: %s\n",
- (long) VEC_length (vect_el_t, automaton->trans_table->comb_vect),
- (long) VEC_length (vect_el_t, automaton->trans_table->full_vect),
+ (long) automaton->trans_table->comb_vect.length (),
+ (long) automaton->trans_table->full_vect.length (),
(comb_vect_p (automaton->trans_table)
? "use comb vect" : "use simple vect"));
fprintf
(long) states_num * automaton->insn_equiv_classes_num,
automaton->min_issue_delay_table_compression_factor);
transition_comb_vect_els
- += VEC_length (vect_el_t, automaton->trans_table->comb_vect);
+ += automaton->trans_table->comb_vect.length ();
transition_full_vect_els
- += VEC_length (vect_el_t, automaton->trans_table->full_vect);
+ += automaton->trans_table->full_vect.length ();
min_issue_delay_vect_els
+= states_num * automaton->insn_equiv_classes_num;
locked_states
/* The following vla is used for storing pointers to all achieved
states. */
-static VEC(state_t, heap) *automaton_states;
+static vec<state_t> automaton_states;
/* This function is called by function pass_states to add an achieved
STATE. */
static void
add_automaton_state (state_t state)
{
- VEC_safe_push (state_t, heap, automaton_states, state);
+ automaton_states.safe_push (state);
}
/* The following function forms list of important automata (whose
int i;
size_t n;
- automaton_states = 0;
+ automaton_states.create (0);
/* Mark important ainsns. */
for (automaton = description->first_automaton;
automaton != NULL;
automaton = automaton->next_automaton)
{
- VEC_truncate (state_t, automaton_states, 0);
+ automaton_states.truncate (0);
pass_states (automaton, add_automaton_state);
- for (n = 0; n < VEC_length (state_t, automaton_states); n++)
+ for (n = 0; n < automaton_states.length (); n++)
{
- state_t s = VEC_index (state_t, automaton_states, n);
+ state_t s = automaton_states[n];
for (arc = first_out_arc (s);
arc != NULL;
arc = next_out_arc (arc))
}
}
}
- VEC_free (state_t, heap, automaton_states);
+ automaton_states.release ();
/* Create automata sets for the insns. */
for (i = 0; i < description->decls_num; i++)
description = XCREATENODEVAR (struct description,
sizeof (struct description)
/* Two entries for special insns. */
- + sizeof (decl_t) * (VEC_length (decl_t, decls) + 1));
- description->decls_num = VEC_length (decl_t, decls);
+ + sizeof (decl_t) * (decls.length () + 1));
+ description->decls_num = decls.length ();
description->normal_decls_num = description->decls_num;
description->query_units_num = 0;
for (i = 0; i < description->decls_num; i++)
{
- description->decls [i] = VEC_index (decl_t, decls, i);
+ description->decls [i] = decls[i];
if (description->decls [i]->mode == dm_unit
&& DECL_UNIT (description->decls [i])->query_p)
DECL_UNIT (description->decls [i])->query_num
if (have_error)
return FATAL_EXIT_CODE;
- if (VEC_length (decl_t, decls) > 0)
+ if (decls.length () > 0)
{
expand_automata ();
if (!have_error)
#include "read-md.h"
#include "gensupport.h"
#include "vec.h"
-#include "vecprim.h"
/* This structure contains all the information needed to describe one
set of extractions methods. Each method may be used by more than
/* This structure is used by gen_insn and walk_rtx to accumulate the
data that will be used to produce an extractions structure. */
-DEF_VEC_P(locstr);
-DEF_VEC_ALLOC_P(locstr,heap);
struct accum_extract
{
- VEC(locstr,heap) *oplocs;
- VEC(locstr,heap) *duplocs;
- VEC(int,heap) *dupnums;
- VEC(char,heap) *pathstr;
+ vec<locstr> oplocs;
+ vec<locstr> duplocs;
+ vec<int> dupnums;
+ vec<char> pathstr;
};
int line_no;
struct code_ptr *link;
struct accum_extract acc;
- acc.oplocs = VEC_alloc (locstr,heap, 10);
- acc.duplocs = VEC_alloc (locstr,heap, 10);
- acc.dupnums = VEC_alloc (int,heap, 10);
- acc.pathstr = VEC_alloc (char,heap, 20);
+ acc.oplocs.create (10);
+ acc.duplocs.create (10);
+ acc.dupnums.create (10);
+ acc.pathstr.create (20);
/* Walk the insn's pattern, remembering at all times the path
down to the walking point. */
else
for (i = XVECLEN (insn, 1) - 1; i >= 0; i--)
{
- VEC_safe_push (char,heap, acc.pathstr, 'a' + i);
+ acc.pathstr.safe_push ('a' + i);
walk_rtx (XVECEXP (insn, 1, i), &acc);
- VEC_pop (char, acc.pathstr);
+ acc.pathstr.pop ();
}
link = XNEW (struct code_ptr);
/* See if we find something that already had this extraction method. */
- op_count = VEC_length (locstr, acc.oplocs);
- dup_count = VEC_length (locstr, acc.duplocs);
- gcc_assert (dup_count == VEC_length (int, acc.dupnums));
+ op_count = acc.oplocs.length ();
+ dup_count = acc.duplocs.length ();
+ gcc_assert (dup_count == acc.dupnums.length ());
for (p = extractions; p; p = p->next)
{
for (j = 0; j < op_count; j++)
{
char *a = p->oplocs[j];
- char *b = VEC_index (locstr, acc.oplocs, j);
+ char *b = acc.oplocs[j];
if (a != b && (!a || !b || strcmp (a, b)))
break;
}
continue;
for (j = 0; j < dup_count; j++)
- if (p->dupnums[j] != VEC_index (int, acc.dupnums, j)
- || strcmp (p->duplocs[j], VEC_index (locstr, acc.duplocs, j)))
+ if (p->dupnums[j] != acc.dupnums[j]
+ || strcmp (p->duplocs[j], acc.duplocs[j]))
break;
if (j != dup_count)
p->duplocs = p->oplocs + op_count;
p->dupnums = (int *)(p->duplocs + dup_count);
- memcpy(p->oplocs, VEC_address(locstr,acc.oplocs), op_count*sizeof(locstr));
- memcpy(p->duplocs, VEC_address(locstr,acc.duplocs), dup_count*sizeof(locstr));
- memcpy(p->dupnums, VEC_address(int, acc.dupnums), dup_count*sizeof(int));
+ memcpy(p->oplocs, acc.oplocs.address(), op_count*sizeof(locstr));
+ memcpy(p->duplocs, acc.duplocs.address(), dup_count*sizeof(locstr));
+ memcpy(p->dupnums, acc.dupnums.address(), dup_count*sizeof(int));
done:
- VEC_free (locstr,heap, acc.oplocs);
- VEC_free (locstr,heap, acc.duplocs);
- VEC_free (int,heap, acc.dupnums);
- VEC_free (char,heap, acc.pathstr);
+ acc.oplocs.release ();
+ acc.duplocs.release ();
+ acc.dupnums.release ();
+ acc.pathstr.release ();
}
\f
-/* Helper subroutine of walk_rtx: given a VEC(locstr), an index, and a
+/* Helper subroutine of walk_rtx: given a vec<locstr>, an index, and a
string, insert the string at the index, which should either already
exist and be NULL, or not yet exist within the vector. In the latter
case the vector is enlarged as appropriate. */
static void
-VEC_safe_set_locstr (VEC(locstr,heap) **vp, unsigned int ix, char *str)
+VEC_safe_set_locstr (vec<locstr> *vp, unsigned int ix, char *str)
{
- if (ix < VEC_length (locstr, *vp))
+ if (ix < (*vp).length ())
{
- if (VEC_index (locstr, *vp, ix))
+ if ((*vp)[ix])
{
message_with_line (line_no, "repeated operand number %d", ix);
have_error = 1;
}
else
- VEC_replace (locstr, *vp, ix, str);
+ (*vp)[ix] = str;
}
else
{
- while (ix > VEC_length (locstr, *vp))
- VEC_safe_push (locstr, heap, *vp, NULL);
- VEC_safe_push (locstr, heap, *vp, str);
+ while (ix > (*vp).length ())
+ vp->safe_push (NULL);
+ vp->safe_push (str);
}
}
-/* Another helper subroutine of walk_rtx: given a VEC(char), convert it
+/* Another helper subroutine of walk_rtx: given a vec<char>, convert it
to a NUL-terminated string in malloc memory. */
static char *
-VEC_char_to_string (VEC(char,heap) *v)
+VEC_char_to_string (vec<char> v)
{
- size_t n = VEC_length (char, v);
+ size_t n = v.length ();
char *s = XNEWVEC (char, n + 1);
- memcpy (s, VEC_address (char, v), n);
+ memcpy (s, v.address (), n);
s[n] = '\0';
return s;
}
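Hypothetical usage of the two helpers above (vec.h assumed); the pushed characters stand in for the path letters accumulated during walk_rtx:

    vec<char> path;
    path.create (20);
    path.safe_push ('a');
    path.safe_push ('0');
    char *s = VEC_char_to_string (path);  /* xmalloc'ed copy: "a0" */
    free (s);
    path.release ();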
base = (code == MATCH_OPERATOR ? '0' : 'a');
for (i = XVECLEN (x, 2) - 1; i >= 0; i--)
{
- VEC_safe_push (char,heap, acc->pathstr, base + i);
+ acc->pathstr.safe_push (base + i);
walk_rtx (XVECEXP (x, 2, i), acc);
- VEC_pop (char, acc->pathstr);
+ acc->pathstr.pop ();
}
return;
case MATCH_DUP:
case MATCH_PAR_DUP:
case MATCH_OP_DUP:
- VEC_safe_push (locstr,heap, acc->duplocs,
- VEC_char_to_string (acc->pathstr));
- VEC_safe_push (int,heap, acc->dupnums, XINT (x, 0));
+ acc->duplocs.safe_push (VEC_char_to_string (acc->pathstr));
+ acc->dupnums.safe_push (XINT (x, 0));
if (code == MATCH_DUP)
break;
base = (code == MATCH_OP_DUP ? '0' : 'a');
for (i = XVECLEN (x, 1) - 1; i >= 0; i--)
{
- VEC_safe_push (char,heap, acc->pathstr, base + i);
+ acc->pathstr.safe_push (base + i);
walk_rtx (XVECEXP (x, 1, i), acc);
- VEC_pop (char, acc->pathstr);
+ acc->pathstr.pop ();
}
return;
{
if (fmt[i] == 'e' || fmt[i] == 'u')
{
- VEC_safe_push (char,heap, acc->pathstr, '0' + i);
+ acc->pathstr.safe_push ('0' + i);
walk_rtx (XEXP (x, i), acc);
- VEC_pop (char, acc->pathstr);
+ acc->pathstr.pop ();
}
else if (fmt[i] == 'E')
{
int j;
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
{
- VEC_safe_push (char,heap, acc->pathstr, 'a' + j);
+ acc->pathstr.safe_push ('a' + j);
walk_rtx (XVECEXP (x, i, j), acc);
- VEC_pop (char, acc->pathstr);
+ acc->pathstr.pop ();
}
}
}
return IGNORABLE_CXX_KEYWORD;
}
"GTY"/{EOID} { return GTY_TOKEN; }
-"VEC"/{EOID} { return VEC_TOKEN; }
"union"/{EOID} { return UNION; }
"struct"/{EOID} { return STRUCT; }
"class"/{EOID} { return STRUCT; }
}
"..." { return ELLIPSIS; }
-[(){},*:<>;=%|+-] { return yytext[0]; }
+[(){},*:<>;=%|+\!\?\.-] { return yytext[0]; }
/* ignore pp-directives */
^{HWS}"#"{HWS}[a-z_]+[^\n]*\n {lexer_line.line++;}
"union",
"struct",
"enum",
- "VEC",
"...",
"ptr_alias",
"nested_ptr",
}
-/* typedef_name: either an ID, or VEC(x,y), or a template type
- specification of the form ID<t1,t2,...,tn>.
-
- FIXME cxx-conversion. VEC(x,y) is currently translated to the
- template 'vec_t<x>'. This is to support the transition to C++ and
- avoid re-writing all the 'VEC(x,y)' declarations in the code. This
- needs to be fixed when the branch is merged into trunk. */
+/* typedef_name: either an ID, or a template type
+ specification of the form ID<t1,t2,...,tn>. */
static const char *
typedef_name (void)
{
- if (token () == VEC_TOKEN)
- {
- const char *c1, *r;
- advance ();
- require ('(');
- c1 = require2 (ID, SCALAR);
- require (',');
- require (ID);
- require (')');
- r = concat ("vec_t<", c1, ">", (char *) 0);
- free (CONST_CAST (char *, c1));
- return r;
- }
-
const char *id = require (ID);
if (token () == '<')
return require_template_declaration (id);
return create_scalar_type (s);
case ID:
- case VEC_TOKEN:
s = typedef_name ();
return resolve_typedef (s, &lexer_line);
fields = NULL;
kind = TYPE_USER_STRUCT;
consume_balanced ('{', '}');
+ return create_user_defined_type (s, &lexer_line);
}
return new_structure (s, kind, &lexer_line, fields, opts);
case TYPE_UNION:
case TYPE_LANG_STRUCT:
case TYPE_USER_STRUCT:
+ case TYPE_UNDEFINED:
return CONST_CAST (struct fileloc*, &ty->u.s.line);
case TYPE_PARAM_STRUCT:
return CONST_CAST (struct fileloc*, &ty->u.param_struct.line);
fatal ("Unexpected type in write_state_string_type");
}
+/* Write an undefined type. */
+static void
+write_state_undefined_type (type_p current)
+{
+ DBGPRINTF ("undefined type @ %p #%d '%s'", (void *) current,
+ current->state_number, current->u.s.tag);
+ fprintf (state_file, "undefined ");
+ gcc_assert (current->gc_used == GC_UNUSED);
+ write_state_common_type_content (current);
+ if (current->u.s.tag != NULL)
+ write_state_a_string (current->u.s.tag);
+ else
+ fprintf (state_file, "nil");
+
+ write_state_fileloc (type_lineloc (current));
+}
+
/* Common code to write structure like types. */
static void
{
case TYPE_NONE:
gcc_unreachable ();
+ case TYPE_UNDEFINED:
+ write_state_undefined_type (current);
+ break;
case TYPE_STRUCT:
write_state_struct_type (current);
break;
}
+/* Read an undefined type. */
+static void
+read_state_undefined_type (type_p type)
+{
+ struct state_token_st *t0;
+
+ type->kind = TYPE_UNDEFINED;
+ read_state_common_type_content (type);
+ t0 = peek_state_token (0);
+ if (state_token_kind (t0) == STOK_STRING)
+ {
+ if (state_token_is_name (t0, "nil"))
+ {
+ type->u.s.tag = NULL;
+ DBGPRINTF ("read anonymous undefined type @%p #%d",
+ (void *) type, type->state_number);
+ }
+ else
+ {
+ type->u.s.tag = xstrdup (t0->stok_un.stok_string);
+ DBGPRINTF ("read undefined type @%p #%d '%s'",
+ (void *) type, type->state_number, type->u.s.tag);
+ }
+
+ next_state_tokens (1);
+ read_state_fileloc (&(type->u.s.line));
+ }
+ else
+ {
+ fatal_reading_state (t0, "Bad tag in undefined type");
+ }
+}
+
+
/* Read a GTY-ed struct type. */
static void
read_state_struct_type (type_p type)
next_state_tokens (1);
read_state_string_type (current);
}
+ else if (state_token_is_name (t0, "undefined"))
+ {
+ *current = XCNEW (struct type);
+ next_state_tokens (1);
+ read_state_undefined_type (*current);
+ }
else if (state_token_is_name (t0, "struct"))
{
*current = XCNEW (struct type);
int nb_types = 0, nb_scalar = 0, nb_string = 0;
int nb_struct = 0, nb_union = 0, nb_array = 0, nb_pointer = 0;
int nb_lang_struct = 0, nb_param_struct = 0;
- int nb_user_struct = 0;
+ int nb_user_struct = 0, nb_undefined = 0;
type_p p = NULL;
for (p = t; p; p = p->next)
{
nb_types++;
switch (p->kind)
{
+ case TYPE_UNDEFINED:
+ nb_undefined++;
+ break;
case TYPE_SCALAR:
nb_scalar++;
break;
case TYPE_PARAM_STRUCT:
nb_param_struct++;
break;
- default:
+ case TYPE_NONE:
gcc_unreachable ();
}
}
nb_lang_struct, nb_param_struct);
if (nb_user_struct > 0)
fprintf (stderr, "@@%%@@ %d user_structs\n", nb_user_struct);
+ if (nb_undefined > 0)
+ fprintf (stderr, "@@%%@@ %d undefined types\n", nb_undefined);
fprintf (stderr, "\n");
}
#endif /* ENABLE_CHECKING */
/* Define TYPE_NAME to be a user defined type at location POS. */
-static type_p
+type_p
create_user_defined_type (const char *type_name, struct fileloc *pos)
{
type_p ty = find_structure (type_name, TYPE_USER_STRUCT);
}
-/* Return the type previously defined for S. Use POS to report errors. */
+/* Given a typedef name S, return its associated type. Return NULL if
+ S is not a registered type name. */
-type_p
-resolve_typedef (const char *s, struct fileloc *pos)
+static type_p
+type_for_name (const char *s)
{
pair_p p;
for (p = typedefs; p != NULL; p = p->next)
if (strcmp (p->name, s) == 0)
return p->type;
+ return NULL;
+}
+
+
+/* Create an undefined type with name S and location POS. Return the
+ newly created type. */
+
+static type_p
+create_undefined_type (const char *s, struct fileloc *pos)
+{
+ type_p ty = find_structure (s, TYPE_UNDEFINED);
+ ty->u.s.line = *pos;
+ ty->u.s.bitmap = get_lang_bitmap (pos->file);
+ do_typedef (s, ty, pos);
+ return ty;
+}
+
+
+/* Return the type previously defined for S. Use POS to report errors. */
- /* If we did not find a typedef registered, assume this is a name
- for a user-defined type which will need to provide its own
- marking functions. */
- return create_user_defined_type (s, pos);
+type_p
+resolve_typedef (const char *s, struct fileloc *pos)
+{
+ bool is_template_instance = (strchr (s, '<') != NULL);
+ type_p p = type_for_name (s);
+
+ /* If we did not find a registered typedef, generate a TYPE_UNDEFINED
+ type for regular type identifiers. If the type identifier S is a
+ template instantiation, however, we treat it as a user-defined
+ type.
+
+ FIXME: this is actually a limitation of gengtype. Supporting
+ template types and their instances would require keeping separate
+ track of the basic type definition and its instances. This
+ essentially forces all template classes used in GC to be marked
+ GTY((user)). */
+ if (!p)
+ p = (is_template_instance)
+ ? create_user_defined_type (s, pos)
+ : create_undefined_type (s, pos);
+
+ return p;
}
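For illustration only (the identifiers below are hypothetical, not from the tree), the new classification behaves as follows, assuming POS is the fileloc of the declaration being parsed:

  resolve_typedef ("tree", pos);         /* Registered typedef: existing type.  */
  resolve_typedef ("mystery_t", pos);    /* Unknown name: TYPE_UNDEFINED.  */
  resolve_typedef ("foo<int, 3>", pos);  /* Contains '<': TYPE_USER_STRUCT.  */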
type_p s;
bool isunion = (kind == TYPE_UNION);
- gcc_assert (union_or_struct_p (kind));
+ gcc_assert (kind == TYPE_UNDEFINED || union_or_struct_p (kind));
for (s = structures; s != NULL; s = s->next)
if (strcmp (name, s->u.s.tag) == 0 && UNION_P (s) == isunion)
}
\f
-static void set_gc_used_type (type_p, enum gc_used_enum, type_p *);
+static void set_gc_used_type (type_p, enum gc_used_enum, type_p *,
+ bool = false);
static void set_gc_used (pair_p);
/* Handle OPT for set_gc_used_type. */
}
-/* Set the gc_used field of T to LEVEL, and handle the types it references. */
+/* Set the gc_used field of T to LEVEL, and handle the types it references.
+
+ If ALLOW_UNDEFINED_TYPES is true, types of kind TYPE_UNDEFINED
+ are set to GC_UNUSED. Otherwise, an error is emitted for
+ TYPE_UNDEFINED types. This is used to support user-defined
+ template types with non-type arguments.
+
+ For instance, when we parse a template type with enum arguments
+ (e.g. MyType<AnotherType, EnumValue>), the parser creates two
+ artificial fields for 'MyType', one for 'AnotherType' and the
+ other for 'EnumValue'.
+
+ At the time we parse this type we don't know that 'EnumValue'
+ is really an enum value, so the parser creates a TYPE_UNDEFINED
+ type for it. Since 'EnumValue' is never resolved to a known
+ structure, it remains TYPE_UNDEFINED.
+
+ Since 'MyType' is a TYPE_USER_STRUCT, we can simply ignore
+ 'EnumValue'. Generating marking code for it would cause
+ compilation failures since the marking routines assume that
+ 'EnumValue' is a type. */
+
static void
-set_gc_used_type (type_p t, enum gc_used_enum level, type_p param[NUM_PARAM])
+set_gc_used_type (type_p t, enum gc_used_enum level, type_p param[NUM_PARAM],
+ bool allow_undefined_types)
{
if (t->gc_used >= level)
return;
pair_p f;
int dummy;
type_p dummy2;
+ bool allow_undefined_field_types = (t->kind == TYPE_USER_STRUCT);
process_gc_options (t->u.s.opt, level, &dummy, &dummy, &dummy, &dummy,
&dummy2);
else if (skip)
; /* target type is not used through this field */
else
- set_gc_used_type (f->type, GC_USED, pass_param ? param : NULL);
+ set_gc_used_type (f->type, GC_USED, pass_param ? param : NULL,
+ allow_undefined_field_types);
}
break;
}
+ case TYPE_UNDEFINED:
+ if (level > GC_UNUSED)
+ {
+ if (!allow_undefined_types)
+ error_at_line (&t->u.s.line, "undefined type `%s'", t->u.s.tag);
+ t->gc_used = GC_UNUSED;
+ }
+ break;
+
case TYPE_POINTER:
set_gc_used_type (t->u.p, GC_POINTED_TO, NULL);
break;
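As a concrete sketch of the MyType/EnumValue scenario described in the comment for set_gc_used_type (a hypothetical declaration, not one from the tree):

  enum placement { PLACE_GLOBAL, PLACE_LOCAL };

  /* gengtype parses a use such as 'my_container<tree, PLACE_LOCAL>' and
     creates an artificial field for 'PLACE_LOCAL'.  That name never
     resolves to a structure, so it stays TYPE_UNDEFINED, and it is
     ignored precisely because the enclosing template is marked
     GTY((user)) and provides its own marking routines.  */
  template<typename T, enum placement P>
  struct GTY((user)) my_container
  {
    T data;
  };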
size_t i;
char *s = xstrdup (type_name);
for (i = 0; i < strlen (s); i++)
- if (s[i] == '<' || s[i] == '>' || s[i] == ':')
+ if (s[i] == '<' || s[i] == '>' || s[i] == ':' || s[i] == ',')
s[i] = '_';
return s;
}
switch (t->kind)
{
case TYPE_NONE:
+ case TYPE_UNDEFINED:
gcc_unreachable ();
break;
case TYPE_POINTER:
d->process_field (t, d);
break;
- default:
+ case TYPE_NONE:
+ case TYPE_UNDEFINED:
gcc_unreachable ();
}
}
switch (f->kind)
{
case TYPE_NONE:
+ case TYPE_UNDEFINED:
gcc_unreachable ();
case TYPE_POINTER:
oprintf (d->of, "%*s%s (%s%s", d->indent, "",
gcc_unreachable ();
}
-
/* Write on OF a user-callable routine to act as an entry point for
the marking routine for S, generated by write_func_for_structure.
PREFIX is the prefix to use to distinguish ggc and pch markers. */
oprintf (d.of, " *)x_p;\n");
if (chain_next != NULL)
{
+ /* TYPE_USER_STRUCTs should not occur here. These structures
+ are completely handled by user code. */
+ gcc_assert (orig_s->kind != TYPE_USER_STRUCT);
+
oprintf (d.of, " ");
write_type_decl (d.of, s);
oprintf (d.of, " * xlimit = x;\n");
case TYPE_SCALAR:
break;
- default:
+ case TYPE_ARRAY:
+ case TYPE_NONE:
+ case TYPE_UNDEFINED:
gcc_unreachable ();
}
}
case TYPE_SCALAR:
break;
- default:
+ case TYPE_ARRAY:
+ case TYPE_NONE:
+ case TYPE_UNDEFINED:
gcc_unreachable ();
}
}
return 0;
case TYPE_ARRAY:
return contains_scalar_p (t->u.a.p);
+ case TYPE_USER_STRUCT:
+ /* User-marked structures will typically contain pointers. */
+ return 0;
default:
/* Could also check for structures that have no non-pointer
fields, but there aren't enough of those to worry about. */
break;
case TYPE_USER_STRUCT:
- write_root (f, v, type->u.a.p, name, has_length, line, if_marked,
- emit_pch);
+ error_at_line (line, "`%s' must be a pointer type, because it is "
+ "a GC root and its type is marked with GTY((user))",
+ v->name);
break;
case TYPE_POINTER:
case TYPE_SCALAR:
break;
- default:
+ case TYPE_NONE:
+ case TYPE_UNDEFINED:
+ case TYPE_UNION:
+ case TYPE_LANG_STRUCT:
+ case TYPE_PARAM_STRUCT:
error_at_line (line, "global `%s' is unimplemented type", name);
}
}
output_typename (of, t->u.param_struct.stru);
break;
}
- default:
+ case TYPE_NONE:
+ case TYPE_UNDEFINED:
+ case TYPE_ARRAY:
gcc_unreachable ();
}
}
case TYPE_STRUCT:
printf ("TYPE_STRUCT");
break;
+ case TYPE_UNDEFINED:
+ printf ("TYPE_UNDEFINED");
+ break;
case TYPE_USER_STRUCT:
printf ("TYPE_USER_STRUCT");
break;
enum typekind {
TYPE_NONE=0, /* Never used, so zeroed memory is invalid. */
+ TYPE_UNDEFINED, /* We have not yet seen a definition for this type.
+ If a type is still undefined when generating code,
+ an error will be generated. */
TYPE_SCALAR, /* Scalar types like char. */
TYPE_STRING, /* The string type. */
TYPE_STRUCT, /* Type for GTY-ed structs. */
extern type_p new_structure (const char *name, enum typekind kind,
struct fileloc *pos, pair_p fields,
options_p o);
+extern type_p create_user_defined_type (const char *, struct fileloc *);
extern type_p find_structure (const char *s, enum typekind kind);
extern type_p create_scalar_type (const char *name);
extern type_p create_pointer (type_p t);
UNION,
STRUCT,
ENUM,
- VEC_TOKEN,
ELLIPSIS,
PTR_ALIAS,
NESTED_PTR,
unsigned int sort_num;
} pattern;
-DEF_VEC_O(pattern);
-DEF_VEC_ALLOC_O(pattern, heap);
-static VEC(pattern, heap) *patterns;
+static vec<pattern> patterns;
static bool
match_pattern (pattern *p, const char *name, const char *pat)
{
p.op = optabs[pindex].op;
p.sort_num = (p.op << 16) | (p.m2 << 8) | p.m1;
- VEC_safe_push (pattern, heap, patterns, p);
+ patterns.safe_push (p);
return;
}
}
}
/* Sort the collected patterns. */
- qsort (VEC_address (pattern, patterns), VEC_length (pattern, patterns),
+ qsort (patterns.address (), patterns.length (),
sizeof (pattern), pattern_cmp);
/* Now that we've handled the "extra" patterns, eliminate them from
fprintf (h_file, "#define NUM_NORMLIB_OPTABS %u\n",
last_kind[3] - last_kind[2]);
fprintf (h_file, "#define NUM_OPTAB_PATTERNS %u\n",
- (unsigned) VEC_length (pattern, patterns));
+ (unsigned) patterns.length ());
fprintf (s_file,
"#include \"config.h\"\n"
fprintf (s_file,
"static const struct optab_pat pats[NUM_OPTAB_PATTERNS] = {\n");
- for (i = 0; VEC_iterate (pattern, patterns, i, p); ++i)
+ for (i = 0; patterns.iterate (i, &p); ++i)
fprintf (s_file, " { %#08x, CODE_FOR_%s },\n", p->sort_num, p->name);
fprintf (s_file, "};\n\n");
fprintf (s_file, "void\ninit_all_optabs (void)\n{\n");
fprintf (s_file, " bool *ena = this_target_optabs->pat_enable;\n");
- for (i = 0; VEC_iterate (pattern, patterns, i, p); ++i)
+ for (i = 0; patterns.iterate (i, &p); ++i)
fprintf (s_file, " ena[%u] = HAVE_%s;\n", i, p->name);
fprintf (s_file, "}\n\n");
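All of the vec conversions in the surrounding hunks follow the same mechanical mapping from the old VEC macros onto the new member functions. For reference, the correspondences that appear in this patch:

  VEC_length (T, v)               v.length ()
  VEC_empty (T, v)                v.is_empty ()
  VEC_index (T, v, i)             v[i]
  VEC_last (T, v)                 v.last ()
  VEC_iterate (T, v, i, p)        v.iterate (i, &p)
  VEC_alloc (T, heap, n)          v.create (n)
  VEC_free (T, heap, v)           v.release ()
  VEC_safe_push (T, heap, v, x)   v.safe_push (x)
  VEC_quick_push (T, v, x)        v.quick_push (x)
  VEC_pop (T, v)                  v.pop ()
  VEC_qsort (T, v, cmp)           v.qsort (cmp)
  v == NULL                       !v.exists ()
  FOR_EACH_VEC_ELT (T, v, i, p)   FOR_EACH_VEC_ELT (v, i, p)

GC-owned vectors are declared as vec<T, va_gc> * instead and use the NULL-safe free functions: vec_safe_length (v), vec_safe_push (v, x), vec_safe_is_empty (v), vec_safe_grow_cleared (v, n), vec_safe_copy (v) and FOR_EACH_VEC_SAFE_ELT (v, i, p).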
tables, for instance from some plugins; this vector is on the heap
since it is used by GGC internally. */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
-DEF_VEC_P(const_ggc_root_tab_t);
-DEF_VEC_ALLOC_P(const_ggc_root_tab_t, heap);
-static VEC(const_ggc_root_tab_t, heap) *extra_root_vec;
+static vec<const_ggc_root_tab_t> extra_root_vec;
/* Dynamically register a new GGC root table RT. This is useful for
plugins. */
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
if (rt)
- VEC_safe_push (const_ggc_root_tab_t, heap, extra_root_vec, rt);
+ extra_root_vec.safe_push (rt);
}
/* This extra vector of dynamically registered cache_tab-s is used by
tables, for instance from some plugins; this vector is on the heap
since it is used by GGC internally. */
typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
-DEF_VEC_P(const_ggc_cache_tab_t);
-DEF_VEC_ALLOC_P(const_ggc_cache_tab_t, heap);
-static VEC(const_ggc_cache_tab_t, heap) *extra_cache_vec;
+static vec<const_ggc_cache_tab_t> extra_cache_vec;
/* Dynamically register a new GGC cache table CT. This is useful for
plugins. */
ggc_register_cache_tab (const struct ggc_cache_tab* ct)
{
if (ct)
- VEC_safe_push (const_ggc_cache_tab_t, heap, extra_cache_vec, ct);
+ extra_cache_vec.safe_push (ct);
}
/* Scan a hash table that has objects which are to be deleted if they are not
for (rt = gt_ggc_rtab; *rt; rt++)
ggc_mark_root_tab (*rt);
- FOR_EACH_VEC_ELT (const_ggc_root_tab_t, extra_root_vec, i, rtp)
+ FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
ggc_mark_root_tab (rtp);
if (ggc_protect_identifiers)
for (ct = gt_ggc_cache_rtab; *ct; ct++)
ggc_scan_cache_tab (*ct);
- FOR_EACH_VEC_ELT (const_ggc_cache_tab_t, extra_cache_vec, i, ctp)
+ FOR_EACH_VEC_ELT (extra_cache_vec, i, ctp)
ggc_scan_cache_tab (ctp);
if (! ggc_protect_identifiers)
/* Used by the gt_pch_n_* routines. Register an object in the hash table. */
extern int gt_pch_note_object (void *, void *, gt_note_pointers,
- enum gt_types_enum);
+ enum gt_types_enum = gt_types_enum_last);
/* Used by the gt_pch_n_* routines. Register that an object has a reorder
function. */
};
typedef struct return_statements_t return_statements_t;
-DEF_VEC_O(return_statements_t);
-DEF_VEC_ALLOC_O(return_statements_t,heap);
struct lower_data
{
/* A vector of label and return statements to be moved to the end
of the function. */
- VEC(return_statements_t,heap) *return_statements;
+ vec<return_statements_t> return_statements;
/* True if the current statement cannot fall through. */
bool cannot_fallthru;
BLOCK_SUBBLOCKS (data.block) = NULL_TREE;
BLOCK_CHAIN (data.block) = NULL_TREE;
TREE_ASM_WRITTEN (data.block) = 1;
- data.return_statements = VEC_alloc (return_statements_t, heap, 8);
+ data.return_statements.create (8);
bind = gimple_seq_first_stmt (body);
lowered_body = NULL;
If we've already got one in the return_statements vector, we don't
need to do anything special. Otherwise build one by hand. */
if (gimple_seq_may_fallthru (lowered_body)
- && (VEC_empty (return_statements_t, data.return_statements)
- || gimple_return_retval (VEC_last (return_statements_t,
- data.return_statements).stmt) != NULL))
+ && (data.return_statements.is_empty ()
+ || gimple_return_retval (data.return_statements.last ().stmt) != NULL))
{
x = gimple_build_return (NULL);
gimple_set_location (x, cfun->function_end_locus);
/* If we lowered any return statements, emit the representative
at the end of the function. */
- while (!VEC_empty (return_statements_t, data.return_statements))
+ while (!data.return_statements.is_empty ())
{
- return_statements_t t;
-
- /* Unfortunately, we can't use VEC_pop because it returns void for
- objects. */
- t = VEC_last (return_statements_t, data.return_statements);
- VEC_truncate (return_statements_t,
- data.return_statements,
- VEC_length (return_statements_t,
- data.return_statements) - 1);
-
+ return_statements_t t = data.return_statements.pop ();
x = gimple_build_label (t.label);
gsi_insert_after (&i, x, GSI_CONTINUE_LINKING);
gsi_insert_after (&i, t.stmt, GSI_CONTINUE_LINKING);
= blocks_nreverse (BLOCK_SUBBLOCKS (data.block));
clear_block_marks (data.block);
- VEC_free(return_statements_t, heap, data.return_statements);
+ data.return_statements.release ();
return 0;
}
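Note the simplification above: unlike VEC_pop, which returned void for vectors of objects, vec::pop returns the removed element, so the old VEC_last plus VEC_truncate sequence collapses into a single call. A minimal sketch of the semantics:

  vec<int> v;
  v.create (2);
  v.safe_push (1);
  v.safe_push (2);
  int last = v.pop ();   /* last == 2, v.length () == 1.  */
  v.release ();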
return_statements_t tmp_rs;
/* Match this up with an existing return statement that's been created. */
- for (i = VEC_length (return_statements_t, data->return_statements) - 1;
+ for (i = data->return_statements.length () - 1;
i >= 0; i--)
{
- tmp_rs = VEC_index (return_statements_t, data->return_statements, i);
+ tmp_rs = data->return_statements[i];
if (gimple_return_retval (stmt) == gimple_return_retval (tmp_rs.stmt))
{
/* Not found. Create a new label and record the return statement. */
tmp_rs.label = create_artificial_label (cfun->function_end_locus);
tmp_rs.stmt = stmt;
- VEC_safe_push (return_statements_t, heap, data->return_statements, tmp_rs);
+ data->return_statements.safe_push (tmp_rs);
/* Generate a goto statement and remove the return statement. */
found:
/* Candidates are maintained in a vector. If candidate X dominates
candidate Y, then X appears before Y in the vector; but the
converse does not necessarily hold. */
-DEF_VEC_P (slsr_cand_t);
-DEF_VEC_ALLOC_P (slsr_cand_t, heap);
-static VEC (slsr_cand_t, heap) *cand_vec;
+static vec<slsr_cand_t> cand_vec;
enum cost_consts
{
static slsr_cand_t
lookup_cand (cand_idx idx)
{
- return VEC_index (slsr_cand_t, cand_vec, idx - 1);
+ return cand_vec[idx - 1];
}
/* Callback to produce a hash value for a candidate chain header. */
c->index = index;
c->cand_type = ctype;
c->kind = kind;
- c->cand_num = VEC_length (slsr_cand_t, cand_vec) + 1;
+ c->cand_num = cand_vec.length () + 1;
c->next_interp = 0;
c->dependent = 0;
c->sibling = 0;
c->def_phi = NULL;
c->dead_savings = savings;
- VEC_safe_push (slsr_cand_t, heap, cand_vec, c);
+ cand_vec.safe_push (c);
c->basis = find_basis_for_candidate (c);
record_potential_basis (c);
fprintf (dump_file, "\nStrength reduction candidate vector:\n\n");
- FOR_EACH_VEC_ELT (slsr_cand_t, cand_vec, i, c)
+ FOR_EACH_VEC_ELT (cand_vec, i, c)
dump_candidate (c);
}
dependent is the root of a tree of related statements.
Analyze each tree to determine a subset of those
statements that can be replaced with maximum benefit. */
- FOR_EACH_VEC_ELT (slsr_cand_t, cand_vec, i, c)
+ FOR_EACH_VEC_ELT (cand_vec, i, c)
{
slsr_cand_t first_dep;
gcc_obstack_init (&cand_obstack);
/* Allocate the candidate vector. */
- cand_vec = VEC_alloc (slsr_cand_t, heap, 128);
+ cand_vec.create (128);
/* Allocate the mapping from statements to candidate indices. */
stmt_cand_map = pointer_map_create ();
htab_delete (base_cand_map);
obstack_free (&chain_obstack, NULL);
pointer_map_destroy (stmt_cand_map);
- VEC_free (slsr_cand_t, heap, cand_vec);
+ cand_vec.release ();
obstack_free (&cand_obstack, NULL);
return 0;
gimple result;
ix = streamer_read_uhwi (ib);
- phi_result = VEC_index (tree, SSANAMES (fn), ix);
+ phi_result = (*SSANAMES (fn))[ix];
len = EDGE_COUNT (bb->preds);
result = create_phi_node (phi_result, bb);
specified in vector ARGS. */
gimple
-gimple_build_call_vec (tree fn, VEC(tree, heap) *args)
+gimple_build_call_vec (tree fn, vec<tree> args)
{
unsigned i;
- unsigned nargs = VEC_length (tree, args);
+ unsigned nargs = args.length ();
gimple call = gimple_build_call_1 (fn, nargs);
for (i = 0; i < nargs; i++)
- gimple_call_set_arg (call, i, VEC_index (tree, args, i));
+ gimple_call_set_arg (call, i, args[i]);
return call;
}
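A typical caller (illustrative names) builds the argument vector on the heap, passes it by value, and releases it afterwards, exactly as the tree-ssa hunk later in this patch does:

  vec<tree> vargs;
  vargs.create (2);                 /* Reserve space for two arguments.  */
  vargs.quick_push (arg0);
  vargs.quick_push (arg1);
  gimple call = gimple_build_call_vec (fndecl, vargs);
  vargs.release ();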
specified in vector ARGS. */
gimple
-gimple_build_call_internal_vec (enum internal_fn fn, VEC(tree, heap) *args)
+gimple_build_call_internal_vec (enum internal_fn fn, vec<tree> args)
{
unsigned i, nargs;
gimple call;
- nargs = VEC_length (tree, args);
+ nargs = args.length ();
call = gimple_build_call_internal_1 (fn, nargs);
for (i = 0; i < nargs; i++)
- gimple_call_set_arg (call, i, VEC_index (tree, args, i));
+ gimple_call_set_arg (call, i, args[i]);
return call;
}
LABELS is a vector of destination labels. */
gimple
-gimple_build_asm_vec (const char *string, VEC(tree,gc)* inputs,
- VEC(tree,gc)* outputs, VEC(tree,gc)* clobbers,
- VEC(tree,gc)* labels)
+gimple_build_asm_vec (const char *string, vec<tree, va_gc> *inputs,
+ vec<tree, va_gc> *outputs, vec<tree, va_gc> *clobbers,
+ vec<tree, va_gc> *labels)
{
gimple p;
unsigned i;
p = gimple_build_asm_1 (string,
- VEC_length (tree, inputs),
- VEC_length (tree, outputs),
- VEC_length (tree, clobbers),
- VEC_length (tree, labels));
+ vec_safe_length (inputs),
+ vec_safe_length (outputs),
+ vec_safe_length (clobbers),
+ vec_safe_length (labels));
- for (i = 0; i < VEC_length (tree, inputs); i++)
- gimple_asm_set_input_op (p, i, VEC_index (tree, inputs, i));
+ for (i = 0; i < vec_safe_length (inputs); i++)
+ gimple_asm_set_input_op (p, i, (*inputs)[i]);
- for (i = 0; i < VEC_length (tree, outputs); i++)
- gimple_asm_set_output_op (p, i, VEC_index (tree, outputs, i));
+ for (i = 0; i < vec_safe_length (outputs); i++)
+ gimple_asm_set_output_op (p, i, (*outputs)[i]);
- for (i = 0; i < VEC_length (tree, clobbers); i++)
- gimple_asm_set_clobber_op (p, i, VEC_index (tree, clobbers, i));
+ for (i = 0; i < vec_safe_length (clobbers); i++)
+ gimple_asm_set_clobber_op (p, i, (*clobbers)[i]);
- for (i = 0; i < VEC_length (tree, labels); i++)
- gimple_asm_set_label_op (p, i, VEC_index (tree, labels, i));
+ for (i = 0; i < vec_safe_length (labels); i++)
+ gimple_asm_set_label_op (p, i, (*labels)[i]);
return p;
}
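Because the operand vectors here are GC-allocated (vec<tree, va_gc> *), a caller can start from NULL and rely on the vec_safe_* functions, which treat a null vector as empty and allocate on first push; the gimplify.c hunk below does exactly this. Illustrative only:

  vec<tree, va_gc> *inputs = NULL;
  vec_safe_push (inputs, link);     /* Allocates the vector on first use.  */
  gimple p = gimple_build_asm_vec (string, inputs, NULL, NULL, NULL);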
ARGS is a vector of labels excluding the default. */
gimple
-gimple_build_switch (tree index, tree default_label, VEC(tree, heap) *args)
+gimple_build_switch (tree index, tree default_label, vec<tree> args)
{
- unsigned i, nlabels = VEC_length (tree, args);
+ unsigned i, nlabels = args.length ();
gimple p = gimple_build_switch_nlabels (nlabels, index, default_label);
/* Copy the labels from the vector to the switch statement. */
for (i = 0; i < nlabels; i++)
- gimple_switch_set_label (p, i + 1, VEC_index (tree, args, i));
+ gimple_switch_set_label (p, i + 1, args[i]);
return p;
}
uid = LABEL_DECL_UID (t);
if (uid == -1)
{
- unsigned old_len = VEC_length (basic_block, label_to_block_map);
+ unsigned old_len = vec_safe_length (label_to_block_map);
LABEL_DECL_UID (t) = uid = cfun->cfg->last_label_uid++;
if (old_len <= (unsigned) uid)
{
unsigned new_len = 3 * uid / 2 + 1;
- VEC_safe_grow_cleared (basic_block, gc, label_to_block_map,
- new_len);
+ vec_safe_grow_cleared (label_to_block_map, new_len);
}
}
- VEC_replace (basic_block, label_to_block_map, uid, bb);
+ (*label_to_block_map)[uid] = bb;
}
}
{
int i;
int nargs = gimple_call_num_args (stmt);
- VEC(tree, heap) *vargs = VEC_alloc (tree, heap, nargs);
+ vec<tree> vargs;
+ vargs.create (nargs);
gimple new_stmt;
for (i = 0; i < nargs; i++)
if (!bitmap_bit_p (args_to_skip, i))
- VEC_quick_push (tree, vargs, gimple_call_arg (stmt, i));
+ vargs.quick_push (gimple_call_arg (stmt, i));
if (gimple_call_internal_p (stmt))
new_stmt = gimple_build_call_internal_vec (gimple_call_internal_fn (stmt),
vargs);
else
new_stmt = gimple_build_call_vec (gimple_call_fn (stmt), vargs);
- VEC_free (tree, heap, vargs);
+ vargs.release ();
if (gimple_call_lhs (stmt))
gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
#include "pointer-set.h"
#include "vec.h"
-#include "vecprim.h"
-#include "vecir.h"
#include "ggc.h"
#include "basic-block.h"
#include "tree.h"
/* For each block, the PHI nodes that need to be rewritten are stored into
these vectors. */
-typedef VEC(gimple, heap) *gimple_vec;
-DEF_VEC_P (gimple_vec);
-DEF_VEC_ALLOC_P (gimple_vec, heap);
+typedef vec<gimple> gimple_vec;
enum gimple_code {
#define DEFGSCODE(SYM, STRING, STRUCT) SYM,
#define gimple_build_debug_source_bind(var,val,stmt) \
gimple_build_debug_source_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
-gimple gimple_build_call_vec (tree, VEC(tree, heap) *);
+gimple gimple_build_call_vec (tree, vec<tree>);
gimple gimple_build_call (tree, unsigned, ...);
gimple gimple_build_call_valist (tree, unsigned, va_list);
gimple gimple_build_call_internal (enum internal_fn, unsigned, ...);
-gimple gimple_build_call_internal_vec (enum internal_fn, VEC(tree, heap) *);
+gimple gimple_build_call_internal_vec (enum internal_fn, vec<tree>);
gimple gimple_build_call_from_tree (tree);
gimple gimplify_assign (tree, tree, gimple_seq *);
gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree);
gimple gimple_build_goto (tree dest);
gimple gimple_build_nop (void);
gimple gimple_build_bind (tree, gimple_seq, tree);
-gimple gimple_build_asm_vec (const char *, VEC(tree,gc) *, VEC(tree,gc) *,
- VEC(tree,gc) *, VEC(tree,gc) *);
+gimple gimple_build_asm_vec (const char *, vec<tree, va_gc> *,
+ vec<tree, va_gc> *, vec<tree, va_gc> *,
+ vec<tree, va_gc> *);
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
gimple gimple_build_resx (int);
gimple gimple_build_eh_dispatch (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
-gimple gimple_build_switch (tree, tree, VEC(tree,heap) *);
+gimple gimple_build_switch (tree, tree, vec<tree>);
gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree);
gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq);
gimple gimple_build_transaction (gimple_seq, tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
-void sort_case_labels (VEC(tree,heap) *);
-void preprocess_case_label_vec_for_gimple (VEC(tree,heap) *, tree, tree *);
+void sort_case_labels (vec<tree>);
+void preprocess_case_label_vec_for_gimple (vec<tree>, tree, tree *);
void gimple_set_body (tree, gimple_seq);
gimple_seq gimple_body (tree);
bool gimple_has_body_p (tree);
{
struct gimplify_ctx *prev_context;
- VEC(gimple,heap) *bind_expr_stack;
+ vec<gimple> bind_expr_stack;
tree temps;
gimple_seq conditional_cleanups;
tree exit_label;
tree return_temp;
- VEC(tree,heap) *case_labels;
+ vec<tree> case_labels;
/* The formal temporary table. Should this be persistent? */
htab_t temp_htab;
/* Miscellaneous helpers. */
extern void gimple_add_tmp_var (tree);
extern gimple gimple_current_bind_expr (void);
-extern VEC(gimple, heap) *gimple_bind_expr_stack (void);
+extern vec<gimple> gimple_bind_expr_stack (void);
extern tree voidify_wrapper_expr (tree, tree);
extern tree build_and_jump (tree *);
extern tree force_labels_r (tree *, int *, void *);
{
struct gimplify_ctx *c = gimplify_ctxp;
- gcc_assert (c && (c->bind_expr_stack == NULL
- || VEC_empty (gimple, c->bind_expr_stack)));
- VEC_free (gimple, heap, c->bind_expr_stack);
+ gcc_assert (c
+ && (!c->bind_expr_stack.exists ()
+ || c->bind_expr_stack.is_empty ()));
+ c->bind_expr_stack.release ();
gimplify_ctxp = c->prev_context;
if (body)
static void
gimple_push_bind_expr (gimple gimple_bind)
{
- if (gimplify_ctxp->bind_expr_stack == NULL)
- gimplify_ctxp->bind_expr_stack = VEC_alloc (gimple, heap, 8);
- VEC_safe_push (gimple, heap, gimplify_ctxp->bind_expr_stack, gimple_bind);
+ gimplify_ctxp->bind_expr_stack.reserve (8);
+ gimplify_ctxp->bind_expr_stack.safe_push (gimple_bind);
}
/* Pop the first element off the stack of bindings. */
static void
gimple_pop_bind_expr (void)
{
- VEC_pop (gimple, gimplify_ctxp->bind_expr_stack);
+ gimplify_ctxp->bind_expr_stack.pop ();
}
/* Return the first element of the stack of bindings. */
gimple
gimple_current_bind_expr (void)
{
- return VEC_last (gimple, gimplify_ctxp->bind_expr_stack);
+ return gimplify_ctxp->bind_expr_stack.last ();
}
/* Return the stack of bindings created during gimplification. */
-VEC(gimple, heap) *
+vec<gimple>
gimple_bind_expr_stack (void)
{
return gimplify_ctxp->bind_expr_stack;
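Returning vec<gimple> by value is cheap: with the default vl_ptr layout the object is a single pointer to the embedded vector, so the copy aliases the same storage rather than duplicating the elements. A caller must therefore not release the returned value:

  vec<gimple> stack = gimple_bind_expr_stack ();
  /* 'stack' shares storage with gimplify_ctxp->bind_expr_stack;
     do not call stack.release () here.  */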
&& !is_gimple_reg (t)
&& flag_stack_reuse != SR_NONE)
{
- tree clobber = build_constructor (TREE_TYPE (t), NULL);
+ tree clobber = build_constructor (TREE_TYPE (t),
+ NULL);
TREE_THIS_VOLATILE (clobber) = 1;
gimplify_seq_add_stmt (&cleanup, gimple_build_assign (t, clobber));
}
/* Sort the case labels in LABEL_VEC in place in ascending order. */
void
-sort_case_labels (VEC(tree,heap)* label_vec)
+sort_case_labels (vec<tree> label_vec)
{
- VEC_qsort (tree, label_vec, compare_case_labels);
+ label_vec.qsort (compare_case_labels);
}
\f
/* Prepare a vector of case labels to be used in a GIMPLE_SWITCH statement.
found or not. */
void
-preprocess_case_label_vec_for_gimple (VEC(tree,heap) *labels,
+preprocess_case_label_vec_for_gimple (vec<tree> labels,
tree index_type,
tree *default_casep)
{
i = 0;
min_value = TYPE_MIN_VALUE (index_type);
max_value = TYPE_MAX_VALUE (index_type);
- while (i < VEC_length (tree, labels))
+ while (i < labels.length ())
{
- tree elt = VEC_index (tree, labels, i);
+ tree elt = labels[i];
tree low = CASE_LOW (elt);
tree high = CASE_HIGH (elt);
bool remove_element = FALSE;
}
if (remove_element)
- VEC_ordered_remove (tree, labels, i);
+ labels.ordered_remove (i);
else
i++;
}
len = i;
- if (!VEC_empty (tree, labels))
+ if (!labels.is_empty ())
sort_case_labels (labels);
if (default_casep && !default_case)
if (len
&& TYPE_MIN_VALUE (index_type)
&& TYPE_MAX_VALUE (index_type)
- && tree_int_cst_equal (CASE_LOW (VEC_index (tree, labels, 0)),
+ && tree_int_cst_equal (CASE_LOW (labels[0]),
TYPE_MIN_VALUE (index_type)))
{
- tree low, high = CASE_HIGH (VEC_index (tree, labels, len - 1));
+ tree low, high = CASE_HIGH (labels[len - 1]);
if (!high)
- high = CASE_LOW (VEC_index (tree, labels, len - 1));
+ high = CASE_LOW (labels[len - 1]);
if (tree_int_cst_equal (high, TYPE_MAX_VALUE (index_type)))
{
for (i = 1; i < len; i++)
{
- high = CASE_LOW (VEC_index (tree, labels, i));
- low = CASE_HIGH (VEC_index (tree, labels, i - 1));
+ high = CASE_LOW (labels[i]);
+ low = CASE_HIGH (labels[i - 1]);
if (!low)
- low = CASE_LOW (VEC_index (tree, labels, i - 1));
+ low = CASE_LOW (labels[i - 1]);
if ((TREE_INT_CST_LOW (low) + 1
!= TREE_INT_CST_LOW (high))
|| (TREE_INT_CST_HIGH (low)
}
if (i == len)
{
- tree label = CASE_LABEL (VEC_index (tree, labels, 0));
+ tree label = CASE_LABEL (labels[0]);
default_case = build_case_label (NULL_TREE, NULL_TREE,
label);
}
if (SWITCH_BODY (switch_expr))
{
- VEC (tree,heap) *labels;
- VEC (tree,heap) *saved_labels;
+ vec<tree> labels;
+ vec<tree> saved_labels;
tree default_case = NULL_TREE;
gimple gimple_switch;
/* Save old labels, get new ones from body, then restore the old
labels. Save all the things from the switch body to append after. */
saved_labels = gimplify_ctxp->case_labels;
- gimplify_ctxp->case_labels = VEC_alloc (tree, heap, 8);
+ gimplify_ctxp->case_labels.create (8);
gimplify_stmt (&SWITCH_BODY (switch_expr), &switch_body_seq);
labels = gimplify_ctxp->case_labels;
default_case, labels);
gimplify_seq_add_stmt (pre_p, gimple_switch);
gimplify_seq_add_seq (pre_p, switch_body_seq);
- VEC_free(tree, heap, labels);
+ labels.release ();
}
else
gcc_assert (SWITCH_LABELS (switch_expr));
#pragma omp parallel. At least in the C front end, we don't
detect such invalid branches until after gimplification. */
for (ctxp = gimplify_ctxp; ; ctxp = ctxp->prev_context)
- if (ctxp->case_labels)
+ if (ctxp->case_labels.exists ())
break;
gimple_label = gimple_build_label (CASE_LABEL (*expr_p));
- VEC_safe_push (tree, heap, ctxp->case_labels, *expr_p);
+ ctxp->case_labels.safe_push (*expr_p);
gimplify_seq_add_stmt (pre_p, gimple_label);
return GS_ALL_DONE;
fallback_t fallback)
{
tree *p;
- VEC(tree,heap) *expr_stack;
+ vec<tree> expr_stack;
enum gimplify_status ret = GS_ALL_DONE, tret;
int i;
location_t loc = EXPR_LOCATION (*expr_p);
/* Create a stack of the subexpressions so later we can walk them in
order from inner to outer. */
- expr_stack = VEC_alloc (tree, heap, 10);
+ expr_stack.create (10);
/* We can handle anything that get_inner_reference can deal with. */
for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
else
break;
- VEC_safe_push (tree, heap, expr_stack, *p);
+ expr_stack.safe_push (*p);
}
- gcc_assert (VEC_length (tree, expr_stack));
+ gcc_assert (expr_stack.length ());
/* Now EXPR_STACK is a stack of pointers to all the refs we've
walked through and P points to the innermost expression.
So we do this in three steps. First we deal with the annotations
for any variables in the components, then we gimplify the base,
then we gimplify any indices, from left to right. */
- for (i = VEC_length (tree, expr_stack) - 1; i >= 0; i--)
+ for (i = expr_stack.length () - 1; i >= 0; i--)
{
- tree t = VEC_index (tree, expr_stack, i);
+ tree t = expr_stack[i];
if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
{
/* And finally, the indices and operands of ARRAY_REF. During this
loop we also remove any useless conversions. */
- for (; VEC_length (tree, expr_stack) > 0; )
+ for (; expr_stack.length () > 0; )
{
- tree t = VEC_pop (tree, expr_stack);
+ tree t = expr_stack.pop ();
if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
{
canonicalize_component_ref (expr_p);
}
- VEC_free (tree, heap, expr_stack);
+ expr_stack.release ();
gcc_assert (*expr_p == expr || ret != GS_ALL_DONE);
from = TREE_OPERAND (from, 0);
gcc_assert (TREE_CODE (from) == CONSTRUCTOR
- && VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (from)));
+ && vec_safe_is_empty (CONSTRUCTOR_ELTS (from)));
/* Now proceed. */
to = TREE_OPERAND (*expr_p, 0);
{
unsigned HOST_WIDE_INT ix;
constructor_elt *ce;
- VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (*expr_p);
+ vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (*expr_p);
- FOR_EACH_VEC_ELT (constructor_elt, v, ix, ce)
+ FOR_EACH_VEC_SAFE_ELT (v, ix, ce)
gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data);
return;
Note that we never have to deal with SAVE_EXPRs here, because this has
already been taken care of for us, in gimplify_init_ctor_preeval(). */
-static void gimplify_init_ctor_eval (tree, VEC(constructor_elt,gc) *,
+static void gimplify_init_ctor_eval (tree, vec<constructor_elt, va_gc> *,
gimple_seq *, bool);
static void
zeroed first. */
static void
-gimplify_init_ctor_eval (tree object, VEC(constructor_elt,gc) *elts,
+gimplify_init_ctor_eval (tree object, vec<constructor_elt, va_gc> *elts,
gimple_seq *pre_p, bool cleared)
{
tree array_elt_type = NULL;
optimize_compound_literals_in_ctor (tree orig_ctor)
{
tree ctor = orig_ctor;
- VEC(constructor_elt,gc) *elts = CONSTRUCTOR_ELTS (ctor);
- unsigned int idx, num = VEC_length (constructor_elt, elts);
+ vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (ctor);
+ unsigned int idx, num = vec_safe_length (elts);
for (idx = 0; idx < num; idx++)
{
- tree value = VEC_index (constructor_elt, elts, idx).value;
+ tree value = (*elts)[idx].value;
tree newval = value;
if (TREE_CODE (value) == CONSTRUCTOR)
newval = optimize_compound_literals_in_ctor (value);
if (ctor == orig_ctor)
{
ctor = copy_node (orig_ctor);
- CONSTRUCTOR_ELTS (ctor) = VEC_copy (constructor_elt, gc, elts);
+ CONSTRUCTOR_ELTS (ctor) = vec_safe_copy (elts);
elts = CONSTRUCTOR_ELTS (ctor);
}
- VEC_index (constructor_elt, elts, idx).value = newval;
+ (*elts)[idx].value = newval;
}
return ctor;
}
{
tree object, ctor, type;
enum gimplify_status ret;
- VEC(constructor_elt,gc) *elts;
+ vec<constructor_elt, va_gc> *elts;
gcc_assert (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == CONSTRUCTOR);
/* Aggregate types must lower constructors to initialization of
individual elements. The exception is that a CONSTRUCTOR node
with no elements indicates zero-initialization of the whole. */
- if (VEC_empty (constructor_elt, elts))
+ if (vec_safe_is_empty (elts))
{
if (notify_temp_creation)
return GS_OK;
if (TREE_THIS_VOLATILE (object)
&& !TREE_ADDRESSABLE (type)
&& num_nonzero_elements > 0
- && VEC_length (constructor_elt, elts) > 1)
+ && vec_safe_length (elts) > 1)
{
tree temp = create_tmp_var (TYPE_MAIN_VARIANT (type), NULL);
TREE_OPERAND (*expr_p, 0) = temp;
return GS_OK;
/* Extract the real and imaginary parts out of the ctor. */
- gcc_assert (VEC_length (constructor_elt, elts) == 2);
- r = VEC_index (constructor_elt, elts, 0).value;
- i = VEC_index (constructor_elt, elts, 1).value;
+ gcc_assert (elts->length () == 2);
+ r = (*elts)[0].value;
+ i = (*elts)[1].value;
if (r == NULL || i == NULL)
{
tree zero = build_zero_cst (TREE_TYPE (type));
/* Vector types use CONSTRUCTOR all the way through gimple
compilation as a general initializer. */
- FOR_EACH_VEC_ELT (constructor_elt, elts, ix, ce)
+ FOR_EACH_VEC_SAFE_ELT (elts, ix, ce)
{
enum gimplify_status tret;
tret = gimplify_expr (&ce->value, pre_p, post_p, is_gimple_val,
bool allows_mem, allows_reg, is_inout;
enum gimplify_status ret, tret;
gimple stmt;
- VEC(tree, gc) *inputs;
- VEC(tree, gc) *outputs;
- VEC(tree, gc) *clobbers;
- VEC(tree, gc) *labels;
+ vec<tree, va_gc> *inputs;
+ vec<tree, va_gc> *outputs;
+ vec<tree, va_gc> *clobbers;
+ vec<tree, va_gc> *labels;
tree link_next;
expr = *expr_p;
noutputs = list_length (ASM_OUTPUTS (expr));
oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *));
- inputs = outputs = clobbers = labels = NULL;
+ inputs = NULL;
+ outputs = NULL;
+ clobbers = NULL;
+ labels = NULL;
ret = GS_ALL_DONE;
link_next = NULL_TREE;
ret = tret;
}
- VEC_safe_push (tree, gc, outputs, link);
+ vec_safe_push (outputs, link);
TREE_CHAIN (link) = NULL_TREE;
if (is_inout)
}
TREE_CHAIN (link) = NULL_TREE;
- VEC_safe_push (tree, gc, inputs, link);
+ vec_safe_push (inputs, link);
}
for (link = ASM_CLOBBERS (expr); link; ++i, link = TREE_CHAIN (link))
- VEC_safe_push (tree, gc, clobbers, link);
+ vec_safe_push (clobbers, link);
for (link = ASM_LABELS (expr); link; ++i, link = TREE_CHAIN (link))
- VEC_safe_push (tree, gc, labels, link);
+ vec_safe_push (labels, link);
/* Do not add ASMs with errors to the gimple IL stream. */
if (ret != GS_ERROR)
&& needs_to_live_in_memory (temp)
&& flag_stack_reuse == SR_ALL)
{
- tree clobber = build_constructor (TREE_TYPE (temp), NULL);
+ tree clobber = build_constructor (TREE_TYPE (temp),
+ NULL);
TREE_THIS_VOLATILE (clobber) = true;
clobber = build2 (MODIFY_EXPR, TREE_TYPE (temp), temp, clobber);
if (cleanup)
}
-typedef char *char_p; /* For DEF_VEC_P. */
+typedef char *char_p;
-DEF_VEC_P(char_p);
-DEF_VEC_ALLOC_P(char_p,heap);
/* Return whether we should exclude FNDECL from instrumentation. */
static bool
flag_instrument_functions_exclude_p (tree fndecl)
{
- VEC(char_p,heap) *vec;
+ vec<char_p> *v;
- vec = (VEC(char_p,heap) *) flag_instrument_functions_exclude_functions;
- if (VEC_length (char_p, vec) > 0)
+ v = (vec<char_p> *) flag_instrument_functions_exclude_functions;
+ if (v && v->length () > 0)
{
const char *name;
int i;
char *s;
name = lang_hooks.decl_printable_name (fndecl, 0);
- FOR_EACH_VEC_ELT (char_p, vec, i, s)
+ FOR_EACH_VEC_ELT (*v, i, s)
if (strstr (name, s) != NULL)
return true;
}
- vec = (VEC(char_p,heap) *) flag_instrument_functions_exclude_files;
- if (VEC_length (char_p, vec) > 0)
+ v = (vec<char_p> *) flag_instrument_functions_exclude_files;
+ if (v && v->length () > 0)
{
const char *name;
int i;
char *s;
name = DECL_SOURCE_FILE (fndecl);
- FOR_EACH_VEC_ELT (char_p, vec, i, s)
+ FOR_EACH_VEC_ELT (*v, i, s)
if (strstr (name, s) != NULL)
return true;
}
+2012-11-17 Diego Novillo <dnovillo@google.com>
+
+ Adjust for new vec API (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
+
+ * go-lang.c: Use new vec API in vec.h.
+
2012-11-16 Ian Lance Taylor <iant@google.com>
* Make-lang.in (gccgo$(exeext)): Add + at start of command.
opts->x_flag_non_call_exceptions = 1;
}
-/* Infrastructure for a VEC of char * pointers. */
+/* Infrastructure for a vector of char * pointers. */
typedef const char *go_char_p;
-DEF_VEC_P(go_char_p);
-DEF_VEC_ALLOC_P(go_char_p, heap);
/* The list of directories to search after all the Go specific
directories have been searched. */
-static VEC(go_char_p, heap) *go_search_dirs;
+static vec<go_char_p> go_search_dirs;
/* Handle Go specific options. Return 0 if we didn't do anything. */
/* Search ARG too, but only after we've searched to Go
specific directories for all -L arguments. */
- VEC_safe_push (go_char_p, heap, go_search_dirs, arg);
+ go_search_dirs.safe_push (arg);
}
break;
gcc_assert (num_in_fnames > 0);
- FOR_EACH_VEC_ELT (go_char_p, go_search_dirs, ix, dir)
+ FOR_EACH_VEC_ELT (go_search_dirs, ix, dir)
go_add_search_path (dir);
- VEC_free (go_char_p, heap, go_search_dirs);
- go_search_dirs = NULL;
+ go_search_dirs.release ();
if (flag_excess_precision_cmdline == EXCESS_PRECISION_DEFAULT)
flag_excess_precision_cmdline = EXCESS_PRECISION_STANDARD;
/* A queue of decls to output. */
-static GTY(()) VEC(tree,gc) *queue;
+static GTY(()) vec<tree, va_gc> *queue;
/* A hash table of macros we have seen. */
|| DECL_IS_BUILTIN (decl)
|| DECL_NAME (decl) == NULL_TREE)
return;
- VEC_safe_push (tree, gc, queue, decl);
+ vec_safe_push (queue, decl);
}
/* A function decl. */
|| TREE_CODE (TYPE_NAME (TREE_TYPE (decl))) != IDENTIFIER_NODE)
&& TREE_CODE (TREE_TYPE (decl)) != ENUMERAL_TYPE)
return;
- VEC_safe_push (tree, gc, queue, decl);
+ vec_safe_push (queue, decl);
}
/* A container for the data we pass around when generating information
keyword_hash_init (&container);
- FOR_EACH_VEC_ELT (tree, queue, ix, decl)
+ FOR_EACH_VEC_SAFE_ELT (queue, ix, decl)
{
switch (TREE_CODE (decl))
{
htab_delete (container.keyword_hash);
obstack_free (&container.type_obstack, NULL);
- queue = NULL;
+ vec_free (queue);
if (fclose (go_dump_file) != 0)
error ("could not close Go dump file: %m");
char *buf = XALLOCAVEC (char, namelen + extlen);
FILE *fp;
- if (basic_block_info == NULL)
+ if (!basic_block_info)
return;
memcpy (buf, base, namelen);
#include "obstack.h"
#include "bitmap.h"
#include "vec.h"
-#include "vecprim.h"
#include "graphds.h"
/* Dumps graph G into F. */
of the graph (number of the restarts of DFS). */
int
-graphds_dfs (struct graph *g, int *qs, int nq, VEC (int, heap) **qt,
+graphds_dfs (struct graph *g, int *qs, int nq, vec<int> *qt,
bool forward, bitmap subgraph)
{
int i, tick = 0, v, comp = 0, top;
if (!e)
{
if (qt)
- VEC_safe_push (int, heap, *qt, v);
+ qt->safe_push (v);
g->vertices[v].post = tick++;
if (!top)
graphds_scc (struct graph *g, bitmap subgraph)
{
int *queue = XNEWVEC (int, g->n_vertices);
- VEC (int, heap) *postorder = NULL;
+ vec<int> postorder = vec<int>();
int nq, i, comp;
unsigned v;
bitmap_iterator bi;
}
graphds_dfs (g, queue, nq, &postorder, false, subgraph);
- gcc_assert (VEC_length (int, postorder) == (unsigned) nq);
+ gcc_assert (postorder.length () == (unsigned) nq);
for (i = 0; i < nq; i++)
- queue[i] = VEC_index (int, postorder, nq - i - 1);
+ queue[i] = postorder[nq - i - 1];
comp = graphds_dfs (g, queue, nq, NULL, true, subgraph);
free (queue);
- VEC_free (int, heap, postorder);
+ postorder.release ();
return comp;
}
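The 'vec<int> postorder = vec<int>();' initializer above value-initializes the vector to its empty state: a vl_ptr vec is a single pointer to the embedded vector, so a zero-initialized object is a valid empty vector and no heap allocation happens until the first push. The idiom:

  vec<int> postorder = vec<int>();  /* Valid and empty; no allocation yet.  */
  postorder.safe_push (0);          /* Storage is created on demand.  */
  postorder.release ();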
graphds_domtree (struct graph *g, int entry,
int *parent, int *son, int *brother)
{
- VEC (int, heap) *postorder = NULL;
+ vec<int> postorder = vec<int>();
int *marks = XCNEWVEC (int, g->n_vertices);
int mark = 1, i, v, idom;
bool changed = true;
brother[i] = -1;
}
graphds_dfs (g, &entry, 1, &postorder, true, NULL);
- gcc_assert (VEC_length (int, postorder) == (unsigned) g->n_vertices);
- gcc_assert (VEC_index (int, postorder, g->n_vertices - 1) == entry);
+ gcc_assert (postorder.length () == (unsigned) g->n_vertices);
+ gcc_assert (postorder[g->n_vertices - 1] == entry);
while (changed)
{
for (i = g->n_vertices - 2; i >= 0; i--)
{
- v = VEC_index (int, postorder, i);
+ v = postorder[i];
idom = -1;
for (e = g->vertices[v].pred; e; e = e->pred_next)
{
}
free (marks);
- VEC_free (int, heap, postorder);
+ postorder.release ();
for (i = 0; i < g->n_vertices; i++)
if (parent[i] != -1)
struct graph_edge *add_edge (struct graph *, int, int);
void identify_vertices (struct graph *, int, int);
int graphds_dfs (struct graph *, int *, int,
- VEC (int, heap) **, bool, bitmap);
+ vec<int> *, bool, bitmap);
int graphds_scc (struct graph *, bitmap);
void graphds_domtree (struct graph *, int, int *, int *, int *);
typedef void (*graphds_edge_callback) (struct graph *, struct graph_edge *);
{
int res = 0;
- FOR_EACH_VEC_ELT (lst_p, LST_SEQ (lst), i, l)
+ FOR_EACH_VEC_ELT (LST_SEQ (lst), i, l)
res += lst_do_strip_mine_loop (l, depth, stride);
return res;
|| !LST_LOOP_P (lst))
return false;
- FOR_EACH_VEC_ELT (lst_p, LST_SEQ (lst), i, l)
+ FOR_EACH_VEC_ELT (LST_SEQ (lst), i, l)
res += lst_do_strip_mine (l, stride);
depth = lst_depth (lst);
parameter in PARAMS. */
typedef struct ivs_params {
- VEC (tree, heap) *params, **newivs;
+ vec<tree> params, *newivs;
htab_t newivs_index, params_index;
sese region;
} *ivs_params_p;
{
int index;
- if (ip->params && ip->params_index)
+ if (ip->params.exists () && ip->params_index)
{
index = clast_name_to_index (name, ip->params_index);
if (index >= 0)
- return VEC_index (tree, ip->params, index);
+ return ip->params[index];
}
- gcc_assert (*(ip->newivs) && ip->newivs_index);
+ gcc_assert (ip->newivs && ip->newivs_index);
index = clast_name_to_index (name, ip->newivs_index);
gcc_assert (index >= 0);
- return VEC_index (tree, *(ip->newivs), index);
+ return (*ip->newivs)[index];
}
/* Returns the maximal precision type for expressions TYPE1 and TYPE2. */
{
bool found = false;
- if (ip->params && ip->params_index)
+ if (ip->params.exists () && ip->params_index)
found = clast_name_to_lb_ub (name, ip->params_index, bound_one, bound_two);
if (!found)
{
- gcc_assert (*(ip->newivs) && ip->newivs_index);
+ gcc_assert (ip->newivs && ip->newivs_index);
found = clast_name_to_lb_ub (name, ip->newivs_index, bound_one,
bound_two);
gcc_assert (found);
mpz_init (up);
compute_bounds_for_loop (stmt, low, up);
save_clast_name_index (ip->newivs_index, stmt->iterator,
- VEC_length (tree, *(ip->newivs)), level, low, up);
+ (*ip->newivs).length (), level, low, up);
mpz_clear (low);
mpz_clear (up);
- VEC_safe_push (tree, heap, *(ip->newivs), iv);
+ (*ip->newivs).safe_push (iv);
return loop;
}
induction variables of the loops around GBB in SESE. */
static void
-build_iv_mapping (VEC (tree, heap) *iv_map, struct clast_user_stmt *user_stmt,
+build_iv_mapping (vec<tree> iv_map, struct clast_user_stmt *user_stmt,
ivs_params_p ip)
{
struct clast_stmt *t;
tree new_name = clast_to_gcc_expression (type, expr, ip);
loop_p old_loop = gbb_loop_at_index (gbb, ip->region, depth);
- VEC_replace (tree, iv_map, old_loop->num, new_name);
+ iv_map[old_loop->num] = new_name;
}
mpz_clear (bound_one);
scop_p
get_loop_body_pbbs (loop_p loop, htab_t bb_pbb_mapping,
- VEC (poly_bb_p, heap) **pbbs)
+ vec<poly_bb_p> *pbbs)
{
unsigned i;
basic_block *bbs = get_loop_body_in_dom_order (loop);
continue;
scop = PBB_SCOP (pbb);
- VEC_safe_push (poly_bb_p, heap, *pbbs, pbb);
+ (*pbbs).safe_push (pbb);
}
free (bbs);
basic_block new_bb;
poly_bb_p pbb = (poly_bb_p) stmt->statement->usr;
gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
- VEC (tree, heap) *iv_map;
+ vec<tree> iv_map;
if (GBB_BB (gbb) == ENTRY_BLOCK_PTR)
return next_e;
nb_loops = number_of_loops ();
- iv_map = VEC_alloc (tree, heap, nb_loops);
+ iv_map.create (nb_loops);
for (i = 0; i < nb_loops; i++)
- VEC_quick_push (tree, iv_map, NULL_TREE);
+ iv_map.quick_push (NULL_TREE);
build_iv_mapping (iv_map, stmt, ip);
next_e = copy_bb_and_scalar_dependences (GBB_BB (gbb), ip->region,
next_e, iv_map, &gloog_error);
- VEC_free (tree, heap, iv_map);
+ iv_map.release ();
new_bb = next_e->src;
mark_bb_with_pbb (pbb, new_bb, bb_pbb_mapping);
}
save_clast_name_index (ip->newivs_index, stmt->LHS,
- VEC_length (tree, *(ip->newivs)), level,
+ (*ip->newivs).length (), level,
bound_one, bound_two);
- VEC_safe_push (tree, heap, *(ip->newivs), new_name);
+ (*ip->newivs).safe_push (new_name);
mpz_clear (bound_one);
mpz_clear (bound_two);
sese region = SCOP_REGION (scop);
int i;
int nb_iterators = scop_max_loop_depth (scop);
- int nb_parameters = VEC_length (tree, SESE_PARAMS (region));
+ int nb_parameters = SESE_PARAMS (region).length ();
mpz_t bound_one, bound_two;
mpz_init (bound_one);
for (i = 0; i < nb_parameters; i++)
{
- tree param = VEC_index (tree, SESE_PARAMS (region), i);
+ tree param = SESE_PARAMS (region)[i];
const char *name = get_name (param);
int len;
char *parameter;
CloogUnionDomain *union_domain =
cloog_union_domain_alloc (scop_nb_params (scop));
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
{
CloogDomain *domain;
CloogScattering *scattering;
poly_bb_p pbb;
int scattering_dims = 0;
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
{
int pbb_scatt_dims = isl_map_dim (pbb->transformed, isl_dim_out);
if (pbb_scatt_dims > scattering_dims)
bool
gloog (scop_p scop, htab_t bb_pbb_mapping)
{
- VEC (tree, heap) *newivs = VEC_alloc (tree, heap, 10);
+ vec<tree> newivs;
+ newivs.create (10);
loop_p context_loop;
sese region = SCOP_REGION (scop);
ifsese if_region = NULL;
htab_delete (newivs_index);
htab_delete (params_index);
- VEC_free (tree, heap, newivs);
+ newivs.release ();
cloog_clast_free (clast);
timevar_pop (TV_GRAPHITE_CODE_GEN);
/* Returns all the memory reads in SCOP. */
static isl_union_map *
-scop_get_reads (scop_p scop, VEC (poly_bb_p, heap) *pbbs)
+scop_get_reads (scop_p scop, vec<poly_bb_p> pbbs)
{
int i, j;
poly_bb_p pbb;
isl_space *space = isl_set_get_space (scop->context);
isl_union_map *res = isl_union_map_empty (space);
- FOR_EACH_VEC_ELT (poly_bb_p, pbbs, i, pbb)
+ FOR_EACH_VEC_ELT (pbbs, i, pbb)
{
- FOR_EACH_VEC_ELT (poly_dr_p, PBB_DRS (pbb), j, pdr)
+ FOR_EACH_VEC_ELT (PBB_DRS (pbb), j, pdr)
if (pdr_read_p (pdr))
res = isl_union_map_add_map (res, add_pdr_constraints (pdr, pbb));
}
/* Returns all the memory must writes in SCOP. */
static isl_union_map *
-scop_get_must_writes (scop_p scop, VEC (poly_bb_p, heap) *pbbs)
+scop_get_must_writes (scop_p scop, vec<poly_bb_p> pbbs)
{
int i, j;
poly_bb_p pbb;
isl_space *space = isl_set_get_space (scop->context);
isl_union_map *res = isl_union_map_empty (space);
- FOR_EACH_VEC_ELT (poly_bb_p, pbbs, i, pbb)
+ FOR_EACH_VEC_ELT (pbbs, i, pbb)
{
- FOR_EACH_VEC_ELT (poly_dr_p, PBB_DRS (pbb), j, pdr)
+ FOR_EACH_VEC_ELT (PBB_DRS (pbb), j, pdr)
if (pdr_write_p (pdr))
res = isl_union_map_add_map (res, add_pdr_constraints (pdr, pbb));
}
/* Returns all the memory may writes in SCOP. */
static isl_union_map *
-scop_get_may_writes (scop_p scop, VEC (poly_bb_p, heap) *pbbs)
+scop_get_may_writes (scop_p scop, vec<poly_bb_p> pbbs)
{
int i, j;
poly_bb_p pbb;
isl_space *space = isl_set_get_space (scop->context);
isl_union_map *res = isl_union_map_empty (space);
- FOR_EACH_VEC_ELT (poly_bb_p, pbbs, i, pbb)
+ FOR_EACH_VEC_ELT (pbbs, i, pbb)
{
- FOR_EACH_VEC_ELT (poly_dr_p, PBB_DRS (pbb), j, pdr)
+ FOR_EACH_VEC_ELT (PBB_DRS (pbb), j, pdr)
if (pdr_may_write_p (pdr))
res = isl_union_map_add_map (res, add_pdr_constraints (pdr, pbb));
}
/* Returns all the original schedules in SCOP. */
static isl_union_map *
-scop_get_original_schedule (scop_p scop, VEC (poly_bb_p, heap) *pbbs)
+scop_get_original_schedule (scop_p scop, vec<poly_bb_p> pbbs)
{
int i;
poly_bb_p pbb;
isl_space *space = isl_set_get_space (scop->context);
isl_union_map *res = isl_union_map_empty (space);
- FOR_EACH_VEC_ELT (poly_bb_p, pbbs, i, pbb)
+ FOR_EACH_VEC_ELT (pbbs, i, pbb)
{
res = isl_union_map_add_map
(res, constrain_domain (isl_map_copy (pbb->schedule),
/* Returns all the transformed schedules in SCOP. */
static isl_union_map *
-scop_get_transformed_schedule (scop_p scop, VEC (poly_bb_p, heap) *pbbs)
+scop_get_transformed_schedule (scop_p scop, vec<poly_bb_p> pbbs)
{
int i;
poly_bb_p pbb;
isl_space *space = isl_set_get_space (scop->context);
isl_union_map *res = isl_union_map_empty (space);
- FOR_EACH_VEC_ELT (poly_bb_p, pbbs, i, pbb)
+ FOR_EACH_VEC_ELT (pbbs, i, pbb)
{
res = isl_union_map_add_map
(res, constrain_domain (isl_map_copy (pbb->transformed),
static void
subtract_commutative_associative_deps (scop_p scop,
- VEC (poly_bb_p, heap) *pbbs,
+ vec<poly_bb_p> pbbs,
isl_union_map *original,
isl_union_map **must_raw,
isl_union_map **may_raw,
poly_dr_p pdr;
isl_space *space = isl_set_get_space (scop->context);
- FOR_EACH_VEC_ELT (poly_bb_p, pbbs, i, pbb)
+ FOR_EACH_VEC_ELT (pbbs, i, pbb)
if (PBB_IS_REDUCTION (pbb))
{
int res;
isl_union_map *x_must_waw_no_source;
isl_union_map *x_may_waw_no_source;
- FOR_EACH_VEC_ELT (poly_dr_p, PBB_DRS (pbb), j, pdr)
+ FOR_EACH_VEC_ELT (PBB_DRS (pbb), j, pdr)
if (pdr_read_p (pdr))
r = isl_union_map_add_map (r, add_pdr_constraints (pdr, pbb));
- FOR_EACH_VEC_ELT (poly_dr_p, PBB_DRS (pbb), j, pdr)
+ FOR_EACH_VEC_ELT (PBB_DRS (pbb), j, pdr)
if (pdr_write_p (pdr))
must_w = isl_union_map_add_map (must_w,
add_pdr_constraints (pdr, pbb));
- FOR_EACH_VEC_ELT (poly_dr_p, PBB_DRS (pbb), j, pdr)
+ FOR_EACH_VEC_ELT (PBB_DRS (pbb), j, pdr)
if (pdr_may_write_p (pdr))
may_w = isl_union_map_add_map (may_w,
add_pdr_constraints (pdr, pbb));
writes in PBBS. */
void
-compute_deps (scop_p scop, VEC (poly_bb_p, heap) *pbbs,
+compute_deps (scop_p scop, vec<poly_bb_p> pbbs,
isl_union_map **must_raw,
isl_union_map **may_raw,
isl_union_map **must_raw_no_source,
the body of the loop. */
static bool
-loop_level_carries_dependences (scop_p scop, VEC (poly_bb_p, heap) *body,
+loop_level_carries_dependences (scop_p scop, vec<poly_bb_p> body,
int depth)
{
isl_union_map *transform = scop_get_transformed_schedule (scop, body);
{
bool dependences;
scop_p scop;
- VEC (poly_bb_p, heap) *body = VEC_alloc (poly_bb_p, heap, 3);
+ vec<poly_bb_p> body;
+ body.create (3);
timevar_push (TV_GRAPHITE_DATA_DEPS);
scop = get_loop_body_pbbs (loop, bb_pbb_mapping, &body);
dependences = loop_level_carries_dependences (scop, body, depth);
- VEC_free (poly_bb_p, heap, body);
+ body.release ();
timevar_pop (TV_GRAPHITE_DATA_DEPS);
return !dependences;
mpz_init (s);
mpz_init (n);
- FOR_EACH_VEC_ELT (lst_p, LST_SEQ (loop), j, l)
+ FOR_EACH_VEC_ELT (LST_SEQ (loop), j, l)
if (LST_LOOP_P (l))
memory_strides_in_loop_1 (l, depth, strides);
else
- FOR_EACH_VEC_ELT (poly_dr_p, PBB_DRS (LST_PBB (l)), i, pdr)
+ FOR_EACH_VEC_ELT (PBB_DRS (LST_PBB (l)), i, pdr)
{
pdr_stride_in_loop (s, depth, pdr);
mpz_set_si (n, PDR_NB_REFS (pdr));
int i;
lst_p l;
- FOR_EACH_VEC_ELT (lst_p, LST_SEQ (lst), i, l)
+ FOR_EACH_VEC_ELT (LST_SEQ (lst), i, l)
lst_apply_interchange (l, depth1, depth2);
}
else
if (!LST_LOOP_P (loop1))
return false;
- return VEC_length (lst_p, LST_SEQ (loop1)) == 1
- && lst_perfectly_nested_p (VEC_index (lst_p, LST_SEQ (loop1), 0), loop2);
+ return LST_SEQ (loop1).length () == 1
+ && lst_perfectly_nested_p (LST_SEQ (loop1)[0], loop2);
}
/* Transform the loop nest between LOOP1 and LOOP2 into a perfect
gcc_assert (outer_father
&& LST_LOOP_P (outer_father)
- && LST_LOOP_P (VEC_index (lst_p, LST_SEQ (outer_father), outer))
+ && LST_LOOP_P (LST_SEQ (outer_father)[outer])
&& inner_father
&& LST_LOOP_P (inner_father));
- loop1 = VEC_index (lst_p, LST_SEQ (outer_father), outer);
+ loop1 = LST_SEQ (outer_father)[outer];
- FOR_EACH_VEC_ELT (lst_p, LST_SEQ (inner_father), inner, loop2)
+ FOR_EACH_VEC_ELT (LST_SEQ (inner_father), inner, loop2)
if (LST_LOOP_P (loop2)
&& (lst_try_interchange_loops (scop, loop1, loop2)
|| lst_interchange_select_inner (scop, outer_father, outer, loop2)))
while (lst_interchange_select_inner (scop, father, outer, loop))
{
res++;
- loop = VEC_index (lst_p, LST_SEQ (father), outer);
+ loop = LST_SEQ (father)[outer];
}
}
if (LST_LOOP_P (loop))
- FOR_EACH_VEC_ELT (lst_p, LST_SEQ (loop), i, l)
+ FOR_EACH_VEC_ELT (LST_SEQ (loop), i, l)
if (LST_LOOP_P (l))
res += lst_interchange_select_outer (scop, l, i);
isl_space *space = isl_set_get_space (scop->context);
isl_union_set *res = isl_union_set_empty (space);
- FOR_EACH_VEC_ELT (poly_bb_p, scop->bbs, i, pbb)
+ FOR_EACH_VEC_ELT (scop->bbs, i, pbb)
res = isl_union_set_add_set (res, isl_set_copy (pbb->domain));
return res;
int i;
poly_bb_p pbb;
- FOR_EACH_VEC_ELT (poly_bb_p, scop->bbs, i, pbb)
+ FOR_EACH_VEC_ELT (scop->bbs, i, pbb)
{
isl_set *domain = isl_set_copy (pbb->domain);
isl_union_map *stmtBand;
poly_bb_p pbb;
int max_nb_loops = 0;
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
{
int nb_loops = pbb_dim_iter_domain (pbb);
if (max_nb_loops < nb_loops)
int i;
poly_bb_p pbb;
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
print_scattering_function (file, pbb, verbosity);
}
int i;
poly_bb_p pbb;
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
print_iteration_domain (file, pbb, verbosity);
}
PDR_TYPE (pdr) = type;
PDR_CDR (pdr) = cdr;
PDR_NB_SUBSCRIPTS (pdr) = nb_subscripts;
- VEC_safe_push (poly_dr_p, heap, PBB_DRS (pbb), pdr);
+ PBB_DRS (pbb).safe_push (pdr);
}
/* Free polyhedral data reference PDR. */
PBB_TRANSFORMED (pbb) = NULL;
PBB_SAVED (pbb) = NULL;
PBB_ORIGINAL (pbb) = NULL;
- PBB_DRS (pbb) = VEC_alloc (poly_dr_p, heap, 3);
+ PBB_DRS (pbb).create (3);
PBB_IS_REDUCTION (pbb) = false;
GBB_PBB ((gimple_bb_p) black_box) = pbb;
isl_map_free (pbb->transformed);
isl_map_free (pbb->saved);
- if (PBB_DRS (pbb))
- FOR_EACH_VEC_ELT (poly_dr_p, PBB_DRS (pbb), i, pdr)
+ if (PBB_DRS (pbb).exists ())
+ FOR_EACH_VEC_ELT (PBB_DRS (pbb), i, pdr)
free_poly_dr (pdr);
- VEC_free (poly_dr_p, heap, PBB_DRS (pbb));
+ PBB_DRS (pbb).release ();
XDELETE (pbb);
}
scop->must_waw_no_source = NULL;
scop->may_waw_no_source = NULL;
scop_set_region (scop, region);
- SCOP_BBS (scop) = VEC_alloc (poly_bb_p, heap, 3);
+ SCOP_BBS (scop).create (3);
SCOP_ORIGINAL_SCHEDULE (scop) = NULL;
SCOP_TRANSFORMED_SCHEDULE (scop) = NULL;
SCOP_SAVED_SCHEDULE (scop) = NULL;
int i;
poly_bb_p pbb;
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
free_poly_bb (pbb);
- VEC_free (poly_bb_p, heap, SCOP_BBS (scop));
+ SCOP_BBS (scop).release ();
isl_set_free (scop->context);
isl_union_map_free (scop->must_raw);
{
int i;
gimple stmt;
- VEC (gimple, heap) *cases;
+ vec<gimple> cases;
if (!gbb)
return;
cases = GBB_CONDITION_CASES (gbb);
- if (VEC_empty (gimple, cases))
+ if (cases.is_empty ())
return;
fprintf (file, "# cases bb_%d (\n", GBB_BB (gbb)->index);
- FOR_EACH_VEC_ELT (gimple, cases, i, stmt)
+ FOR_EACH_VEC_ELT (cases, i, stmt)
{
fprintf (file, "# ");
print_gimple_stmt (file, stmt, 0, 0);
{
int i;
gimple stmt;
- VEC (gimple, heap) *conditions;
+ vec<gimple> conditions;
if (!gbb)
return;
conditions = GBB_CONDITIONS (gbb);
- if (VEC_empty (gimple, conditions))
+ if (conditions.is_empty ())
return;
fprintf (file, "# conditions bb_%d (\n", GBB_BB (gbb)->index);
- FOR_EACH_VEC_ELT (gimple, conditions, i, stmt)
+ FOR_EACH_VEC_ELT (conditions, i, stmt)
{
fprintf (file, "# ");
print_gimple_stmt (file, stmt, 0, 0);
int nb_reads = 0;
int nb_writes = 0;
- if (VEC_length (poly_dr_p, PBB_DRS (pbb)) == 0)
+ if (PBB_DRS (pbb).length () == 0)
{
if (verbosity > 0)
fprintf (file, "# Access informations are not provided\n");\
fprintf (file, "# Access informations are provided\n");
fprintf (file, "1\n");
- FOR_EACH_VEC_ELT (poly_dr_p, PBB_DRS (pbb), i, pdr)
+ FOR_EACH_VEC_ELT (PBB_DRS (pbb), i, pdr)
if (PDR_TYPE (pdr) == PDR_READ)
nb_reads++;
else
fprintf (file, "# Read access informations\n");
fprintf (file, "%d\n", nb_reads);
- FOR_EACH_VEC_ELT (poly_dr_p, PBB_DRS (pbb), i, pdr)
+ FOR_EACH_VEC_ELT (PBB_DRS (pbb), i, pdr)
if (PDR_TYPE (pdr) == PDR_READ)
print_pdr (file, pdr, verbosity);
fprintf (file, "# Write access informations\n");
fprintf (file, "%d\n", nb_writes);
- FOR_EACH_VEC_ELT (poly_dr_p, PBB_DRS (pbb), i, pdr)
+ FOR_EACH_VEC_ELT (PBB_DRS (pbb), i, pdr)
if (PDR_TYPE (pdr) != PDR_READ)
print_pdr (file, pdr, verbosity);
if (verbosity > 1)
fprintf (file, "# parameters (\n");
- if (VEC_length (tree, SESE_PARAMS (SCOP_REGION (scop))))
+ if (SESE_PARAMS (SCOP_REGION (scop)).length ())
{
if (verbosity > 0)
fprintf (file, "# Parameter names are provided\n");
fprintf (file, "0\n");
}
- FOR_EACH_VEC_ELT (tree, SESE_PARAMS (SCOP_REGION (scop)), i, t)
+ FOR_EACH_VEC_ELT (SESE_PARAMS (SCOP_REGION (scop)), i, t)
{
print_generic_expr (file, t, 0);
fprintf (file, " ");
if (verbosity > 0)
fprintf (file, "# Number of statements\n");
- fprintf (file, "%d\n",VEC_length (poly_bb_p, SCOP_BBS (scop)));
+ fprintf (file, "%d\n", SCOP_BBS (scop).length ());
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
print_pbb (file, pbb, verbosity);
if (verbosity > 1)
if (verbosity > 0)
fprintf (file, "# Number of statements\n");
- fprintf (file, "%d\n", VEC_length (poly_bb_p, SCOP_BBS (scop)));
+ fprintf (file, "%d\n", SCOP_BBS (scop).length ());
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
{
if (verbosity > 1)
fprintf (file, "# pbb_%d (\n", pbb_index (pbb));
if (verbosity > 0)
fprintf (file, "# Number of scattering functions\n");
- fprintf (file, "%d\n", VEC_length (poly_bb_p, SCOP_BBS (scop)));
+ fprintf (file, "%d\n", SCOP_BBS (scop).length ());
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
{
if (!(pbb->transformed || pbb->schedule))
continue;
/* Translates LOOP to LST. */
static lst_p
-loop_to_lst (loop_p loop, VEC (poly_bb_p, heap) *bbs, int *i)
+loop_to_lst (loop_p loop, vec<poly_bb_p> bbs, int *i)
{
poly_bb_p pbb;
- VEC (lst_p, heap) *seq = VEC_alloc (lst_p, heap, 5);
+ vec<lst_p> seq;
+ seq.create (5);
- for (; VEC_iterate (poly_bb_p, bbs, *i, pbb); (*i)++)
+ for (; bbs.iterate (*i, &pbb); (*i)++)
{
lst_p stmt;
basic_block bb = GBB_BB (PBB_BLACK_BOX (pbb));
return new_lst_loop (seq);
}
- VEC_safe_push (lst_p, heap, seq, stmt);
+ seq.safe_push (stmt);
}
return new_lst_loop (seq);
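Note the shape of the converted loop header: the member iterate takes a pointer to the induction element (bbs.iterate (*i, &pbb)) and returns false once the index is past the last element, where VEC_iterate took the element through the macro arguments. Sketch, with visit standing in for any per-element callee:

static void
visit_all (vec<poly_bb_p> bbs)
{
  int ix;
  poly_bb_p pbb;
  for (ix = 0; bbs.iterate (ix, &pbb); ix++)
    visit (pbb);  /* hypothetical callee */
}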
scop_to_lst (scop_p scop)
{
lst_p res;
- int i, n = VEC_length (poly_bb_p, SCOP_BBS (scop));
- VEC (lst_p, heap) *seq = VEC_alloc (lst_p, heap, 5);
+ int i, n = SCOP_BBS (scop).length ();
+ vec<lst_p> seq;
+ seq.create (5);
sese region = SCOP_REGION (scop);
for (i = 0; i < n; i++)
{
- poly_bb_p pbb = VEC_index (poly_bb_p, SCOP_BBS (scop), i);
+ poly_bb_p pbb = SCOP_BBS (scop)[i];
loop_p loop = outermost_loop_in_sese (region, GBB_BB (PBB_BLACK_BOX (pbb)));
if (loop_in_sese_p (loop, region))
else
res = new_lst_stmt (pbb);
- VEC_safe_push (lst_p, heap, seq, res);
+ seq.safe_push (res);
}
res = new_lst_loop (seq);
else
fprintf (file, "#(root");
- FOR_EACH_VEC_ELT (lst_p, LST_SEQ (lst), i, l)
+ FOR_EACH_VEC_ELT (LST_SEQ (lst), i, l)
print_lst (file, l, indent + 2);
fprintf (file, ")");
lst_depth (lst),
lst_dewey_number (lst));
- FOR_EACH_VEC_ELT (lst_p, LST_SEQ (lst), i, l)
+ FOR_EACH_VEC_ELT (LST_SEQ (lst), i, l)
dot_lst_1 (file, l);
}
/* Reverse the loop at level DEPTH for all the PBBS. */
isl_union_map *
-reverse_loop_for_pbbs (scop_p scop, VEC (poly_bb_p, heap) *pbbs, int depth)
+reverse_loop_for_pbbs (scop_p scop, vec<poly_bb_p> pbbs, int depth)
{
poly_bb_p pbb;
int i;
isl_space *space = isl_space_from_domain (isl_set_get_space (scop->context));
isl_union_map *res = isl_union_map_empty (space);
- for (i = 0; VEC_iterate (poly_bb_p, pbbs, i, pbb); i++)
+ for (i = 0; pbbs.iterate (i, &pbb); i++)
res = isl_union_map_add_map (res, reverse_loop_at_level (pbb, depth));
return res;
#define GCC_GRAPHITE_POLY_H
typedef struct poly_dr *poly_dr_p;
-DEF_VEC_P(poly_dr_p);
-DEF_VEC_ALLOC_P (poly_dr_p, heap);
typedef struct poly_bb *poly_bb_p;
-DEF_VEC_P(poly_bb_p);
-DEF_VEC_ALLOC_P (poly_bb_p, heap);
typedef struct scop *scop_p;
-DEF_VEC_P(scop_p);
-DEF_VEC_ALLOC_P (scop_p, heap);
typedef unsigned graphite_dim_t;
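The deleted DEF_VEC_P/DEF_VEC_ALLOC_P pairs have no replacement on purpose: vec<T> is an ordinary template, so a new element type needs no registration boilerplate. Sketch with an invented payload type:

typedef struct my_payload *my_payload_p;
static vec<my_payload_p> all_payloads;  /* no DEF_VEC_* needed */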
isl_set *domain;
/* The data references we access. */
- VEC (poly_dr_p, heap) *drs;
+ vec<poly_dr_p> drs;
/* The original scattering. */
poly_scattering_p _original;
int i;
poly_dr_p pdr;
- for (i = 0; VEC_iterate (poly_dr_p, PBB_DRS (pbb), i, pdr); i++)
+ for (i = 0; PBB_DRS (pbb).iterate (i, &pdr); i++)
if (PDR_TYPE (pdr) == PDR_WRITE)
res++;
}
typedef struct lst *lst_p;
-DEF_VEC_P(lst_p);
-DEF_VEC_ALLOC_P (lst_p, heap);
/* Loops and Statements Tree. */
struct lst {
contain a pointer to their polyhedral representation PBB. */
union {
poly_bb_p pbb;
- VEC (lst_p, heap) *seq;
+ vec<lst_p> seq;
} node;
};
/* Creates a new LST loop with SEQ. */
static inline lst_p
-new_lst_loop (VEC (lst_p, heap) *seq)
+new_lst_loop (vec<lst_p> seq)
{
lst_p lst = XNEW (struct lst);
int i;
mpz_init (LST_LOOP_MEMORY_STRIDES (lst));
mpz_set_si (LST_LOOP_MEMORY_STRIDES (lst), -1);
- for (i = 0; VEC_iterate (lst_p, seq, i, l); i++)
+ for (i = 0; seq.iterate (i, &l); i++)
LST_LOOP_FATHER (l) = lst;
return lst;
int i;
lst_p l;
- for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
+ for (i = 0; LST_SEQ (lst).iterate (i, &l); i++)
free_lst (l);
mpz_clear (LST_LOOP_MEMORY_STRIDES (lst));
- VEC_free (lst_p, heap, LST_SEQ (lst));
+ LST_SEQ (lst).release ();
}
free (lst);
{
int i;
lst_p l;
- VEC (lst_p, heap) *seq = VEC_alloc (lst_p, heap, 5);
+ vec<lst_p> seq;
+ seq.create (5);
- for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
- VEC_safe_push (lst_p, heap, seq, copy_lst (l));
+ for (i = 0; LST_SEQ (lst).iterate (i, &l); i++)
+ seq.safe_push (copy_lst (l));
return new_lst_loop (seq);
}
static inline void
lst_add_loop_under_loop (lst_p lst)
{
- VEC (lst_p, heap) *seq = VEC_alloc (lst_p, heap, 1);
+ vec<lst_p> seq;
+ seq.create (1);
lst_p l = new_lst_loop (LST_SEQ (lst));
gcc_assert (LST_LOOP_P (lst));
LST_LOOP_FATHER (l) = lst;
- VEC_quick_push (lst_p, seq, l);
+ seq.quick_push (l);
LST_SEQ (lst) = seq;
}
if (!LST_LOOP_FATHER (lst))
return 0;
- FOR_EACH_VEC_ELT (lst_p, LST_SEQ (LST_LOOP_FATHER (lst)), i, l)
+ FOR_EACH_VEC_ELT (LST_SEQ (LST_LOOP_FATHER (lst)), i, l)
if (l == lst)
return i;
return NULL;
father = LST_LOOP_FATHER (lst);
- return VEC_index (lst_p, LST_SEQ (father), dewey - 1);
+ return LST_SEQ (father)[dewey - 1];
}
/* Returns the successor of LST in the sequence of its loop father.
dewey = lst_dewey_number (lst);
father = LST_LOOP_FATHER (lst);
- if (VEC_length (lst_p, LST_SEQ (father)) == (unsigned) dewey + 1)
+ if (LST_SEQ (father).length () == (unsigned) dewey + 1)
return NULL;
- return VEC_index (lst_p, LST_SEQ (father), dewey + 1);
+ return LST_SEQ (father)[dewey + 1];
}
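VEC_index and VEC_length become operator[] and length (); length () returns unsigned, which is why the cast of dewey survives in the comparison above. Sketch (nth_child is illustrative):

static lst_p
nth_child (lst_p father, unsigned n)
{
  if (n >= LST_SEQ (father).length ())
    return NULL;
  return LST_SEQ (father)[n];
}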
if (!LST_LOOP_P (lst))
return (pbb == LST_PBB (lst)) ? lst : NULL;
- for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
+ for (i = 0; LST_SEQ (lst).iterate (i, &l); i++)
{
lst_p res = lst_find_pbb (l, pbb);
if (res)
if (!LST_LOOP_P (lst))
return lst;
- for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
+ for (i = 0; LST_SEQ (lst).iterate (i, &l); i++)
{
lst_p res = lst_find_first_pbb (l);
if (res)
if (!LST_LOOP_P (lst))
return lst;
- for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
+ for (i = 0; LST_SEQ (lst).iterate (i, &l); i++)
{
lst_p last = lst_find_last_pbb (l);
lst_create_nest (int nb_loops, lst_p lst)
{
lst_p res, loop;
- VEC (lst_p, heap) *seq;
+ vec<lst_p> seq;
if (nb_loops == 0)
return lst;
- seq = VEC_alloc (lst_p, heap, 1);
+ seq.create (1);
loop = lst_create_nest (nb_loops - 1, lst);
- VEC_quick_push (lst_p, seq, loop);
+ seq.quick_push (loop);
res = new_lst_loop (seq);
LST_LOOP_FATHER (loop) = res;
gcc_assert (lst && father && dewey >= 0);
- VEC_ordered_remove (lst_p, LST_SEQ (father), dewey);
+ LST_SEQ (father).ordered_remove (dewey);
LST_LOOP_FATHER (lst) = NULL;
}
gcc_assert (lst && father && dewey >= 0);
- VEC_ordered_remove (lst_p, LST_SEQ (father), dewey);
+ LST_SEQ (father).ordered_remove (dewey);
LST_LOOP_FATHER (lst) = NULL;
- FOR_EACH_VEC_ELT (lst_p, LST_SEQ (lst), i, l)
+ FOR_EACH_VEC_ELT (LST_SEQ (lst), i, l)
{
- VEC_safe_insert (lst_p, heap, LST_SEQ (father), dewey + i, l);
+ LST_SEQ (father).safe_insert (dewey + i, l);
LST_LOOP_FATHER (l) = father;
}
}
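The splice just above is the general shape for order-preserving edits: ordered_remove shifts the tail down and safe_insert shifts it up, reallocating if needed; both are O(n). A small sketch on a plain vec<int>:

static void
replace_by_two (vec<int> *v, unsigned ix, int a, int b)
{
  v->ordered_remove (ix);      /* keeps relative order of the rest */
  v->safe_insert (ix, a);      /* may reallocate *v */
  v->safe_insert (ix + 1, b);
}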
gcc_assert (lst && level >= 0 && dewey >= 0);
if (LST_LOOP_P (lst))
- for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
+ for (i = 0; LST_SEQ (lst).iterate (i, &l); i++)
lst_update_scattering_under (l, level, dewey);
else
pbb_update_scattering (LST_PBB (lst), level, dewey);
gcc_assert (lst && father && dewey >= 0 && level >= 0);
- for (i = dewey; VEC_iterate (lst_p, LST_SEQ (father), i, l); i++)
+ for (i = dewey; LST_SEQ (father).iterate (i, &l); i++)
lst_update_scattering_under (l, level, i);
}
if (LST_LOOP_P (lst))
- for (i = 0; VEC_iterate (lst_p, LST_SEQ (lst), i, l); i++)
+ for (i = 0; LST_SEQ (lst).iterate (i, &l); i++)
lst_update_scattering (l);
}
gcc_assert (lst2 && father && dewey >= 0);
- VEC_safe_insert (lst_p, heap, LST_SEQ (father), before ? dewey : dewey + 1,
- lst1);
+ LST_SEQ (father).safe_insert (before ? dewey : dewey + 1, lst1);
LST_LOOP_FATHER (lst1) = father;
}
father = LST_LOOP_FATHER (lst1);
dewey = lst_dewey_number (lst1);
LST_LOOP_FATHER (lst2) = father;
- VEC_replace (lst_p, LST_SEQ (father), dewey, lst2);
+ LST_SEQ (father)[dewey] = lst2;
}
/* Returns a copy of ROOT where LST has been replaced by a copy of the
{
int i;
lst_p l;
- VEC (lst_p, heap) *seq;
+ vec<lst_p> seq;
if (!root)
return NULL;
if (!LST_LOOP_P (root))
return new_lst_stmt (LST_PBB (root));
- seq = VEC_alloc (lst_p, heap, 5);
+ seq.create (5);
- for (i = 0; VEC_iterate (lst_p, LST_SEQ (root), i, l); i++)
+ for (i = 0; LST_SEQ (root).iterate (i, &l); i++)
if (l != lst)
- VEC_safe_push (lst_p, heap, seq, lst_substitute_3 (l, lst, a, b, c));
+ seq.safe_push (lst_substitute_3 (l, lst, a, b, c));
else
{
if (!lst_empty_p (a))
- VEC_safe_push (lst_p, heap, seq, copy_lst (a));
+ seq.safe_push (copy_lst (a));
if (!lst_empty_p (b))
- VEC_safe_push (lst_p, heap, seq, copy_lst (b));
+ seq.safe_push (copy_lst (b));
if (!lst_empty_p (c))
- VEC_safe_push (lst_p, heap, seq, copy_lst (c));
+ seq.safe_push (copy_lst (c));
}
return new_lst_loop (seq);
if (!loop || !LST_LOOP_P (loop))
return before;
- for (i = 0; VEC_iterate (lst_p, LST_SEQ (loop), i, l);)
+ for (i = 0; LST_SEQ (loop).iterate (i, &l);)
if (LST_LOOP_P (l))
{
before = lst_remove_all_before_including_pbb (l, pbb, before);
- if (VEC_length (lst_p, LST_SEQ (l)) == 0)
+ if (LST_SEQ (l).length () == 0)
{
- VEC_ordered_remove (lst_p, LST_SEQ (loop), i);
+ LST_SEQ (loop).ordered_remove (i);
free_lst (l);
}
else
if (LST_PBB (l) == pbb)
before = false;
- VEC_ordered_remove (lst_p, LST_SEQ (loop), i);
+ LST_SEQ (loop).ordered_remove (i);
free_lst (l);
}
else if (LST_PBB (l) == pbb)
{
before = true;
- VEC_ordered_remove (lst_p, LST_SEQ (loop), i);
+ LST_SEQ (loop).ordered_remove (i);
free_lst (l);
}
else
if (!loop || !LST_LOOP_P (loop))
return before;
- for (i = 0; VEC_iterate (lst_p, LST_SEQ (loop), i, l);)
+ for (i = 0; LST_SEQ (loop).iterate (i, &l);)
if (LST_LOOP_P (l))
{
before = lst_remove_all_before_excluding_pbb (l, pbb, before);
- if (VEC_length (lst_p, LST_SEQ (l)) == 0)
+ if (LST_SEQ (l).length () == 0)
{
- VEC_ordered_remove (lst_p, LST_SEQ (loop), i);
+ LST_SEQ (loop).ordered_remove (i);
free_lst (l);
continue;
}
{
if (before && LST_PBB (l) != pbb)
{
- VEC_ordered_remove (lst_p, LST_SEQ (loop), i);
+ LST_SEQ (loop).ordered_remove (i);
free_lst (l);
continue;
}
/* All the basic blocks in this scop that contain memory references
and that will be represented as statements in the polyhedral
representation. */
- VEC (poly_bb_p, heap) *bbs;
+ vec<poly_bb_p> bbs;
/* Original, transformed and saved schedules. */
lst_p original_schedule, transformed_schedule, saved_schedule;
extern scop_p new_scop (void *);
extern void free_scop (scop_p);
-extern void free_scops (VEC (scop_p, heap) *);
+extern void free_scops (vec<scop_p>);
extern void print_generated_program (FILE *, scop_p);
extern void debug_generated_program (scop_p);
extern void print_scattering_function (FILE *, poly_bb_p, int);
int i;
poly_bb_p pbb;
- for (i = 0; VEC_iterate (poly_bb_p, SCOP_BBS (scop), i, pbb); i++)
+ for (i = 0; SCOP_BBS (scop).iterate (i, &pbb); i++)
store_scattering_pbb (pbb);
store_lst_schedule (scop);
int i;
poly_bb_p pbb;
- for (i = 0; VEC_iterate (poly_bb_p, SCOP_BBS (scop), i, pbb); i++)
+ for (i = 0; SCOP_BBS (scop).iterate (i, &pbb); i++)
restore_scattering_pbb (pbb);
restore_lst_schedule (scop);
bool graphite_legal_transform (scop_p);
poly_bb_p find_pbb_via_hash (htab_t, basic_block);
bool loop_is_parallel_p (loop_p, htab_t, int);
-scop_p get_loop_body_pbbs (loop_p, htab_t, VEC (poly_bb_p, heap) **);
+scop_p get_loop_body_pbbs (loop_p, htab_t, vec<poly_bb_p> *);
isl_map *reverse_loop_at_level (poly_bb_p, int);
-isl_union_map *reverse_loop_for_pbbs (scop_p, VEC (poly_bb_p, heap) *, int);
+isl_union_map *reverse_loop_for_pbbs (scop_p, vec<poly_bb_p>, int);
__isl_give isl_union_map *extend_schedule (__isl_take isl_union_map *);
void
-compute_deps (scop_p scop, VEC (poly_bb_p, heap) *pbbs,
+compute_deps (scop_p scop, vec<poly_bb_p> pbbs,
isl_union_map **must_raw,
isl_union_map **may_raw,
isl_union_map **must_raw_no_source,
static gbb_type
get_bb_type (basic_block bb, struct loop *last_loop)
{
- VEC (basic_block, heap) *dom;
+ vec<basic_block> dom;
int nb_dom;
struct loop *loop = bb->loop_father;
}
dom = get_dominated_by (CDI_DOMINATORS, bb);
- nb_dom = VEC_length (basic_block, dom);
- VEC_free (basic_block, heap, dom);
+ nb_dom = dom.length ();
+ dom.release ();
if (nb_dom == 0)
return GBB_LAST;
basic_block exit;
} sd_region;
-DEF_VEC_O(sd_region);
-DEF_VEC_ALLOC_O(sd_region, heap);
/* Moves the scops from SOURCE to TARGET and clean up SOURCE. */
static void
-move_sd_regions (VEC (sd_region, heap) **source,
- VEC (sd_region, heap) **target)
+move_sd_regions (vec<sd_region> *source, vec<sd_region> *target)
{
sd_region *s;
int i;
- FOR_EACH_VEC_ELT (sd_region, *source, i, s)
- VEC_safe_push (sd_region, heap, *target, *s);
+ FOR_EACH_VEC_ELT (*source, i, s)
+ target->safe_push (*s);
- VEC_free (sd_region, heap, *source);
+ source->release ();
}
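move_sd_regions also shows the new parameter convention used throughout the patch: vec<T> is a one-word handle, so read-only callees take it by value while callees that push, insert or release take vec<T> *. Sketch of both sides (names invented):

static unsigned
count_entries (vec<sd_region> regions)  /* cheap copy of the handle */
{
  return regions.length ();
}

static void
append_entry (vec<sd_region> *regions, sd_region s)
{
  regions->safe_push (s);               /* may reallocate *regions */
}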
/* Something like "n * m" is not allowed. */
unsigned i;
int j;
bool res = true;
- VEC (data_reference_p, heap) *drs = NULL;
+ vec<data_reference_p> drs = vec<data_reference_p>();
loop_p outer;
for (outer = loop_containing_stmt (stmt); outer; outer = loop_outer (outer))
loop_containing_stmt (stmt),
stmt, &drs);
- FOR_EACH_VEC_ELT (data_reference_p, drs, j, dr)
+ FOR_EACH_VEC_ELT (drs, j, dr)
for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
if (!graphite_can_represent_scev (DR_ACCESS_FN (dr, i)))
{
}
free_data_refs (drs);
- drs = NULL;
+ drs.create (0);
}
done:
};
static struct scopdet_info build_scops_1 (basic_block, loop_p,
- VEC (sd_region, heap) **, loop_p);
+ vec<sd_region> *, loop_p);
/* Calculates BB infos. If bb is difficult we add valid SCoPs dominated by BB
to SCOPS. TYPE is the gbb_type of BB. */
static struct scopdet_info
scopdet_basic_block_info (basic_block bb, loop_p outermost_loop,
- VEC (sd_region, heap) **scops, gbb_type type)
+ vec<sd_region> *scops, gbb_type type)
{
loop_p loop = bb->loop_father;
struct scopdet_info result;
case GBB_LOOP_SING_EXIT_HEADER:
{
- VEC (sd_region, heap) *regions = VEC_alloc (sd_region, heap, 3);
+ vec<sd_region> regions;
+ regions.create (3);
struct scopdet_info sinfo;
edge exit_e = single_exit (loop);
{
outermost_loop = loop;
- VEC_free (sd_region, heap, regions);
- regions = VEC_alloc (sd_region, heap, 3);
+ regions.release ();
+ regions.create (3);
sinfo = scopdet_basic_block_info (bb, outermost_loop, scops, type);
sd_region open_scop;
open_scop.entry = bb;
open_scop.exit = exit_e->dest;
- VEC_safe_push (sd_region, heap, *scops, open_scop);
- VEC_free (sd_region, heap, regions);
+ scops->safe_push (open_scop);
+ regions.release ();
}
}
else
if (result.difficult)
move_sd_regions (&regions, scops);
else
- VEC_free (sd_region, heap, regions);
+ regions.release ();
}
break;
{
/* XXX: For now we just do not join loops with multiple exits. If the
exits lead to the same bb it may be possible to join the loop. */
- VEC (sd_region, heap) *regions = VEC_alloc (sd_region, heap, 3);
- VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ vec<sd_region> regions;
+ regions.create (3);
+ vec<edge> exits = get_loop_exit_edges (loop);
edge e;
int i;
build_scops_1 (bb, loop, &regions, loop);
- The exit destinations are dominated by another bb inside
the loop.
- The loop dominates bbs, that are not exit destinations. */
- FOR_EACH_VEC_ELT (edge, exits, i, e)
+ FOR_EACH_VEC_ELT (exits, i, e)
if (e->src->loop_father == loop
&& dominated_by_p (CDI_DOMINATORS, e->dest, e->src))
{
result.difficult = true;
result.exits = false;
move_sd_regions (&regions, scops);
- VEC_free (edge, heap, exits);
+ exits.release ();
break;
}
case GBB_COND_HEADER:
{
- VEC (sd_region, heap) *regions = VEC_alloc (sd_region, heap, 3);
+ vec<sd_region> regions;
+ regions.create (3);
struct scopdet_info sinfo;
- VEC (basic_block, heap) *dominated;
+ vec<basic_block> dominated;
int i;
basic_block dom_bb;
basic_block last_exit = NULL;
/* First check the successors of BB, and check if it is
possible to join the different branches. */
- FOR_EACH_VEC_ELT (edge, bb->succs, i, e)
+ FOR_EACH_VEC_SAFE_ELT (bb->succs, i, e)
{
/* Ignore loop exits. They will be handled after the loop
body. */
result.exit = last_exit;
- VEC_free (sd_region, heap, regions);
+ regions.release ();
break;
}
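One hunk above intentionally switches to the _SAFE_ variant: bb->succs is a GC-allocated vec<edge, va_gc> * that may be NULL, and FOR_EACH_VEC_SAFE_ELT treats a NULL pointer as the empty vector. Sketch:

static int
count_succs (basic_block bb)
{
  int i, n = 0;
  edge e;
  FOR_EACH_VEC_SAFE_ELT (bb->succs, i, e)
    if (e)
      n++;
  return n;
}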
/* Scan remaining bbs dominated by BB. */
dominated = get_dominated_by (CDI_DOMINATORS, bb);
- FOR_EACH_VEC_ELT (basic_block, dominated, i, dom_bb)
+ FOR_EACH_VEC_ELT (dominated, i, dom_bb)
{
/* Ignore loop exits: they will be handled after the loop body. */
if (loop_depth (find_common_loop (loop, dom_bb->loop_father))
result.exit = NULL;
}
- VEC_free (basic_block, heap, dominated);
+ dominated.release ();
result.next = NULL;
move_sd_regions (&regions, scops);
static struct scopdet_info
build_scops_1 (basic_block current, loop_p outermost_loop,
- VEC (sd_region, heap) **scops, loop_p loop)
+ vec<sd_region> *scops, loop_p loop)
{
bool in_scop = false;
sd_region open_scop;
else if (in_scop && (sinfo.exits || sinfo.difficult))
{
open_scop.exit = current;
- VEC_safe_push (sd_region, heap, *scops, open_scop);
+ scops->safe_push (open_scop);
in_scop = false;
}
{
open_scop.exit = sinfo.exit;
gcc_assert (open_scop.exit);
- VEC_safe_push (sd_region, heap, *scops, open_scop);
+ scops->safe_push (open_scop);
}
result.exit = sinfo.exit;
See comment in "create_single_exit_edge". */
static void
-unmark_exit_edges (VEC (sd_region, heap) *regions)
+unmark_exit_edges (vec<sd_region> regions)
{
int i;
sd_region *s;
edge e;
edge_iterator ei;
- FOR_EACH_VEC_ELT (sd_region, regions, i, s)
+ FOR_EACH_VEC_ELT (regions, i, s)
FOR_EACH_EDGE (e, ei, s->exit->preds)
e->aux = NULL;
}
See comment in "create_single_exit_edge". */
static void
-mark_exit_edges (VEC (sd_region, heap) *regions)
+mark_exit_edges (vec<sd_region> regions)
{
int i;
sd_region *s;
edge e;
edge_iterator ei;
- FOR_EACH_VEC_ELT (sd_region, regions, i, s)
+ FOR_EACH_VEC_ELT (regions, i, s)
FOR_EACH_EDGE (e, ei, s->exit->preds)
if (bb_in_sd_region (e->src, s))
e->aux = s;
/* Create for all scop regions a single entry and a single exit edge. */
static void
-create_sese_edges (VEC (sd_region, heap) *regions)
+create_sese_edges (vec<sd_region> regions)
{
int i;
sd_region *s;
- FOR_EACH_VEC_ELT (sd_region, regions, i, s)
+ FOR_EACH_VEC_ELT (regions, i, s)
create_single_entry_edge (s);
mark_exit_edges (regions);
- FOR_EACH_VEC_ELT (sd_region, regions, i, s)
+ FOR_EACH_VEC_ELT (regions, i, s)
/* Don't handle multiple edges exiting the function. */
if (!find_single_exit_edge (s)
&& s->exit != EXIT_BLOCK_PTR)
/* Create graphite SCoPs from an array of scop detection REGIONS. */
static void
-build_graphite_scops (VEC (sd_region, heap) *regions,
- VEC (scop_p, heap) **scops)
+build_graphite_scops (vec<sd_region> regions,
+ vec<scop_p> *scops)
{
int i;
sd_region *s;
- FOR_EACH_VEC_ELT (sd_region, regions, i, s)
+ FOR_EACH_VEC_ELT (regions, i, s)
{
edge entry = find_single_entry_edge (s);
edge exit = find_single_exit_edge (s);
continue;
scop = new_scop (new_sese (entry, exit));
- VEC_safe_push (scop_p, heap, *scops, scop);
+ scops->safe_push (scop);
/* Are there overlapping SCoPs? */
#ifdef ENABLE_CHECKING
int j;
sd_region *s2;
- FOR_EACH_VEC_ELT (sd_region, regions, j, s2)
+ FOR_EACH_VEC_ELT (regions, j, s2)
if (s != s2)
gcc_assert (!bb_in_sd_region (s->entry, s2));
}
/* Print statistics for SCOPS to FILE. */
static void
-print_graphite_statistics (FILE* file, VEC (scop_p, heap) *scops)
+print_graphite_statistics (FILE* file, vec<scop_p> scops)
{
int i;
scop_p scop;
- FOR_EACH_VEC_ELT (scop_p, scops, i, scop)
+ FOR_EACH_VEC_ELT (scops, i, scop)
print_graphite_scop_statistics (file, scop);
}
SCoP frontiers. */
static void
-limit_scops (VEC (scop_p, heap) **scops)
+limit_scops (vec<scop_p> *scops)
{
- VEC (sd_region, heap) *regions = VEC_alloc (sd_region, heap, 3);
+ vec<sd_region> regions;
+ regions.create (3);
int i;
scop_p scop;
- FOR_EACH_VEC_ELT (scop_p, *scops, i, scop)
+ FOR_EACH_VEC_ELT (*scops, i, scop)
{
int j;
loop_p loop;
sese region = SCOP_REGION (scop);
build_sese_loop_nests (region);
- FOR_EACH_VEC_ELT (loop_p, SESE_LOOP_NEST (region), j, loop)
+ FOR_EACH_VEC_ELT (SESE_LOOP_NEST (region), j, loop)
if (!loop_in_sese_p (loop_outer (loop), region)
&& single_exit (loop))
{
&& contains_only_close_phi_nodes (open_scop.exit))
open_scop.exit = single_succ_edge (open_scop.exit)->dest;
- VEC_safe_push (sd_region, heap, regions, open_scop);
+ regions.safe_push (open_scop);
}
}
free_scops (*scops);
- *scops = VEC_alloc (scop_p, heap, 3);
+ scops->create (3);
create_sese_edges (regions);
build_graphite_scops (regions, scops);
- VEC_free (sd_region, heap, regions);
+ regions.release ();
}
/* Returns true when P1 and P2 are close phis with the same
them to SCOPS. */
void
-build_scops (VEC (scop_p, heap) **scops)
+build_scops (vec<scop_p> *scops)
{
struct loop *loop = current_loops->tree_root;
- VEC (sd_region, heap) *regions = VEC_alloc (sd_region, heap, 3);
+ vec<sd_region> regions;
+ regions.create (3);
canonicalize_loop_closed_ssa_form ();
build_scops_1 (single_succ (ENTRY_BLOCK_PTR), ENTRY_BLOCK_PTR->loop_father,
print_graphite_statistics (dump_file, *scops);
limit_scops (scops);
- VEC_free (sd_region, heap, regions);
+ regions.release ();
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\nnumber of SCoPs: %d\n",
- VEC_length (scop_p, *scops));
+ scops ? scops->length () : 0);
}
/* Pretty print to FILE all the SCoPs in DOT format and mark them with
exit nodes of the SCOP. These are not part of SCoP. */
static void
-dot_all_scops_1 (FILE *file, VEC (scop_p, heap) *scops)
+dot_all_scops_1 (FILE *file, vec<scop_p> scops)
{
basic_block bb;
edge e;
fprintf (file, "CELLSPACING=\"0\">\n");
/* Select color for SCoP. */
- FOR_EACH_VEC_ELT (scop_p, scops, i, scop)
+ FOR_EACH_VEC_ELT (scops, i, scop)
{
sese region = SCOP_REGION (scop);
if (bb_in_sese_p (bb, region)
/* Display all SCoPs using dotty. */
DEBUG_FUNCTION void
-dot_all_scops (VEC (scop_p, heap) *scops)
+dot_all_scops (vec<scop_p> scops)
{
/* When debugging, enable the following code. This cannot be used
in production compilers because it calls "system". */
DEBUG_FUNCTION void
dot_scop (scop_p scop)
{
- VEC (scop_p, heap) *scops = NULL;
+ vec<scop_p> scops = vec<scop_p>();
if (scop)
- VEC_safe_push (scop_p, heap, scops, scop);
+ scops.safe_push (scop);
/* When debugging, enable the following code. This cannot be used
in production compilers because it calls "system". */
dot_all_scops_1 (stderr, scops);
#endif
- VEC_free (scop_p, heap, scops);
+ scops.release ();
}
#endif
<http://www.gnu.org/licenses/>. */
-extern void build_scops (VEC (scop_p, heap) **);
-extern void dot_all_scops (VEC (scop_p, heap) *);
+extern void build_scops (vec<scop_p> *);
+extern void dot_all_scops (vec<scop_p>);
extern void dot_scop (scop_p);
/* Store the GRAPHITE representation of BB. */
static gimple_bb_p
-new_gimple_bb (basic_block bb, VEC (data_reference_p, heap) *drs)
+new_gimple_bb (basic_block bb, vec<data_reference_p> drs)
{
struct gimple_bb *gbb;
bb->aux = gbb;
GBB_BB (gbb) = bb;
GBB_DATA_REFS (gbb) = drs;
- GBB_CONDITIONS (gbb) = NULL;
- GBB_CONDITION_CASES (gbb) = NULL;
+ GBB_CONDITIONS (gbb).create (0);
+ GBB_CONDITION_CASES (gbb).create (0);
return gbb;
}
static void
-free_data_refs_aux (VEC (data_reference_p, heap) *datarefs)
+free_data_refs_aux (vec<data_reference_p> datarefs)
{
unsigned int i;
struct data_reference *dr;
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
if (dr->aux)
{
base_alias_pair *bap = (base_alias_pair *)(dr->aux);
free_data_refs_aux (GBB_DATA_REFS (gbb));
free_data_refs (GBB_DATA_REFS (gbb));
- VEC_free (gimple, heap, GBB_CONDITIONS (gbb));
- VEC_free (gimple, heap, GBB_CONDITION_CASES (gbb));
+ GBB_CONDITIONS (gbb).release ();
+ GBB_CONDITION_CASES (gbb).release ();
GBB_BB (gbb)->aux = 0;
XDELETE (gbb);
}
int i;
poly_bb_p pbb;
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
free_gimple_bb (PBB_BLACK_BOX (pbb));
}
/* Deletes all scops in SCOPS. */
void
-free_scops (VEC (scop_p, heap) *scops)
+free_scops (vec<scop_p> scops)
{
int i;
scop_p scop;
- FOR_EACH_VEC_ELT (scop_p, scops, i, scop)
+ FOR_EACH_VEC_ELT (scops, i, scop)
{
remove_gbbs_in_scop (scop);
free_sese (SCOP_REGION (scop));
free_scop (scop);
}
- VEC_free (scop_p, heap, scops);
+ scops.release ();
}
/* Same as outermost_loop_in_sese, returns the outermost loop
static gimple_bb_p
try_generate_gimple_bb (scop_p scop, basic_block bb)
{
- VEC (data_reference_p, heap) *drs = VEC_alloc (data_reference_p, heap, 5);
+ vec<data_reference_p> drs;
+ drs.create (5);
sese region = SCOP_REGION (scop);
loop_p nest = outermost_loop_in_sese_1 (region, bb);
gimple_stmt_iterator gsi;
a deepest loop level. */
static void
-graphite_sort_dominated_info (VEC (basic_block, heap) *dom)
+graphite_sort_dominated_info (vec<basic_block> dom)
{
- VEC_qsort (basic_block, dom, compare_bb_depths);
+ dom.qsort (compare_bb_depths);
}
/* Recursive helper function for build_scops_bbs. */
build_scop_bbs_1 (scop_p scop, sbitmap visited, basic_block bb)
{
sese region = SCOP_REGION (scop);
- VEC (basic_block, heap) *dom;
+ vec<basic_block> dom;
poly_bb_p pbb;
if (bitmap_bit_p (visited, bb->index)
return;
pbb = new_poly_bb (scop, try_generate_gimple_bb (scop, bb));
- VEC_safe_push (poly_bb_p, heap, SCOP_BBS (scop), pbb);
+ SCOP_BBS (scop).safe_push (pbb);
bitmap_set_bit (visited, bb->index);
dom = get_dominated_by (CDI_DOMINATORS, bb);
- if (dom == NULL)
+ if (!dom.exists ())
return;
graphite_sort_dominated_info (dom);
- while (!VEC_empty (basic_block, dom))
+ while (!dom.is_empty ())
{
int i;
basic_block dom_bb;
- FOR_EACH_VEC_ELT (basic_block, dom, i, dom_bb)
+ FOR_EACH_VEC_ELT (dom, i, dom_bb)
if (all_non_dominated_preds_marked_p (dom_bb, visited))
{
build_scop_bbs_1 (scop, visited, dom_bb);
- VEC_unordered_remove (basic_block, dom, i);
+ dom.unordered_remove (i);
break;
}
}
- VEC_free (basic_block, heap, dom);
+ dom.release ();
}
/* Gather the basic blocks belonging to the SCOP. */
incremented before copying. */
static_sched = isl_aff_add_coefficient_si (static_sched, isl_dim_in, 0, -1);
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
{
gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
int prefix;
gcc_assert (TREE_CODE (name) == SSA_NAME);
- FOR_EACH_VEC_ELT (tree, SESE_PARAMS (region), i, p)
+ FOR_EACH_VEC_ELT (SESE_PARAMS (region), i, p)
if (p == name)
return i;
gcc_assert (SESE_ADD_PARAMS (region));
- i = VEC_length (tree, SESE_PARAMS (region));
- VEC_safe_push (tree, heap, SESE_PARAMS (region), name);
+ i = SESE_PARAMS (region).length ();
+ SESE_PARAMS (region).safe_push (name);
return i;
}
loop_p loop = GBB_BB (gbb)->loop_father;
/* Find parameters in the access functions of data references. */
- FOR_EACH_VEC_ELT (data_reference_p, GBB_DATA_REFS (gbb), i, dr)
+ FOR_EACH_VEC_ELT (GBB_DATA_REFS (gbb), i, dr)
for (j = 0; j < DR_NUM_DIMENSIONS (dr); j++)
scan_tree_for_params (region, DR_ACCESS_FN (dr, j));
/* Find parameters in conditional statements. */
- FOR_EACH_VEC_ELT (gimple, GBB_CONDITIONS (gbb), i, stmt)
+ FOR_EACH_VEC_ELT (GBB_CONDITIONS (gbb), i, stmt)
{
tree lhs = scalar_evolution_in_region (region, loop,
gimple_cond_lhs (stmt));
int nbp;
/* Find the parameters used in the loop bounds. */
- FOR_EACH_VEC_ELT (loop_p, SESE_LOOP_NEST (region), i, loop)
+ FOR_EACH_VEC_ELT (SESE_LOOP_NEST (region), i, loop)
{
tree nb_iters = number_of_latch_executions (loop);
}
/* Find the parameters used in data accesses. */
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
find_params_in_bb (region, PBB_BLACK_BOX (pbb));
nbp = sese_nb_params (region);
tree e;
isl_space *space = isl_space_set_alloc (scop->ctx, nbp, 0);
- FOR_EACH_VEC_ELT (tree, SESE_PARAMS (region), i, e)
+ FOR_EACH_VEC_ELT (SESE_PARAMS (region), i, e)
space = isl_space_set_dim_id (space, isl_dim_param, i,
isl_id_for_ssa_name (scop, e));
gimple stmt;
gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
- if (VEC_empty (gimple, GBB_CONDITIONS (gbb)))
+ if (GBB_CONDITIONS (gbb).is_empty ())
return;
- FOR_EACH_VEC_ELT (gimple, GBB_CONDITIONS (gbb), i, stmt)
+ FOR_EACH_VEC_ELT (GBB_CONDITIONS (gbb), i, stmt)
switch (gimple_code (stmt))
{
case GIMPLE_COND:
enum tree_code code = gimple_cond_code (stmt);
/* The conditions for ELSE-branches are inverted. */
- if (!VEC_index (gimple, GBB_CONDITION_CASES (gbb), i))
+ if (!GBB_CONDITION_CASES (gbb)[i])
code = invert_tree_comparison (code, false);
add_condition_to_pbb (pbb, stmt, code);
int i;
poly_bb_p pbb;
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
add_conditions_to_domain (pbb);
}
struct bsc
{
- VEC (gimple, heap) **conditions, **cases;
+ vec<gimple> *conditions, *cases;
sese region;
};
basic_block bb)
{
struct bsc *data = (struct bsc *) dw_data->global_data;
- VEC (gimple, heap) **conditions = data->conditions;
- VEC (gimple, heap) **cases = data->cases;
+ vec<gimple> *conditions = data->conditions;
+ vec<gimple> *cases = data->cases;
gimple_bb_p gbb;
gimple stmt;
{
edge e = single_pred_edge (bb);
- VEC_safe_push (gimple, heap, *conditions, stmt);
+ conditions->safe_push (stmt);
if (e->flags & EDGE_TRUE_VALUE)
- VEC_safe_push (gimple, heap, *cases, stmt);
+ cases->safe_push (stmt);
else
- VEC_safe_push (gimple, heap, *cases, NULL);
+ cases->safe_push (NULL);
}
gbb = gbb_from_bb (bb);
if (gbb)
{
- GBB_CONDITIONS (gbb) = VEC_copy (gimple, heap, *conditions);
- GBB_CONDITION_CASES (gbb) = VEC_copy (gimple, heap, *cases);
+ GBB_CONDITIONS (gbb) = conditions->copy ();
+ GBB_CONDITION_CASES (gbb) = cases->copy ();
}
}
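The copy () calls above replace VEC_copy: the result is a fresh heap allocation whose element array is independent of the source, and the new owner must release () it eventually (here free_gimple_bb does, via the release () calls added earlier). Sketch:

static void
copy_demo (vec<gimple> src)
{
  vec<gimple> snapshot = src.copy ();  /* independent element array */
  /* ... use snapshot without affecting src ...  */
  snapshot.release ();
}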
basic_block bb)
{
struct bsc *data = (struct bsc *) dw_data->global_data;
- VEC (gimple, heap) **conditions = data->conditions;
- VEC (gimple, heap) **cases = data->cases;
+ vec<gimple> *conditions = data->conditions;
+ vec<gimple> *cases = data->cases;
if (!bb_in_sese_p (bb, data->region))
return;
if (single_pred_cond_non_loop_exit (bb))
{
- VEC_pop (gimple, *conditions);
- VEC_pop (gimple, *cases);
+ conditions->pop ();
+ cases->pop ();
}
}
build_sese_conditions (sese region)
{
struct dom_walk_data walk_data;
- VEC (gimple, heap) *conditions = VEC_alloc (gimple, heap, 3);
- VEC (gimple, heap) *cases = VEC_alloc (gimple, heap, 3);
+ vec<gimple> conditions;
+ conditions.create (3);
+ vec<gimple> cases;
+ cases.create (3);
struct bsc data;
data.conditions = &conditions;
walk_dominator_tree (&walk_data, SESE_ENTRY_BB (region));
fini_walk_dominator_tree (&walk_data);
- VEC_free (gimple, heap, conditions);
- VEC_free (gimple, heap, cases);
+ conditions.release ();
+ cases.release ();
}
/* Add constraints on the possible values of parameter P from the type
static void
add_param_constraints (scop_p scop, graphite_dim_t p)
{
- tree parameter = VEC_index (tree, SESE_PARAMS (SCOP_REGION (scop)), p);
+ tree parameter = SESE_PARAMS (SCOP_REGION (scop))[p];
tree type = TREE_TYPE (parameter);
tree lb = NULL_TREE;
tree ub = NULL_TREE;
int nb_loops = number_of_loops ();
isl_set **doms = XCNEWVEC (isl_set *, nb_loops);
- FOR_EACH_VEC_ELT (loop_p, SESE_LOOP_NEST (region), i, loop)
+ FOR_EACH_VEC_ELT (SESE_LOOP_NEST (region), i, loop)
if (!loop_in_sese_p (loop_outer (loop), region))
build_loop_iteration_domains (scop, loop, 0,
isl_set_copy (scop->context), doms);
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
{
loop = pbb_loop (pbb);
static inline bool
write_alias_graph_to_ascii_dimacs (FILE *file, char *comment,
- VEC (data_reference_p, heap) *drs)
+ vec<data_reference_p> drs)
{
- int num_vertex = VEC_length (data_reference_p, drs);
+ int num_vertex = drs.length ();
int edge_num = 0;
data_reference_p dr1, dr2;
int i, j;
if (num_vertex == 0)
return true;
- FOR_EACH_VEC_ELT (data_reference_p, drs, i, dr1)
- for (j = i + 1; VEC_iterate (data_reference_p, drs, j, dr2); j++)
+ FOR_EACH_VEC_ELT (drs, i, dr1)
+ for (j = i + 1; drs.iterate (j, &dr2); j++)
if (dr_may_alias_p (dr1, dr2, true))
edge_num++;
fprintf (file, "p edge %d %d\n", num_vertex, edge_num);
- FOR_EACH_VEC_ELT (data_reference_p, drs, i, dr1)
- for (j = i + 1; VEC_iterate (data_reference_p, drs, j, dr2); j++)
+ FOR_EACH_VEC_ELT (drs, i, dr1)
+ for (j = i + 1; drs.iterate (j, &dr2); j++)
if (dr_may_alias_p (dr1, dr2, true))
fprintf (file, "e %d %d\n", i + 1, j + 1);
static inline bool
write_alias_graph_to_ascii_dot (FILE *file, char *comment,
- VEC (data_reference_p, heap) *drs)
+ vec<data_reference_p> drs)
{
- int num_vertex = VEC_length (data_reference_p, drs);
+ int num_vertex = drs.length ();
data_reference_p dr1, dr2;
int i, j;
fprintf (file, "c %s\n", comment);
/* First print all the vertices. */
- FOR_EACH_VEC_ELT (data_reference_p, drs, i, dr1)
+ FOR_EACH_VEC_ELT (drs, i, dr1)
fprintf (file, "n%d;\n", i);
- FOR_EACH_VEC_ELT (data_reference_p, drs, i, dr1)
- for (j = i + 1; VEC_iterate (data_reference_p, drs, j, dr2); j++)
+ FOR_EACH_VEC_ELT (drs, i, dr1)
+ for (j = i + 1; drs.iterate (j, &dr2); j++)
if (dr_may_alias_p (dr1, dr2, true))
fprintf (file, "n%d n%d\n", i, j);
static inline bool
write_alias_graph_to_ascii_ecc (FILE *file, char *comment,
- VEC (data_reference_p, heap) *drs)
+ vec<data_reference_p> drs)
{
- int num_vertex = VEC_length (data_reference_p, drs);
+ int num_vertex = drs.length ();
data_reference_p dr1, dr2;
int i, j;
if (comment)
fprintf (file, "c %s\n", comment);
- FOR_EACH_VEC_ELT (data_reference_p, drs, i, dr1)
- for (j = i + 1; VEC_iterate (data_reference_p, drs, j, dr2); j++)
+ FOR_EACH_VEC_ELT (drs, i, dr1)
+ for (j = i + 1; drs.iterate (j, &dr2); j++)
if (dr_may_alias_p (dr1, dr2, true))
fprintf (file, "%d %d\n", i, j);
true (1) if the above test is true, and false (0) otherwise. */
static int
-build_alias_set_optimal_p (VEC (data_reference_p, heap) *drs)
+build_alias_set_optimal_p (vec<data_reference_p> drs)
{
- int num_vertices = VEC_length (data_reference_p, drs);
+ int num_vertices = drs.length ();
struct graph *g = new_graph (num_vertices);
data_reference_p dr1, dr2;
int i, j;
int this_component_is_clique;
int all_components_are_cliques = 1;
- FOR_EACH_VEC_ELT (data_reference_p, drs, i, dr1)
- for (j = i+1; VEC_iterate (data_reference_p, drs, j, dr2); j++)
+ FOR_EACH_VEC_ELT (drs, i, dr1)
+ for (j = i+1; drs.iterate (j, &dr2); j++)
if (dr_may_alias_p (dr1, dr2, true))
{
add_edge (g, i, j);
NULL, true, NULL);
for (i = 0; i < g->n_vertices; i++)
{
- data_reference_p dr = VEC_index (data_reference_p, drs, i);
+ data_reference_p dr = drs[i];
base_alias_pair *bap;
gcc_assert (dr->aux);
/* Group each data reference in DRS with its base object set num. */
static void
-build_base_obj_set_for_drs (VEC (data_reference_p, heap) *drs)
+build_base_obj_set_for_drs (vec<data_reference_p> drs)
{
- int num_vertex = VEC_length (data_reference_p, drs);
+ int num_vertex = drs.length ();
struct graph *g = new_graph (num_vertex);
data_reference_p dr1, dr2;
int i, j;
int *queue;
- FOR_EACH_VEC_ELT (data_reference_p, drs, i, dr1)
- for (j = i + 1; VEC_iterate (data_reference_p, drs, j, dr2); j++)
+ FOR_EACH_VEC_ELT (drs, i, dr1)
+ for (j = i + 1; drs.iterate (j, &dr2); j++)
if (dr_same_base_object_p (dr1, dr2))
{
add_edge (g, i, j);
for (i = 0; i < g->n_vertices; i++)
{
- data_reference_p dr = VEC_index (data_reference_p, drs, i);
+ data_reference_p dr = drs[i];
base_alias_pair *bap;
gcc_assert (dr->aux);
{
int j;
data_reference_p dr;
- VEC (data_reference_p, heap) *gbb_drs = GBB_DATA_REFS (PBB_BLACK_BOX (pbb));
+ vec<data_reference_p> gbb_drs = GBB_DATA_REFS (PBB_BLACK_BOX (pbb));
- FOR_EACH_VEC_ELT (data_reference_p, gbb_drs, j, dr)
+ FOR_EACH_VEC_ELT (gbb_drs, j, dr)
build_poly_dr (dr, pbb);
}
/* Dump to file the alias graphs for the data references in DRS. */
static void
-dump_alias_graphs (VEC (data_reference_p, heap) *drs)
+dump_alias_graphs (vec<data_reference_p> drs)
{
char comment[100];
FILE *file_dimacs, *file_ecc, *file_dot;
int i, j;
poly_bb_p pbb;
data_reference_p dr;
- VEC (data_reference_p, heap) *drs = VEC_alloc (data_reference_p, heap, 3);
+ vec<data_reference_p> drs;
+ drs.create (3);
/* Remove all the PBBs that do not have data references: these basic
blocks are not handled in the polyhedral representation. */
- for (i = 0; VEC_iterate (poly_bb_p, SCOP_BBS (scop), i, pbb); i++)
- if (VEC_empty (data_reference_p, GBB_DATA_REFS (PBB_BLACK_BOX (pbb))))
+ for (i = 0; SCOP_BBS (scop).iterate (i, &pbb); i++)
+ if (GBB_DATA_REFS (PBB_BLACK_BOX (pbb)).is_empty ())
{
free_gimple_bb (PBB_BLACK_BOX (pbb));
free_poly_bb (pbb);
- VEC_ordered_remove (poly_bb_p, SCOP_BBS (scop), i);
+ SCOP_BBS (scop).ordered_remove (i);
i--;
}
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
- for (j = 0; VEC_iterate (data_reference_p,
- GBB_DATA_REFS (PBB_BLACK_BOX (pbb)), j, dr); j++)
- VEC_safe_push (data_reference_p, heap, drs, dr);
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
+ for (j = 0; GBB_DATA_REFS (PBB_BLACK_BOX (pbb)).iterate (j, &dr); j++)
+ drs.safe_push (dr);
- FOR_EACH_VEC_ELT (data_reference_p, drs, i, dr)
+ FOR_EACH_VEC_ELT (drs, i, dr)
dr->aux = XNEW (base_alias_pair);
if (!build_alias_set_optimal_p (drs))
if (0)
dump_alias_graphs (drs);
- VEC_free (data_reference_p, heap, drs);
+ drs.release ();
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
build_pbb_drs (pbb);
}
GBB_DATA_REFS vector of BB. */
static void
-analyze_drs_in_stmts (scop_p scop, basic_block bb, VEC (gimple, heap) *stmts)
+analyze_drs_in_stmts (scop_p scop, basic_block bb, vec<gimple> stmts)
{
loop_p nest;
gimple_bb_p gbb;
nest = outermost_loop_in_sese_1 (region, bb);
gbb = gbb_from_bb (bb);
- FOR_EACH_VEC_ELT (gimple, stmts, i, stmt)
+ FOR_EACH_VEC_ELT (stmts, i, stmt)
{
loop_p loop;
gimple_stmt_iterator insert_gsi)
{
gimple_stmt_iterator gsi;
- VEC (gimple, heap) *x = VEC_alloc (gimple, heap, 3);
+ vec<gimple> x;
+ x.create (3);
gimple_seq_add_stmt (&stmts, stmt);
for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
- VEC_safe_push (gimple, heap, x, gsi_stmt (gsi));
+ x.safe_push (gsi_stmt (gsi));
gsi_insert_seq_before (&insert_gsi, stmts, GSI_SAME_STMT);
analyze_drs_in_stmts (scop, gsi_bb (insert_gsi), x);
- VEC_free (gimple, heap, x);
+ x.release ();
}
/* Insert the assignment "RES := EXPR" just after AFTER_STMT. */
gimple_stmt_iterator gsi;
tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
gimple stmt = gimple_build_assign (res, var);
- VEC (gimple, heap) *x = VEC_alloc (gimple, heap, 3);
+ vec<gimple> x;
+ x.create (3);
gimple_seq_add_stmt (&stmts, stmt);
for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
- VEC_safe_push (gimple, heap, x, gsi_stmt (gsi));
+ x.safe_push (gsi_stmt (gsi));
if (gimple_code (after_stmt) == GIMPLE_PHI)
{
}
analyze_drs_in_stmts (scop, gimple_bb (after_stmt), x);
- VEC_free (gimple, heap, x);
+ x.release ();
}
/* Creates a poly_bb_p for basic_block BB from the existing PBB. */
static void
new_pbb_from_pbb (scop_p scop, poly_bb_p pbb, basic_block bb)
{
- VEC (data_reference_p, heap) *drs = VEC_alloc (data_reference_p, heap, 3);
+ vec<data_reference_p> drs;
+ drs.create (3);
gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
gimple_bb_p gbb1 = new_gimple_bb (bb, drs);
poly_bb_p pbb1 = new_poly_bb (scop, gbb1);
- int index, n = VEC_length (poly_bb_p, SCOP_BBS (scop));
+ int index, n = SCOP_BBS (scop).length ();
/* The INDEX of PBB in SCOP_BBS. */
for (index = 0; index < n; index++)
- if (VEC_index (poly_bb_p, SCOP_BBS (scop), index) == pbb)
+ if (SCOP_BBS (scop)[index] == pbb)
break;
pbb1->domain = isl_set_copy (pbb->domain);
GBB_PBB (gbb1) = pbb1;
- GBB_CONDITIONS (gbb1) = VEC_copy (gimple, heap, GBB_CONDITIONS (gbb));
- GBB_CONDITION_CASES (gbb1) = VEC_copy (gimple, heap, GBB_CONDITION_CASES (gbb));
- VEC_safe_insert (poly_bb_p, heap, SCOP_BBS (scop), index + 1, pbb1);
+ GBB_CONDITIONS (gbb1) = GBB_CONDITIONS (gbb).copy ();
+ GBB_CONDITION_CASES (gbb1) = GBB_CONDITION_CASES (gbb).copy ();
+ SCOP_BBS (scop).safe_insert (index + 1, pbb1);
}
/* Insert on edge E the assignment "RES := EXPR". */
tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
gimple stmt = gimple_build_assign (res, var);
basic_block bb;
- VEC (gimple, heap) *x = VEC_alloc (gimple, heap, 3);
+ vec<gimple> x;
+ x.create (3);
gimple_seq_add_stmt (&stmts, stmt);
for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
- VEC_safe_push (gimple, heap, x, gsi_stmt (gsi));
+ x.safe_push (gsi_stmt (gsi));
gsi_insert_seq_on_edge (e, stmts);
gsi_commit_edge_inserts ();
new_pbb_from_pbb (scop, pbb_from_bb (e->src), bb);
analyze_drs_in_stmts (scop, bb, x);
- VEC_free (gimple, heap, x);
+ x.release ();
}
/* Creates a zero dimension array of the same type as VAR. */
poly_bb_p pbb;
int res = 0;
- FOR_EACH_VEC_ELT (poly_bb_p, SCOP_BBS (scop), i, pbb)
+ FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
if (loop_in_sese_p (gbb_loop (PBB_BLACK_BOX (pbb)), SCOP_REGION (scop)))
res++;
/* A part of the data references will end in a different basic block
after the split: move the DRs from the original GBB to the newly
created GBB1. */
- FOR_EACH_VEC_ELT (data_reference_p, GBB_DATA_REFS (gbb), i, dr)
+ FOR_EACH_VEC_ELT (GBB_DATA_REFS (gbb), i, dr)
{
basic_block bb1 = gimple_bb (DR_STMT (dr));
if (bb1 != bb)
{
gimple_bb_p gbb1 = gbb_from_bb (bb1);
- VEC_safe_push (data_reference_p, heap, GBB_DATA_REFS (gbb1), dr);
- VEC_ordered_remove (data_reference_p, GBB_DATA_REFS (gbb), i);
+ GBB_DATA_REFS (gbb1).safe_push (dr);
+ GBB_DATA_REFS (gbb).ordered_remove (i);
i--;
}
}
static gimple
detect_commutative_reduction_arg (tree lhs, gimple stmt, tree arg,
- VEC (gimple, heap) **in,
- VEC (gimple, heap) **out)
+ vec<gimple> *in,
+ vec<gimple> *out)
{
gimple phi = follow_ssa_with_commutative_ops (arg, lhs);
if (!phi)
return NULL;
- VEC_safe_push (gimple, heap, *in, stmt);
- VEC_safe_push (gimple, heap, *out, stmt);
+ in->safe_push (stmt);
+ out->safe_push (stmt);
return phi;
}
STMT. Return the phi node of the reduction cycle, or NULL. */
static gimple
-detect_commutative_reduction_assign (gimple stmt, VEC (gimple, heap) **in,
- VEC (gimple, heap) **out)
+detect_commutative_reduction_assign (gimple stmt, vec<gimple> *in,
+ vec<gimple> *out)
{
tree lhs = gimple_assign_lhs (stmt);
node of the reduction cycle, or NULL. */
static gimple
-detect_commutative_reduction (scop_p scop, gimple stmt, VEC (gimple, heap) **in,
- VEC (gimple, heap) **out)
+detect_commutative_reduction (scop_p scop, gimple stmt, vec<gimple> *in,
+ vec<gimple> *out)
{
if (scalar_close_phi_node_p (stmt))
{
|| !has_single_use (gimple_phi_result (phi))))
return NULL;
- VEC_safe_push (gimple, heap, *in, loop_phi);
- VEC_safe_push (gimple, heap, *out, close_phi);
+ in->safe_push (loop_phi);
+ out->safe_push (close_phi);
return phi;
}
tree def;
use_operand_p use_p;
gimple_stmt_iterator gsi;
- VEC (gimple, heap) *update = VEC_alloc (gimple, heap, 3);
+ vec<gimple> update;
+ update.create (3);
unsigned int i;
gimple stmt;
if (is_gimple_debug (stmt))
{
gimple_debug_bind_reset_value (stmt);
- VEC_safe_push (gimple, heap, update, stmt);
+ update.safe_push (stmt);
}
}
- FOR_EACH_VEC_ELT (gimple, update, i, stmt)
+ FOR_EACH_VEC_ELT (update, i, stmt)
update_stmt (stmt);
- VEC_free (gimple, heap, update);
+ update.release ();
gsi = gsi_for_phi_node (phi);
remove_phi_node (&gsi, false);
static void
translate_scalar_reduction_to_array (scop_p scop,
- VEC (gimple, heap) *in,
- VEC (gimple, heap) *out)
+ vec<gimple> in,
+ vec<gimple> out)
{
gimple loop_phi;
- unsigned int i = VEC_length (gimple, out) - 1;
- tree red = close_phi_written_to_memory (VEC_index (gimple, out, i));
+ unsigned int i = out.length () - 1;
+ tree red = close_phi_written_to_memory (out[i]);
- FOR_EACH_VEC_ELT (gimple, in, i, loop_phi)
+ FOR_EACH_VEC_ELT (in, i, loop_phi)
{
- gimple close_phi = VEC_index (gimple, out, i);
+ gimple close_phi = out[i];
if (i == 0)
{
red = create_zero_dim_array
(gimple_assign_lhs (stmt), "Commutative_Associative_Reduction");
- translate_scalar_reduction_to_array_for_stmt
- (scop, red, stmt, VEC_index (gimple, in, 1));
+ translate_scalar_reduction_to_array_for_stmt (scop, red, stmt, in[1]);
continue;
}
- if (i == VEC_length (gimple, in) - 1)
+ if (i == in.length () - 1)
{
insert_out_of_ssa_copy (scop, gimple_phi_result (close_phi),
unshare_expr (red), close_phi);
gimple close_phi)
{
bool res;
- VEC (gimple, heap) *in = VEC_alloc (gimple, heap, 10);
- VEC (gimple, heap) *out = VEC_alloc (gimple, heap, 10);
+ vec<gimple> in;
+ in.create (10);
+ vec<gimple> out;
+ out.create (10);
detect_commutative_reduction (scop, close_phi, &in, &out);
- res = VEC_length (gimple, in) > 1;
+ res = in.length () > 1;
if (res)
translate_scalar_reduction_to_array (scop, in, out);
- VEC_free (gimple, heap, in);
- VEC_free (gimple, heap, out);
+ in.release ();
+ out.release ();
return res;
}
/* Print statistics for SCOPS to FILE. */
static void
-print_graphite_statistics (FILE* file, VEC (scop_p, heap) *scops)
+print_graphite_statistics (FILE* file, vec<scop_p> scops)
{
int i;
scop_p scop;
- FOR_EACH_VEC_ELT (scop_p, scops, i, scop)
+ FOR_EACH_VEC_ELT (scops, i, scop)
print_graphite_scop_statistics (file, scop);
}
int i;
scop_p scop;
bool need_cfg_cleanup_p = false;
- VEC (scop_p, heap) *scops = NULL;
+ vec<scop_p> scops = vec<scop_p>();
htab_t bb_pbb_mapping;
isl_ctx *ctx;
bb_pbb_mapping = htab_create (10, bb_pbb_map_hash, eq_bb_pbb_map, free);
- FOR_EACH_VEC_ELT (scop_p, scops, i, scop)
+ FOR_EACH_VEC_ELT (scops, i, scop)
if (dbg_cnt (graphite_scop))
{
scop->ctx = ctx;
#include "target.h"
#include "common/common-target.h"
#include "params.h"
-#include "vecprim.h"
#include "dbgcnt.h"
#include "cfgloop.h"
#include "ira.h"
/* This records the actual schedule. It is built up during the main phase
of schedule_block, and afterwards used to reorder the insns in the RTL. */
-static VEC(rtx, heap) *scheduled_insns;
+static vec<rtx> scheduled_insns;
static int may_trap_exp (const_rtx, int);
};
/* Mapping from instruction UID to its Logical UID. */
-VEC (int, heap) *sched_luids = NULL;
+vec<int> sched_luids = vec<int>();
/* Next LUID to assign to an instruction. */
int sched_max_luid = 1;
/* Haifa Instruction Data. */
-VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
+vec<haifa_insn_data_def> h_i_d = vec<haifa_insn_data_def>();
void (* sched_init_only_bb) (basic_block, basic_block);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
-static void move_succs (VEC(edge,gc) **, basic_block);
+static void move_succs (vec<edge, va_gc> **, basic_block);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
INSN_TICK (insn) = INVALID_TICK;
}
-DEF_VEC_P(dep_t);
-DEF_VEC_ALLOC_P(dep_t, heap);
/* Two VECs, one to hold dependencies for which pattern replacements
need to be applied or restored at the start of the next cycle, and
another to hold an integer that is either one, to apply the
corresponding replacement, or zero to restore it. */
-static VEC(dep_t, heap) *next_cycle_replace_deps;
-static VEC(int, heap) *next_cycle_apply;
+static vec<dep_t> next_cycle_replace_deps;
+static vec<int> next_cycle_apply;
static void apply_replacement (dep_t, bool);
static void restore_pattern (dep_t, bool);
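The declarations above show the other half of the new interface: the allocation strategy moved from a macro argument (heap/gc/stack) into a template parameter, with va_heap as the default. GC vectors use the embedded layout and are therefore manipulated through a pointer. A hedged sketch of the two forms (the GTY root registration is illustrative):

static vec<rtx> heap_insns;                /* heap storage, by value */
static GTY(()) vec<edge, va_gc> *gc_edges; /* GC storage, by pointer */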
REG_DEP_CONTROL; if the condition register isn't modified after it,
we know that it still has the right value. */
if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
- FOR_EACH_VEC_ELT_REVERSE (rtx, scheduled_insns, i, prev)
+ FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
{
HARD_REG_SET t;
/* Index POINT gives the instruction at point POINT of the model schedule.
This array doesn't change during main scheduling. */
-static VEC (rtx, heap) *model_schedule;
+static vec<rtx> model_schedule;
/* The list of instructions in the model worklist, sorted in order of
decreasing priority. */
/* The instruction at point POINT of the model schedule. */
#define MODEL_INSN(POINT) \
- (VEC_index (rtx, model_schedule, POINT))
+ (model_schedule[POINT])
/* Return INSN's index in the model schedule, or model_num_insns if it
gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
- point = VEC_length (rtx, model_schedule);
- VEC_quick_push (rtx, model_schedule, insn);
+ point = model_schedule.length ();
+ model_schedule.quick_push (insn);
INSN_MODEL_INDEX (insn) = point + 1;
}
unsigned int i;
rtx insn;
- FOR_EACH_VEC_ELT (rtx, model_schedule, i, insn)
+ FOR_EACH_VEC_ELT (model_schedule, i, insn)
QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
}
basic_block bb;
model_next_priority = 1;
- model_schedule = VEC_alloc (rtx, heap, sched_max_luid);
+ model_schedule.create (sched_max_luid);
model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
bb = BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head));
model_init_pressure_group (&model_before_pressure);
while (model_worklist)
model_choose_insn ();
- gcc_assert (model_num_insns == (int) VEC_length (rtx, model_schedule));
+ gcc_assert (model_num_insns == (int) model_schedule.length ());
if (sched_verbose >= 2)
fprintf (sched_dump, "\n");
model_end_schedule (void)
{
model_finalize_pressure_group (&model_before_pressure);
- VEC_free (rtx, heap, model_schedule);
+ model_schedule.release ();
}
\f
/* A structure that holds local state for the loop in schedule_block. */
/* Describe pattern replacements that occurred since this backtrack point
was queued. */
- VEC (dep_t, heap) *replacement_deps;
- VEC (int, heap) *replace_apply;
+ vec<dep_t> replacement_deps;
+ vec<int> replace_apply;
/* A copy of the next-cycle replacement vectors at the time of the backtrack
point. */
- VEC (dep_t, heap) *next_cycle_deps;
- VEC (int, heap) *next_cycle_apply;
+ vec<dep_t> next_cycle_deps;
+ vec<int> next_cycle_apply;
};
/* A record, in reverse order, of all scheduled insns which have delay slots
save->sched_block = sched_block;
- save->replacement_deps = NULL;
- save->replace_apply = NULL;
- save->next_cycle_deps = VEC_copy (dep_t, heap, next_cycle_replace_deps);
- save->next_cycle_apply = VEC_copy (int, heap, next_cycle_apply);
+ save->replacement_deps.create (0);
+ save->replace_apply.create (0);
+ save->next_cycle_deps = next_cycle_replace_deps.copy ();
+ save->next_cycle_apply = next_cycle_apply.copy ();
if (current_sched_info->save_state)
save->fe_saved_data = (*current_sched_info->save_state) ();
static void
undo_replacements_for_backtrack (struct haifa_saved_data *save)
{
- while (!VEC_empty (dep_t, save->replacement_deps))
+ while (!save->replacement_deps.is_empty ())
{
- dep_t dep = VEC_pop (dep_t, save->replacement_deps);
- int apply_p = VEC_pop (int, save->replace_apply);
+ dep_t dep = save->replacement_deps.pop ();
+ int apply_p = save->replace_apply.pop ();
if (apply_p)
restore_pattern (dep, true);
else
apply_replacement (dep, true);
}
- VEC_free (dep_t, heap, save->replacement_deps);
- VEC_free (int, heap, save->replace_apply);
+ save->replacement_deps.release ();
+ save->replace_apply.release ();
}
/* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
static void
unschedule_insns_until (rtx insn)
{
- VEC (rtx, heap) *recompute_vec;
-
- recompute_vec = VEC_alloc (rtx, heap, 0);
+ vec<rtx> recompute_vec = vec<rtx>();
/* Make two passes over the insns to be unscheduled. First, we clear out
dependencies and other trivial bookkeeping. */
sd_iterator_def sd_it;
dep_t dep;
- last = VEC_pop (rtx, scheduled_insns);
+ last = scheduled_insns.pop ();
/* This will be changed by restore_backtrack_point if the insn is in
any queue. */
if (!MUST_RECOMPUTE_SPEC_P (con))
{
MUST_RECOMPUTE_SPEC_P (con) = 1;
- VEC_safe_push (rtx, heap, recompute_vec, con);
+ recompute_vec.safe_push (con);
}
}
popped the scheduled_insns vector up to the point where we
restart scheduling, as recompute_todo_spec requires it to be
up-to-date. */
- while (!VEC_empty (rtx, recompute_vec))
+ while (!recompute_vec.is_empty ())
{
rtx con;
- con = VEC_pop (rtx, recompute_vec);
+ con = recompute_vec.pop ();
MUST_RECOMPUTE_SPEC_P (con) = 0;
if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
{
else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
TODO_SPEC (con) = recompute_todo_spec (con, true);
}
- VEC_free (rtx, heap, recompute_vec);
+ recompute_vec.release ();
}
/* Restore scheduler state from the topmost entry on the backtracking queue.
mark_backtrack_feeds (save->delay_pair->i2, 0);
- gcc_assert (VEC_empty (dep_t, next_cycle_replace_deps));
- next_cycle_replace_deps = VEC_copy (dep_t, heap, save->next_cycle_deps);
- next_cycle_apply = VEC_copy (int, heap, save->next_cycle_apply);
+ gcc_assert (next_cycle_replace_deps.is_empty ());
+ next_cycle_replace_deps = save->next_cycle_deps.copy ();
+ next_cycle_apply = save->next_cycle_apply.copy ();
free (save);
}
else
{
- VEC_free (dep_t, heap, save->replacement_deps);
- VEC_free (int, heap, save->replace_apply);
+ save->replacement_deps.release ();
+ save->replace_apply.release ();
}
if (targetm.sched.free_sched_context)
struct dep_replacement *desc = DEP_REPLACE (dep);
if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
{
- VEC_safe_push (dep_t, heap, next_cycle_replace_deps, dep);
- VEC_safe_push (int, heap, next_cycle_apply, 1);
+ next_cycle_replace_deps.safe_push (dep);
+ next_cycle_apply.safe_push (1);
}
else
{
if (backtrack_queue != NULL)
{
- VEC_safe_push (dep_t, heap, backtrack_queue->replacement_deps, dep);
- VEC_safe_push (int, heap, backtrack_queue->replace_apply, 1);
+ backtrack_queue->replacement_deps.safe_push (dep);
+ backtrack_queue->replace_apply.safe_push (1);
}
}
}
if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
{
- VEC_safe_push (dep_t, heap, next_cycle_replace_deps, dep);
- VEC_safe_push (int, heap, next_cycle_apply, 0);
+ next_cycle_replace_deps.safe_push (dep);
+ next_cycle_apply.safe_push (0);
return;
}
update_insn_after_change (desc->insn);
if (backtrack_queue != NULL)
{
- VEC_safe_push (dep_t, heap, backtrack_queue->replacement_deps, dep);
- VEC_safe_push (int, heap, backtrack_queue->replace_apply, 0);
+ backtrack_queue->replacement_deps.safe_push (dep);
+ backtrack_queue->replace_apply.safe_push (0);
}
}
INSN_TICK (next) = tick;
{
int i;
dep_t dep;
- FOR_EACH_VEC_ELT (dep_t, next_cycle_replace_deps, i, dep)
+ FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
{
- int apply_p = VEC_index (int, next_cycle_apply, i);
+ int apply_p = next_cycle_apply[i];
if (apply_p)
apply_replacement (dep, true);
else
restore_pattern (dep, true);
}
- VEC_truncate (dep_t, next_cycle_replace_deps, 0);
- VEC_truncate (int, next_cycle_apply, 0);
+ next_cycle_replace_deps.truncate (0);
+ next_cycle_apply.truncate (0);
}
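truncate (0), as used here, is the idiom when the vector is refilled every cycle: it resets the length but keeps the allocation, whereas release () would free it. Sketch (process is a placeholder):

static void
process_cycle (vec<dep_t> *deps)
{
  int i;
  dep_t dep;
  FOR_EACH_VEC_ELT (*deps, i, dep)
    process (dep);      /* hypothetical callee */
  deps->truncate (0);   /* length 0, capacity retained */
}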
/* Compute INSN_TICK_ESTIMATE for INSN. PROCESSED is a bitmap of
if (QUEUE_INDEX (insn) >= 0)
queue_remove (insn);
- VEC_safe_push (rtx, heap, scheduled_insns, insn);
+ scheduled_insns.safe_push (insn);
/* Update dependent instructions. */
for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
rtx insn;
int i;
- FOR_EACH_VEC_ELT (rtx, scheduled_insns, i, insn)
+ FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
{
sd_iterator_def sd_it;
dep_t dep;
{
rtx prev_insn;
int n_cycles;
- int i = VEC_length (rtx, scheduled_insns);
+ int i = scheduled_insns.length ();
for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
{
while (i-- > 0)
{
int cost;
- prev_insn = VEC_index (rtx, scheduled_insns, i);
+ prev_insn = scheduled_insns[i];
if (!NOTE_P (prev_insn))
{
last_scheduled_insn = prev_head;
for (i = 0;
- VEC_iterate (rtx, scheduled_insns, i, insn);
+ scheduled_insns.iterate (i, &insn);
i++)
{
if (control_flow_insn_p (last_scheduled_insn)
last_scheduled_insn = insn;
}
- VEC_truncate (rtx, scheduled_insns, 0);
+ scheduled_insns.truncate (0);
}
/* Examine all insns on the ready list and queue those which can't be
advance = 0;
- gcc_assert (VEC_length (rtx, scheduled_insns) == 0);
+ gcc_assert (scheduled_insns.length () == 0);
sort_p = TRUE;
must_backtrack = false;
modulo_insns_scheduled = 0;
rtx insn = ready_remove_first (&ready);
gcc_assert (DEBUG_INSN_P (insn));
(*current_sched_info->begin_schedule_ready) (insn);
- VEC_safe_push (rtx, heap, scheduled_insns, insn);
+ scheduled_insns.safe_push (insn);
last_scheduled_insn = insn;
advance = schedule_insn (insn);
gcc_assert (advance == 0);
/* Update counters, etc in the scheduler's front end. */
(*current_sched_info->begin_schedule_ready) (insn);
- VEC_safe_push (rtx, heap, scheduled_insns, insn);
+ scheduled_insns.safe_push (insn);
gcc_assert (NONDEBUG_INSN_P (insn));
last_nondebug_scheduled_insn = last_scheduled_insn = insn;
else
last_scheduled_insn = tail;
- VEC_truncate (rtx, scheduled_insns, 0);
+ scheduled_insns.truncate (0);
if (!current_sched_info->queue_must_finish_empty
|| haifa_recovery_bb_recently_added_p)
setup_sched_dump ();
sched_init ();
- scheduled_insns = VEC_alloc (rtx, heap, 0);
+ scheduled_insns.create (0);
if (spec_info != NULL)
{
/* Initialize luids, dependency caches, target and h_i_d for the
whole function. */
{
- bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
+ bb_vec_t bbs;
+ bbs.create (n_basic_blocks);
basic_block bb;
sched_init_bbs ();
FOR_EACH_BB (bb)
- VEC_quick_push (basic_block, bbs, bb);
+ bbs.quick_push (bb);
sched_init_luids (bbs);
sched_deps_init (true);
sched_extend_target ();
haifa_init_h_i_d (bbs);
- VEC_free (basic_block, heap, bbs);
+ bbs.release ();
}
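/* Sketch: when the final element count is known up front, as with the
   basic-block vector above, create (n) reserves exactly n slots and
   quick_push can replace safe_push, skipping the per-push capacity check.
   Illustrative only:

       basic_block bb;
       vec<basic_block> bbs;
       bbs.create (n_basic_blocks);   // exact capacity known in advance
       FOR_EACH_BB (bb)
         bbs.quick_push (bb);         // no growth check; room is guaranteed
       // ... hand bbs to the init routines ...
       bbs.release ();
*/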
sched_init_only_bb = haifa_init_only_bb;
c, nr_be_in_control);
}
- VEC_free (rtx, heap, scheduled_insns);
+ scheduled_insns.release ();
/* Finalize h_i_d, dependency caches, and luids for the whole
function. Target will be finalized in md_global_finish (). */
{
i = 0;
sched_ready_n_insns = 0;
- VEC_reserve (rtx, heap, scheduled_insns, new_sched_ready_n_insns);
+ scheduled_insns.reserve (new_sched_ready_n_insns);
}
else
i = sched_ready_n_insns + 1;
sd_iterator_next (&sd_it);
}
- priorities_roots = NULL;
+ priorities_roots.create (0);
clear_priorities (insn, &priorities_roots);
while (1)
}
calc_priorities (priorities_roots);
- VEC_free (rtx, heap, priorities_roots);
+ priorities_roots.release ();
}
/* Extends the array pointed to by P and fills the new part with zeros.  */
/* Add new blocks to the root loop. */
if (current_loops != NULL)
{
- add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
- add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
+ add_bb_to_loop (single, (*current_loops->larray)[0]);
+ add_bb_to_loop (empty, (*current_loops->larray)[0]);
}
single->count = last->count;
/* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
because it'll be done later in add_to_speculative_block. */
{
- rtx_vec_t priorities_roots = NULL;
+ rtx_vec_t priorities_roots = rtx_vec_t();
clear_priorities (twin, &priorities_roots);
calc_priorities (priorities_roots);
- VEC_free (rtx, heap, priorities_roots);
+ priorities_roots.release ();
}
}
move_block_after_check (rtx jump)
{
basic_block bb, jump_bb, jump_bb_next;
- VEC(edge,gc) *t;
+ vec<edge, va_gc> *t;
bb = BLOCK_FOR_INSN (PREV_INSN (jump));
jump_bb = BLOCK_FOR_INSN (jump);
This function attaches the edge vector pointed to by SUCCSP to
block TO. */
static void
-move_succs (VEC(edge,gc) **succsp, basic_block to)
+move_succs (vec<edge, va_gc> **succsp, basic_block to)
{
edge e;
edge_iterator ei;
}
if (insn_is_root_p)
- VEC_safe_push (rtx, heap, *roots_ptr, insn);
+ roots_ptr->safe_push (insn);
}
/* Recompute priorities of instructions, whose priorities might have been
int i;
rtx insn;
- FOR_EACH_VEC_ELT (rtx, roots, i, insn)
+ FOR_EACH_VEC_ELT (roots, i, insn)
priority (insn);
}
{
int new_luids_max_uid = get_max_uid () + 1;
- VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
+ sched_luids.safe_grow_cleared (new_luids_max_uid);
}
/* Initialize LUID for INSN. */
basic_block bb;
sched_extend_luids ();
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
{
rtx insn;
void
sched_finish_luids (void)
{
- VEC_free (int, heap, sched_luids);
+ sched_luids.release ();
sched_max_luid = 1;
}
static void
extend_h_i_d (void)
{
- int reserve = (get_max_uid () + 1
- - VEC_length (haifa_insn_data_def, h_i_d));
+ int reserve = (get_max_uid () + 1 - h_i_d.length ());
if (reserve > 0
- && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
+ && ! h_i_d.space (reserve))
{
- VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
- 3 * get_max_uid () / 2);
+ h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
sched_extend_target ();
}
}
basic_block bb;
extend_h_i_d ();
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
{
rtx insn;
haifa_insn_data_t data;
struct reg_use_data *use, *next;
- FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
+ FOR_EACH_VEC_ELT (h_i_d, i, data)
{
free (data->max_reg_pressure);
free (data->reg_pressure);
free (use);
}
}
- VEC_free (haifa_insn_data_def, heap, h_i_d);
+ h_i_d.release ();
}
/* Init data for the new insn INSN. */
current_sched_info->add_remove_insn (insn, 0);
(*current_sched_info->begin_schedule_ready) (insn);
- VEC_safe_push (rtx, heap, scheduled_insns, insn);
+ scheduled_insns.safe_push (insn);
last_scheduled_insn = insn;
return insn;
loop->depth, REGNO (loop->iter_reg));
fprintf (dump_file, " blocks: [ ");
- for (ix = 0; VEC_iterate (basic_block, loop->blocks, ix, b); ix++)
+ for (ix = 0; loop->blocks.iterate (ix, &b); ix++)
fprintf (dump_file, "%d ", b->index);
fprintf (dump_file, "] ");
fprintf (dump_file, " inner loops: [ ");
- for (ix = 0; VEC_iterate (hwloop_info, loop->loops, ix, i); ix++)
+ for (ix = 0; loop->loops.iterate (ix, &i); ix++)
fprintf (dump_file, "%d ", i->loop_no);
fprintf (dump_file, "]\n");
}
REGNO (loop->iter_reg)))
loop->iter_reg_used_outside = true;
- for (ix = 0; VEC_iterate (basic_block, loop->blocks, ix, bb); ix++)
+ for (ix = 0; loop->blocks.iterate (ix, &bb); ix++)
{
rtx insn;
edge e;
fprintf (dump_file,
";; Adding forwarder block %d to loop %d and retrying\n",
e->src->index, loop->loop_no);
- VEC_safe_push (basic_block, heap, loop->blocks, e->src);
+ loop->blocks.safe_push (e->src);
bitmap_set_bit (loop->block_bitmap, e->src->index);
FOR_EACH_EDGE (e2, ei2, e->src->preds)
- VEC_safe_push (edge, gc, loop->incoming, e2);
- VEC_unordered_remove (edge, loop->incoming, ei.index);
+ vec_safe_push (loop->incoming, e2);
+ loop->incoming->unordered_remove (ei.index);
return true;
}
}
bool found_tail;
unsigned dwork = 0;
basic_block bb;
- VEC (basic_block,heap) *works;
+ vec<basic_block> works;
loop->tail = tail_bb;
loop->loop_end = tail_insn;
loop->iter_reg = reg;
- loop->incoming = VEC_alloc (edge, gc, 2);
+ vec_alloc (loop->incoming, 2);
loop->start_label = JUMP_LABEL (tail_insn);
if (EDGE_COUNT (tail_bb->succs) != 2)
loop->head = BRANCH_EDGE (tail_bb)->dest;
loop->successor = FALLTHRU_EDGE (tail_bb)->dest;
- works = VEC_alloc (basic_block, heap, 20);
- VEC_safe_push (basic_block, heap, works, loop->head);
+ works.create (20);
+ works.safe_push (loop->head);
found_tail = false;
- for (dwork = 0; VEC_iterate (basic_block, works, dwork, bb); dwork++)
+ for (dwork = 0; works.iterate (dwork, &bb); dwork++)
{
edge e;
edge_iterator ei;
/* We've not seen this block before. Add it to the loop's
list and then add each successor to the work list. */
- VEC_safe_push (basic_block, heap, loop->blocks, bb);
+ loop->blocks.safe_push (bb);
bitmap_set_bit (loop->block_bitmap, bb->index);
if (bb == tail_bb)
basic_block succ = EDGE_SUCC (bb, ei.index)->dest;
if (REGNO_REG_SET_P (df_get_live_in (succ),
REGNO (loop->iter_reg)))
- VEC_safe_push (basic_block, heap, works, succ);
+ works.safe_push (succ);
}
}
}
/* Find the predecessor, and make sure nothing else jumps into this loop. */
if (!loop->bad)
{
- FOR_EACH_VEC_ELT (basic_block, loop->blocks, dwork, bb)
+ FOR_EACH_VEC_ELT (loop->blocks, dwork, bb)
{
edge e;
edge_iterator ei;
fprintf (dump_file, ";; Loop %d: incoming edge %d -> %d\n",
loop->loop_no, pred->index,
e->dest->index);
- VEC_safe_push (edge, gc, loop->incoming, e);
+ vec_safe_push (loop->incoming, e);
}
}
}
}
}
- VEC_free (basic_block, heap, works);
+ works.release ();
}
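/* Sketch contrasting the two allocation flavors used in this function.
   Heap vectors are by-value objects with member calls; GC vectors keep the
   embedded layout and are handled through a vec<T, va_gc> * with NULL-safe
   free functions.  "e" is a hypothetical edge:

       vec<basic_block> works;             // heap flavor
       works.create (20);
       works.release ();

       vec<edge, va_gc> *incoming = NULL;  // GC flavor, lives in GC memory
       vec_alloc (incoming, 2);            // was VEC_alloc (edge, gc, 2)
       vec_safe_push (incoming, e);        // was VEC_safe_push (edge, gc, ...)
       edge first = (*incoming)[0];        // access dereferences the pointer
       (void) first;
*/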
/* Analyze the structure of the loops in the current function. Use
loop->next = loops;
loops = loop;
loop->loop_no = nloops++;
- loop->blocks = VEC_alloc (basic_block, heap, 20);
+ loop->blocks.create (20);
loop->block_bitmap = BITMAP_ALLOC (loop_stack);
if (dump_file)
continue;
if (!bitmap_intersect_compl_p (other->block_bitmap,
loop->block_bitmap))
- VEC_safe_push (hwloop_info, heap, loop->loops, other);
+ loop->loops.safe_push (other);
else if (!bitmap_intersect_compl_p (loop->block_bitmap,
other->block_bitmap))
- VEC_safe_push (hwloop_info, heap, other->loops, loop);
+ other->loops.safe_push (loop);
else
{
if (dump_file)
{
hwloop_info loop = loops;
loops = loop->next;
- VEC_free (hwloop_info, heap, loop->loops);
- VEC_free (basic_block, heap, loop->blocks);
+ loop->loops.release ();
+ loop->blocks.release ();
BITMAP_FREE (loop->block_bitmap);
XDELETE (loop);
}
a depth-first search here and never visit a loop more than once.
Recursion depth is effectively limited by the number of available
hardware registers. */
- for (ix = 0; VEC_iterate (hwloop_info, loop->loops, ix, inner); ix++)
+ for (ix = 0; loop->loops.iterate (ix, &inner); ix++)
{
optimize_loop (inner, hooks);
/* We need to keep a vector of loops */
typedef struct hwloop_info_d *hwloop_info;
-DEF_VEC_P (hwloop_info);
-DEF_VEC_ALLOC_P (hwloop_info,heap);
/* Information about a loop we have found (or are in the process of
finding). */
/* Vector of blocks only within the loop, including those within
inner loops. */
- VEC (basic_block, heap) *blocks;
+ vec<basic_block> blocks;
/* Same information in a bitmap. */
bitmap block_bitmap;
/* Vector of inner loops within this loop. Includes loops of every
nesting level. */
- VEC (hwloop_info, heap) *loops;
+ vec<hwloop_info> loops;
/* All edges that jump into the loop. */
- VEC(edge, gc) *incoming;
+ vec<edge, va_gc> *incoming;
/* The ports currently using this infrastructure can typically
handle two cases: all incoming edges have the same destination
#include "df.h"
#include "vec.h"
#include "pointer-set.h"
-#include "vecprim.h"
#include "dbgcnt.h"
#ifndef HAVE_conditional_move
static int
check_cond_move_block (basic_block bb,
struct pointer_map_t *vals,
- VEC (rtx, heap) **regs,
+ vec<rtx> *regs,
rtx cond)
{
rtx insn;
slot = pointer_map_insert (vals, (void *) dest);
*slot = (void *) src;
- VEC_safe_push (rtx, heap, *regs, dest);
+ regs->safe_push (dest);
}
return TRUE;
int c;
struct pointer_map_t *then_vals;
struct pointer_map_t *else_vals;
- VEC (rtx, heap) *then_regs = NULL;
- VEC (rtx, heap) *else_regs = NULL;
+ vec<rtx> then_regs = vec<rtx>();
+ vec<rtx> else_regs = vec<rtx>();
unsigned int i;
int success_p = FALSE;
source register does not change after the assignment. Also count
the number of registers set in only one of the blocks. */
c = 0;
- FOR_EACH_VEC_ELT (rtx, then_regs, i, reg)
+ FOR_EACH_VEC_ELT (then_regs, i, reg)
{
void **then_slot = pointer_map_contains (then_vals, reg);
void **else_slot = pointer_map_contains (else_vals, reg);
}
/* Finish off c for MAX_CONDITIONAL_EXECUTE. */
- FOR_EACH_VEC_ELT (rtx, else_regs, i, reg)
+ FOR_EACH_VEC_ELT (else_regs, i, reg)
{
gcc_checking_assert (pointer_map_contains (else_vals, reg));
if (!pointer_map_contains (then_vals, reg))
done:
pointer_map_destroy (then_vals);
pointer_map_destroy (else_vals);
- VEC_free (rtx, heap, then_regs);
- VEC_free (rtx, heap, else_regs);
+ then_regs.release ();
+ else_regs.release ();
return success_p;
}
#ifndef GCC_INSN_ADDR_H
#define GCC_INSN_ADDR_H
-#include "vecprim.h"
-
-extern VEC(int,heap) *insn_addresses_;
+extern vec<int> insn_addresses_;
extern int insn_current_address;
-#define INSN_ADDRESSES(id) (*&(VEC_address (int, insn_addresses_) [id]))
+#define INSN_ADDRESSES(id) (insn_addresses_[id])
#define INSN_ADDRESSES_ALLOC(size) \
do \
{ \
- insn_addresses_ = VEC_alloc (int, heap, size); \
- VEC_safe_grow (int, heap, insn_addresses_, size); \
- memset (VEC_address (int, insn_addresses_), \
+ insn_addresses_.create (size); \
+ insn_addresses_.safe_grow_cleared (size); \
+ memset (insn_addresses_.address (), \
0, sizeof (int) * size); \
} \
while (0)
-#define INSN_ADDRESSES_FREE() (VEC_free (int, heap, insn_addresses_))
-#define INSN_ADDRESSES_SET_P() (insn_addresses_ != 0)
-#define INSN_ADDRESSES_SIZE() (VEC_length (int, insn_addresses_))
+#define INSN_ADDRESSES_FREE() (insn_addresses_.release ())
+#define INSN_ADDRESSES_SET_P() (insn_addresses_.exists ())
+#define INSN_ADDRESSES_SIZE() (insn_addresses_.length ())
static inline void
insn_addresses_new (rtx insn, int insn_addr)
if (size <= insn_uid)
{
int *p;
- VEC_safe_grow (int, heap, insn_addresses_, insn_uid + 1);
- p = VEC_address (int, insn_addresses_);
+ insn_addresses_.safe_grow (insn_uid + 1);
+ p = insn_addresses_.address ();
memset (&p[size],
0, sizeof (int) * (insn_uid + 1 - size));
}
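/* Sketch of how the converted INSN_ADDRESSES macros might be exercised;
   the point is that "is there a vector at all?" is now exists () rather
   than a NULL comparison.  uid, addr, and limit are hypothetical:

       if (!INSN_ADDRESSES_SET_P ())              // insn_addresses_.exists ()
         INSN_ADDRESSES_ALLOC (get_max_uid ());
       INSN_ADDRESSES (uid) = addr;               // plain operator[] access
       if ((int) INSN_ADDRESSES_SIZE () > limit)  // insn_addresses_.length ()
         INSN_ADDRESSES_FREE ();                  // insn_addresses_.release ()
*/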
idx = ipa_get_jf_ancestor_formal_id (jfunc);
if (info->ipcp_orig_node)
- input = VEC_index (tree, info->known_vals, idx);
+ input = info->known_vals[idx];
else
{
struct ipcp_lattice *lat;
if (set_check_aggs_by_ref (dest_plats, jfunc->agg.by_ref))
return true;
- FOR_EACH_VEC_ELT (ipa_agg_jf_item_t, jfunc->agg.items, i, item)
+ FOR_EACH_VEC_ELT (*jfunc->agg.items, i, item)
{
HOST_WIDE_INT val_size;
tree
ipa_get_indirect_edge_target (struct cgraph_edge *ie,
- VEC (tree, heap) *known_vals,
- VEC (tree, heap) *known_binfos,
- VEC (ipa_agg_jump_function_p, heap) *known_aggs)
+ vec<tree> known_vals,
+ vec<tree> known_binfos,
+ vec<ipa_agg_jump_function_p> known_aggs)
{
int param_index = ie->indirect_info->param_index;
HOST_WIDE_INT token, anc_offset;
if (ie->indirect_info->agg_contents)
{
- if (VEC_length (ipa_agg_jump_function_p, known_aggs)
-     > (unsigned int) param_index)
+ if (known_aggs.length () > (unsigned int) param_index)
{
struct ipa_agg_jump_function *agg;
- agg = VEC_index (ipa_agg_jump_function_p, known_aggs,
- param_index);
+ agg = known_aggs[param_index];
t = ipa_find_agg_cst_for_param (agg, ie->indirect_info->offset,
ie->indirect_info->by_ref);
}
t = NULL;
}
else
- t = (VEC_length (tree, known_vals) > (unsigned int) param_index
- ? VEC_index (tree, known_vals, param_index) : NULL);
+ t = (known_vals.length () > (unsigned int) param_index
+ ? known_vals[param_index] : NULL);
if (t &&
TREE_CODE (t) == ADDR_EXPR
anc_offset = ie->indirect_info->offset;
otr_type = ie->indirect_info->otr_type;
- t = VEC_index (tree, known_vals, param_index);
- if (!t && known_binfos
- && VEC_length (tree, known_binfos) > (unsigned int) param_index)
- t = VEC_index (tree, known_binfos, param_index);
+ t = known_vals[param_index];
+ if (!t && known_binfos.length () > (unsigned int) param_index)
+ t = known_binfos[param_index];
if (!t)
return NULL_TREE;
static int
devirtualization_time_bonus (struct cgraph_node *node,
- VEC (tree, heap) *known_csts,
- VEC (tree, heap) *known_binfos)
+ vec<tree> known_csts,
+ vec<tree> known_binfos)
{
struct cgraph_edge *ie;
int res = 0;
tree target;
target = ipa_get_indirect_edge_target (ie, known_csts, known_binfos,
- NULL);
+ vec<ipa_agg_jump_function_p>());
if (!target)
continue;
/* Return all context independent values from aggregate lattices in PLATS in a
vector. Return NULL if there are none. */
-static VEC (ipa_agg_jf_item_t, gc) *
+static vec<ipa_agg_jf_item_t, va_gc> *
context_independent_aggregate_values (struct ipcp_param_lattices *plats)
{
- VEC (ipa_agg_jf_item_t, gc) *res = NULL;
+ vec<ipa_agg_jf_item_t, va_gc> *res = NULL;
if (plats->aggs_bottom
|| plats->aggs_contain_variable
struct ipa_agg_jf_item item;
item.offset = aglat->offset;
item.value = aglat->values->value;
- VEC_safe_push (ipa_agg_jf_item_t, gc, res, item);
+ vec_safe_push (res, item);
}
return res;
}
static bool
gather_context_independent_values (struct ipa_node_params *info,
- VEC (tree, heap) **known_csts,
- VEC (tree, heap) **known_binfos,
- VEC (ipa_agg_jump_function_t, heap) **known_aggs,
+ vec<tree> *known_csts,
+ vec<tree> *known_binfos,
+ vec<ipa_agg_jump_function_t> *known_aggs,
int *removable_params_cost)
{
int i, count = ipa_get_param_count (info);
bool ret = false;
- *known_csts = NULL;
- *known_binfos = NULL;
- VEC_safe_grow_cleared (tree, heap, *known_csts, count);
- VEC_safe_grow_cleared (tree, heap, *known_binfos, count);
+ known_csts->create (0);
+ known_binfos->create (0);
+ known_csts->safe_grow_cleared (count);
+ known_binfos->safe_grow_cleared (count);
if (known_aggs)
{
- *known_aggs = NULL;
- VEC_safe_grow_cleared (ipa_agg_jump_function_t, heap, *known_aggs, count);
+ known_aggs->create (0);
+ known_aggs->safe_grow_cleared (count);
}
if (removable_params_cost)
struct ipcp_value *val = lat->values;
if (TREE_CODE (val->value) != TREE_BINFO)
{
- VEC_replace (tree, *known_csts, i, val->value);
+ (*known_csts)[i] = val->value;
if (removable_params_cost)
*removable_params_cost
+= estimate_move_cost (TREE_TYPE (val->value));
}
else if (plats->virt_call)
{
- VEC_replace (tree, *known_binfos, i, val->value);
+ (*known_binfos)[i] = val->value;
ret = true;
}
else if (removable_params_cost
if (known_aggs)
{
- VEC (ipa_agg_jf_item_t, gc) *agg_items;
+ vec<ipa_agg_jf_item_t, va_gc> *agg_items;
struct ipa_agg_jump_function *ajf;
agg_items = context_independent_aggregate_values (plats);
- ajf = &VEC_index (ipa_agg_jump_function_t, *known_aggs, i);
+ ajf = &(*known_aggs)[i];
ajf->items = agg_items;
ajf->by_ref = plats->aggs_by_ref;
ret |= agg_items != NULL;
I'd like to discuss how to change it first and this demonstrates the
issue. */
-static VEC (ipa_agg_jump_function_p, heap) *
-agg_jmp_p_vec_for_t_vec (VEC (ipa_agg_jump_function_t, heap) *known_aggs)
+static vec<ipa_agg_jump_function_p>
+agg_jmp_p_vec_for_t_vec (vec<ipa_agg_jump_function_t> known_aggs)
{
- VEC (ipa_agg_jump_function_p, heap) *ret;
+ vec<ipa_agg_jump_function_p> ret;
struct ipa_agg_jump_function *ajf;
int i;
- ret = VEC_alloc (ipa_agg_jump_function_p, heap,
- VEC_length (ipa_agg_jump_function_t, known_aggs));
- FOR_EACH_VEC_ELT (ipa_agg_jump_function_t, known_aggs, i, ajf)
- VEC_quick_push (ipa_agg_jump_function_p, ret, ajf);
+ ret.create (known_aggs.length ());
+ FOR_EACH_VEC_ELT (known_aggs, i, ajf)
+ ret.quick_push (ajf);
return ret;
}
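/* Sketch: signatures that took VEC (T, heap) * now take vec<T> by value.
   The object is a thin handle, so passing it does not copy the elements
   (an explicit .copy () exists for that, as in earlier hunks), and an
   empty argument is spelled vec<T>() where NULL used to go.
   count_nonnull is hypothetical:

       static int
       count_nonnull (vec<tree> known_vals)   // by value: cheap handle copy
       {
         int n = 0;
         for (unsigned i = 0; i < known_vals.length (); i++)
           if (known_vals[i])
             n++;
         return n;
       }

       // caller: an empty vector instead of NULL
       int n = count_nonnull (vec<tree>());
*/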
{
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
- VEC (tree, heap) *known_csts, *known_binfos;
- VEC (ipa_agg_jump_function_t, heap) *known_aggs;
- VEC (ipa_agg_jump_function_p, heap) *known_aggs_ptrs;
+ vec<tree> known_csts, known_binfos;
+ vec<ipa_agg_jump_function_t> known_aggs;
+ vec<ipa_agg_jump_function_p> known_aggs_ptrs;
bool always_const;
int base_time = inline_summary (node)->time;
int removable_params_cost;
if (lat->bottom
|| !lat->values
- || VEC_index (tree, known_csts, i)
- || VEC_index (tree, known_binfos, i))
+ || known_csts[i]
+ || known_binfos[i])
continue;
for (val = lat->values; val; val = val->next)
if (TREE_CODE (val->value) != TREE_BINFO)
{
- VEC_replace (tree, known_csts, i, val->value);
- VEC_replace (tree, known_binfos, i, NULL_TREE);
+ known_csts[i] = val->value;
+ known_binfos[i] = NULL_TREE;
emc = estimate_move_cost (TREE_TYPE (val->value));
}
else if (plats->virt_call)
{
- VEC_replace (tree, known_csts, i, NULL_TREE);
- VEC_replace (tree, known_binfos, i, val->value);
+ known_csts[i] = NULL_TREE;
+ known_binfos[i] = val->value;
emc = 0;
}
else
val->local_time_benefit = time_benefit;
val->local_size_cost = size;
}
- VEC_replace (tree, known_binfos, i, NULL_TREE);
- VEC_replace (tree, known_csts, i, NULL_TREE);
+ known_binfos[i] = NULL_TREE;
+ known_csts[i] = NULL_TREE;
}
for (i = 0; i < count ; i++)
if (plats->aggs_bottom || !plats->aggs)
continue;
- ajf = &VEC_index (ipa_agg_jump_function_t, known_aggs, i);
+ ajf = &known_aggs[i];
for (aglat = plats->aggs; aglat; aglat = aglat->next)
{
struct ipcp_value *val;
item.offset = aglat->offset;
item.value = val->value;
- VEC_safe_push (ipa_agg_jf_item_t, gc, ajf->items, item);
+ vec_safe_push (ajf->items, item);
estimate_ipcp_clone_size_and_time (node, known_csts, known_binfos,
known_aggs_ptrs, &size, &time,
val->local_time_benefit = time_benefit;
val->local_size_cost = size;
- VEC_pop (ipa_agg_jf_item_t, ajf->items);
+ ajf->items->pop ();
}
}
}
for (i = 0; i < count ; i++)
- {
- VEC_free (ipa_agg_jf_item_t, gc,
- VEC_index (ipa_agg_jump_function_t, known_aggs, i).items);
- VEC_index (ipa_agg_jump_function_t, known_aggs, i).items = NULL;
- }
+ vec_free (known_aggs[i].items);
- VEC_free (tree, heap, known_csts);
- VEC_free (tree, heap, known_binfos);
- VEC_free (ipa_agg_jump_function_t, heap, known_aggs);
- VEC_free (ipa_agg_jump_function_p, heap, known_aggs_ptrs);
+ known_csts.release ();
+ known_binfos.release ();
+ known_aggs.release ();
+ known_aggs_ptrs.release ();
}
static void
ipcp_discover_new_direct_edges (struct cgraph_node *node,
- VEC (tree, heap) *known_vals)
+ vec<tree> known_vals)
{
struct cgraph_edge *ie, *next_ie;
bool found = false;
tree target;
next_ie = ie->next_callee;
- target = ipa_get_indirect_edge_target (ie, known_vals, NULL, NULL);
+ target = ipa_get_indirect_edge_target (ie, known_vals,
+ vec<tree>(),
+ vec<ipa_agg_jump_function_p>());
if (target)
{
ipa_make_edge_direct_to_target (ie, target);
/* Vector of pointers which form linked lists of clones of an original cgraph
edge. */
-static VEC (cgraph_edge_p, heap) *next_edge_clone;
+static vec<cgraph_edge_p> next_edge_clone;
static inline void
grow_next_edge_clone_vector (void)
{
- if (VEC_length (cgraph_edge_p, next_edge_clone)
-     <= (unsigned) cgraph_edge_max_uid)
+ if (next_edge_clone.length () <= (unsigned) cgraph_edge_max_uid)
- VEC_safe_grow_cleared (cgraph_edge_p, heap, next_edge_clone,
- cgraph_edge_max_uid + 1);
+ next_edge_clone.safe_grow_cleared (cgraph_edge_max_uid + 1);
}
/* Edge duplication hook to grow the appropriate linked list in
__attribute__((unused)) void *data)
{
grow_next_edge_clone_vector ();
- VEC_replace (cgraph_edge_p, next_edge_clone, dst->uid,
- VEC_index (cgraph_edge_p, next_edge_clone, src->uid));
- VEC_replace (cgraph_edge_p, next_edge_clone, src->uid, dst);
+ next_edge_clone[dst->uid] = next_edge_clone[src->uid];
+ next_edge_clone[src->uid] = dst;
}
/* See if NODE is a clone with a known aggregate value at a given OFFSET of a
{
tree t;
if (src->offset == -1)
- t = VEC_index (tree, caller_info->known_vals, src->index);
+ t = caller_info->known_vals[src->index];
else
t = get_clone_agg_value (cs->caller, src->offset, src->index);
return (t != NULL_TREE
static inline struct cgraph_edge *
get_next_cgraph_edge_clone (struct cgraph_edge *cs)
{
- return VEC_index (cgraph_edge_p, next_edge_clone, cs->uid);
+ return next_edge_clone[cs->uid];
}
/* Given VAL, iterate over all its sources and if they still hold, add their
/* Return a vector of incoming edges that do bring value VAL. It is assumed
their number is known and equal to CALLER_COUNT. */
-static VEC (cgraph_edge_p,heap) *
+static vec<cgraph_edge_p>
gather_edges_for_value (struct ipcp_value *val, int caller_count)
{
struct ipcp_value_source *src;
- VEC (cgraph_edge_p,heap) *ret;
+ vec<cgraph_edge_p> ret;
- ret = VEC_alloc (cgraph_edge_p, heap, caller_count);
+ ret.create (caller_count);
for (src = val->sources; src; src = src->next)
{
struct cgraph_edge *cs = src->cs;
while (cs)
{
if (cgraph_edge_brings_value_p (cs, src))
- VEC_quick_push (cgraph_edge_p, ret, cs);
+ ret.quick_push (cs);
cs = get_next_cgraph_edge_clone (cs);
}
}
static struct cgraph_node *
create_specialized_node (struct cgraph_node *node,
- VEC (tree, heap) *known_vals,
+ vec<tree> known_vals,
struct ipa_agg_replacement_value *aggvals,
- VEC (cgraph_edge_p,heap) *callers)
+ vec<cgraph_edge_p> callers)
{
struct ipa_node_params *new_info, *info = IPA_NODE_REF (node);
- VEC (ipa_replace_map_p,gc)* replace_trees = NULL;
+ vec<ipa_replace_map_p, va_gc> *replace_trees = NULL;
struct cgraph_node *new_node;
int i, count = ipa_get_param_count (info);
bitmap args_to_skip;
args_to_skip = BITMAP_GGC_ALLOC ();
for (i = 0; i < count; i++)
{
- tree t = VEC_index (tree, known_vals, i);
+ tree t = known_vals[i];
if ((t && TREE_CODE (t) != TREE_BINFO)
|| !ipa_is_param_used (info, i))
for (i = 0; i < count ; i++)
{
- tree t = VEC_index (tree, known_vals, i);
+ tree t = known_vals[i];
if (t && TREE_CODE (t) != TREE_BINFO)
{
struct ipa_replace_map *replace_map;
replace_map = get_replacement_map (t, ipa_get_param (info, i));
if (replace_map)
- VEC_safe_push (ipa_replace_map_p, gc, replace_trees, replace_map);
+ vec_safe_push (replace_trees, replace_map);
}
}
if (aggvals)
ipa_dump_agg_replacement_values (dump_file, aggvals);
}
- gcc_checking_assert (ipa_node_params_vector
- && (VEC_length (ipa_node_params_t,
- ipa_node_params_vector)
+ gcc_checking_assert (ipa_node_params_vector.exists ()
+ && (ipa_node_params_vector.length ()
> (unsigned) cgraph_max_uid));
update_profiling_info (node, new_node);
new_info = IPA_NODE_REF (new_node);
ipcp_discover_new_direct_edges (new_node, known_vals);
- VEC_free (cgraph_edge_p, heap, callers);
+ callers.release ();
return new_node;
}
static void
find_more_scalar_values_for_callers_subset (struct cgraph_node *node,
- VEC (tree, heap) *known_vals,
- VEC (cgraph_edge_p,heap) *callers)
+ vec<tree> known_vals,
+ vec<cgraph_edge_p> callers)
{
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
tree newval = NULL_TREE;
int j;
- if (ipa_get_scalar_lat (info, i)->bottom
- || VEC_index (tree, known_vals, i))
+ if (ipa_get_scalar_lat (info, i)->bottom || known_vals[i])
continue;
- FOR_EACH_VEC_ELT (cgraph_edge_p, callers, j, cs)
+ FOR_EACH_VEC_ELT (callers, j, cs)
{
struct ipa_jump_func *jump_func;
tree t;
fprintf (dump_file, "\n");
}
- VEC_replace (tree, known_vals, i, newval);
+ known_vals[i] = newval;
}
}
}
/* Go through PLATS and create a vector consisting of the values and offsets
   (minus OFFSET) of those lattices that contain only a single value. */
-static VEC (ipa_agg_jf_item_t, heap) *
+static vec<ipa_agg_jf_item_t>
copy_plats_to_inter (struct ipcp_param_lattices *plats, HOST_WIDE_INT offset)
{
- VEC (ipa_agg_jf_item_t, heap) *res = NULL;
+ vec<ipa_agg_jf_item_t> res = vec<ipa_agg_jf_item_t>();
if (!plats->aggs || plats->aggs_contain_variable || plats->aggs_bottom)
- return NULL;
+ return vec<ipa_agg_jf_item_t>();
for (struct ipcp_agg_lattice *aglat = plats->aggs; aglat; aglat = aglat->next)
if (ipa_lat_is_single_const (aglat))
struct ipa_agg_jf_item ti;
ti.offset = aglat->offset - offset;
ti.value = aglat->values->value;
- VEC_safe_push (ipa_agg_jf_item_t, heap, res, ti);
+ res.safe_push (ti);
}
return res;
}
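/* Sketch: a vec<T> can also be returned by value, with an unallocated
   vector standing in for the old NULL result; callers test exists ()
   instead of comparing against NULL.  make_items is hypothetical:

       static vec<int>
       make_items (bool any)
       {
         vec<int> res = vec<int>();    // unallocated == "no result"
         if (!any)
           return vec<int>();          // was 'return NULL;'
         res.safe_push (1);
         return res;                   // returns the handle, not a copy
       }

       vec<int> v = make_items (true);
       if (v.exists ())                // was 'if (v)' on the pointer
         v.release ();
*/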
static void
intersect_with_plats (struct ipcp_param_lattices *plats,
- VEC (ipa_agg_jf_item_t, heap) **inter,
+ vec<ipa_agg_jf_item_t> *inter,
HOST_WIDE_INT offset)
{
struct ipcp_agg_lattice *aglat;
if (!plats->aggs || plats->aggs_contain_variable || plats->aggs_bottom)
{
- VEC_free (ipa_agg_jf_item_t, heap, *inter);
- *inter = NULL;
+ inter->release ();
return;
}
aglat = plats->aggs;
- FOR_EACH_VEC_ELT (ipa_agg_jf_item_t, *inter, k, item)
+ FOR_EACH_VEC_ELT (*inter, k, item)
{
bool found = false;
if (!item->value)
/* Copy aggregate replacement values of NODE (which is an IPA-CP clone) to the
vector result while subtracting OFFSET from the individual value offsets. */
-static VEC (ipa_agg_jf_item_t, heap) *
+static vec<ipa_agg_jf_item_t>
agg_replacements_to_vector (struct cgraph_node *node, HOST_WIDE_INT offset)
{
struct ipa_agg_replacement_value *av;
- VEC (ipa_agg_jf_item_t, heap) *res = NULL;
+ vec<ipa_agg_jf_item_t> res = vec<ipa_agg_jf_item_t>();
for (av = ipa_get_agg_replacements_for_node (node); av; av = av->next)
{
gcc_checking_assert (av->value);
item.offset = av->offset - offset;
item.value = av->value;
- VEC_safe_push (ipa_agg_jf_item_t, heap, res, item);
+ res.safe_push (item);
}
return res;
static void
intersect_with_agg_replacements (struct cgraph_node *node, int index,
- VEC (ipa_agg_jf_item_t, heap) **inter,
+ vec<ipa_agg_jf_item_t> *inter,
HOST_WIDE_INT offset)
{
struct ipa_agg_replacement_value *srcvals;
srcvals = ipa_get_agg_replacements_for_node (node);
if (!srcvals)
{
- VEC_free (ipa_agg_jf_item_t, heap, *inter);
- *inter = NULL;
+ inter->release ();
return;
}
- FOR_EACH_VEC_ELT (ipa_agg_jf_item_t, *inter, i, item)
+ FOR_EACH_VEC_ELT (*inter, i, item)
{
struct ipa_agg_replacement_value *av;
bool found = false;
static struct ipa_agg_replacement_value *
find_aggregate_values_for_callers_subset (struct cgraph_node *node,
- VEC (cgraph_edge_p,heap) *callers)
+ vec<cgraph_edge_p> callers)
{
struct ipa_node_params *info = IPA_NODE_REF (node);
struct ipa_agg_replacement_value *res = NULL;
struct cgraph_edge *cs;
int i, j, count = ipa_get_param_count (info);
- FOR_EACH_VEC_ELT (cgraph_edge_p, callers, j, cs)
+ FOR_EACH_VEC_ELT (callers, j, cs)
{
int c = ipa_get_cs_argument_count (IPA_EDGE_REF (cs));
if (c < count)
for (i = 0; i < count ; i++)
{
struct cgraph_edge *cs;
- VEC (ipa_agg_jf_item_t, heap) *inter = NULL;
+ vec<ipa_agg_jf_item_t> inter = vec<ipa_agg_jf_item_t>();
struct ipa_agg_jf_item *item;
int j;
if (ipa_get_parm_lattices (info, i)->aggs_bottom)
continue;
- FOR_EACH_VEC_ELT (cgraph_edge_p, callers, j, cs)
+ FOR_EACH_VEC_ELT (callers, j, cs)
{
struct ipa_jump_func *jfunc;
jfunc = ipa_get_ith_jump_func (IPA_EDGE_REF (cs), i);
src_idx);
if (agg_pass_through_permissible_p (orig_plats, jfunc))
{
- if (!inter)
+ if (!inter.exists ())
inter = agg_replacements_to_vector (cs->caller, 0);
else
intersect_with_agg_replacements (cs->caller, src_idx,
/* Currently we do not produce clobber aggregate jump
functions, adjust when we do. */
gcc_checking_assert (!jfunc->agg.items);
- if (!inter)
+ if (!inter.exists ())
inter = copy_plats_to_inter (src_plats, 0);
else
intersect_with_plats (src_plats, &inter, 0);
if (info->ipcp_orig_node)
{
- if (!inter)
+ if (!inter.exists ())
inter = agg_replacements_to_vector (cs->caller, delta);
else
intersect_with_agg_replacements (cs->caller, i, &inter,
/* Currently we do not produce clobber aggregate jump
functions, adjust when we do. */
gcc_checking_assert (!src_plats->aggs || !jfunc->agg.items);
- if (!inter)
+ if (!inter.exists ())
inter = copy_plats_to_inter (src_plats, delta);
else
intersect_with_plats (src_plats, &inter, delta);
{
int k;
- if (!inter)
- inter = VEC_copy (ipa_agg_jf_item, heap, jfunc->agg.items);
+ if (!inter.exists ())
+ for (unsigned i = 0; i < jfunc->agg.items->length (); i++)
+ inter.safe_push ((*jfunc->agg.items)[i]);
else
- FOR_EACH_VEC_ELT (ipa_agg_jf_item_t, inter, k, item)
+ FOR_EACH_VEC_ELT (inter, k, item)
{
int l = 0;
bool found = false;
if (!item->value)
continue;
- while ((unsigned) l < VEC_length (ipa_agg_jf_item_t,
- jfunc->agg.items))
+ while ((unsigned) l < jfunc->agg.items->length ())
{
struct ipa_agg_jf_item *ti;
- ti = &VEC_index (ipa_agg_jf_item_t,
- jfunc->agg.items, l);
+ ti = &(*jfunc->agg.items)[l];
if (ti->offset > item->offset)
break;
if (ti->offset == item->offset)
else
goto next_param;
- if (!inter)
+ if (!inter.exists ())
goto next_param;
}
- FOR_EACH_VEC_ELT (ipa_agg_jf_item_t, inter, j, item)
+ FOR_EACH_VEC_ELT (inter, j, item)
{
struct ipa_agg_replacement_value *v;
}
next_param:
- if (inter)
- VEC_free (ipa_agg_jf_item, heap, inter);
+ if (inter.exists ())
+ inter.release ();
}
return res;
}
/* Turn KNOWN_AGGS into a list of aggregate replacement values. */
static struct ipa_agg_replacement_value *
-known_aggs_to_agg_replacement_list (VEC (ipa_agg_jump_function_t,
- heap) *known_aggs)
+known_aggs_to_agg_replacement_list (vec<ipa_agg_jump_function_t> known_aggs)
{
struct ipa_agg_replacement_value *res = NULL;
struct ipa_agg_jump_function *aggjf;
struct ipa_agg_jf_item *item;
int i, j;
- FOR_EACH_VEC_ELT (ipa_agg_jump_function_t, known_aggs, i, aggjf)
- FOR_EACH_VEC_ELT (ipa_agg_jf_item_t, aggjf->items, j, item)
+ FOR_EACH_VEC_ELT (known_aggs, i, aggjf)
+ FOR_EACH_VEC_SAFE_ELT (aggjf->items, j, item)
{
struct ipa_agg_replacement_value *v;
v = ggc_alloc_ipa_agg_replacement_value ();
struct ipa_jump_func *jump_func;
tree val, t;
- val = VEC_index (tree, dest_info->known_vals, i);
+ val = dest_info->known_vals[i];
if (!val)
continue;
/* Copy KNOWN_BINFOS to KNOWN_VALS. */
static void
-move_binfos_to_values (VEC (tree, heap) *known_vals,
- VEC (tree, heap) *known_binfos)
+move_binfos_to_values (vec<tree> known_vals,
+ vec<tree> known_binfos)
{
tree t;
int i;
- for (i = 0; VEC_iterate (tree, known_binfos, i, t); i++)
+ for (i = 0; known_binfos.iterate (i, &t); i++)
if (t)
- VEC_replace (tree, known_vals, i, t);
+ known_vals[i] = t;
}
/* Return true if there is a replacement equivalent to VALUE, INDEX and OFFSET
static bool
decide_about_value (struct cgraph_node *node, int index, HOST_WIDE_INT offset,
- struct ipcp_value *val, VEC (tree, heap) *known_csts,
- VEC (tree, heap) *known_binfos)
+ struct ipcp_value *val, vec<tree> known_csts,
+ vec<tree> known_binfos)
{
struct ipa_agg_replacement_value *aggvals;
int freq_sum, caller_count;
gcov_type count_sum;
- VEC (cgraph_edge_p, heap) *callers;
- VEC (tree, heap) *kv;
+ vec<cgraph_edge_p> callers;
+ vec<tree> kv;
if (val->spec_node)
{
cgraph_node_name (node), node->uid);
callers = gather_edges_for_value (val, caller_count);
- kv = VEC_copy (tree, heap, known_csts);
+ kv = known_csts.copy ();
move_binfos_to_values (kv, known_binfos);
if (offset == -1)
- VEC_replace (tree, kv, index, val->value);
+ kv[index] = val->value;
find_more_scalar_values_for_callers_subset (node, kv, callers);
aggvals = find_aggregate_values_for_callers_subset (node, callers);
gcc_checking_assert (offset == -1
{
struct ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
- VEC (tree, heap) *known_csts, *known_binfos;
- VEC (ipa_agg_jump_function_t, heap) *known_aggs = NULL;
+ vec<tree> known_csts, known_binfos;
+ vec<ipa_agg_jump_function_t> known_aggs = vec<ipa_agg_jump_function_t>();
bool ret = false;
if (count == 0)
struct ipcp_value *val;
if (!lat->bottom
- && !VEC_index (tree, known_csts, i)
- && !VEC_index (tree, known_binfos, i))
+ && !known_csts[i]
+ && !known_binfos[i])
for (val = lat->values; val; val = val->next)
ret |= decide_about_value (node, i, -1, val, known_csts,
known_binfos);
if (info->clone_for_all_contexts)
{
- VEC (cgraph_edge_p, heap) *callers;
+ vec<cgraph_edge_p> callers;
if (dump_file)
fprintf (dump_file, " - Creating a specialized node of %s/%i "
ret = true;
}
else
- VEC_free (tree, heap, known_csts);
+ known_csts.release ();
- VEC_free (tree, heap, known_binfos);
+ known_binfos.release ();
return ret;
}
/* Free all IPCP structures. */
free_toporder_info (&topo);
- VEC_free (cgraph_edge_p, heap, next_edge_clone);
+ next_edge_clone.release ();
cgraph_remove_edge_duplication_hook (edge_duplication_hook_holder);
ipa_free_all_structures_after_ipa_cp ();
if (dump_file)
/* VECtor holding inline summaries.
In GGC memory because conditions might point to constant trees. */
-VEC(inline_summary_t,gc) *inline_summary_vec;
-VEC(inline_edge_summary_t,heap) *inline_edge_summary_vec;
+vec<inline_summary_t, va_gc> *inline_summary_vec;
+vec<inline_edge_summary_t> inline_edge_summary_vec;
/* Cached node/edge growths. */
-VEC(int,heap) *node_growth_cache;
-VEC(edge_growth_cache_entry,heap) *edge_growth_cache;
+vec<int> node_growth_cache;
+vec<edge_growth_cache_entry> edge_growth_cache;
/* Edge predicates goes here. */
static alloc_pool edge_predicate_pool;
}
gcc_checking_assert (operand_num >= 0);
- for (i = 0; VEC_iterate (condition, summary->conds, i, c); i++)
+ for (i = 0; vec_safe_iterate (summary->conds, i, &c); i++)
{
if (c->operand_num == operand_num
&& c->code == code
new_cond.agg_contents = agg_contents;
new_cond.by_ref = by_ref;
new_cond.offset = offset;
- VEC_safe_push (condition, gc, summary->conds, new_cond);
+ vec_safe_push (summary->conds, new_cond);
return single_cond_predicate (i + predicate_first_dynamic_condition);
}
condition *cc1;
if (!(clause & (1 << c1)))
continue;
- cc1 = &VEC_index (condition,
- conditions,
- c1 - predicate_first_dynamic_condition);
+ cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
/* We have no way to represent !CHANGED and !IS_NOT_CONSTANT
and thus there is no point for looking for them. */
if (cc1->code == CHANGED
for (c2 = c1 + 1; c2 <= NUM_CONDITIONS; c2++)
if (clause & (1 << c2))
{
- condition *cc1 = &VEC_index (condition,
- conditions,
- c1 - predicate_first_dynamic_condition);
- condition *cc2 = &VEC_index (condition,
- conditions,
- c2 - predicate_first_dynamic_condition);
+ condition *cc1 = &(*conditions)[c1 - predicate_first_dynamic_condition];
+ condition *cc2 = &(*conditions)[c2 - predicate_first_dynamic_condition];
if (cc1->operand_num == cc2->operand_num
&& cc1->val == cc2->val
&& cc2->code != IS_NOT_CONSTANT
static int
predicate_probability (conditions conds,
struct predicate *p, clause_t possible_truths,
- VEC (inline_param_summary_t, heap) *inline_param_summary)
+ vec<inline_param_summary_t> inline_param_summary)
{
int i;
int combined_prob = REG_BR_PROB_BASE;
{
int this_prob = 0;
int i2;
- if (!inline_param_summary)
+ if (!inline_param_summary.exists ())
return REG_BR_PROB_BASE;
for (i2 = 0; i2 < NUM_CONDITIONS; i2++)
if ((p->clause[i] & possible_truths) & (1 << i2))
{
if (i2 >= predicate_first_dynamic_condition)
{
- condition *c = &VEC_index
- (condition, conds,
- i2 - predicate_first_dynamic_condition);
+ condition *c = &(*conds)[i2 - predicate_first_dynamic_condition];
if (c->code == CHANGED
&& (c->operand_num
- < (int) VEC_length (inline_param_summary_t,
- inline_param_summary)))
+ < (int) inline_param_summary.length ()))
{
- int iprob = VEC_index (inline_param_summary_t,
- inline_param_summary,
- c->operand_num).change_prob;
+ int iprob = inline_param_summary[c->operand_num].change_prob;
this_prob = MAX (this_prob, iprob);
}
else
fprintf (f, "not inlined");
else
{
- c = &VEC_index (condition, conditions,
- cond - predicate_first_dynamic_condition);
+ c = &(*conditions)[cond - predicate_first_dynamic_condition];
fprintf (f, "op%i", c->operand_num);
if (c->agg_contents)
fprintf (f, "[%soffset: " HOST_WIDE_INT_PRINT_DEC "]",
time = MAX_TIME * INLINE_TIME_SCALE;
gcc_assert (time >= 0);
- for (i = 0; VEC_iterate (size_time_entry, summary->entry, i, e); i++)
+ for (i = 0; vec_safe_iterate (summary->entry, i, &e); i++)
if (predicates_equal_p (&e->predicate, pred))
{
found = true;
{
i = 0;
found = true;
- e = &VEC_index (size_time_entry, summary->entry, 0);
+ e = &(*summary->entry)[0];
gcc_assert (!e->predicate.clause[0]);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\t\tReached limit on number of entries, ignoring the predicate.");
new_entry.size = size;
new_entry.time = time;
new_entry.predicate = *pred;
- VEC_safe_push (size_time_entry, gc, summary->entry, new_entry);
+ vec_safe_push (summary->entry, new_entry);
}
else
{
static clause_t
evaluate_conditions_for_known_args (struct cgraph_node *node,
bool inline_p,
- VEC (tree, heap) *known_vals,
- VEC (ipa_agg_jump_function_p, heap) *known_aggs)
+ vec<tree> known_vals,
+ vec<ipa_agg_jump_function_p> known_aggs)
{
clause_t clause = inline_p ? 0 : 1 << predicate_not_inlined_condition;
struct inline_summary *info = inline_summary (node);
int i;
struct condition *c;
- for (i = 0; VEC_iterate (condition, info->conds, i, c); i++)
+ for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
{
tree val;
tree res;
(especially for K&R style programs). So bound check here (we assume
known_aggs vector, if it exists, has the same length as
known_vals). */
- gcc_checking_assert (!known_aggs
- || (VEC_length (tree, known_vals)
- == VEC_length (ipa_agg_jump_function_p,
- known_aggs)));
- if (c->operand_num >= (int) VEC_length (tree, known_vals))
+ gcc_checking_assert (!known_aggs.exists ()
+ || (known_vals.length () == known_aggs.length ()));
+ if (c->operand_num >= (int) known_vals.length ())
{
clause |= 1 << (i + predicate_first_dynamic_condition);
continue;
if (c->code == CHANGED
&& !c->by_ref
- && (VEC_index (tree, known_vals, c->operand_num)
+ && (known_vals[c->operand_num]
== error_mark_node))
continue;
- if (known_aggs)
+ if (known_aggs.exists ())
{
- agg = VEC_index (ipa_agg_jump_function_p, known_aggs,
- c->operand_num);
+ agg = known_aggs[c->operand_num];
val = ipa_find_agg_cst_for_param (agg, c->offset, c->by_ref);
}
else
}
else
{
- val = VEC_index (tree, known_vals, c->operand_num);
+ val = known_vals[c->operand_num];
if (val == error_mark_node && c->code != CHANGED)
val = NULL_TREE;
}
static void
evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
- clause_t *clause_ptr,
- VEC (tree, heap) **known_vals_ptr,
- VEC (tree, heap) **known_binfos_ptr,
- VEC (ipa_agg_jump_function_p, heap) **known_aggs_ptr)
+ clause_t *clause_ptr,
+ vec<tree> *known_vals_ptr,
+ vec<tree> *known_binfos_ptr,
+ vec<ipa_agg_jump_function_p> *known_aggs_ptr)
{
struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
struct inline_summary *info = inline_summary (callee);
- VEC (tree, heap) *known_vals = NULL;
- VEC (ipa_agg_jump_function_p, heap) *known_aggs = NULL;
+ vec<tree> known_vals = vec<tree>();
+ vec<ipa_agg_jump_function_p> known_aggs
+ = vec<ipa_agg_jump_function_p>();
if (clause_ptr)
*clause_ptr = inline_p ? 0 : 1 << predicate_not_inlined_condition;
if (known_vals_ptr)
- *known_vals_ptr = NULL;
+ known_vals_ptr->create (0);
if (known_binfos_ptr)
- *known_binfos_ptr = NULL;
+ known_binfos_ptr->create (0);
- if (ipa_node_params_vector
+ if (ipa_node_params_vector.exists ()
&& !e->call_stmt_cannot_inline_p
&& ((clause_ptr && info->conds) || known_vals_ptr || known_binfos_ptr))
{
struct ipa_node_params *parms_info;
struct ipa_edge_args *args = IPA_EDGE_REF (e);
parms_info = IPA_NODE_REF (e->caller);
if (count && (info->conds || known_vals_ptr))
- VEC_safe_grow_cleared (tree, heap, known_vals, count);
+ known_vals.safe_grow_cleared (count);
if (count && (info->conds || known_aggs_ptr))
- VEC_safe_grow_cleared (ipa_agg_jump_function_p, heap, known_aggs,
- count);
+ known_aggs.safe_grow_cleared (count);
if (count && known_binfos_ptr)
- VEC_safe_grow_cleared (tree, heap, *known_binfos_ptr, count);
+ known_binfos_ptr->safe_grow_cleared (count);
for (i = 0; i < count; i++)
{
tree cst = ipa_value_from_jfunc (parms_info, jf);
if (cst)
{
- if (known_vals && TREE_CODE (cst) != TREE_BINFO)
- VEC_replace (tree, known_vals, i, cst);
+ if (known_vals.exists () && TREE_CODE (cst) != TREE_BINFO)
+ known_vals[i] = cst;
else if (known_binfos_ptr != NULL && TREE_CODE (cst) == TREE_BINFO)
- VEC_replace (tree, *known_binfos_ptr, i, cst);
+ (*known_binfos_ptr)[i] = cst;
}
- else if (inline_p
- && !VEC_index (inline_param_summary_t,
- es->param,
- i).change_prob)
- VEC_replace (tree, known_vals, i, error_mark_node);
+ else if (inline_p && !es->param[i].change_prob)
+ known_vals[i] = error_mark_node;
/* TODO: When IPA-CP starts propagating and merging aggregate jump
functions, use its knowledge of the caller too, just like the
scalar case above. */
- VEC_replace (ipa_agg_jump_function_p, known_aggs, i, &jf->agg);
+ known_aggs[i] = &jf->agg;
}
}
if (known_vals_ptr)
*known_vals_ptr = known_vals;
else
- VEC_free (tree, heap, known_vals);
+ known_vals.release ();
if (known_aggs_ptr)
*known_aggs_ptr = known_aggs;
else
- VEC_free (ipa_agg_jump_function_p, heap, known_aggs);
+ known_aggs.release ();
}
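/* Sketch: out-parameters change from VEC (T, heap) ** to vec<T> *.  The
   callee initializes through the pointer with create (0) or
   safe_grow_cleared instead of assigning NULL, and element writes use
   indexing instead of VEC_replace.  fill_vals is hypothetical:

       static void
       fill_vals (vec<tree> *known_vals_ptr, int count)
       {
         known_vals_ptr->create (0);                 // was '*ptr = NULL;'
         known_vals_ptr->safe_grow_cleared (count);  // count cleared slots
         if (count > 0)
           (*known_vals_ptr)[0] = error_mark_node;   // was VEC_replace
       }
*/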
edge_duplication_hook_holder =
cgraph_add_edge_duplication_hook (&inline_edge_duplication_hook, NULL);
- if (VEC_length (inline_summary_t, inline_summary_vec)
- <= (unsigned) cgraph_max_uid)
- VEC_safe_grow_cleared (inline_summary_t, gc,
- inline_summary_vec, cgraph_max_uid + 1);
- if (VEC_length (inline_edge_summary_t, inline_edge_summary_vec)
- <= (unsigned) cgraph_edge_max_uid)
- VEC_safe_grow_cleared (inline_edge_summary_t, heap,
- inline_edge_summary_vec, cgraph_edge_max_uid + 1);
+ if (vec_safe_length (inline_summary_vec) <= (unsigned) cgraph_max_uid)
+ vec_safe_grow_cleared (inline_summary_vec, cgraph_max_uid + 1);
+ if (inline_edge_summary_vec.length () <= (unsigned) cgraph_edge_max_uid)
+ inline_edge_summary_vec.safe_grow_cleared (cgraph_edge_max_uid + 1);
if (!edge_predicate_pool)
edge_predicate_pool = create_alloc_pool ("edge predicates",
sizeof (struct predicate),
static void
reset_inline_edge_summary (struct cgraph_edge *e)
{
- if (e->uid
- < (int)VEC_length (inline_edge_summary_t, inline_edge_summary_vec))
+ if (e->uid < (int)inline_edge_summary_vec.length ())
{
struct inline_edge_summary *es = inline_edge_summary (e);
if (es->predicate)
pool_free (edge_predicate_pool, es->predicate);
es->predicate = NULL;
- VEC_free (inline_param_summary_t, heap, es->param);
+ es->param.release ();
}
}
pool_free (edge_predicate_pool, info->array_index);
info->array_index = NULL;
}
- VEC_free (condition, gc, info->conds);
- VEC_free (size_time_entry,gc, info->entry);
+ vec_free (info->conds);
+ vec_free (info->entry);
for (e = node->callees; e; e = e->next_callee)
reset_inline_edge_summary (e);
for (e = node->indirect_calls; e; e = e->next_callee)
inline_node_removal_hook (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
struct inline_summary *info;
- if (VEC_length (inline_summary_t, inline_summary_vec)
- <= (unsigned)node->uid)
+ if (vec_safe_length (inline_summary_vec) <= (unsigned)node->uid)
return;
info = inline_summary (node);
reset_inline_summary (node);
sizeof (struct inline_summary));
/* TODO: as an optimization, we may avoid copying conditions
that are known to be false or true. */
- info->conds = VEC_copy (condition, gc, info->conds);
+ info->conds = vec_safe_copy (info->conds);
/* When there are any replacements in the function body, see if we can figure
out that something was optimized out. */
- if (ipa_node_params_vector && dst->clone.tree_map)
+ if (ipa_node_params_vector.exists () && dst->clone.tree_map)
{
- VEC(size_time_entry,gc) *entry = info->entry;
+ vec<size_time_entry, va_gc> *entry = info->entry;
/* Use SRC parm info since it may not be copied yet. */
struct ipa_node_params *parms_info = IPA_NODE_REF (src);
- VEC (tree, heap) *known_vals = NULL;
+ vec<tree> known_vals = vec<tree>();
int count = ipa_get_param_count (parms_info);
int i,j;
clause_t possible_truths;
struct cgraph_edge *edge;
info->entry = 0;
- VEC_safe_grow_cleared (tree, heap, known_vals, count);
+ known_vals.safe_grow_cleared (count);
for (i = 0; i < count; i++)
{
tree t = ipa_get_param (parms_info, i);
struct ipa_replace_map *r;
- for (j = 0;
- VEC_iterate (ipa_replace_map_p, dst->clone.tree_map, j, r);
- j++)
+ for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
{
if (r->old_tree == t
&& r->replace_p
&& !r->ref_p)
{
- VEC_replace (tree, known_vals, i, r->new_tree);
+ known_vals[i] = r->new_tree;
break;
}
}
}
possible_truths = evaluate_conditions_for_known_args (dst, false,
- known_vals, NULL);
- VEC_free (tree, heap, known_vals);
+ known_vals,
+ vec<ipa_agg_jump_function_p>());
+ known_vals.release ();
account_size_time (info, 0, 0, &true_pred);
to be false.
TODO: as an optimization, we can also eliminate conditions known
to be true. */
- for (i = 0; VEC_iterate (size_time_entry, entry, i, e); i++)
+ for (i = 0; vec_safe_iterate (entry, i, &e); i++)
{
struct predicate new_predicate;
new_predicate = remap_predicate_after_duplication (&e->predicate,
}
else
{
- info->entry = VEC_copy (size_time_entry, gc, info->entry);
+ info->entry = vec_safe_copy (info->entry);
if (info->loop_iterations)
{
predicate p = *info->loop_iterations;
sizeof (struct inline_edge_summary));
info->predicate = NULL;
edge_set_predicate (dst, srcinfo->predicate);
- info->param = VEC_copy (inline_param_summary_t, heap, srcinfo->param);
+ info->param = srcinfo->param.copy ();
}
static void
inline_edge_removal_hook (struct cgraph_edge *edge, void *data ATTRIBUTE_UNUSED)
{
- if (edge_growth_cache)
+ if (edge_growth_cache.exists ())
reset_edge_growth_cache (edge);
reset_inline_edge_summary (edge);
}
initialize_growth_caches (void)
{
if (cgraph_edge_max_uid)
- VEC_safe_grow_cleared (edge_growth_cache_entry, heap, edge_growth_cache,
- cgraph_edge_max_uid);
+ edge_growth_cache.safe_grow_cleared (cgraph_edge_max_uid);
if (cgraph_max_uid)
- VEC_safe_grow_cleared (int, heap, node_growth_cache, cgraph_max_uid);
+ node_growth_cache.safe_grow_cleared (cgraph_max_uid);
}
void
free_growth_caches (void)
{
- VEC_free (edge_growth_cache_entry, heap, edge_growth_cache);
- edge_growth_cache = 0;
- VEC_free (int, heap, node_growth_cache);
- node_growth_cache = 0;
+ edge_growth_cache.release ();
+ node_growth_cache.release ();
}
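/* Sketch: release () frees the storage and leaves the vector unallocated,
   so the old two-step "VEC_free, then assign 0" above collapses into a
   single call, and exists () reports false afterwards:

       vec<int> cache;
       cache.safe_grow_cleared (128);          // allocates on demand
       gcc_checking_assert (cache.exists ());
       cache.release ();                       // free + reset in one step
       gcc_checking_assert (!cache.exists ());
*/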
}
else
fprintf (f, "\n");
- if (es->param)
- for (i = 0; i < (int)VEC_length (inline_param_summary_t, es->param);
- i++)
+ if (es->param.exists ())
+ for (i = 0; i < (int)es->param.length (); i++)
{
- int prob = VEC_index (inline_param_summary_t,
- es->param, i).change_prob;
+ int prob = es->param[i].change_prob;
if (!prob)
fprintf (f, "%*s op%i is compile time invariant\n",
if (s->scc_no)
fprintf (f, " In SCC: %i\n",
(int) s->scc_no);
- for (i = 0;
- VEC_iterate (size_time_entry, s->entry, i, e);
- i++)
+ for (i = 0; vec_safe_iterate (s->entry, i, &e); i++)
{
fprintf (f, " size:%f, time:%f, predicate:",
(double) e->size / INLINE_SIZE_SCALE,
/* We keep info about constantness of SSA names. */
typedef struct predicate predicate_t;
-DEF_VEC_O (predicate_t);
-DEF_VEC_ALLOC_O (predicate_t, heap);
/* Return predicate specifying when the STMT might have result that is not
a compile time constant. */
will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
struct inline_summary *summary,
tree expr,
- VEC (predicate_t, heap) *nonconstant_names)
+ vec<predicate_t> nonconstant_names)
{
tree parm;
int index;
if (is_gimple_min_invariant (expr))
return false_predicate ();
if (TREE_CODE (expr) == SSA_NAME)
- return VEC_index (predicate_t, nonconstant_names,
- SSA_NAME_VERSION (expr));
+ return nonconstant_names[SSA_NAME_VERSION (expr)];
if (BINARY_CLASS_P (expr)
|| COMPARISON_CLASS_P (expr))
{
will_be_nonconstant_predicate (struct ipa_node_params *info,
struct inline_summary *summary,
gimple stmt,
- VEC (predicate_t, heap) *nonconstant_names)
+ vec<predicate_t> nonconstant_names)
{
struct predicate p = true_predicate ();
ssa_op_iter iter;
return p;
/* If we know when operand is constant,
we still can say something useful. */
- if (!true_predicate_p (&VEC_index (predicate_t, nonconstant_names,
- SSA_NAME_VERSION (use))))
+ if (!true_predicate_p (&nonconstant_names[SSA_NAME_VERSION (use)]))
continue;
return p;
}
continue;
}
else
- p = VEC_index (predicate_t, nonconstant_names,
- SSA_NAME_VERSION (use));
+ p = nonconstant_names[SSA_NAME_VERSION (use)];
op_non_const = or_predicates (summary->conds, &p, &op_non_const);
}
if (gimple_code (stmt) == GIMPLE_ASSIGN
&& TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
- VEC_replace (predicate_t, nonconstant_names,
- SSA_NAME_VERSION (gimple_assign_lhs (stmt)), op_non_const);
+ nonconstant_names[SSA_NAME_VERSION (gimple_assign_lhs (stmt))]
+ = op_non_const;
return op_non_const;
}
phi_result_unknown_predicate (struct ipa_node_params *info,
struct inline_summary *summary, basic_block bb,
struct predicate *p,
- VEC (predicate_t, heap) *nonconstant_names)
+ vec<predicate_t> nonconstant_names)
{
edge e;
edge_iterator ei;
static void
predicate_for_phi_result (struct inline_summary *summary, gimple phi,
struct predicate *p,
- VEC (predicate_t, heap) *nonconstant_names)
+ vec<predicate_t> nonconstant_names)
{
unsigned i;
{
gcc_assert (TREE_CODE (arg) == SSA_NAME);
*p = or_predicates (summary->conds, p,
- &VEC_index (predicate_t, nonconstant_names,
- SSA_NAME_VERSION (arg)));
+ &nonconstant_names[SSA_NAME_VERSION (arg)]);
if (true_predicate_p (p))
return;
}
fprintf (dump_file, "\t\tphi predicate: ");
dump_predicate (dump_file, summary->conds, p);
}
- VEC_replace (predicate_t, nonconstant_names,
- SSA_NAME_VERSION (gimple_phi_result (phi)), *p);
+ nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
}
/* Return predicate specifying when array index in access OP becomes non-constant. */
static struct predicate
array_index_predicate (struct inline_summary *info,
- VEC (predicate_t, heap) *nonconstant_names, tree op)
+ vec<predicate_t> nonconstant_names, tree op)
{
struct predicate p = false_predicate ();
while (handled_component_p (op))
{
if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
p = or_predicates (info->conds, &p,
- &VEC_index (predicate_t, nonconstant_names,
- SSA_NAME_VERSION (TREE_OPERAND (op, 1))));
+ &nonconstant_names[
+ SSA_NAME_VERSION (TREE_OPERAND (op, 1))]);
}
op = TREE_OPERAND (op, 0);
}
struct inline_summary *info = inline_summary (node);
struct predicate bb_predicate;
struct ipa_node_params *parms_info = NULL;
- VEC (predicate_t, heap) *nonconstant_names = NULL;
+ vec<predicate_t> nonconstant_names = vec<predicate_t>();
int nblocks, n;
int *order;
predicate array_index = true_predicate ();
- info->conds = 0;
- info->entry = 0;
+ info->conds = NULL;
+ info->entry = NULL;
if (optimize && !early)
{
calculate_dominance_info (CDI_DOMINATORS);
loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
- if (ipa_node_params_vector)
+ if (ipa_node_params_vector.exists ())
{
parms_info = IPA_NODE_REF (node);
- VEC_safe_grow_cleared (predicate_t, heap, nonconstant_names,
- VEC_length (tree, SSANAMES (my_function)));
+ nonconstant_names.safe_grow_cleared (SSANAMES (my_function)->length ());
}
}
dump_predicate (dump_file, info->conds, &bb_predicate);
}
- if (parms_info && nonconstant_names)
+ if (parms_info && nonconstant_names.exists ())
{
struct predicate phi_predicate;
bool first_phi = true;
((double)freq)/CGRAPH_FREQ_BASE, this_size, this_time);
}
- if (gimple_assign_load_p (stmt) && nonconstant_names)
+ if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
{
struct predicate this_array_index;
this_array_index = array_index_predicate (info, nonconstant_names,
if (!false_predicate_p (&this_array_index))
array_index = and_predicates (info->conds, &array_index, &this_array_index);
}
- if (gimple_store_p (stmt) && nonconstant_names)
+ if (gimple_store_p (stmt) && nonconstant_names.exists ())
{
struct predicate this_array_index;
this_array_index = array_index_predicate (info, nonconstant_names,
/* Special case: results of BUILT_IN_CONSTANT_P will be always
resolved as constant. We however don't want to optimize
out the cgraph edges. */
- if (nonconstant_names
+ if (nonconstant_names.exists ()
&& gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
&& gimple_call_lhs (stmt)
&& TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
{
struct predicate false_p = false_predicate ();
- VEC_replace (predicate_t, nonconstant_names,
- SSA_NAME_VERSION (gimple_call_lhs (stmt)),
- false_p);
+ nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
+ = false_p;
}
- if (ipa_node_params_vector)
+ if (ipa_node_params_vector.exists ())
{
int count = gimple_call_num_args (stmt);
int i;
if (count)
- VEC_safe_grow_cleared (inline_param_summary_t, heap,
- es->param, count);
+ es->param.safe_grow_cleared (count);
for (i = 0; i < count; i++)
{
int prob = param_change_prob (stmt, i);
gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
- VEC_index (inline_param_summary_t,
- es->param, i).change_prob = prob;
+ es->param[i].change_prob = prob;
}
}
time = MAX_TIME;
free (order);
- if (!early && nonconstant_names)
+ if (!early && nonconstant_names.exists ())
{
struct loop *loop;
loop_iterator li;
scev_initialize ();
FOR_EACH_LOOP (li, loop, 0)
{
- VEC (edge, heap) *exits;
+ vec<edge> exits;
edge ex;
unsigned int j, i;
struct tree_niter_desc niter_desc;
bb_predicate = *(struct predicate *)loop->header->aux;
exits = get_loop_exit_edges (loop);
- FOR_EACH_VEC_ELT (edge, exits, j, ex)
+ FOR_EACH_VEC_ELT (exits, j, ex)
if (number_of_iterations_exit (loop, ex, &niter_desc, false)
&& !is_gimple_min_invariant (niter_desc.niter))
{
independent predicate. */
loop_iterations = and_predicates (info->conds, &loop_iterations, &will_be_nonconstant);
}
- VEC_free (edge, heap, exits);
+ exits.release ();
for (i = 0; i < loop->num_nodes; i++)
{
}
inline_summary (node)->self_time = time;
inline_summary (node)->self_size = size;
- VEC_free (predicate_t, heap, nonconstant_names);
+ nonconstant_names.release ();
if (optimize && !early)
{
loop_optimizer_finalize ();
static bool
estimate_edge_devirt_benefit (struct cgraph_edge *ie,
int *size, int *time,
- VEC (tree, heap) *known_vals,
- VEC (tree, heap) *known_binfos,
- VEC (ipa_agg_jump_function_p, heap) *known_aggs)
+ vec<tree> known_vals,
+ vec<tree> known_binfos,
+ vec<ipa_agg_jump_function_p> known_aggs)
{
tree target;
struct cgraph_node *callee;
struct inline_summary *isummary;
- if (!known_vals && !known_binfos)
+ if (!known_vals.exists () && !known_binfos.exists ())
return false;
if (!flag_indirect_inlining)
return false;
static inline void
estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *time,
int prob,
- VEC (tree, heap) *known_vals,
- VEC (tree, heap) *known_binfos,
- VEC (ipa_agg_jump_function_p, heap) *known_aggs,
+ vec<tree> known_vals,
+ vec<tree> known_binfos,
+ vec<ipa_agg_jump_function_p> known_aggs,
inline_hints *hints)
{
estimate_calls_size_and_time (struct cgraph_node *node, int *size, int *time,
inline_hints *hints,
clause_t possible_truths,
- VEC (tree, heap) *known_vals,
- VEC (tree, heap) *known_binfos,
- VEC (ipa_agg_jump_function_p, heap) *known_aggs)
+ vec<tree> known_vals,
+ vec<tree> known_binfos,
+ vec<ipa_agg_jump_function_p> known_aggs)
{
struct cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
static void
estimate_node_size_and_time (struct cgraph_node *node,
clause_t possible_truths,
- VEC (tree, heap) *known_vals,
- VEC (tree, heap) *known_binfos,
- VEC (ipa_agg_jump_function_p, heap) *known_aggs,
+ vec<tree> known_vals,
+ vec<tree> known_binfos,
+ vec<ipa_agg_jump_function_p> known_aggs,
int *ret_size, int *ret_time,
inline_hints *ret_hints,
- VEC (inline_param_summary_t, heap)
- *inline_param_summary)
+ vec<inline_param_summary_t>
+ inline_param_summary)
{
struct inline_summary *info = inline_summary (node);
size_time_entry *e;
for (i = predicate_not_inlined_condition;
i < (predicate_first_dynamic_condition
- + (int)VEC_length (condition, info->conds)); i++)
+ + (int)vec_safe_length (info->conds)); i++)
if (!(possible_truths & (1 << i)))
{
if (found)
}
}
- for (i = 0; VEC_iterate (size_time_entry, info->entry, i, e); i++)
+ for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
if (evaluate_predicate (&e->predicate, possible_truths))
{
size += e->size;
gcc_checking_assert (e->time >= 0);
gcc_checking_assert (time >= 0);
- if (!inline_param_summary)
+ if (!inline_param_summary.exists ())
time += e->time;
else
{
void
estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
- VEC (tree, heap) *known_vals,
- VEC (tree, heap) *known_binfos,
- VEC (ipa_agg_jump_function_p, heap) *known_aggs,
+ vec<tree> known_vals,
+ vec<tree> known_binfos,
+ vec<ipa_agg_jump_function_p> known_aggs,
int *ret_size, int *ret_time,
inline_hints *hints)
{
clause = evaluate_conditions_for_known_args (node, false, known_vals,
known_aggs);
estimate_node_size_and_time (node, clause, known_vals, known_binfos,
- known_aggs, ret_size, ret_time, hints, NULL);
+ known_aggs, ret_size, ret_time, hints,
+ vec<inline_param_summary_t>());
}
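Note that vec<T> parameters are now passed by value: the object is
essentially a pointer to the real storage, so copying it does not copy the
elements, and callers that used to pass a NULL VEC (T, heap) * pass a
default-constructed vector instead, as in the call above. A sketch with a
hypothetical callee:

  static void
  consume (vec<tree> known_vals)	/* By value: copies the handle only.  */
  {
    if (!known_vals.exists ())		/* Default-constructed == old NULL.  */
      return;
    /* ... use known_vals[0], known_vals.length (), ...  */
  }

  consume (vec<tree>());		/* Replaces passing NULL.  */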
/* Translate all conditions from callee representation into caller
remap_predicate (struct inline_summary *info,
struct inline_summary *callee_info,
struct predicate *p,
- VEC (int, heap) *operand_map,
- VEC (int, heap) *offset_map,
+ vec<int> operand_map,
+ vec<int> offset_map,
clause_t possible_truths,
struct predicate *toplev_predicate)
{
{
struct condition *c;
- c = &VEC_index (condition, callee_info->conds,
- cond - predicate_first_dynamic_condition);
+ c = &(*callee_info->conds)[cond
+ - predicate_first_dynamic_condition];
/* See if we can remap condition operand to caller's operand.
Otherwise give up. */
- if (!operand_map
- || (int)VEC_length (int, operand_map) <= c->operand_num
- || VEC_index (int, operand_map, c->operand_num) == -1
+ if (!operand_map.exists ()
+ || (int)operand_map.length () <= c->operand_num
+ || operand_map[c->operand_num] == -1
/* TODO: For non-aggregate conditions, adding an offset is
basically an arithmetic jump function processing which
we should support in future. */
|| ((!c->agg_contents || !c->by_ref)
- && VEC_index (int, offset_map, c->operand_num) > 0)
+ && offset_map[c->operand_num] > 0)
|| (c->agg_contents && c->by_ref
- && VEC_index (int, offset_map, c->operand_num) < 0))
+ && offset_map[c->operand_num] < 0))
cond_predicate = true_predicate ();
else
{
struct agg_position_info ap;
- HOST_WIDE_INT offset_delta = VEC_index (int, offset_map,
- c->operand_num);
+ HOST_WIDE_INT offset_delta = offset_map[c->operand_num];
if (offset_delta < 0)
{
gcc_checking_assert (!c->agg_contents || !c->by_ref);
ap.agg_contents = c->agg_contents;
ap.by_ref = c->by_ref;
cond_predicate = add_condition (info,
- VEC_index (int,
- operand_map,
- c->operand_num),
+ operand_map[c->operand_num],
&ap, c->code, c->val);
}
}
remap_edge_change_prob (struct cgraph_edge *inlined_edge,
struct cgraph_edge *edge)
{
- if (ipa_node_params_vector)
+ if (ipa_node_params_vector.exists ())
{
int i;
struct ipa_edge_args *args = IPA_EDGE_REF (edge);
struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
if (jfunc->type == IPA_JF_PASS_THROUGH
&& (ipa_get_jf_pass_through_formal_id (jfunc)
- < (int) VEC_length (inline_param_summary_t,
- inlined_es->param)))
+ < (int) inlined_es->param.length ()))
{
int jf_formal_id = ipa_get_jf_pass_through_formal_id (jfunc);
- int prob1 = VEC_index (inline_param_summary_t,
- es->param, i).change_prob;
- int prob2 = VEC_index
- (inline_param_summary_t,
- inlined_es->param, jf_formal_id).change_prob;
+ int prob1 = es->param[i].change_prob;
+ int prob2 = inlined_es->param[jf_formal_id].change_prob;
int prob = ((prob1 * prob2 + REG_BR_PROB_BASE / 2)
/ REG_BR_PROB_BASE);
if (prob1 && prob2 && !prob)
prob = 1;
- VEC_index (inline_param_summary_t,
- es->param, i).change_prob = prob;
+ es->param[i].change_prob = prob;
}
}
}
struct cgraph_node *node,
struct inline_summary *info,
struct inline_summary *callee_info,
- VEC (int, heap) *operand_map,
- VEC (int, heap) *offset_map,
+ vec<int> operand_map,
+ vec<int> offset_map,
clause_t possible_truths,
struct predicate *toplev_predicate)
{
remap_hint_predicate (struct inline_summary *info,
struct inline_summary *callee_info,
struct predicate **hint,
- VEC (int, heap) *operand_map,
- VEC (int, heap) *offset_map,
+ vec<int> operand_map,
+ vec<int> offset_map,
clause_t possible_truths,
struct predicate *toplev_predicate)
{
struct inline_summary *info = inline_summary (to);
clause_t clause = 0; /* not_inline is known to be false. */
size_time_entry *e;
- VEC (int, heap) *operand_map = NULL;
- VEC (int, heap) *offset_map = NULL;
+ vec<int> operand_map = vec<int>();
+ vec<int> offset_map = vec<int>();
int i;
struct predicate toplev_predicate;
struct predicate true_p = true_predicate ();
else
toplev_predicate = true_predicate ();
- if (ipa_node_params_vector && callee_info->conds)
+ if (ipa_node_params_vector.exists () && callee_info->conds)
{
struct ipa_edge_args *args = IPA_EDGE_REF (edge);
int count = ipa_get_cs_argument_count (args);
evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL);
if (count)
{
- VEC_safe_grow_cleared (int, heap, operand_map, count);
- VEC_safe_grow_cleared (int, heap, offset_map, count);
+ operand_map.safe_grow_cleared (count);
+ offset_map.safe_grow_cleared (count);
}
for (i = 0; i < count; i++)
{
if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
map = ipa_get_jf_pass_through_formal_id (jfunc);
if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
- VEC_replace (int, offset_map, i, -1);
+ offset_map[i] = -1;
}
else if (jfunc->type == IPA_JF_ANCESTOR)
{
map = ipa_get_jf_ancestor_formal_id (jfunc);
if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
offset = -1;
- VEC_replace (int, offset_map, i, offset);
+ offset_map[i] = offset;
}
}
- VEC_replace (int, operand_map, i, map);
+ operand_map[i] = map;
gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
}
}
- for (i = 0; VEC_iterate (size_time_entry, callee_info->entry, i, e); i++)
+ for (i = 0; vec_safe_iterate (callee_info->entry, i, &e); i++)
{
struct predicate p = remap_predicate (info, callee_info,
&e->predicate, operand_map,
/* We do not maintain predicates of inlined edges, free it. */
edge_set_predicate (edge, &true_p);
/* Similarly remove param summaries. */
- VEC_free (inline_param_summary_t, heap, es->param);
- VEC_free (int, heap, operand_map);
- VEC_free (int, heap, offset_map);
+ es->param.release ();
+ operand_map.release ();
+ offset_map.release ();
}
/* For performance reasons inline_merge_summary does not update the overall size
info->size = 0;
info->time = 0;
- for (i = 0; VEC_iterate (size_time_entry, info->entry, i, e); i++)
+ for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
{
info->size += e->size, info->time += e->time;
if (info->time > MAX_TIME * INLINE_TIME_SCALE)
}
estimate_calls_size_and_time (node, &info->size, &info->time, NULL,
~(clause_t)(1 << predicate_false_condition),
- NULL, NULL, NULL);
+ vec<tree>(),
+ vec<tree>(),
+ vec<ipa_agg_jump_function_p>());
info->time = (info->time + INLINE_TIME_SCALE / 2) / INLINE_TIME_SCALE;
info->size = (info->size + INLINE_SIZE_SCALE / 2) / INLINE_SIZE_SCALE;
}
inline_hints hints;
struct cgraph_node *callee;
clause_t clause;
- VEC (tree, heap) *known_vals;
- VEC (tree, heap) *known_binfos;
- VEC (ipa_agg_jump_function_p, heap) *known_aggs;
+ vec<tree> known_vals;
+ vec<tree> known_binfos;
+ vec<ipa_agg_jump_function_p> known_aggs;
struct inline_edge_summary *es = inline_edge_summary (edge);
callee = cgraph_function_or_thunk_node (edge->callee, NULL);
&known_aggs);
estimate_node_size_and_time (callee, clause, known_vals, known_binfos,
known_aggs, &size, &time, &hints, es->param);
- VEC_free (tree, heap, known_vals);
- VEC_free (tree, heap, known_binfos);
- VEC_free (ipa_agg_jump_function_p, heap, known_aggs);
+ known_vals.release ();
+ known_binfos.release ();
+ known_aggs.release ();
gcc_checking_assert (size >= 0);
gcc_checking_assert (time >= 0);
/* When caching, update the cache entry. */
- if (edge_growth_cache)
+ if (edge_growth_cache.exists ())
{
- if ((int)VEC_length (edge_growth_cache_entry, edge_growth_cache)
- <= edge->uid)
- VEC_safe_grow_cleared (edge_growth_cache_entry, heap, edge_growth_cache,
- cgraph_edge_max_uid);
- VEC_index (edge_growth_cache_entry, edge_growth_cache, edge->uid).time
- = time + (time >= 0);
+ if ((int)edge_growth_cache.length () <= edge->uid)
+ edge_growth_cache.safe_grow_cleared (cgraph_edge_max_uid);
+ edge_growth_cache[edge->uid].time = time + (time >= 0);
- VEC_index (edge_growth_cache_entry, edge_growth_cache, edge->uid).size
- = size + (size >= 0);
+ edge_growth_cache[edge->uid].size = size + (size >= 0);
hints |= simple_edge_hints (edge);
- VEC_index (edge_growth_cache_entry, edge_growth_cache, edge->uid).hints
- = hints + 1;
+ edge_growth_cache[edge->uid].hints = hints + 1;
}
return time;
}
int size;
struct cgraph_node *callee;
clause_t clause;
- VEC (tree, heap) *known_vals;
- VEC (tree, heap) *known_binfos;
- VEC (ipa_agg_jump_function_p, heap) *known_aggs;
+ vec<tree> known_vals;
+ vec<tree> known_binfos;
+ vec<ipa_agg_jump_function_p> known_aggs;
/* When we do caching, use do_estimate_edge_time to populate the entry. */
- if (edge_growth_cache)
+ if (edge_growth_cache.exists ())
{
do_estimate_edge_time (edge);
- size = VEC_index (edge_growth_cache_entry,
- edge_growth_cache,
- edge->uid).size;
+ size = edge_growth_cache[edge->uid].size;
gcc_checking_assert (size);
return size - (size > 0);
}
&clause, &known_vals, &known_binfos,
&known_aggs);
estimate_node_size_and_time (callee, clause, known_vals, known_binfos,
- known_aggs, &size, NULL, NULL, NULL);
- VEC_free (tree, heap, known_vals);
- VEC_free (tree, heap, known_binfos);
- VEC_free (ipa_agg_jump_function_p, heap, known_aggs);
+ known_aggs, &size, NULL, NULL,
+ vec<inline_param_summary_t>());
+ known_vals.release ();
+ known_binfos.release ();
+ known_aggs.release ();
return size;
}
inline_hints hints;
struct cgraph_node *callee;
clause_t clause;
- VEC (tree, heap) *known_vals;
- VEC (tree, heap) *known_binfos;
- VEC (ipa_agg_jump_function_p, heap) *known_aggs;
+ vec<tree> known_vals;
+ vec<tree> known_binfos;
+ vec<ipa_agg_jump_function_p> known_aggs;
/* When we do caching, use do_estimate_edge_time to populate the entry. */
- if (edge_growth_cache)
+ if (edge_growth_cache.exists ())
{
do_estimate_edge_time (edge);
- hints = VEC_index (edge_growth_cache_entry,
- edge_growth_cache,
- edge->uid).hints;
+ hints = edge_growth_cache[edge->uid].hints;
gcc_checking_assert (hints);
return hints - 1;
}
&clause, &known_vals, &known_binfos,
&known_aggs);
estimate_node_size_and_time (callee, clause, known_vals, known_binfos,
- known_aggs, NULL, NULL, &hints, NULL);
- VEC_free (tree, heap, known_vals);
- VEC_free (tree, heap, known_binfos);
- VEC_free (ipa_agg_jump_function_p, heap, known_aggs);
+ known_aggs, NULL, NULL, &hints,
+ vec<inline_param_summary_t>());
+ known_vals.release ();
+ known_binfos.release ();
+ known_aggs.release ();
hints |= simple_edge_hints (edge);
return hints;
}
+ 50) / 100;
}
- if (node_growth_cache)
+ if (node_growth_cache.exists ())
{
- if ((int)VEC_length (int, node_growth_cache) <= node->uid)
- VEC_safe_grow_cleared (int, heap, node_growth_cache, cgraph_max_uid);
- VEC_replace (int, node_growth_cache, node->uid,
- d.growth + (d.growth >= 0));
+ if ((int)node_growth_cache.length () <= node->uid)
+ node_growth_cache.safe_grow_cleared (cgraph_max_uid);
+ node_growth_cache[node->uid] = d.growth + (d.growth >= 0);
}
return d.growth;
}
length = streamer_read_uhwi (ib);
if (length)
{
- VEC_safe_grow_cleared (inline_param_summary_t, heap, es->param, length);
+ es->param.safe_grow_cleared (length);
for (i = 0; i < length; i++)
- VEC_index (inline_param_summary_t, es->param, i).change_prob
+ es->param[i].change_prob
= streamer_read_uhwi (ib);
}
}
data_in =
lto_data_in_create (file_data, (const char *) data + string_offset,
- header->string_size, NULL);
+ header->string_size,
+ vec<ld_plugin_symbol_resolution_t>());
f_count = streamer_read_uhwi (&ib);
for (i = 0; i < f_count; i++)
{
c.by_ref = bp_unpack_value (&bp, 1);
if (c.agg_contents)
c.offset = streamer_read_uhwi (&ib);
- VEC_safe_push (condition, gc, info->conds, c);
+ vec_safe_push (info->conds, c);
}
count2 = streamer_read_uhwi (&ib);
gcc_assert (!info->entry);
e.time = streamer_read_uhwi (&ib);
e.predicate = read_predicate (&ib);
- VEC_safe_push (size_time_entry, gc, info->entry, e);
+ vec_safe_push (info->entry, e);
}
p = read_predicate (&ib);
streamer_write_uhwi (ob, es->call_stmt_time);
streamer_write_uhwi (ob, es->loop_depth);
write_predicate (ob, es->predicate);
- streamer_write_uhwi (ob, VEC_length (inline_param_summary_t, es->param));
- for (i = 0; i < (int)VEC_length (inline_param_summary_t, es->param); i++)
- streamer_write_uhwi (ob, VEC_index (inline_param_summary_t,
- es->param, i).change_prob);
+ streamer_write_uhwi (ob, es->param.length ());
+ for (i = 0; i < (int)es->param.length (); i++)
+ streamer_write_uhwi (ob, es->param[i].change_prob);
}
bp = bitpack_create (ob->main_stream);
bp_pack_value (&bp, info->inlinable, 1);
streamer_write_bitpack (&bp);
- streamer_write_uhwi (ob, VEC_length (condition, info->conds));
- for (i = 0; VEC_iterate (condition, info->conds, i, c); i++)
+ streamer_write_uhwi (ob, vec_safe_length (info->conds));
+ for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
{
streamer_write_uhwi (ob, c->operand_num);
streamer_write_uhwi (ob, c->code);
if (c->agg_contents)
streamer_write_uhwi (ob, c->offset);
}
- streamer_write_uhwi (ob, VEC_length (size_time_entry, info->entry));
- for (i = 0;
- VEC_iterate (size_time_entry, info->entry, i, e);
- i++)
+ streamer_write_uhwi (ob, vec_safe_length (info->entry));
+ for (i = 0; vec_safe_iterate (info->entry, i, &e); i++)
{
streamer_write_uhwi (ob, e->size);
streamer_write_uhwi (ob, e->time);
inline_free_summary (void)
{
struct cgraph_node *node;
- if (inline_edge_summary_vec == NULL)
+ if (!inline_edge_summary_vec.exists ())
return;
FOR_EACH_DEFINED_FUNCTION (node)
reset_inline_summary (node);
if (edge_duplication_hook_holder)
cgraph_remove_edge_duplication_hook (edge_duplication_hook_holder);
edge_duplication_hook_holder = NULL;
- VEC_free (inline_summary_t, gc, inline_summary_vec);
- inline_summary_vec = NULL;
- VEC_free (inline_edge_summary_t, heap, inline_edge_summary_vec);
- inline_edge_summary_vec = NULL;
+ vec_free (inline_summary_vec);
+ inline_edge_summary_vec.release ();
if (edge_predicate_pool)
free_alloc_pool (edge_predicate_pool);
edge_predicate_pool = 0;
struct cgraph_node *n;
n = cgraph_clone_node (e->callee, e->callee->symbol.decl,
e->count, e->frequency,
- update_original, NULL, true);
+ update_original, vec<cgraph_edge_p>(),
+ true);
cgraph_redirect_edge_callee (e, n);
}
}
bool
inline_call (struct cgraph_edge *e, bool update_original,
- VEC (cgraph_edge_p, heap) **new_edges,
+ vec<cgraph_edge_p> *new_edges,
int *overall_size, bool update_overall_summary)
{
int old_size = 0, new_size = 0;
/* Copy the OLD_VERSION_NODE function tree to the new version. */
tree_function_versioning (node->symbol.decl, first_clone->symbol.decl,
- NULL, true, NULL, false, NULL, NULL);
+ NULL, true, NULL, false,
+ NULL, NULL);
/* The function will be short-lived and removed after we inline all the clones,
but make it internal so we won't confuse ourselves. */
DECL_COMDAT_GROUP (first_clone->symbol.decl) = NULL_TREE;
TREE_PUBLIC (first_clone->symbol.decl) = 0;
DECL_COMDAT (first_clone->symbol.decl) = 0;
- VEC_free (ipa_opt_pass, heap,
- first_clone->ipa_transforms_to_apply);
- first_clone->ipa_transforms_to_apply = NULL;
+ first_clone->ipa_transforms_to_apply.release ();
/* When doing recursive inlining, the clone may become unnecessary.
This is possible, e.g., when the recursive function is proved to be
static bool
recursive_inlining (struct cgraph_edge *edge,
- VEC (cgraph_edge_p, heap) **new_edges)
+ vec<cgraph_edge_p> *new_edges)
{
int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
fibheap_t heap;
/* We need original clone to copy around. */
master_clone = cgraph_clone_node (node, node->symbol.decl,
node->count, CGRAPH_FREQ_BASE,
- false, NULL, true);
+ false, vec<cgraph_edge_p>(),
+ true);
for (e = master_clone->callees; e; e = e->next_callee)
if (!e->inline_failed)
clone_inlined_nodes (e, true, false, NULL);
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
static void
-add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
+add_new_edges_to_heap (fibheap_t heap, vec<cgraph_edge_p> new_edges)
{
- while (VEC_length (cgraph_edge_p, new_edges) > 0)
+ while (new_edges.length () > 0)
{
- struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
+ struct cgraph_edge *edge = new_edges.pop ();
gcc_assert (!edge->aux);
if (edge->inline_failed
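add_new_edges_to_heap drains its argument with the new member functions;
pop () returns the last element and decreases the length without freeing
the buffer. A sketch, with a hypothetical consumer use_element:

  vec<int> work = vec<int>();
  work.safe_push (1);
  work.safe_push (2);
  while (work.length () > 0)
    use_element (work.pop ());	/* Last in, first out.  */
  work.release ();		/* Length 0 still owns the buffer.  */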
fibheap_t edge_heap = fibheap_new ();
bitmap updated_nodes = BITMAP_ALLOC (NULL);
int min_size, max_size;
- VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
+ vec<cgraph_edge_p> new_indirect_edges = vec<cgraph_edge_p>();
int initial_size = 0;
struct cgraph_node **order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
if (flag_indirect_inlining)
- new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
+ new_indirect_edges.create (8);
/* Compute overall unit size and other global parameters used by badness
metrics. */
}
free_growth_caches ();
- if (new_indirect_edges)
- VEC_free (cgraph_edge_p, heap, new_indirect_edges);
+ new_indirect_edges.release ();
fibheap_delete (edge_heap);
if (dump_file)
fprintf (dump_file,
it. This may confuse us when the early inliner decides to inline a call to a
function clone, because function clones don't have a parameter list in
ipa-prop matching their signature. */
- if (ipa_node_params_vector)
+ if (ipa_node_params_vector.exists ())
return 0;
#ifdef ENABLE_CHECKING
};
typedef int inline_hints;
-DEF_VEC_O (condition);
-DEF_VEC_ALLOC_O (condition, gc);
-typedef VEC(condition,gc) *conditions;
+typedef vec<condition, va_gc> *conditions;
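On the header side the DEF_VEC_* instantiation boilerplate disappears
entirely: the template works for any element type, so the three-macro dance
collapses into a plain typedef, as with conditions above. The same pattern
for a hypothetical struct foo:

  /* Before:
       DEF_VEC_O (foo);
       DEF_VEC_ALLOC_O (foo, gc);
       typedef VEC(foo,gc) *foos;
     After:  */
  typedef vec<foo, va_gc> *foos;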
/* Representation of predicates i.e. formulas using conditions defined
above. Predicates are simple logical formulas in conjunctive-disjunctive
int size;
int time;
} size_time_entry;
-DEF_VEC_O (size_time_entry);
-DEF_VEC_ALLOC_O (size_time_entry, gc);
/* Function inlining information. */
struct GTY(()) inline_summary
/* Conditional size/time information. The summaries are being
merged during inlining. */
conditions conds;
- VEC(size_time_entry,gc) *entry;
+ vec<size_time_entry, va_gc> *entry;
/* Predicate on when some loop in the function comes to have known
bounds. */
typedef struct inline_summary inline_summary_t;
-DEF_VEC_O(inline_summary_t);
-DEF_VEC_ALLOC_O(inline_summary_t,gc);
-extern GTY(()) VEC(inline_summary_t,gc) *inline_summary_vec;
+extern GTY(()) vec<inline_summary_t, va_gc> *inline_summary_vec;
/* Information kept about parameter of call site. */
struct inline_param_summary
int change_prob;
};
typedef struct inline_param_summary inline_param_summary_t;
-DEF_VEC_O(inline_param_summary_t);
-DEF_VEC_ALLOC_O(inline_param_summary_t,heap);
/* Information kept about callgraph edges. */
struct inline_edge_summary
/* Array indexed by parameters.
0 means that the parameter changes all the time; REG_BR_PROB_BASE means
that the parameter is constant. */
- VEC (inline_param_summary_t, heap) *param;
+ vec<inline_param_summary_t> param;
};
typedef struct inline_edge_summary inline_edge_summary_t;
-DEF_VEC_O(inline_edge_summary_t);
-DEF_VEC_ALLOC_O(inline_edge_summary_t,heap);
-extern VEC(inline_edge_summary_t,heap) *inline_edge_summary_vec;
+extern vec<inline_edge_summary_t> inline_edge_summary_vec;
typedef struct edge_growth_cache_entry
{
int time, size;
inline_hints hints;
} edge_growth_cache_entry;
-DEF_VEC_O(edge_growth_cache_entry);
-DEF_VEC_ALLOC_O(edge_growth_cache_entry,heap);
-extern VEC(int,heap) *node_growth_cache;
-extern VEC(edge_growth_cache_entry,heap) *edge_growth_cache;
+extern vec<int> node_growth_cache;
+extern vec<edge_growth_cache_entry> edge_growth_cache;
/* In ipa-inline-analysis.c */
void debug_inline_summary (struct cgraph_node *);
int estimate_time_after_inlining (struct cgraph_node *, struct cgraph_edge *);
int estimate_size_after_inlining (struct cgraph_node *, struct cgraph_edge *);
void estimate_ipcp_clone_size_and_time (struct cgraph_node *,
- VEC (tree, heap) *, VEC (tree, heap) *,
- VEC (ipa_agg_jump_function_p, heap) *,
+ vec<tree>, vec<tree>,
+ vec<ipa_agg_jump_function_p>,
int *, int *, inline_hints *);
int do_estimate_growth (struct cgraph_node *);
void inline_merge_summary (struct cgraph_edge *edge);
void compute_inline_parameters (struct cgraph_node *, bool);
/* In ipa-inline-transform.c */
-bool inline_call (struct cgraph_edge *, bool, VEC (cgraph_edge_p, heap) **, int *, bool);
+bool inline_call (struct cgraph_edge *, bool, vec<cgraph_edge_p> *, int *, bool);
unsigned int inline_transform (struct cgraph_node *);
void clone_inlined_nodes (struct cgraph_edge *e, bool, bool, int *);
static inline struct inline_summary *
inline_summary (struct cgraph_node *node)
{
- return &VEC_index (inline_summary_t, inline_summary_vec, node->uid);
+ return &(*inline_summary_vec)[node->uid];
}
static inline struct inline_edge_summary *
inline_edge_summary (struct cgraph_edge *edge)
{
- return &VEC_index (inline_edge_summary_t,
- inline_edge_summary_vec, edge->uid);
+ return &inline_edge_summary_vec[edge->uid];
}
/* Return estimated unit growth after inlining all calls to NODE.
estimate_growth (struct cgraph_node *node)
{
int ret;
- if ((int)VEC_length (int, node_growth_cache) <= node->uid
- || !(ret = VEC_index (int, node_growth_cache, node->uid)))
+ if ((int)node_growth_cache.length () <= node->uid
+ || !(ret = node_growth_cache[node->uid]))
return do_estimate_growth (node);
return ret - (ret > 0);
}
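The growth caches reserve the value 0 for "no entry", so stored values are
biased: nonnegative results are shifted up by one on store (d.growth
+ (d.growth >= 0)) and shifted back on load (ret - (ret > 0)), while
negative results are already nonzero and pass through unchanged. A sketch
with hypothetical helper names:

  static int
  growth_cache_encode (int v)
  {
    return v + (v >= 0);	/* 0 -> 1, 5 -> 6, -3 -> -3.  */
  }

  static int
  growth_cache_decode (int s)
  {
    return s - (s > 0);		/* 1 -> 0, 6 -> 5, -3 -> -3.  */
  }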
estimate_edge_size (struct cgraph_edge *edge)
{
int ret;
- if ((int)VEC_length (edge_growth_cache_entry, edge_growth_cache) <= edge->uid
- || !(ret = VEC_index (edge_growth_cache_entry,
- edge_growth_cache,
- edge->uid).size))
+ if ((int)edge_growth_cache.length () <= edge->uid
+ || !(ret = edge_growth_cache[edge->uid].size))
return do_estimate_edge_size (edge);
return ret - (ret > 0);
}
estimate_edge_time (struct cgraph_edge *edge)
{
int ret;
- if ((int)VEC_length (edge_growth_cache_entry, edge_growth_cache) <= edge->uid
- || !(ret = VEC_index (edge_growth_cache_entry,
- edge_growth_cache,
- edge->uid).time))
+ if ((int)edge_growth_cache.length () <= edge->uid
+ || !(ret = edge_growth_cache[edge->uid].time))
return do_estimate_edge_time (edge);
return ret - (ret > 0);
}
estimate_edge_hints (struct cgraph_edge *edge)
{
inline_hints ret;
- if ((int)VEC_length (edge_growth_cache_entry, edge_growth_cache) <= edge->uid
- || !(ret = VEC_index (edge_growth_cache_entry,
- edge_growth_cache,
- edge->uid).hints))
+ if ((int)edge_growth_cache.length () <= edge->uid
+ || !(ret = edge_growth_cache[edge->uid].hints))
return do_estimate_edge_hints (edge);
return ret - 1;
}
static inline void
reset_node_growth_cache (struct cgraph_node *node)
{
- if ((int)VEC_length (int, node_growth_cache) > node->uid)
- VEC_replace (int, node_growth_cache, node->uid, 0);
+ if ((int)node_growth_cache.length () > node->uid)
+ node_growth_cache[node->uid] = 0;
}
/* Reset cached value for EDGE. */
static inline void
reset_edge_growth_cache (struct cgraph_edge *edge)
{
- if ((int)VEC_length (edge_growth_cache_entry, edge_growth_cache) > edge->uid)
+ if ((int)edge_growth_cache.length () > edge->uid)
{
struct edge_growth_cache_entry zero = {0, 0, 0};
- VEC_replace (edge_growth_cache_entry, edge_growth_cache, edge->uid, zero);
+ edge_growth_cache[edge->uid] = zero;
}
}
};
/* Vector where the parameter infos are actually stored. */
-VEC (ipa_node_params_t, heap) *ipa_node_params_vector;
+vec<ipa_node_params_t> ipa_node_params_vector;
/* Vector of known aggregate values in cloned nodes. */
-VEC (ipa_agg_replacement_value_p, gc) *ipa_node_agg_replacements;
+vec<ipa_agg_replacement_value_p, va_gc> *ipa_node_agg_replacements;
/* Vector where the parameter infos are actually stored. */
-VEC (ipa_edge_args_t, gc) *ipa_edge_args_vector;
+vec<ipa_edge_args_t, va_gc> *ipa_edge_args_vector;
/* Holders of ipa cgraph hooks: */
static struct cgraph_edge_hook_list *edge_removal_hook_holder;
to INFO. */
static int
-ipa_get_param_decl_index_1 (VEC (ipa_param_descriptor_t, heap) *descriptors,
- tree ptree)
+ipa_get_param_decl_index_1 (vec<ipa_param_descriptor_t> descriptors, tree ptree)
{
int i, count;
- count = VEC_length (ipa_param_descriptor_t, descriptors);
+ count = descriptors.length ();
for (i = 0; i < count; i++)
- if (VEC_index (ipa_param_descriptor_t, descriptors, i).decl == ptree)
+ if (descriptors[i].decl == ptree)
return i;
return -1;
static void
ipa_populate_param_decls (struct cgraph_node *node,
- VEC (ipa_param_descriptor_t, heap) *descriptors)
+ vec<ipa_param_descriptor_t> &descriptors)
{
tree fndecl;
tree fnargs;
param_num = 0;
for (parm = fnargs; parm; parm = DECL_CHAIN (parm))
{
- VEC_index (ipa_param_descriptor_t, descriptors, param_num).decl = parm;
+ descriptors[param_num].decl = parm;
param_num++;
}
}
{
struct ipa_node_params *info = IPA_NODE_REF (node);
- if (!info->descriptors)
+ if (!info->descriptors.exists ())
{
int param_count;
param_count = count_formal_params (node->symbol.decl);
if (param_count)
{
- VEC_safe_grow_cleared (ipa_param_descriptor_t, heap,
- info->descriptors, param_count);
+ info->descriptors.safe_grow_cleared (param_count);
ipa_populate_param_decls (node, info->descriptors);
}
}
fprintf (f, " Aggregate passed by %s:\n",
jump_func->agg.by_ref ? "reference" : "value");
- FOR_EACH_VEC_ELT (ipa_agg_jf_item_t, jump_func->agg.items,
- j, item)
+ FOR_EACH_VEC_SAFE_ELT (jump_func->agg.items, j, item)
{
fprintf (f, " offset: " HOST_WIDE_INT_PRINT_DEC ", ",
item->offset);
modified. Otherwise return -1. */
static int
-load_from_unmodified_param (VEC (ipa_param_descriptor_t, heap) *descriptors,
+load_from_unmodified_param (vec<ipa_param_descriptor_t> descriptors,
struct param_analysis_info *parms_ainfo,
gimple stmt)
{
reference respectively. */
static bool
-ipa_load_from_parm_agg_1 (VEC (ipa_param_descriptor_t, heap) *descriptors,
+ipa_load_from_parm_agg_1 (vec<ipa_param_descriptor_t> descriptors,
struct param_analysis_info *parms_ainfo, gimple stmt,
tree op, int *index_p, HOST_WIDE_INT *offset_p,
bool *by_ref_p)
if (const_count)
{
jfunc->agg.by_ref = by_ref;
- jfunc->agg.items = VEC_alloc (ipa_agg_jf_item_t, gc, const_count);
+ vec_alloc (jfunc->agg.items, const_count);
while (list)
{
if (list->constant)
struct ipa_agg_jf_item item;
item.offset = list->offset - arg_offset;
item.value = prune_expression_for_jf (list->constant);
- VEC_quick_push (ipa_agg_jf_item_t, jfunc->agg.items, item);
+ jfunc->agg.items->quick_push (item);
}
list = list->next;
}
if (arg_num == 0 || args->jump_functions)
return;
- VEC_safe_grow_cleared (ipa_jump_func_t, gc, args->jump_functions, arg_num);
+ vec_safe_grow_cleared (args->jump_functions, arg_num);
for (n = 0; n < arg_num; n++)
{
replace with merging when we do. */
gcc_assert (!dst->agg.items);
- dst->agg.items = VEC_copy (ipa_agg_jf_item_t, gc, src->agg.items);
+ dst->agg.items = vec_safe_copy (src->agg.items);
dst->agg.by_ref = src->agg.by_ref;
- FOR_EACH_VEC_ELT (ipa_agg_jf_item_t, dst->agg.items, j, item)
+ FOR_EACH_VEC_SAFE_ELT (dst->agg.items, j, item)
item->offset -= dst->value.ancestor.offset;
}
gcc_assert (!dst->agg.items);
dst->agg.by_ref = src->agg.by_ref;
- dst->agg.items = VEC_copy (ipa_agg_jf_item_t, gc,
- src->agg.items);
+ dst->agg.items = vec_safe_copy (src->agg.items);
}
if (!agg_p)
if (by_ref != agg->by_ref)
return NULL;
- FOR_EACH_VEC_ELT (ipa_agg_jf_item_t, agg->items, i, item)
+ FOR_EACH_VEC_SAFE_ELT (agg->items, i, item)
if (item->offset == offset)
{
/* Currently we do not have clobber values, return NULL for them once
static bool
update_indirect_edges_after_inlining (struct cgraph_edge *cs,
struct cgraph_node *node,
- VEC (cgraph_edge_p, heap) **new_edges)
+ vec<cgraph_edge_p> *new_edges)
{
struct ipa_edge_args *top;
struct cgraph_edge *ie, *next_ie, *new_direct_edge;
new_direct_edge->callee->symbol.decl);
if (new_edges)
{
- VEC_safe_push (cgraph_edge_p, heap, *new_edges,
- new_direct_edge);
+ new_edges->safe_push (new_direct_edge);
top = IPA_EDGE_REF (cs);
res = true;
}
static bool
propagate_info_to_inlined_callees (struct cgraph_edge *cs,
struct cgraph_node *node,
- VEC (cgraph_edge_p, heap) **new_edges)
+ vec<cgraph_edge_p> *new_edges)
{
struct cgraph_edge *e;
bool res;
bool
ipa_propagate_indirect_call_infos (struct cgraph_edge *cs,
- VEC (cgraph_edge_p, heap) **new_edges)
+ vec<cgraph_edge_p> *new_edges)
{
bool changed;
/* Do nothing if the preparation phase has not been carried out yet
(i.e. during early inlining). */
- if (!ipa_node_params_vector)
+ if (!ipa_node_params_vector.exists ())
return false;
gcc_assert (ipa_edge_args_vector);
void
ipa_free_edge_args_substructures (struct ipa_edge_args *args)
{
- if (args->jump_functions)
- ggc_free (args->jump_functions);
-
+ vec_free (args->jump_functions);
memset (args, 0, sizeof (*args));
}
int i;
struct ipa_edge_args *args;
- FOR_EACH_VEC_ELT (ipa_edge_args_t, ipa_edge_args_vector, i, args)
+ if (!ipa_edge_args_vector)
+ return;
+
+ FOR_EACH_VEC_ELT (*ipa_edge_args_vector, i, args)
ipa_free_edge_args_substructures (args);
- VEC_free (ipa_edge_args_t, gc, ipa_edge_args_vector);
- ipa_edge_args_vector = NULL;
+ vec_free (ipa_edge_args_vector);
}
/* Frees all dynamically allocated structures that the param info points
void
ipa_free_node_params_substructures (struct ipa_node_params *info)
{
- VEC_free (ipa_param_descriptor_t, heap, info->descriptors);
+ info->descriptors.release ();
free (info->lattices);
/* Lattice values and their sources are deallocated with their allocation
pool. */
- VEC_free (tree, heap, info->known_vals);
+ info->known_vals.release ();
memset (info, 0, sizeof (*info));
}
int i;
struct ipa_node_params *info;
- FOR_EACH_VEC_ELT (ipa_node_params_t, ipa_node_params_vector, i, info)
+ FOR_EACH_VEC_ELT (ipa_node_params_vector, i, info)
ipa_free_node_params_substructures (info);
- VEC_free (ipa_node_params_t, heap, ipa_node_params_vector);
- ipa_node_params_vector = NULL;
+ ipa_node_params_vector.release ();
}
/* Set the aggregate replacements of NODE to be AGGVALS. */
ipa_set_node_agg_value_chain (struct cgraph_node *node,
struct ipa_agg_replacement_value *aggvals)
{
- if (VEC_length (ipa_agg_replacement_value_p, ipa_node_agg_replacements)
- <= (unsigned) cgraph_max_uid)
- VEC_safe_grow_cleared (ipa_agg_replacement_value_p, gc,
- ipa_node_agg_replacements, cgraph_max_uid + 1);
+ if (vec_safe_length (ipa_node_agg_replacements) <= (unsigned) cgraph_max_uid)
+ vec_safe_grow_cleared (ipa_node_agg_replacements, cgraph_max_uid + 1);
- VEC_replace (ipa_agg_replacement_value_p, ipa_node_agg_replacements,
- node->uid, aggvals);
+ (*ipa_node_agg_replacements)[node->uid] = aggvals;
}
/* Hook that is called by cgraph.c when an edge is removed. */
ipa_edge_removal_hook (struct cgraph_edge *cs, void *data ATTRIBUTE_UNUSED)
{
/* During IPA-CP updating we can be called on not-yet analyzed clones. */
- if (VEC_length (ipa_edge_args_t, ipa_edge_args_vector)
- <= (unsigned)cs->uid)
+ if (vec_safe_length (ipa_edge_args_vector) <= (unsigned)cs->uid)
return;
ipa_free_edge_args_substructures (IPA_EDGE_REF (cs));
}
ipa_node_removal_hook (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
/* During IPA-CP updating we can be called on not-yet analyzed clones. */
- if (VEC_length (ipa_node_params_t, ipa_node_params_vector)
- > (unsigned)node->uid)
+ if (ipa_node_params_vector.length () > (unsigned)node->uid)
ipa_free_node_params_substructures (IPA_NODE_REF (node));
- if (VEC_length (ipa_agg_replacement_value_p, ipa_node_agg_replacements)
- > (unsigned)node->uid)
- VEC_replace (ipa_agg_replacement_value_p, ipa_node_agg_replacements,
- (unsigned)node->uid, NULL);
+ if (vec_safe_length (ipa_node_agg_replacements) > (unsigned)node->uid)
+ (*ipa_node_agg_replacements)[(unsigned)node->uid] = NULL;
}
/* Hook that is called by cgraph.c when an edge is duplicated. */
old_args = IPA_EDGE_REF (src);
new_args = IPA_EDGE_REF (dst);
- new_args->jump_functions = VEC_copy (ipa_jump_func_t, gc,
- old_args->jump_functions);
+ new_args->jump_functions = vec_safe_copy (old_args->jump_functions);
- for (i = 0; i < VEC_length (ipa_jump_func_t, old_args->jump_functions); i++)
- VEC_index (ipa_jump_func_t, new_args->jump_functions, i).agg.items
- = VEC_copy (ipa_agg_jf_item_t, gc,
- VEC_index (ipa_jump_func_t,
- old_args->jump_functions, i).agg.items);
+ for (i = 0; i < vec_safe_length (old_args->jump_functions); i++)
+ (*new_args->jump_functions)[i].agg.items
+ = vec_safe_copy ((*old_args->jump_functions)[i].agg.items);
}
/* Hook that is called by cgraph.c when a node is duplicated. */
old_info = IPA_NODE_REF (src);
new_info = IPA_NODE_REF (dst);
- new_info->descriptors = VEC_copy (ipa_param_descriptor_t, heap,
- old_info->descriptors);
+ new_info->descriptors = old_info->descriptors.copy ();
new_info->lattices = NULL;
new_info->ipcp_orig_node = old_info->ipcp_orig_node;
/* Return a heap allocated vector containing formal parameters of FNDECL. */
-VEC(tree, heap) *
+vec<tree>
ipa_get_vector_of_formal_parms (tree fndecl)
{
- VEC(tree, heap) *args;
+ vec<tree> args;
int count;
tree parm;
count = count_formal_params (fndecl);
- args = VEC_alloc (tree, heap, count);
+ args.create (count);
for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
- VEC_quick_push (tree, args, parm);
+ args.quick_push (parm);
return args;
}
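ipa_get_vector_of_formal_parms uses the reserve-then-fill idiom: create (n)
reserves space up front so the following quick_push calls, which skip the
reallocation check, are safe; safe_push is the growing variant for when the
final size is unknown. A sketch with hypothetical trees t1 and t2:

  vec<tree> a;
  a.create (2);		/* Capacity for two elements.  */
  a.quick_push (t1);	/* No reallocation check here.  */
  a.quick_push (t2);
  a.release ();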
/* Return a heap allocated vector containing types of formal parameters of
function type FNTYPE. */
-static inline VEC(tree, heap) *
+static inline vec<tree>
get_vector_of_formal_parm_types (tree fntype)
{
- VEC(tree, heap) *types;
+ vec<tree> types;
int count = 0;
tree t;
for (t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
count++;
- types = VEC_alloc (tree, heap, count);
+ types.create (count);
for (t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
- VEC_quick_push (tree, types, TREE_VALUE (t));
+ types.quick_push (TREE_VALUE (t));
return types;
}
ipa_modify_formal_parameters (tree fndecl, ipa_parm_adjustment_vec adjustments,
const char *synth_parm_prefix)
{
- VEC(tree, heap) *oparms, *otypes;
+ vec<tree> oparms, otypes;
tree orig_type, new_type = NULL;
tree old_arg_types, t, new_arg_types = NULL;
tree parm, *link = &DECL_ARGUMENTS (fndecl);
- int i, len = VEC_length (ipa_parm_adjustment_t, adjustments);
+ int i, len = adjustments.length ();
tree new_reversed = NULL;
bool care_for_types, last_parm_void;
== void_type_node);
otypes = get_vector_of_formal_parm_types (orig_type);
if (last_parm_void)
- gcc_assert (VEC_length (tree, oparms) + 1 == VEC_length (tree, otypes));
+ gcc_assert (oparms.length () + 1 == otypes.length ());
else
- gcc_assert (VEC_length (tree, oparms) == VEC_length (tree, otypes));
+ gcc_assert (oparms.length () == otypes.length ());
}
else
{
last_parm_void = false;
- otypes = NULL;
+ otypes.create (0);
}
for (i = 0; i < len; i++)
struct ipa_parm_adjustment *adj;
gcc_assert (link);
- adj = &VEC_index (ipa_parm_adjustment_t, adjustments, i);
- parm = VEC_index (tree, oparms, adj->base_index);
+ adj = &adjustments[i];
+ parm = oparms[adj->base_index];
adj->base = parm;
if (adj->copy_param)
{
if (care_for_types)
- new_arg_types = tree_cons (NULL_TREE, VEC_index (tree, otypes,
- adj->base_index),
+ new_arg_types = tree_cons (NULL_TREE, otypes[adj->base_index],
new_arg_types);
*link = parm;
link = &DECL_CHAIN (parm);
When we are asked to remove it, we need to build a new FUNCTION_TYPE
instead. */
if (TREE_CODE (orig_type) != METHOD_TYPE
- || (VEC_index (ipa_parm_adjustment_t, adjustments, 0).copy_param
- && VEC_index (ipa_parm_adjustment_t, adjustments, 0).base_index == 0))
+ || (adjustments[0].copy_param
+ && adjustments[0].base_index == 0))
{
new_type = build_distinct_type_copy (orig_type);
TYPE_ARG_TYPES (new_type) = new_reversed;
TREE_TYPE (fndecl) = new_type;
DECL_VIRTUAL_P (fndecl) = 0;
- if (otypes)
- VEC_free (tree, heap, otypes);
- VEC_free (tree, heap, oparms);
+ otypes.release ();
+ oparms.release ();
}
/* Modify actual arguments of a function call CS as indicated in ADJUSTMENTS.
ipa_modify_call_arguments (struct cgraph_edge *cs, gimple stmt,
ipa_parm_adjustment_vec adjustments)
{
- VEC(tree, heap) *vargs;
- VEC(tree, gc) **debug_args = NULL;
+ vec<tree> vargs;
+ vec<tree, va_gc> **debug_args = NULL;
gimple new_stmt;
gimple_stmt_iterator gsi;
tree callee_decl;
int i, len;
- len = VEC_length (ipa_parm_adjustment_t, adjustments);
- vargs = VEC_alloc (tree, heap, len);
+ len = adjustments.length ();
+ vargs.create (len);
callee_decl = !cs ? gimple_call_fndecl (stmt) : cs->callee->symbol.decl;
gsi = gsi_for_stmt (stmt);
{
struct ipa_parm_adjustment *adj;
- adj = &VEC_index (ipa_parm_adjustment_t, adjustments, i);
+ adj = &adjustments[i];
if (adj->copy_param)
{
tree arg = gimple_call_arg (stmt, adj->base_index);
- VEC_quick_push (tree, vargs, arg);
+ vargs.quick_push (arg);
}
else if (!adj->remove_param)
{
adj->by_ref
|| is_gimple_reg_type (adj->type),
NULL, true, GSI_SAME_STMT);
- VEC_quick_push (tree, vargs, expr);
+ vargs.quick_push (expr);
}
if (!adj->copy_param && MAY_HAVE_DEBUG_STMTS)
{
}
if (debug_args == NULL)
debug_args = decl_debug_args_insert (callee_decl);
- for (ix = 0; VEC_iterate (tree, *debug_args, ix, ddecl); ix += 2)
+ for (ix = 0; vec_safe_iterate (*debug_args, ix, &ddecl); ix += 2)
if (ddecl == origin)
{
- ddecl = VEC_index (tree, *debug_args, ix + 1);
+ ddecl = (**debug_args)[ix + 1];
break;
}
if (ddecl == NULL)
TREE_TYPE (ddecl) = TREE_TYPE (origin);
DECL_MODE (ddecl) = DECL_MODE (origin);
- VEC_safe_push (tree, gc, *debug_args, origin);
- VEC_safe_push (tree, gc, *debug_args, ddecl);
+ vec_safe_push (*debug_args, origin);
+ vec_safe_push (*debug_args, ddecl);
}
- def_temp = gimple_build_debug_bind (ddecl, unshare_expr (arg),
- stmt);
+ def_temp = gimple_build_debug_bind (ddecl, unshare_expr (arg), stmt);
gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
}
}
}
new_stmt = gimple_build_call_vec (callee_decl, vargs);
- VEC_free (tree, heap, vargs);
+ vargs.release ();
if (gimple_call_lhs (stmt))
gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
index_in_adjustments_multiple_times_p (int base_index,
ipa_parm_adjustment_vec adjustments)
{
- int i, len = VEC_length (ipa_parm_adjustment_t, adjustments);
+ int i, len = adjustments.length ();
bool one = false;
for (i = 0; i < len; i++)
{
struct ipa_parm_adjustment *adj;
- adj = &VEC_index (ipa_parm_adjustment_t, adjustments, i);
+ adj = &adjustments[i];
if (adj->base_index == base_index)
{
ipa_combine_adjustments (ipa_parm_adjustment_vec inner,
ipa_parm_adjustment_vec outer)
{
- int i, outlen = VEC_length (ipa_parm_adjustment_t, outer);
- int inlen = VEC_length (ipa_parm_adjustment_t, inner);
+ int i, outlen = outer.length ();
+ int inlen = inner.length ();
int removals = 0;
ipa_parm_adjustment_vec adjustments, tmp;
- tmp = VEC_alloc (ipa_parm_adjustment_t, heap, inlen);
+ tmp.create (inlen);
for (i = 0; i < inlen; i++)
{
struct ipa_parm_adjustment *n;
- n = &VEC_index (ipa_parm_adjustment_t, inner, i);
+ n = &inner[i];
if (n->remove_param)
removals++;
else
- VEC_quick_push (ipa_parm_adjustment_t, tmp, *n);
+ tmp.quick_push (*n);
}
- adjustments = VEC_alloc (ipa_parm_adjustment_t, heap, outlen + removals);
+ adjustments.create (outlen + removals);
for (i = 0; i < outlen; i++)
{
struct ipa_parm_adjustment r;
- struct ipa_parm_adjustment *out = &VEC_index (ipa_parm_adjustment_t,
- outer, i);
- struct ipa_parm_adjustment *in = &VEC_index (ipa_parm_adjustment_t, tmp,
- out->base_index);
+ struct ipa_parm_adjustment *out = &outer[i];
+ struct ipa_parm_adjustment *in = &tmp[out->base_index];
memset (&r, 0, sizeof (r));
gcc_assert (!in->remove_param);
if (!index_in_adjustments_multiple_times_p (in->base_index, tmp))
{
r.remove_param = true;
- VEC_quick_push (ipa_parm_adjustment_t, adjustments, r);
+ adjustments.quick_push (r);
}
continue;
}
r.offset = in->offset;
else
r.offset = in->offset + out->offset;
- VEC_quick_push (ipa_parm_adjustment_t, adjustments, r);
+ adjustments.quick_push (r);
}
for (i = 0; i < inlen; i++)
{
- struct ipa_parm_adjustment *n = &VEC_index (ipa_parm_adjustment_t,
- inner, i);
+ struct ipa_parm_adjustment *n = &inner[i];
if (n->remove_param)
- VEC_quick_push (ipa_parm_adjustment_t, adjustments, *n);
+ adjustments.quick_push (*n);
}
- VEC_free (ipa_parm_adjustment_t, heap, tmp);
+ tmp.release ();
return adjustments;
}
ipa_dump_param_adjustments (FILE *file, ipa_parm_adjustment_vec adjustments,
tree fndecl)
{
- int i, len = VEC_length (ipa_parm_adjustment_t, adjustments);
+ int i, len = adjustments.length ();
bool first = true;
- VEC(tree, heap) *parms = ipa_get_vector_of_formal_parms (fndecl);
+ vec<tree> parms = ipa_get_vector_of_formal_parms (fndecl);
fprintf (file, "IPA param adjustments: ");
for (i = 0; i < len; i++)
{
struct ipa_parm_adjustment *adj;
- adj = &VEC_index (ipa_parm_adjustment_t, adjustments, i);
+ adj = &adjustments[i];
if (!first)
fprintf (file, " ");
first = false;
fprintf (file, "%i. base_index: %i - ", i, adj->base_index);
- print_generic_expr (file, VEC_index (tree, parms, adj->base_index), 0);
+ print_generic_expr (file, parms[adj->base_index], 0);
if (adj->base)
{
fprintf (file, ", base: ");
print_node_brief (file, ", type: ", adj->type, 0);
fprintf (file, "\n");
}
- VEC_free (tree, heap, parms);
+ parms.release ();
}
/* Dump the AV linked list. */
break;
}
- count = VEC_length (ipa_agg_jf_item_t, jump_func->agg.items);
+ count = vec_safe_length (jump_func->agg.items);
streamer_write_uhwi (ob, count);
if (count)
{
streamer_write_bitpack (&bp);
}
- FOR_EACH_VEC_ELT (ipa_agg_jf_item_t, jump_func->agg.items, i, item)
+ FOR_EACH_VEC_SAFE_ELT (jump_func->agg.items, i, item)
{
streamer_write_uhwi (ob, item->offset);
stream_write_tree (ob, item->value, true);
}
count = streamer_read_uhwi (ib);
- jump_func->agg.items = VEC_alloc (ipa_agg_jf_item_t, gc, count);
+ vec_alloc (jump_func->agg.items, count);
if (count)
{
bp = streamer_read_bitpack (ib);
struct ipa_agg_jf_item item;
item.offset = streamer_read_uhwi (ib);
item.value = stream_read_tree (ib, data_in);
- VEC_quick_push (ipa_agg_jf_item_t, jump_func->agg.items, item);
+ jump_func->agg.items->quick_push (item);
}
}
if (!count)
continue;
- VEC_safe_grow_cleared (ipa_jump_func_t, gc, args->jump_functions, count);
+ vec_safe_grow_cleared (args->jump_functions, count);
for (k = 0; k < ipa_get_cs_argument_count (args); k++)
ipa_read_jump_function (ib, ipa_get_ith_jump_func (args, k), data_in);
if (count)
{
- VEC_safe_grow_cleared (ipa_jump_func_t, gc, args->jump_functions,
- count);
+ vec_safe_grow_cleared (args->jump_functions, count);
for (k = 0; k < ipa_get_cs_argument_count (args); k++)
ipa_read_jump_function (ib, ipa_get_ith_jump_func (args, k),
data_in);
lto_symtab_encoder_t encoder;
- if (!ipa_node_params_vector)
+ if (!ipa_node_params_vector.exists ())
return;
ob = create_output_block (LTO_section_jump_functions);
data_in =
lto_data_in_create (file_data, (const char *) data + string_offset,
- header->string_size, NULL);
+ header->string_size,
+ vec<ld_plugin_symbol_resolution_t>());
count = streamer_read_uhwi (&ib_main);
for (i = 0; i < count; i++)
header->main_size);
data_in = lto_data_in_create (file_data, (const char *) data + string_offset,
- header->string_size, NULL);
+ header->string_size,
+ vec<ld_plugin_symbol_resolution_t>());
count = streamer_read_uhwi (&ib_main);
for (i = 0; i < count; i++)
unsigned int
ipcp_transform_function (struct cgraph_node *node)
{
- VEC (ipa_param_descriptor_t, heap) *descriptors = NULL;
+ vec<ipa_param_descriptor_t> descriptors = vec<ipa_param_descriptor_t>();
struct param_analysis_info *parms_ainfo;
struct ipa_agg_replacement_value *aggval;
gimple_stmt_iterator gsi;
ipa_dump_agg_replacement_values (dump_file, aggval);
parms_ainfo = XALLOCAVEC (struct param_analysis_info, param_count);
memset (parms_ainfo, 0, sizeof (struct param_analysis_info) * param_count);
- VEC_safe_grow_cleared (ipa_param_descriptor_t, heap,
- descriptors, param_count);
+ descriptors.safe_grow_cleared (param_count);
ipa_populate_param_decls (node, descriptors);
FOR_EACH_BB (bb)
cfg_changed = true;
}
- VEC_replace (ipa_agg_replacement_value_p, ipa_node_agg_replacements,
- node->uid, NULL);
+ (*ipa_node_agg_replacements)[node->uid] = NULL;
free_parms_ainfo (parms_ainfo, param_count);
- VEC_free (ipa_param_descriptor_t, heap, descriptors);
+ descriptors.release ();
if (!something_changed)
return 0;
tree value;
} ipa_agg_jf_item_t;
-DEF_VEC_O (ipa_agg_jf_item_t);
-DEF_VEC_ALLOC_O (ipa_agg_jf_item_t, gc);
-DEF_VEC_ALLOC_O (ipa_agg_jf_item_t, heap);
/* Aggregate jump function - i.e. description of contents of aggregates passed
either by reference or value. */
struct GTY(()) ipa_agg_jump_function
{
/* Description of the individual items. */
- VEC (ipa_agg_jf_item_t, gc) *items;
+ vec<ipa_agg_jf_item_t, va_gc> *items;
/* True if the data was passed by reference (as opposed to by value). */
bool by_ref;
};
typedef struct ipa_agg_jump_function *ipa_agg_jump_function_p;
-DEF_VEC_P (ipa_agg_jump_function_p);
-DEF_VEC_ALLOC_P (ipa_agg_jump_function_p, heap);
typedef struct ipa_agg_jump_function ipa_agg_jump_function_t;
-DEF_VEC_P (ipa_agg_jump_function_t);
-DEF_VEC_ALLOC_P (ipa_agg_jump_function_t, heap);
/* A jump function for a callsite represents the values passed as actual
arguments of the callsite. See enum jump_func_type for the various
} GTY ((desc ("%1.type"))) value;
} ipa_jump_func_t;
-DEF_VEC_O (ipa_jump_func_t);
-DEF_VEC_ALLOC_O (ipa_jump_func_t, gc);
/* Return the offset of the component that is described by a known type jump
function JFUNC. */
};
typedef struct ipa_param_descriptor ipa_param_descriptor_t;
-DEF_VEC_O (ipa_param_descriptor_t);
-DEF_VEC_ALLOC_O (ipa_param_descriptor_t, heap);
struct ipcp_lattice;
/* ipa_node_params stores information related to formal parameters of functions
{
/* Information about individual formal parameters that are gathered when
summaries are generated. */
- VEC (ipa_param_descriptor_t, heap) *descriptors;
+ vec<ipa_param_descriptor_t> descriptors;
/* Pointer to an array of structures describing individual formal
parameters. */
struct ipcp_param_lattices *lattices;
struct cgraph_node *ipcp_orig_node;
/* If this node is an ipa-cp clone, these are the known values that describe
what it has been specialized for. */
- VEC (tree, heap) *known_vals;
+ vec<tree> known_vals;
/* Whether the param uses analysis has already been performed. */
unsigned uses_analysis_done : 1;
/* Whether the function is enqueued in ipa-cp propagation stack. */
static inline int
ipa_get_param_count (struct ipa_node_params *info)
{
- return VEC_length (ipa_param_descriptor_t, info->descriptors);
+ return info->descriptors.length ();
}
/* Return the declaration of Ith formal parameter of the function corresponding
static inline tree
ipa_get_param (struct ipa_node_params *info, int i)
{
- return VEC_index (ipa_param_descriptor_t, info->descriptors, i).decl;
+ return info->descriptors[i].decl;
}
/* Set the used flag corresponding to the Ith formal parameter of the function
static inline void
ipa_set_param_used (struct ipa_node_params *info, int i, bool val)
{
- VEC_index (ipa_param_descriptor_t, info->descriptors, i).used = val;
+ info->descriptors[i].used = val;
}
/* Return the used flag corresponding to the Ith formal parameter of the
static inline bool
ipa_is_param_used (struct ipa_node_params *info, int i)
{
- return VEC_index (ipa_param_descriptor_t, info->descriptors, i).used;
+ return info->descriptors[i].used;
}
/* Information about replacements done in aggregates for a given node (each
};
typedef struct ipa_agg_replacement_value *ipa_agg_replacement_value_p;
-DEF_VEC_P (ipa_agg_replacement_value_p);
-DEF_VEC_ALLOC_P (ipa_agg_replacement_value_p, gc);
void ipa_set_node_agg_value_chain (struct cgraph_node *node,
struct ipa_agg_replacement_value *aggvals);
typedef struct GTY(()) ipa_edge_args
{
/* Vector of the callsite's jump function of each parameter. */
- VEC (ipa_jump_func_t, gc) *jump_functions;
+ vec<ipa_jump_func_t, va_gc> *jump_functions;
} ipa_edge_args_t;
/* ipa_edge_args access functions. Please use these to access fields that
static inline int
ipa_get_cs_argument_count (struct ipa_edge_args *args)
{
- return VEC_length (ipa_jump_func_t, args->jump_functions);
+ return vec_safe_length (args->jump_functions);
}
/* Returns a pointer to the jump function for the ith argument. Please note
static inline struct ipa_jump_func *
ipa_get_ith_jump_func (struct ipa_edge_args *args, int i)
{
- return &VEC_index (ipa_jump_func_t, args->jump_functions, i);
+ return &(*args->jump_functions)[i];
}
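GC-allocated vectors keep pointer semantics: a vec<T, va_gc> * may be NULL,
so code uses the vec_safe_* free functions, which tolerate a NULL pointer,
and indexes through an explicit dereference as above. A sketch with a
hypothetical GC vector v:

  vec<tree, va_gc> *v = NULL;
  vec_safe_push (v, integer_zero_node);	/* Allocates in GC memory on first use.  */
  unsigned n = vec_safe_length (v);	/* Would return 0 for a NULL vector.  */
  tree first = (*v)[0];			/* operator[] needs the dereference.  */
  vec_free (v);				/* Replaces VEC_free (tree, gc, v).  */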
/* Vectors need to have typedefs of structures. */
typedef struct ipa_node_params ipa_node_params_t;
/* Types of vectors holding the infos. */
-DEF_VEC_O (ipa_node_params_t);
-DEF_VEC_ALLOC_O (ipa_node_params_t, heap);
-DEF_VEC_O (ipa_edge_args_t);
-DEF_VEC_ALLOC_O (ipa_edge_args_t, gc);
/* Vector where the parameter infos are actually stored. */
-extern VEC (ipa_node_params_t, heap) *ipa_node_params_vector;
+extern vec<ipa_node_params_t> ipa_node_params_vector;
/* Vector of known aggregate values in cloned nodes. */
-extern GTY(()) VEC (ipa_agg_replacement_value_p, gc) *ipa_node_agg_replacements;
+extern GTY(()) vec<ipa_agg_replacement_value_p, va_gc> *ipa_node_agg_replacements;
/* Vector where the parameter infos are actually stored. */
-extern GTY(()) VEC (ipa_edge_args_t, gc) *ipa_edge_args_vector;
+extern GTY(()) vec<ipa_edge_args_t, va_gc> *ipa_edge_args_vector;
/* Return the associated parameter/argument info corresponding to the given
node/edge. */
-#define IPA_NODE_REF(NODE) (&VEC_index (ipa_node_params_t, \
- ipa_node_params_vector, (NODE)->uid))
-#define IPA_EDGE_REF(EDGE) (&VEC_index (ipa_edge_args_t, \
- ipa_edge_args_vector, (EDGE)->uid))
+#define IPA_NODE_REF(NODE) (&ipa_node_params_vector[(NODE)->uid])
+#define IPA_EDGE_REF(EDGE) (&(*ipa_edge_args_vector)[(EDGE)->uid])
/* This macro checks validity of index returned by
ipa_get_param_decl_index function. */
#define IS_VALID_JUMP_FUNC_INDEX(I) ((I) != -1)
static inline void
ipa_check_create_node_params (void)
{
- if (!ipa_node_params_vector)
- ipa_node_params_vector = VEC_alloc (ipa_node_params_t, heap,
- cgraph_max_uid);
+ if (!ipa_node_params_vector.exists ())
+ ipa_node_params_vector.create (cgraph_max_uid);
- if (VEC_length (ipa_node_params_t, ipa_node_params_vector)
- <= (unsigned) cgraph_max_uid)
- VEC_safe_grow_cleared (ipa_node_params_t, heap,
- ipa_node_params_vector, cgraph_max_uid + 1);
+ if (ipa_node_params_vector.length () <= (unsigned) cgraph_max_uid)
+ ipa_node_params_vector.safe_grow_cleared (cgraph_max_uid + 1);
}
/* This function ensures the array of edge arguments infos is big enough to
static inline void
ipa_check_create_edge_args (void)
{
- if (!ipa_edge_args_vector)
- ipa_edge_args_vector = VEC_alloc (ipa_edge_args_t, gc,
- cgraph_edge_max_uid);
-
- if (VEC_length (ipa_edge_args_t, ipa_edge_args_vector)
- <= (unsigned) cgraph_edge_max_uid)
- VEC_safe_grow_cleared (ipa_edge_args_t, gc, ipa_edge_args_vector,
- cgraph_edge_max_uid + 1);
+ if (vec_safe_length (ipa_edge_args_vector) <= (unsigned) cgraph_edge_max_uid)
+ vec_safe_grow_cleared (ipa_edge_args_vector, cgraph_edge_max_uid + 1);
}
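The two check-create functions above show the same growth idiom under both
allocation strategies: the heap vector is an object manipulated through
member functions, while the GC vector is a pointer grown through
vec_safe_grow_cleared, which also handles the initial NULL. A condensed
sketch with hypothetical globals:

  static vec<int> heap_cache;		/* Heap strategy: an object.  */
  static vec<int, va_gc> *gc_cache;	/* GC strategy: a pointer.  */

  static void
  ensure_caches (unsigned max_uid)
  {
    if (heap_cache.length () <= max_uid)
      heap_cache.safe_grow_cleared (max_uid + 1);
    if (vec_safe_length (gc_cache) <= max_uid)
      vec_safe_grow_cleared (gc_cache, max_uid + 1);
  }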
/* Returns true if the array of edge infos is large enough to accommodate an
static inline bool
ipa_edge_args_info_available_for_edge_p (struct cgraph_edge *edge)
{
- return ((unsigned) edge->uid < VEC_length (ipa_edge_args_t,
- ipa_edge_args_vector));
+ return ((unsigned) edge->uid < vec_safe_length (ipa_edge_args_vector));
}
/* Return the aggregate replacements for NODE, if there are any. */
static inline struct ipa_agg_replacement_value *
ipa_get_agg_replacements_for_node (struct cgraph_node *node)
{
- if ((unsigned) node->uid >= VEC_length (ipa_agg_replacement_value_p,
- ipa_node_agg_replacements))
+ if ((unsigned) node->uid >= vec_safe_length (ipa_node_agg_replacements))
return NULL;
- return VEC_index (ipa_agg_replacement_value_p, ipa_node_agg_replacements,
- node->uid);
+ return (*ipa_node_agg_replacements)[node->uid];
}
/* Function formal parameters related computations. */
void ipa_initialize_node_params (struct cgraph_node *node);
bool ipa_propagate_indirect_call_infos (struct cgraph_edge *cs,
- VEC (cgraph_edge_p, heap) **new_edges);
+ vec<cgraph_edge_p> *new_edges);
/* Indirect edge and binfo processing. */
tree ipa_get_indirect_edge_target (struct cgraph_edge *ie,
- VEC (tree, heap) *,
- VEC (tree, heap) *,
- VEC (ipa_agg_jump_function_p, heap) *);
+ vec<tree>,
+ vec<tree>,
+ vec<ipa_agg_jump_function_p>);
struct cgraph_edge *ipa_make_edge_direct_to_target (struct cgraph_edge *, tree);
/* Functions related to both. */
};
typedef struct ipa_parm_adjustment ipa_parm_adjustment_t;
-DEF_VEC_O (ipa_parm_adjustment_t);
-DEF_VEC_ALLOC_O (ipa_parm_adjustment_t, heap);
-typedef VEC (ipa_parm_adjustment_t, heap) *ipa_parm_adjustment_vec;
+typedef vec<ipa_parm_adjustment_t> ipa_parm_adjustment_vec;
-VEC(tree, heap) *ipa_get_vector_of_formal_parms (tree fndecl);
+vec<tree> ipa_get_vector_of_formal_parms (tree fndecl);
void ipa_modify_formal_parameters (tree fndecl, ipa_parm_adjustment_vec,
const char *);
void ipa_modify_call_arguments (struct cgraph_edge *, gimple,
/* Array, indexed by cgraph node uid, of function states. */
-DEF_VEC_P (funct_state);
-DEF_VEC_ALLOC_P (funct_state, heap);
-static VEC (funct_state, heap) *funct_state_vec;
+static vec<funct_state> funct_state_vec;
/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;
static void
finish_state (void)
{
- free (funct_state_vec);
+ funct_state_vec.release ();
}
static inline bool
has_function_state (struct cgraph_node *node)
{
- if (!funct_state_vec
- || VEC_length (funct_state, funct_state_vec) <= (unsigned int)node->uid)
+ if (!funct_state_vec.exists ()
+ || funct_state_vec.length () <= (unsigned int)node->uid)
return false;
- return VEC_index (funct_state, funct_state_vec, node->uid) != NULL;
+ return funct_state_vec[node->uid] != NULL;
}
/* Return the function state from NODE. */
static inline funct_state
get_function_state (struct cgraph_node *node)
{
- if (!funct_state_vec
- || VEC_length (funct_state, funct_state_vec) <= (unsigned int)node->uid
- || !VEC_index (funct_state, funct_state_vec, node->uid))
+ if (!funct_state_vec.exists ()
+ || funct_state_vec.length () <= (unsigned int)node->uid
+ || !funct_state_vec[node->uid])
/* We might want to put correct previously_known state into varying. */
return &varying_state;
- return VEC_index (funct_state, funct_state_vec, node->uid);
+ return funct_state_vec[node->uid];
}
/* Set the function state S for NODE. */
static inline void
set_function_state (struct cgraph_node *node, funct_state s)
{
- if (!funct_state_vec
- || VEC_length (funct_state, funct_state_vec) <= (unsigned int)node->uid)
- VEC_safe_grow_cleared (funct_state, heap, funct_state_vec, node->uid + 1);
- VEC_replace (funct_state, funct_state_vec, node->uid, s);
+ if (!funct_state_vec.exists ()
+ || funct_state_vec.length () <= (unsigned int)node->uid)
+ funct_state_vec.safe_grow_cleared (node->uid + 1);
+ funct_state_vec[node->uid] = s;
}
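funct_state_vec demonstrates the recurring uid-indexed side-table idiom: exists () replaces the old null check, safe_grow_cleared () extends the table with zeroed slots, and operator[] replaces VEC_index and VEC_replace. A rough standalone equivalent, with std::vector standing in for the heap vec:

#include <cstdio>
#include <vector>

/* Stand-in for the uid-indexed table; std::vector, not GCC's vec.  */
static std::vector<int *> state_vec;

static void
set_state (unsigned uid, int *s)
{
  /* Like the exists () / length () check followed by safe_grow_cleared:
     resize value-initializes the new slots to null.  */
  if (state_vec.size () <= uid)
    state_vec.resize (uid + 1);
  state_vec[uid] = s;             /* replaces VEC_replace */
}

int
main ()
{
  static int state = 42;
  set_state (7, &state);          /* slots 0..6 stay null */
  std::printf ("%d\n", *state_vec[7]);
  return 0;
}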
/* Check to see if the use (or definition when CHECKING_WRITE is true)
FOR_EACH_DEFINED_FUNCTION (node)
if (has_function_state (node))
free (get_function_state (node));
- VEC_free (funct_state, heap, funct_state_vec);
+ funct_state_vec.release ();
finish_state ();
return 0;
}
static inline struct ipa_ref *
ipa_ref_list_first_reference (struct ipa_ref_list *list)
{
- if (!VEC_length (ipa_ref_t, list->references))
+ if (!vec_safe_length (list->references))
return NULL;
- return &VEC_index (ipa_ref_t, list->references, 0);
+ return &(*list->references)[0];
}
/* Return first referring ref in LIST or NULL if empty. */
static inline struct ipa_ref *
ipa_ref_list_first_referring (struct ipa_ref_list *list)
{
- if (!VEC_length (ipa_ref_ptr, list->referring))
+ if (!list->referring.length ())
return NULL;
- return VEC_index (ipa_ref_ptr, list->referring, 0);
+ return list->referring[0];
}
/* Clear reference list. */
static inline void
ipa_empty_ref_list (struct ipa_ref_list *list)
{
- list->referring = NULL;
+ list->referring.create (0);
list->references = NULL;
}
static inline unsigned int
ipa_ref_list_nreferences (struct ipa_ref_list *list)
{
- return VEC_length (ipa_ref_t, list->references);
+ return vec_safe_length (list->references);
}
#define ipa_ref_list_reference_iterate(L,I,P) \
- VEC_iterate(ipa_ref_t, (L)->references, (I), (P))
+ vec_safe_iterate ((L)->references, (I), &(P))
#define ipa_ref_list_referring_iterate(L,I,P) \
- VEC_iterate(ipa_ref_ptr, (L)->referring, (I), (P))
+ (L)->referring.iterate ((I), &(P))
{
struct ipa_ref *ref;
struct ipa_ref_list *list, *list2;
- VEC(ipa_ref_t,gc) *old_references;
+ ipa_ref_t *old_references;
gcc_checking_assert (!stmt || is_a <cgraph_node> (referring_node));
gcc_checking_assert (use_type != IPA_REF_ALIAS || !stmt);
list = &referring_node->symbol.ref_list;
- old_references = list->references;
- VEC_safe_grow (ipa_ref_t, gc, list->references,
- VEC_length (ipa_ref_t, list->references) + 1);
- ref = &VEC_last (ipa_ref_t, list->references);
+ old_references = vec_safe_address (list->references);
+ vec_safe_grow (list->references, vec_safe_length (list->references) + 1);
+ ref = &list->references->last ();
list2 = &referred_node->symbol.ref_list;
- VEC_safe_push (ipa_ref_ptr, heap, list2->referring, ref);
- ref->referred_index = VEC_length (ipa_ref_ptr, list2->referring) - 1;
+ list2->referring.safe_push (ref);
+ ref->referred_index = list2->referring.length () - 1;
ref->referring = referring_node;
ref->referred = referred_node;
ref->stmt = stmt;
ref->use = use_type;
/* If vector was moved in memory, update pointers. */
- if (old_references != list->references)
+ if (old_references != list->references->address ())
{
int i;
for (i = 0; ipa_ref_list_reference_iterate (list, i, ref); i++)
- VEC_replace (ipa_ref_ptr,
- ipa_ref_referred_ref_list (ref)->referring,
- ref->referred_index, ref);
+ ipa_ref_referred_ref_list (ref)->referring[ref->referred_index] = ref;
}
return ref;
}
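The old_references bookkeeping above exists because vec_safe_grow may reallocate the buffer, invalidating every ipa_ref pointer recorded in the referring lists; the loop afterwards re-seats them. The same hazard in miniature, sketched with std::vector:

#include <cstdio>
#include <vector>

int
main ()
{
  std::vector<int> refs (4, 0);
  /* Remember the buffer address before growing, as the hunk above does
     with vec_safe_address.  */
  const int *old_addr = refs.data ();
  refs.resize (refs.size () + 1);          /* like vec_safe_grow (+1) */
  if (old_addr != refs.data ())
    std::puts ("buffer moved: stored element pointers need re-seating");
  else
    std::puts ("buffer stayed in place");
  return 0;
}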
{
struct ipa_ref_list *list = ipa_ref_referred_ref_list (ref);
struct ipa_ref_list *list2 = ipa_ref_referring_ref_list (ref);
- VEC(ipa_ref_t,gc) *old_references = list2->references;
+ vec<ipa_ref_t, va_gc> *old_references = list2->references;
struct ipa_ref *last;
- gcc_assert (VEC_index (ipa_ref_ptr, list->referring, ref->referred_index) == ref);
- last = VEC_last (ipa_ref_ptr, list->referring);
+ gcc_assert (list->referring[ref->referred_index] == ref);
+ last = list->referring.last ();
if (ref != last)
{
- VEC_replace (ipa_ref_ptr, list->referring,
- ref->referred_index,
- VEC_last (ipa_ref_ptr, list->referring));
- VEC_index (ipa_ref_ptr, list->referring,
- ref->referred_index)->referred_index = ref->referred_index;
+ list->referring[ref->referred_index] = list->referring.last ();
+ list->referring[ref->referred_index]->referred_index
+ = ref->referred_index;
}
- VEC_pop (ipa_ref_ptr, list->referring);
+ list->referring.pop ();
- last = &VEC_last (ipa_ref_t, list2->references);
+ last = &list2->references->last ();
if (ref != last)
{
*ref = *last;
- VEC_replace (ipa_ref_ptr,
- ipa_ref_referred_ref_list (ref)->referring,
- ref->referred_index, ref);
+ ipa_ref_referred_ref_list (ref)->referring[ref->referred_index] = ref;
}
- VEC_pop (ipa_ref_t, list2->references);
+ list2->references->pop ();
gcc_assert (list2->references == old_references);
}
void
ipa_remove_all_references (struct ipa_ref_list *list)
{
- while (VEC_length (ipa_ref_t, list->references))
- ipa_remove_reference (&VEC_last (ipa_ref_t, list->references));
- VEC_free (ipa_ref_t, gc, list->references);
- list->references = NULL;
+ while (vec_safe_length (list->references))
+ ipa_remove_reference (&list->references->last ());
+ vec_free (list->references);
}
/* Remove all references in ref list LIST. */
void
ipa_remove_all_referring (struct ipa_ref_list *list)
{
- while (VEC_length (ipa_ref_ptr, list->referring))
- ipa_remove_reference (VEC_last (ipa_ref_ptr, list->referring));
- VEC_free (ipa_ref_ptr, heap, list->referring);
- list->referring = NULL;
+ while (list->referring.length ())
+ ipa_remove_reference (list->referring.last ());
+ list->referring.release ();
}
/* Dump references in LIST to FILE. */
typedef struct ipa_ref ipa_ref_t;
typedef struct ipa_ref *ipa_ref_ptr;
-DEF_VEC_O(ipa_ref_t);
-DEF_VEC_ALLOC_O(ipa_ref_t,gc);
-DEF_VEC_P(ipa_ref_ptr);
-DEF_VEC_ALLOC_P(ipa_ref_ptr,heap);
/* List of references. This is stored in both callgraph and varpool nodes. */
struct GTY(()) ipa_ref_list
{
/* Store actual references in references vector. */
- VEC(ipa_ref_t,gc) *references;
+ vec<ipa_ref_t, va_gc> *references;
/* Referring is vector of pointers to references. It must not live in GGC space
or GGC will try to mark middle of references vectors. */
- VEC(ipa_ref_ptr,heap) * GTY((skip)) referring;
+ vec<ipa_ref_ptr> GTY((skip)) referring;
};
struct ipa_ref * ipa_record_reference (symtab_node,
/* Vector where the reference var infos are actually stored.
Indexed by UID of call graph nodes. */
-DEF_VEC_P (ipa_reference_vars_info_t);
-DEF_VEC_ALLOC_P (ipa_reference_vars_info_t, heap);
-static VEC (ipa_reference_vars_info_t, heap) *ipa_reference_vars_vector;
+static vec<ipa_reference_vars_info_t> ipa_reference_vars_vector;
-DEF_VEC_P (ipa_reference_optimization_summary_t);
-DEF_VEC_ALLOC_P (ipa_reference_optimization_summary_t, heap);
-static VEC (ipa_reference_optimization_summary_t, heap) *ipa_reference_opt_sum_vector;
+static vec<ipa_reference_optimization_summary_t> ipa_reference_opt_sum_vector;
/* Return the ipa_reference_vars structure starting from the cgraph NODE. */
static inline ipa_reference_vars_info_t
get_reference_vars_info (struct cgraph_node *node)
{
- if (!ipa_reference_vars_vector
- || VEC_length (ipa_reference_vars_info_t,
- ipa_reference_vars_vector) <= (unsigned int) node->uid)
+ if (!ipa_reference_vars_vector.exists ()
+ || ipa_reference_vars_vector.length () <= (unsigned int) node->uid)
return NULL;
- return VEC_index (ipa_reference_vars_info_t, ipa_reference_vars_vector,
- node->uid);
+ return ipa_reference_vars_vector[node->uid];
}
/* Return the ipa_reference_vars structure starting from the cgraph NODE. */
static inline ipa_reference_optimization_summary_t
get_reference_optimization_summary (struct cgraph_node *node)
{
- if (!ipa_reference_opt_sum_vector
- || (VEC_length (ipa_reference_optimization_summary_t,
- ipa_reference_opt_sum_vector)
- <= (unsigned int) node->uid))
+ if (!ipa_reference_opt_sum_vector.exists ()
+ || (ipa_reference_opt_sum_vector.length () <= (unsigned int) node->uid))
return NULL;
- return VEC_index (ipa_reference_optimization_summary_t,
- ipa_reference_opt_sum_vector,
- node->uid);
+ return ipa_reference_opt_sum_vector[node->uid];
}
/* Return the ipa_reference_vars structure starting from the cgraph NODE. */
set_reference_vars_info (struct cgraph_node *node,
ipa_reference_vars_info_t info)
{
- if (!ipa_reference_vars_vector
- || VEC_length (ipa_reference_vars_info_t,
- ipa_reference_vars_vector) <= (unsigned int) node->uid)
- VEC_safe_grow_cleared (ipa_reference_vars_info_t, heap,
- ipa_reference_vars_vector, node->uid + 1);
- VEC_replace (ipa_reference_vars_info_t, ipa_reference_vars_vector,
- node->uid, info);
+ if (!ipa_reference_vars_vector.exists ()
+ || ipa_reference_vars_vector.length () <= (unsigned int) node->uid)
+ ipa_reference_vars_vector.safe_grow_cleared (node->uid + 1);
+ ipa_reference_vars_vector[node->uid] = info;
}
/* Return the ipa_reference_vars structure starting from the cgraph NODE. */
set_reference_optimization_summary (struct cgraph_node *node,
ipa_reference_optimization_summary_t info)
{
- if (!ipa_reference_opt_sum_vector
- || (VEC_length (ipa_reference_optimization_summary_t,
- ipa_reference_opt_sum_vector)
- <= (unsigned int) node->uid))
- VEC_safe_grow_cleared (ipa_reference_optimization_summary_t,
- heap, ipa_reference_opt_sum_vector, node->uid + 1);
- VEC_replace (ipa_reference_optimization_summary_t,
- ipa_reference_opt_sum_vector, node->uid, info);
+ if (!ipa_reference_opt_sum_vector.exists ()
+ || (ipa_reference_opt_sum_vector.length () <= (unsigned int) node->uid))
+ ipa_reference_opt_sum_vector.safe_grow_cleared (node->uid + 1);
+ ipa_reference_opt_sum_vector[node->uid] = info;
}
/* Return a bitmap indexed by DECL_UID for the static variables that
fprintf (dump_file, "Starting cycle with %s/%i\n",
cgraph_node_asm_name (node), node->symbol.order);
- VEC (cgraph_node_p, heap) *cycle_nodes = ipa_get_nodes_in_cycle (node);
+ vec<cgraph_node_ptr> cycle_nodes = ipa_get_nodes_in_cycle (node);
/* If any node in a cycle is read_all or write_all, they all are. */
- FOR_EACH_VEC_ELT (cgraph_node_p, cycle_nodes, x, w)
+ FOR_EACH_VEC_ELT (cycle_nodes, x, w)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " Visiting %s/%i\n",
/* Merge the sets of this cycle with all sets of callees reached
from this cycle. */
- FOR_EACH_VEC_ELT (cgraph_node_p, cycle_nodes, x, w)
+ FOR_EACH_VEC_ELT (cycle_nodes, x, w)
{
if (read_all && write_all)
break;
}
/* All nodes within a cycle have the same global info bitmaps. */
- FOR_EACH_VEC_ELT (cgraph_node_p, cycle_nodes, x, w)
+ FOR_EACH_VEC_ELT (cycle_nodes, x, w)
{
ipa_reference_vars_info_t w_ri = get_reference_vars_info (w);
w_ri->global = *node_g;
}
- VEC_free (cgraph_node_p, heap, cycle_nodes);
+ cycle_nodes.release ();
}
if (dump_file)
ipa_reference_vars_info_t node_info = get_reference_vars_info (node);
ipa_reference_global_vars_info_t node_g = &node_info->global;
- VEC (cgraph_node_p, heap) *cycle_nodes = ipa_get_nodes_in_cycle (node);
- FOR_EACH_VEC_ELT (cgraph_node_p, cycle_nodes, x, w)
+ vec<cgraph_node_ptr> cycle_nodes = ipa_get_nodes_in_cycle (node);
+ FOR_EACH_VEC_ELT (cycle_nodes, x, w)
{
ipa_reference_vars_info_t w_ri = get_reference_vars_info (w);
ipa_reference_local_vars_info_t w_l = &w_ri->local;
fprintf (dump_file, "\n locals written: ");
dump_static_vars_set_to_file (dump_file, w_l->statics_written);
}
- VEC_free (cgraph_node_p, heap, cycle_nodes);
+ cycle_nodes.release ();
fprintf (dump_file, "\n globals read: ");
dump_static_vars_set_to_file (dump_file, node_g->statics_read);
free (order);
bitmap_obstack_release (&local_info_obstack);
- VEC_free (ipa_reference_vars_info_t, heap, ipa_reference_vars_vector);
- ipa_reference_vars_vector = NULL;
+ ipa_reference_vars_vector.release ();
if (dump_file)
splay_tree_delete (reference_vars_to_consider);
reference_vars_to_consider = NULL;
unsigned int size;
unsigned int time;
} bb_info;
-DEF_VEC_O(bb_info);
-DEF_VEC_ALLOC_O(bb_info,heap);
-static VEC(bb_info, heap) *bb_info_vec;
+static vec<bb_info> bb_info_vec;
/* Description of split point. */
basic_block return_bb)
{
bitmap seen = BITMAP_ALLOC (NULL);
- VEC (basic_block,heap) *worklist = NULL;
+ vec<basic_block> worklist = vec<basic_block>();
edge e;
edge_iterator ei;
bool ok = true;
if (e->src != ENTRY_BLOCK_PTR
&& !bitmap_bit_p (current->split_bbs, e->src->index))
{
- VEC_safe_push (basic_block, heap, worklist, e->src);
+ worklist.safe_push (e->src);
bitmap_set_bit (seen, e->src->index);
}
- while (!VEC_empty (basic_block, worklist))
+ while (!worklist.is_empty ())
{
gimple_stmt_iterator bsi;
- basic_block bb = VEC_pop (basic_block, worklist);
+ basic_block bb = worklist.pop ();
FOR_EACH_EDGE (e, ei, bb->preds)
if (e->src != ENTRY_BLOCK_PTR
{
gcc_checking_assert (!bitmap_bit_p (current->split_bbs,
e->src->index));
- VEC_safe_push (basic_block, heap, worklist, e->src);
+ worklist.safe_push (e->src);
}
for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
{
}
done:
BITMAP_FREE (seen);
- VEC_free (basic_block, heap, worklist);
+ worklist.release ();
return ok;
}
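This hunk is the canonical worklist pattern under the new API: seed with safe_push, drain with is_empty and pop, then release. A compilable miniature, with std::vector standing in for vec<basic_block> and an invented three-block CFG:

#include <cstdio>
#include <vector>

int
main ()
{
  /* Made-up CFG: preds[b] lists the predecessors of block b.  */
  const std::vector<int> preds[3] = { {}, {0}, {0, 1} };
  std::vector<bool> seen (3, false);
  std::vector<int> worklist;

  worklist.push_back (2);            /* like worklist.safe_push (e->src) */
  seen[2] = true;
  while (!worklist.empty ())         /* like !worklist.is_empty () */
    {
      int bb = worklist.back ();
      worklist.pop_back ();          /* like worklist.pop () */
      std::printf ("visit block %d\n", bb);
      for (int p : preds[bb])
        if (!seen[p])
          {
            seen[p] = true;
            worklist.push_back (p);
          }
    }
  return 0;                          /* a GCC vec would need release () */
}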
/* When false, we cannot split on this BB. */
bool can_split;
} stack_entry;
-DEF_VEC_O(stack_entry);
-DEF_VEC_ALLOC_O(stack_entry,heap);
/* Find all articulations and call consider_split on them.
find_split_points (int overall_time, int overall_size)
{
stack_entry first;
- VEC(stack_entry, heap) *stack = NULL;
+ vec<stack_entry> stack = vec<stack_entry>();
basic_block bb;
basic_block return_bb = find_return_bb ();
struct split_point current;
first.set_ssa_names = 0;
first.used_ssa_names = 0;
first.bbs_visited = 0;
- VEC_safe_push (stack_entry, heap, stack, first);
+ stack.safe_push (first);
ENTRY_BLOCK_PTR->aux = (void *)(intptr_t)-1;
- while (!VEC_empty (stack_entry, stack))
+ while (!stack.is_empty ())
{
- stack_entry *entry = &VEC_last (stack_entry, stack);
+ stack_entry *entry = &stack.last ();
/* We are walking an acyclic graph, so edge_num counts
succ and pred edges together. However when considering
if (entry->edge_num == EDGE_COUNT (entry->bb->succs)
&& entry->bb != ENTRY_BLOCK_PTR)
{
- int pos = VEC_length (stack_entry, stack);
+ int pos = stack.length ();
entry->can_split &= visit_bb (entry->bb, return_bb,
entry->set_ssa_names,
entry->used_ssa_names,
new_entry.bb = dest;
new_entry.edge_num = 0;
new_entry.overall_time
- = VEC_index (bb_info, bb_info_vec, dest->index).time;
+ = bb_info_vec[dest->index].time;
new_entry.overall_size
- = VEC_index (bb_info, bb_info_vec, dest->index).size;
+ = bb_info_vec[dest->index].size;
new_entry.earliest = INT_MAX;
new_entry.set_ssa_names = BITMAP_ALLOC (NULL);
new_entry.used_ssa_names = BITMAP_ALLOC (NULL);
new_entry.non_ssa_vars = BITMAP_ALLOC (NULL);
new_entry.can_split = true;
bitmap_set_bit (new_entry.bbs_visited, dest->index);
- VEC_safe_push (stack_entry, heap, stack, new_entry);
- dest->aux = (void *)(intptr_t)VEC_length (stack_entry, stack);
+ stack.safe_push (new_entry);
+ dest->aux = (void *)(intptr_t)stack.length ();
}
/* Back edge found, record the earliest point. */
else if ((intptr_t)dest->aux > 0
and merge stuff we accumulate during the walk. */
else if (entry->bb != ENTRY_BLOCK_PTR)
{
- stack_entry *prev = &VEC_index (stack_entry, stack,
- VEC_length (stack_entry, stack) - 2);
+ stack_entry *prev = &stack[stack.length () - 2];
entry->bb->aux = (void *)(intptr_t)-1;
prev->can_split &= entry->can_split;
BITMAP_FREE (entry->used_ssa_names);
BITMAP_FREE (entry->bbs_visited);
BITMAP_FREE (entry->non_ssa_vars);
- VEC_pop (stack_entry, stack);
+ stack.pop ();
}
else
- VEC_pop (stack_entry, stack);
+ stack.pop ();
}
ENTRY_BLOCK_PTR->aux = NULL;
FOR_EACH_BB (bb)
bb->aux = NULL;
- VEC_free (stack_entry, heap, stack);
+ stack.release ();
BITMAP_FREE (current.ssa_names_to_pass);
}
static void
split_function (struct split_point *split_point)
{
- VEC (tree, heap) *args_to_pass = NULL;
+ vec<tree> args_to_pass = vec<tree>();
bitmap args_to_skip;
tree parm;
int num = 0;
gimple last_stmt = NULL;
unsigned int i;
tree arg, ddef;
- VEC(tree, gc) **debug_args = NULL;
+ vec<tree, va_gc> **debug_args = NULL;
if (dump_file)
{
if (!useless_type_conversion_p (DECL_ARG_TYPE (parm), TREE_TYPE (arg)))
arg = fold_convert (DECL_ARG_TYPE (parm), arg);
- VEC_safe_push (tree, heap, args_to_pass, arg);
+ args_to_pass.safe_push (arg);
}
/* See if the split function will return. */
/* Now create the actual clone. */
rebuild_cgraph_edges ();
- node = cgraph_function_versioning (cur_node, NULL, NULL, args_to_skip,
+ node = cgraph_function_versioning (cur_node, vec<cgraph_edge_p>(),
+ NULL,
+ args_to_skip,
!split_part_return_p,
split_point->split_bbs,
split_point->entry_bb, "part");
/* Produce the call statement. */
gsi = gsi_last_bb (call_bb);
- FOR_EACH_VEC_ELT (tree, args_to_pass, i, arg)
+ FOR_EACH_VEC_ELT (args_to_pass, i, arg)
if (!is_gimple_val (arg))
{
arg = force_gimple_operand_gsi (&gsi, arg, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
- VEC_replace (tree, args_to_pass, i, arg);
+ args_to_pass[i] = arg;
}
call = gimple_build_call_vec (node->symbol.decl, args_to_pass);
gimple_set_block (call, DECL_INITIAL (current_function_decl));
- VEC_free (tree, heap, args_to_pass);
+ args_to_pass.release ();
/* For optimized away parameters, add on the caller side
before the call
DECL_ARTIFICIAL (ddecl) = 1;
TREE_TYPE (ddecl) = TREE_TYPE (parm);
DECL_MODE (ddecl) = DECL_MODE (parm);
- VEC_safe_push (tree, gc, *debug_args, DECL_ORIGIN (parm));
- VEC_safe_push (tree, gc, *debug_args, ddecl);
+ vec_safe_push (*debug_args, DECL_ORIGIN (parm));
+ vec_safe_push (*debug_args, ddecl);
def_temp = gimple_build_debug_bind (ddecl, unshare_expr (arg),
call);
gsi_insert_after (&gsi, def_temp, GSI_NEW_STMT);
push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));
var = BLOCK_VARS (DECL_INITIAL (node->symbol.decl));
- i = VEC_length (tree, *debug_args);
+ i = vec_safe_length (*debug_args);
cgsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
do
{
i -= 2;
while (var != NULL_TREE
- && DECL_ABSTRACT_ORIGIN (var)
- != VEC_index (tree, *debug_args, i))
+ && DECL_ABSTRACT_ORIGIN (var) != (**debug_args)[i])
var = TREE_CHAIN (var);
if (var == NULL_TREE)
break;
vexpr = make_node (DEBUG_EXPR_DECL);
- parm = VEC_index (tree, *debug_args, i);
+ parm = (**debug_args)[i];
DECL_ARTIFICIAL (vexpr) = 1;
TREE_TYPE (vexpr) = TREE_TYPE (parm);
DECL_MODE (vexpr) = DECL_MODE (parm);
}
/* This can be relaxed; function might become inlinable after splitting
away the uninlinable part. */
- if (inline_edge_summary_vec && !inline_summary (node)->inlinable)
+ if (inline_edge_summary_vec.exists ()
+ && !inline_summary (node)->inlinable)
{
if (dump_file)
fprintf (dump_file, "Not splitting: not inlinable.\n");
calculate_dominance_info (CDI_DOMINATORS);
/* Compute local info about basic blocks and determine function size/time. */
- VEC_safe_grow_cleared (bb_info, heap, bb_info_vec, last_basic_block + 1);
+ bb_info_vec.safe_grow_cleared (last_basic_block + 1);
memset (&best_split_point, 0, sizeof (best_split_point));
FOR_EACH_BB (bb)
{
}
overall_time += time;
overall_size += size;
- VEC_index (bb_info, bb_info_vec, bb->index).time = time;
- VEC_index (bb_info, bb_info_vec, bb->index).size = size;
+ bb_info_vec[bb->index].time = time;
+ bb_info_vec[bb->index].size = size;
}
find_split_points (overall_time, overall_size);
if (best_split_point.split_bbs)
todo = TODO_update_ssa | TODO_cleanup_cfg;
}
BITMAP_FREE (forbidden_dominators);
- VEC_free (bb_info, heap, bb_info_vec);
- bb_info_vec = NULL;
+ bb_info_vec.release ();
return todo;
}
/* Get the set of nodes for the cycle in the reduced call graph starting
from NODE. */
-VEC (cgraph_node_p, heap) *
+vec<cgraph_node_ptr>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
- VEC (cgraph_node_p, heap) *v = NULL;
+ vec<cgraph_node_ptr> v = vec<cgraph_node_ptr>();
struct ipa_dfs_info *node_dfs_info;
while (node)
{
- VEC_safe_push (cgraph_node_p, heap, v, node);
+ v.safe_push (node);
node_dfs_info = (struct ipa_dfs_info *) node->symbol.aux;
node = node_dfs_info->next_cycle;
}
new_node_set = XCNEW (struct cgraph_node_set_def);
new_node_set->map = pointer_map_create ();
- new_node_set->nodes = NULL;
+ new_node_set->nodes.create (0);
return new_node_set;
}
if (*slot)
{
int index = (size_t) *slot - 1;
- gcc_checking_assert ((VEC_index (cgraph_node_ptr, set->nodes, index)
- == node));
+ gcc_checking_assert (set->nodes[index] == node);
return;
}
- *slot = (void *)(size_t) (VEC_length (cgraph_node_ptr, set->nodes) + 1);
+ *slot = (void *)(size_t) (set->nodes.length () + 1);
/* Insert into node vector. */
- VEC_safe_push (cgraph_node_ptr, heap, set->nodes, node);
+ set->nodes.safe_push (node);
}
return;
index = (size_t) *slot - 1;
- gcc_checking_assert (VEC_index (cgraph_node_ptr, set->nodes, index)
- == node);
+ gcc_checking_assert (set->nodes[index] == node);
/* Remove from vector. We do this by swapping node with the last element
of the vector. */
- last_node = VEC_pop (cgraph_node_ptr, set->nodes);
+ last_node = set->nodes.pop ();
if (last_node != node)
{
last_slot = pointer_map_contains (set->map, last_node);
*last_slot = (void *)(size_t) (index + 1);
/* Move the last element to the original spot of NODE. */
- VEC_replace (cgraph_node_ptr, set->nodes, index, last_node);
+ set->nodes[index] = last_node;
}
/* Remove element from hash table. */
void
free_cgraph_node_set (cgraph_node_set set)
{
- VEC_free (cgraph_node_ptr, heap, set->nodes);
+ set->nodes.release ();
pointer_map_destroy (set->map);
free (set);
}
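Both node-set conversions keep removal O(1) by moving the popped last element into the vacated slot rather than shifting the tail; the pointer map then records the moved element's new index. The swap-with-last step in isolation (values are illustrative):

#include <cassert>
#include <cstdio>
#include <vector>

int
main ()
{
  std::vector<int> nodes = { 10, 20, 30, 40 };
  const unsigned index = 1;        /* slot of the node being removed */

  int node = nodes[index];
  int last_node = nodes.back ();
  nodes.pop_back ();               /* like set->nodes.pop () */
  if (last_node != node)
    nodes[index] = last_node;      /* move last into the vacated slot */
  assert (nodes.size () == 3);
  for (int n : nodes)
    std::printf ("%d\n", n);       /* prints 10 40 30 */
  return 0;
}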
new_node_set = XCNEW (struct varpool_node_set_def);
new_node_set->map = pointer_map_create ();
- new_node_set->nodes = NULL;
+ new_node_set->nodes.create (0);
return new_node_set;
}
if (*slot)
{
int index = (size_t) *slot - 1;
- gcc_checking_assert ((VEC_index (varpool_node_ptr, set->nodes, index)
- == node));
+ gcc_checking_assert (set->nodes[index] == node);
return;
}
- *slot = (void *)(size_t) (VEC_length (varpool_node_ptr, set->nodes) + 1);
+ *slot = (void *)(size_t) (set->nodes.length () + 1);
/* Insert into node vector. */
- VEC_safe_push (varpool_node_ptr, heap, set->nodes, node);
+ set->nodes.safe_push (node);
}
return;
index = (size_t) *slot - 1;
- gcc_checking_assert (VEC_index (varpool_node_ptr, set->nodes, index)
- == node);
+ gcc_checking_assert (set->nodes[index] == node);
/* Remove from vector. We do this by swapping node with the last element
of the vector. */
- last_node = VEC_pop (varpool_node_ptr, set->nodes);
+ last_node = set->nodes.pop ();
if (last_node != node)
{
last_slot = pointer_map_contains (set->map, last_node);
*last_slot = (void *)(size_t) (index + 1);
/* Move the last element to the original spot of NODE. */
- VEC_replace (varpool_node_ptr, set->nodes, index, last_node);
+ set->nodes[index] = last_node;
}
/* Remove element from hash table. */
void
free_varpool_node_set (varpool_node_set set)
{
- VEC_free (varpool_node_ptr, heap, set->nodes);
+ set->nodes.release ();
pointer_map_destroy (set->map);
free (set);
}
int ipa_reduced_postorder (struct cgraph_node **, bool, bool,
bool (*ignore_edge) (struct cgraph_edge *));
void ipa_free_postorder_info (void);
-VEC (cgraph_node_p, heap) *ipa_get_nodes_in_cycle (struct cgraph_node *);
+vec<cgraph_node_ptr> ipa_get_nodes_in_cycle (struct cgraph_node *);
int ipa_reverse_postorder (struct cgraph_node **);
tree get_base_var (tree);
#endif
/* If we removed something, perhaps profile could be improved. */
- if (changed && optimize && inline_edge_summary_vec)
+ if (changed && optimize && inline_edge_summary_vec.exists ())
FOR_EACH_DEFINED_FUNCTION (node)
cgraph_propagate_frequency (node);
alias_pair *p;
/* Discover aliased nodes. */
- FOR_EACH_VEC_ELT (alias_pair, alias_pairs, i, p)
+ FOR_EACH_VEC_SAFE_ELT (alias_pairs, i, p)
{
if (dump_file)
- fprintf (dump_file, "Alias %s->%s",
- IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (p->decl)),
- IDENTIFIER_POINTER (p->target));
+ fprintf (dump_file, "Alias %s->%s",
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (p->decl)),
+ IDENTIFIER_POINTER (p->target));
if ((node = cgraph_node_for_asm (p->target)) != NULL
- && !DECL_EXTERNAL (node->symbol.decl))
- {
+ && !DECL_EXTERNAL (node->symbol.decl))
+ {
if (!node->analyzed)
continue;
cgraph_mark_force_output_node (node);
if (dump_file)
fprintf (dump_file, " node %s/%i",
cgraph_node_name (node), node->uid);
- }
+ }
else if ((vnode = varpool_node_for_asm (p->target)) != NULL
&& !DECL_EXTERNAL (vnode->symbol.decl))
- {
+ {
vnode->symbol.force_output = 1;
pointer_set_insert (aliased_vnodes, vnode);
if (dump_file)
fprintf (dump_file, " varpool node %s",
varpool_node_name (vnode));
- }
+ }
if (dump_file)
- fprintf (dump_file, "\n");
+ fprintf (dump_file, "\n");
}
FOR_EACH_FUNCTION (node)
}
/* A vector of FUNCTION_DECLs declared as static constructors. */
-static VEC(tree, heap) *static_ctors;
+static vec<tree> static_ctors;
/* A vector of FUNCTION_DECLs declared as static destructors. */
-static VEC(tree, heap) *static_dtors;
+static vec<tree> static_dtors;
/* When target does not have ctors and dtors, we call all constructor
and destructor by special initialization/destruction function
record_cdtor_fn (struct cgraph_node *node)
{
if (DECL_STATIC_CONSTRUCTOR (node->symbol.decl))
- VEC_safe_push (tree, heap, static_ctors, node->symbol.decl);
+ static_ctors.safe_push (node->symbol.decl);
if (DECL_STATIC_DESTRUCTOR (node->symbol.decl))
- VEC_safe_push (tree, heap, static_dtors, node->symbol.decl);
+ static_dtors.safe_push (node->symbol.decl);
node = cgraph_get_node (node->symbol.decl);
DECL_DISREGARD_INLINE_LIMITS (node->symbol.decl) = 1;
}
they are destructors. */
static void
-build_cdtor (bool ctor_p, VEC (tree, heap) *cdtors)
+build_cdtor (bool ctor_p, vec<tree> cdtors)
{
size_t i,j;
- size_t len = VEC_length (tree, cdtors);
+ size_t len = cdtors.length ();
i = 0;
while (i < len)
do
{
priority_type p;
- fn = VEC_index (tree, cdtors, j);
+ fn = cdtors[j];
p = ctor_p ? DECL_INIT_PRIORITY (fn) : DECL_FINI_PRIORITY (fn);
if (j == i)
priority = p;
for (;i < j; i++)
{
tree call;
- fn = VEC_index (tree, cdtors, i);
+ fn = cdtors[i];
call = build_call_expr (fn, 0);
if (ctor_p)
DECL_STATIC_CONSTRUCTOR (fn) = 0;
static void
build_cdtor_fns (void)
{
- if (!VEC_empty (tree, static_ctors))
+ if (!static_ctors.is_empty ())
{
gcc_assert (!targetm.have_ctors_dtors || in_lto_p);
- VEC_qsort (tree, static_ctors, compare_ctor);
+ static_ctors.qsort (compare_ctor);
build_cdtor (/*ctor_p=*/true, static_ctors);
}
- if (!VEC_empty (tree, static_dtors))
+ if (!static_dtors.is_empty ())
{
gcc_assert (!targetm.have_ctors_dtors || in_lto_p);
- VEC_qsort (tree, static_dtors, compare_dtor);
+ static_dtors.qsort (compare_dtor);
build_cdtor (/*ctor_p=*/false, static_dtors);
}
}
|| DECL_STATIC_DESTRUCTOR (node->symbol.decl))
record_cdtor_fn (node);
build_cdtor_fns ();
- VEC_free (tree, heap, static_ctors);
- VEC_free (tree, heap, static_dtors);
+ static_ctors.release ();
+ static_dtors.release ();
return 0;
}
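static_ctors.qsort (compare_ctor) replaces VEC_qsort; sorting by priority lets build_cdtor emit one wrapper function per priority group. A sketch of that sort with invented priorities (the real comparator also breaks ties deterministically, which this sketch does not reproduce):

#include <algorithm>
#include <cstdio>
#include <vector>

struct ctor { int priority; int id; };

int
main ()
{
  /* Invented priorities; the real code sorts FUNCTION_DECLs by
     DECL_INIT_PRIORITY / DECL_FINI_PRIORITY.  */
  std::vector<ctor> ctors = { {65535, 0}, {100, 1}, {200, 2} };

  std::sort (ctors.begin (), ctors.end (),
             [] (const ctor &a, const ctor &b)
             { return a.priority < b.priority; });   /* like .qsort () */

  for (const ctor &c : ctors)
    std::printf ("ctor %d at priority %d\n", c.id, c.priority);
  return 0;
}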
bool skip_p;
edge_iterator ei;
edge e;
- VEC (edge, heap) *edges;
+ vec<edge> edges;
loop_p loop;
ira_bb_nodes
}
ira_loop_nodes = ((struct ira_loop_tree_node *)
ira_allocate (sizeof (struct ira_loop_tree_node)
- * VEC_length (loop_p, ira_loops.larray)));
- FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
+ * vec_safe_length (ira_loops.larray)));
+ FOR_EACH_VEC_SAFE_ELT (ira_loops.larray, i, loop)
{
if (loop != ira_loops.tree_root)
{
if (skip_p)
continue;
edges = get_loop_exit_edges (loop);
- FOR_EACH_VEC_ELT (edge, edges, j, e)
+ FOR_EACH_VEC_ELT (edges, j, e)
if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
{
skip_p = true;
break;
}
- VEC_free (edge, heap, edges);
+ edges.release ();
if (skip_p)
continue;
}
loop_p loop;
if (current_loops != NULL)
- FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
+ FOR_EACH_VEC_SAFE_ELT (ira_loops.larray, i, loop)
if (ira_loop_nodes[i].regno_allocno_map != NULL
&& ira_loop_tree_root != &ira_loop_nodes[i])
return true;
if (current_loops == NULL)
finish_loop_tree_node (&ira_loop_nodes[0]);
else
- FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
+ FOR_EACH_VEC_SAFE_ELT (ira_loops.larray, i, loop)
finish_loop_tree_node (&ira_loop_nodes[i]);
ira_free (ira_loop_nodes);
for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
ira_assert (current_loops != NULL);
max_regno = max_reg_num ();
- FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, l, loop)
+ FOR_EACH_VEC_SAFE_ELT (ira_loops.larray, l, loop)
if (ira_loop_nodes[l].regno_allocno_map != NULL)
{
ira_free (ira_loop_nodes[l].regno_allocno_map);
/* Vec containing references to all created allocnos. It is a
container of array allocnos. */
-static VEC(ira_allocno_t,heap) *allocno_vec;
+static vec<ira_allocno_t> allocno_vec;
/* Vec containing references to all created ira_objects. It is a
container of ira_object_id_map. */
-static VEC(ira_object_t,heap) *ira_object_id_map_vec;
+static vec<ira_object_t> ira_object_id_map_vec;
/* Initialize data concerning allocnos. */
static void
= create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
object_pool
= create_alloc_pool ("objects", sizeof (struct ira_object), 100);
- allocno_vec = VEC_alloc (ira_allocno_t, heap, max_reg_num () * 2);
+ allocno_vec.create (max_reg_num () * 2);
ira_allocnos = NULL;
ira_allocnos_num = 0;
ira_objects_num = 0;
- ira_object_id_map_vec
- = VEC_alloc (ira_object_t, heap, max_reg_num () * 2);
+ ira_object_id_map_vec.create (max_reg_num () * 2);
ira_object_id_map = NULL;
ira_regno_allocno_map
= (ira_allocno_t *) ira_allocate (max_reg_num ()
OBJECT_MAX (obj) = -1;
OBJECT_LIVE_RANGES (obj) = NULL;
- VEC_safe_push (ira_object_t, heap, ira_object_id_map_vec, obj);
+ ira_object_id_map_vec.safe_push (obj);
ira_object_id_map
- = VEC_address (ira_object_t, ira_object_id_map_vec);
- ira_objects_num = VEC_length (ira_object_t, ira_object_id_map_vec);
+ = ira_object_id_map_vec.address ();
+ ira_objects_num = ira_object_id_map_vec.length ();
return obj;
}
ALLOCNO_NUM_OBJECTS (a) = 0;
ALLOCNO_ADD_DATA (a) = NULL;
- VEC_safe_push (ira_allocno_t, heap, allocno_vec, a);
- ira_allocnos = VEC_address (ira_allocno_t, allocno_vec);
- ira_allocnos_num = VEC_length (ira_allocno_t, allocno_vec);
+ allocno_vec.safe_push (a);
+ ira_allocnos = allocno_vec.address ();
+ ira_allocnos_num = allocno_vec.length ();
return a;
}
FOR_EACH_ALLOCNO (a, ai)
finish_allocno (a);
ira_free (ira_regno_allocno_map);
- VEC_free (ira_object_t, heap, ira_object_id_map_vec);
- VEC_free (ira_allocno_t, heap, allocno_vec);
+ ira_object_id_map_vec.release ();
+ allocno_vec.release ();
free_alloc_pool (allocno_pool);
free_alloc_pool (object_pool);
free_alloc_pool (live_range_pool);
/* Vec containing references to all created copies. It is a
container of array ira_copies. */
-static VEC(ira_copy_t,heap) *copy_vec;
+static vec<ira_copy_t> copy_vec;
/* The function initializes data concerning allocno copies. */
static void
{
copy_pool
= create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
- copy_vec = VEC_alloc (ira_copy_t, heap, get_max_uid ());
+ copy_vec.create (get_max_uid ());
ira_copies = NULL;
ira_copies_num = 0;
}
cp->constraint_p = constraint_p;
cp->insn = insn;
cp->loop_tree_node = loop_tree_node;
- VEC_safe_push (ira_copy_t, heap, copy_vec, cp);
- ira_copies = VEC_address (ira_copy_t, copy_vec);
- ira_copies_num = VEC_length (ira_copy_t, copy_vec);
+ copy_vec.safe_push (cp);
+ ira_copies = copy_vec.address ();
+ ira_copies_num = copy_vec.length ();
return cp;
}
FOR_EACH_COPY (cp, ci)
finish_copy (cp);
- VEC_free (ira_copy_t, heap, copy_vec);
+ copy_vec.release ();
free_alloc_pool (copy_pool);
}
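ira keeps plain arrays (ira_copies, ira_allocnos, ira_object_id_map) as raw views into these vectors, so every safe_push is followed by refreshing .address () and .length (): a push can reallocate and leave the previous pointer dangling. The discipline, sketched with std::vector:

#include <cstddef>
#include <cstdio>
#include <vector>

/* Illustrative globals mirroring copy_vec / ira_copies / ira_copies_num.  */
static std::vector<int> copy_vec;
static int *ira_copies;
static std::size_t ira_copies_num;

static void
push_copy (int cp)
{
  copy_vec.push_back (cp);             /* like copy_vec.safe_push (cp) */
  /* Refresh the raw views: the push may have reallocated the buffer.  */
  ira_copies = copy_vec.data ();       /* like copy_vec.address () */
  ira_copies_num = copy_vec.size ();   /* like copy_vec.length () */
}

int
main ()
{
  for (int i = 0; i < 100; i++)
    push_copy (i);
  std::printf ("%zu copies, first is %d\n", ira_copies_num, ira_copies[0]);
  return 0;
}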
correct post-ordering but it would be less likely that two nodes
connected by an edge in the CFG are neighbours in the topsort. */
-static VEC (ira_loop_tree_node_t, heap) *
+static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
- VEC (ira_loop_tree_node_t, heap) *loop_preorder)
+ vec<ira_loop_tree_node_t> loop_preorder)
{
- VEC (ira_loop_tree_node_t, heap) *topsort_nodes = NULL;
+ vec<ira_loop_tree_node_t> topsort_nodes = vec<ira_loop_tree_node_t>();
unsigned int n_loop_preorder;
- n_loop_preorder = VEC_length (ira_loop_tree_node_t, loop_preorder);
+ n_loop_preorder = loop_preorder.length ();
if (n_loop_preorder != 0)
{
ira_loop_tree_node_t subloop_node;
unsigned int i;
- VEC (ira_loop_tree_node_t, heap) *dfs_stack;
+ vec<ira_loop_tree_node_t> dfs_stack;
/* This is a bit of strange abuse of the BB_VISITED flag: We use
the flag to mark blocks we still have to visit to add them to
our post-order. Define an alias to avoid confusion. */
#define BB_TO_VISIT BB_VISITED
- FOR_EACH_VEC_ELT (ira_loop_tree_node_t, loop_preorder, i, subloop_node)
+ FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
{
gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
subloop_node->bb->flags |= BB_TO_VISIT;
}
- topsort_nodes = VEC_alloc (ira_loop_tree_node_t, heap, n_loop_preorder);
- dfs_stack = VEC_alloc (ira_loop_tree_node_t, heap, n_loop_preorder);
+ topsort_nodes.create (n_loop_preorder);
+ dfs_stack.create (n_loop_preorder);
- FOR_EACH_VEC_ELT_REVERSE (ira_loop_tree_node_t, loop_preorder,
- i, subloop_node)
+ FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
{
if (! (subloop_node->bb->flags & BB_TO_VISIT))
continue;
subloop_node->bb->flags &= ~BB_TO_VISIT;
- VEC_quick_push (ira_loop_tree_node_t, dfs_stack, subloop_node);
- while (! VEC_empty (ira_loop_tree_node_t, dfs_stack))
+ dfs_stack.quick_push (subloop_node);
+ while (! dfs_stack.is_empty ())
{
edge e;
edge_iterator ei;
- ira_loop_tree_node_t n = VEC_last (ira_loop_tree_node_t,
- dfs_stack);
+ ira_loop_tree_node_t n = dfs_stack.last ();
FOR_EACH_EDGE (e, ei, n->bb->preds)
{
ira_loop_tree_node_t pred_node;
&& (pred_node->bb->flags & BB_TO_VISIT))
{
pred_node->bb->flags &= ~BB_TO_VISIT;
- VEC_quick_push (ira_loop_tree_node_t, dfs_stack, pred_node);
+ dfs_stack.quick_push (pred_node);
}
}
- if (n == VEC_last (ira_loop_tree_node_t, dfs_stack))
+ if (n == dfs_stack.last ())
{
- VEC_pop (ira_loop_tree_node_t, dfs_stack);
- VEC_quick_push (ira_loop_tree_node_t, topsort_nodes, n);
+ dfs_stack.pop ();
+ topsort_nodes.quick_push (n);
}
}
}
#undef BB_TO_VISIT
- VEC_free (ira_loop_tree_node_t, heap, dfs_stack);
+ dfs_stack.release ();
}
- gcc_assert (VEC_length (ira_loop_tree_node_t, topsort_nodes)
- == n_loop_preorder);
+ gcc_assert (topsort_nodes.length () == n_loop_preorder);
return topsort_nodes;
}
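topsort_nodes and dfs_stack are sized up front with create (n) and then filled with quick_push, which in checking builds asserts that space is reserved instead of growing. An approximation using reserve plus an explicit capacity assertion (push_back cannot reallocate while size is below capacity):

#include <cassert>
#include <cstdio>
#include <vector>

/* Imitation of quick_push: assert that space was reserved, so the
   push below is guaranteed not to move the buffer.  */
static void
checked_push (std::vector<int> &v, int x)
{
  assert (v.size () < v.capacity ());
  v.push_back (x);
}

int
main ()
{
  std::vector<int> topsort;
  topsort.reserve (3);               /* like topsort_nodes.create (3) */
  for (int i = 0; i < 3; i++)
    checked_push (topsort, i);
  std::printf ("%zu nodes in post-order\n", topsort.size ());
  return 0;
}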
if (bb_p)
{
- VEC (ira_loop_tree_node_t, heap) *loop_preorder = NULL;
+ vec<ira_loop_tree_node_t> loop_preorder = vec<ira_loop_tree_node_t>();
unsigned int i;
/* Add all nodes to the set of nodes to visit. The IRA loop tree
subloop_node != NULL;
subloop_node = subloop_node->next)
if (subloop_node->bb != NULL)
- VEC_safe_push (ira_loop_tree_node_t, heap,
- loop_preorder, subloop_node);
+ loop_preorder.safe_push (subloop_node);
if (preorder_func != NULL)
- FOR_EACH_VEC_ELT (ira_loop_tree_node_t, loop_preorder, i, subloop_node)
+ FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
(*preorder_func) (subloop_node);
if (postorder_func != NULL)
{
- VEC (ira_loop_tree_node_t, heap) *loop_rev_postorder =
+ vec<ira_loop_tree_node_t> loop_rev_postorder =
ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
- FOR_EACH_VEC_ELT_REVERSE (ira_loop_tree_node_t, loop_rev_postorder,
- i, subloop_node)
+ FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
(*postorder_func) (subloop_node);
- VEC_free (ira_loop_tree_node_t, heap, loop_rev_postorder);
+ loop_rev_postorder.release ();
}
- VEC_free (ira_loop_tree_node_t, heap, loop_preorder);
+ loop_preorder.release ();
}
for (subloop_node = loop_node->subloops;
int i;
edge_iterator ei;
edge e;
- VEC (edge, heap) *edges;
+ vec<edge> edges;
ira_assert (current_loops != NULL);
FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
create_loop_allocnos (e);
edges = get_loop_exit_edges (loop_node->loop);
- FOR_EACH_VEC_ELT (edge, edges, i, e)
+ FOR_EACH_VEC_ELT (edges, i, e)
create_loop_allocnos (e);
- VEC_free (edge, heap, edges);
+ edges.release ();
}
}
int i;
edge_iterator ei;
edge e;
- VEC (edge, heap) *edges;
+ vec<edge> edges;
bool res;
FOR_EACH_EDGE (e, ei, loop->header->preds)
return true;
edges = get_loop_exit_edges (loop);
res = false;
- FOR_EACH_VEC_ELT (edge, edges, i, e)
+ FOR_EACH_VEC_ELT (edges, i, e)
if (e->flags & EDGE_COMPLEX)
{
res = true;
break;
}
- VEC_free (edge, heap, edges);
+ edges.release ();
return res;
}
#endif
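get_loop_exit_edges returns a heap vector by value, so each caller above walks it and then calls release (). FOR_EACH_VEC_ELT expands to exactly the iterate idiom below; the helper imitates vec::iterate's bounds-checked fetch:

#include <cstdio>
#include <vector>

/* Imitates vec::iterate: fetch element IX into *PTR, returning false
   once the end is reached.  */
template<typename T>
static bool
iterate (const std::vector<T> &v, unsigned ix, T *ptr)
{
  if (ix < v.size ())
    {
      *ptr = v[ix];
      return true;
    }
  return false;
}

int
main ()
{
  std::vector<int> edges = { 3, 1, 4 };   /* stand-in exit edges */
  int e;
  for (unsigned i = 0; iterate (edges, i, &e); i++)
    std::printf ("exit edge %d\n", e);
  /* A GCC heap vec would need edges.release () here.  */
  return 0;
}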
ira_assert (current_loops != NULL);
sorted_loops
= (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
- * VEC_length (loop_p,
- ira_loops.larray));
- for (n = i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
+ * vec_safe_length (ira_loops.larray));
+ for (n = i = 0; vec_safe_iterate (ira_loops.larray, i, &loop); i++)
if (ira_loop_nodes[i].regno_allocno_map != NULL)
{
if (ira_loop_nodes[i].parent == NULL)
loop_p loop;
ira_assert (current_loops != NULL);
- FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
+ FOR_EACH_VEC_SAFE_ELT (ira_loops.larray, i, loop)
if (ira_loop_nodes[i].regno_allocno_map != NULL)
{
if (ira_loop_nodes[i].parent == NULL)
}
/* Definition of vector of loop tree nodes. */
-DEF_VEC_P(ira_loop_tree_node_t);
-DEF_VEC_ALLOC_P(ira_loop_tree_node_t, heap);
/* Vec containing references to all removed loop tree nodes. */
-static VEC(ira_loop_tree_node_t,heap) *removed_loop_vec;
+static vec<ira_loop_tree_node_t> removed_loop_vec;
/* Vec containing references to all children of loop tree nodes. */
-static VEC(ira_loop_tree_node_t,heap) *children_vec;
+static vec<ira_loop_tree_node_t> children_vec;
/* Remove subregions of NODE if their separate allocation will not
improve the result. */
remove_p = node->to_remove_p;
if (! remove_p)
- VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, node);
- start = VEC_length (ira_loop_tree_node_t, children_vec);
+ children_vec.safe_push (node);
+ start = children_vec.length ();
for (subnode = node->children; subnode != NULL; subnode = subnode->next)
if (subnode->bb == NULL)
remove_uneccesary_loop_nodes_from_loop_tree (subnode);
else
- VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, subnode);
+ children_vec.safe_push (subnode);
node->children = node->subloops = NULL;
if (remove_p)
{
- VEC_safe_push (ira_loop_tree_node_t, heap, removed_loop_vec, node);
+ removed_loop_vec.safe_push (node);
return;
}
- while (VEC_length (ira_loop_tree_node_t, children_vec) > start)
+ while (children_vec.length () > start)
{
- subnode = VEC_pop (ira_loop_tree_node_t, children_vec);
+ subnode = children_vec.pop ();
subnode->parent = node;
subnode->next = node->children;
node->children = subnode;
mark_all_loops_for_removal ();
else
mark_loops_for_removal ();
- children_vec
- = VEC_alloc (ira_loop_tree_node_t, heap,
- last_basic_block + VEC_length (loop_p, ira_loops.larray));
- removed_loop_vec
- = VEC_alloc (ira_loop_tree_node_t, heap,
- last_basic_block + VEC_length (loop_p, ira_loops.larray));
- remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root) ;
- VEC_free (ira_loop_tree_node_t, heap, children_vec);
+ children_vec.create (last_basic_block + vec_safe_length (ira_loops.larray));
+ removed_loop_vec.create (last_basic_block
+ + vec_safe_length (ira_loops.larray));
+ remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
+ children_vec.release ();
if (all_p)
remove_low_level_allocnos ();
else
remove_unnecessary_allocnos ();
- while (VEC_length (ira_loop_tree_node_t, removed_loop_vec) > 0)
- finish_loop_tree_node (VEC_pop (ira_loop_tree_node_t, removed_loop_vec));
- VEC_free (ira_loop_tree_node_t, heap, removed_loop_vec);
+ while (removed_loop_vec.length () > 0)
+ finish_loop_tree_node (removed_loop_vec.pop ());
+ removed_loop_vec.release ();
}
\f
}
}
fprintf (ira_dump_file, " regions=%d, blocks=%d, points=%d\n",
- current_loops == NULL ? 1 : VEC_length (loop_p, ira_loops.larray),
+ current_loops == NULL ? 1 : vec_safe_length (ira_loops.larray),
n_basic_blocks, ira_max_point);
fprintf (ira_dump_file,
" allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
static ira_allocno_t *sorted_allocnos;
/* Vec representing the stack of allocnos used during coloring. */
-static VEC(ira_allocno_t,heap) *allocno_stack_vec;
+static vec<ira_allocno_t> allocno_stack_vec;
/* Helper for qsort comparison callbacks - return a positive integer if
X > Y, or a negative value otherwise. Use a conditional expression
\f
/* Definition of vector of allocno hard registers. */
-DEF_VEC_P(allocno_hard_regs_t);
-DEF_VEC_ALLOC_P(allocno_hard_regs_t, heap);
/* Vector of unique allocno hard registers. */
-static VEC(allocno_hard_regs_t, heap) *allocno_hard_regs_vec;
+static vec<allocno_hard_regs_t> allocno_hard_regs_vec;
/* Returns hash value for allocno hard registers V. */
static hashval_t
static void
init_allocno_hard_regs (void)
{
- allocno_hard_regs_vec = VEC_alloc (allocno_hard_regs_t, heap, 200);
+ allocno_hard_regs_vec.create (200);
allocno_hard_regs_htab
= htab_create (200, allocno_hard_regs_hash, allocno_hard_regs_eq, NULL);
}
ira_allocate (sizeof (struct allocno_hard_regs)));
COPY_HARD_REG_SET (hv->set, set);
hv->cost = cost;
- VEC_safe_push (allocno_hard_regs_t, heap, allocno_hard_regs_vec, hv);
+ allocno_hard_regs_vec.safe_push (hv);
insert_hard_regs (hv);
}
return hv;
allocno_hard_regs_t hv;
for (i = 0;
- VEC_iterate (allocno_hard_regs_t, allocno_hard_regs_vec, i, hv);
+ allocno_hard_regs_vec.iterate (i, &hv);
i++)
ira_free (hv);
htab_delete (allocno_hard_regs_htab);
- VEC_free (allocno_hard_regs_t, heap, allocno_hard_regs_vec);
+ allocno_hard_regs_vec.release ();
}
/* Sort hard regs according to their frequency of usage. */
static allocno_hard_regs_node_t hard_regs_roots;
/* Definition of vector of allocno hard register nodes. */
-DEF_VEC_P(allocno_hard_regs_node_t);
-DEF_VEC_ALLOC_P(allocno_hard_regs_node_t, heap);
/* Vector used to create the forest. */
-static VEC(allocno_hard_regs_node_t, heap) *hard_regs_node_vec;
+static vec<allocno_hard_regs_node_t> hard_regs_node_vec;
/* Create and return allocno hard registers node containing allocno
hard registers HV. */
HARD_REG_SET temp_set;
allocno_hard_regs_t hv2;
- start = VEC_length (allocno_hard_regs_node_t, hard_regs_node_vec);
+ start = hard_regs_node_vec.length ();
for (node = *roots; node != NULL; node = node->next)
{
if (hard_reg_set_equal_p (hv->set, node->hard_regs->set))
return;
}
if (hard_reg_set_subset_p (node->hard_regs->set, hv->set))
- VEC_safe_push (allocno_hard_regs_node_t, heap,
- hard_regs_node_vec, node);
+ hard_regs_node_vec.safe_push (node);
else if (hard_reg_set_intersect_p (hv->set, node->hard_regs->set))
{
COPY_HARD_REG_SET (temp_set, hv->set);
add_allocno_hard_regs_to_forest (&node->first, hv2);
}
}
- if (VEC_length (allocno_hard_regs_node_t, hard_regs_node_vec)
- > start + 1)
+ if (hard_regs_node_vec.length () > start + 1)
{
/* Create a new node which contains nodes in hard_regs_node_vec. */
CLEAR_HARD_REG_SET (temp_set);
for (i = start;
- i < VEC_length (allocno_hard_regs_node_t, hard_regs_node_vec);
+ i < hard_regs_node_vec.length ();
i++)
{
- node = VEC_index (allocno_hard_regs_node_t, hard_regs_node_vec, i);
+ node = hard_regs_node_vec[i];
IOR_HARD_REG_SET (temp_set, node->hard_regs->set);
}
hv = add_allocno_hard_regs (temp_set, hv->cost);
new_node = create_new_allocno_hard_regs_node (hv);
prev = NULL;
for (i = start;
- i < VEC_length (allocno_hard_regs_node_t, hard_regs_node_vec);
+ i < hard_regs_node_vec.length ();
i++)
{
- node = VEC_index (allocno_hard_regs_node_t, hard_regs_node_vec, i);
+ node = hard_regs_node_vec[i];
if (node->prev == NULL)
*roots = node->next;
else
}
add_new_allocno_hard_regs_node_to_forest (roots, new_node);
}
- VEC_truncate (allocno_hard_regs_node_t, hard_regs_node_vec, start);
+ hard_regs_node_vec.truncate (start);
}
/* Add allocno hard registers nodes starting with the forest level
ira_assert (first != NULL);
for (node = first; node != NULL; node = node->next)
if (hard_reg_set_subset_p (node->hard_regs->set, set))
- VEC_safe_push (allocno_hard_regs_node_t, heap, hard_regs_node_vec,
- node);
+ hard_regs_node_vec.safe_push (node);
else if (hard_reg_set_intersect_p (set, node->hard_regs->set))
collect_allocno_hard_regs_cover (node->first, set);
}
node_check_tick = 0;
init_allocno_hard_regs ();
hard_regs_roots = NULL;
- hard_regs_node_vec = VEC_alloc (allocno_hard_regs_node_t, heap, 100);
+ hard_regs_node_vec.create (100);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
{
node = create_new_allocno_hard_regs_node (hv);
add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots, node);
}
- start = VEC_length (allocno_hard_regs_t, allocno_hard_regs_vec);
+ start = allocno_hard_regs_vec.length ();
EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
{
a = ira_allocnos[i];
SET_HARD_REG_SET (temp);
AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
add_allocno_hard_regs (temp, 0);
- qsort (VEC_address (allocno_hard_regs_t, allocno_hard_regs_vec) + start,
- VEC_length (allocno_hard_regs_t, allocno_hard_regs_vec) - start,
+ qsort (allocno_hard_regs_vec.address () + start,
+ allocno_hard_regs_vec.length () - start,
sizeof (allocno_hard_regs_t), allocno_hard_regs_compare);
for (i = start;
- VEC_iterate (allocno_hard_regs_t, allocno_hard_regs_vec, i, hv);
+ allocno_hard_regs_vec.iterate (i, &hv);
i++)
{
add_allocno_hard_regs_to_forest (&hard_regs_roots, hv);
- ira_assert (VEC_length (allocno_hard_regs_node_t,
- hard_regs_node_vec) == 0);
+ ira_assert (hard_regs_node_vec.length () == 0);
}
/* We need to set up parent fields for right work of
first_common_ancestor_node. */
allocno_data = ALLOCNO_COLOR_DATA (a);
if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
continue;
- VEC_truncate (allocno_hard_regs_node_t, hard_regs_node_vec, 0);
+ hard_regs_node_vec.truncate (0);
collect_allocno_hard_regs_cover (hard_regs_roots,
allocno_data->profitable_hard_regs);
allocno_hard_regs_node = NULL;
- for (j = 0;
- VEC_iterate (allocno_hard_regs_node_t, hard_regs_node_vec,
- j, node);
- j++)
+ for (j = 0; hard_regs_node_vec.iterate (j, &node); j++)
allocno_hard_regs_node
= (j == 0
? node
allocno_hard_regs_subnodes
= ((allocno_hard_regs_subnode_t)
ira_allocate (sizeof (struct allocno_hard_regs_subnode) * start));
- VEC_free (allocno_hard_regs_node_t, heap, hard_regs_node_vec);
+ hard_regs_node_vec.release ();
}
/* Free tree of allocno hard registers nodes given by its ROOT. */
data = ALLOCNO_COLOR_DATA (a);
data->in_graph_p = false;
- VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, a);
+ allocno_stack_vec.safe_push (a);
aclass = ALLOCNO_CLASS (a);
if (aclass == NO_REGS)
return;
int freq, i;
edge_iterator ei;
edge e;
- VEC (edge, heap) *edges;
+ vec<edge> edges;
ira_assert (current_loops != NULL && loop_node->loop != NULL
&& (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
else
{
edges = get_loop_exit_edges (loop_node->loop);
- FOR_EACH_VEC_ELT (edge, edges, i, e)
+ FOR_EACH_VEC_ELT (edges, i, e)
if (regno < 0
|| (bitmap_bit_p (df_get_live_out (e->src), regno)
&& bitmap_bit_p (df_get_live_in (e->dest), regno)))
freq += EDGE_FREQUENCY (e);
- VEC_free (edge, heap, edges);
+ edges.release ();
}
return REG_FREQ_FROM_EDGE_FREQ (freq);
ira_allocno_t allocno;
enum reg_class aclass;
- for (;VEC_length (ira_allocno_t, allocno_stack_vec) != 0;)
+ for (;allocno_stack_vec.length () != 0;)
{
- allocno = VEC_pop (ira_allocno_t, allocno_stack_vec);
+ allocno = allocno_stack_vec.pop ();
aclass = ALLOCNO_CLASS (allocno);
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
{
static void
color (void)
{
- allocno_stack_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
+ allocno_stack_vec.create (ira_allocnos_num);
memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
ira_initiate_assign ();
do_coloring ();
ira_finish_assign ();
- VEC_free (ira_allocno_t, heap, allocno_stack_vec);
+ allocno_stack_vec.release ();
move_spill_restore ();
}
/* Definitions for vectors of pointers. */
typedef void *void_p;
-DEF_VEC_P (void_p);
-DEF_VEC_ALLOC_P (void_p,heap);
/* Pointers to data allocated for allocnos being created during
emitting. Usually there are quite few such allocnos because they
are created only for resolving loop in register shuffling. */
-static VEC(void_p, heap) *new_allocno_emit_data_vec;
+static vec<void_p> new_allocno_emit_data_vec;
/* Allocate and initiate the emit data. */
void
ira_allocnos_num * sizeof (struct ira_emit_data));
FOR_EACH_ALLOCNO (a, ai)
ALLOCNO_ADD_DATA (a) = ira_allocno_emit_data + ALLOCNO_NUM (a);
- new_allocno_emit_data_vec = VEC_alloc (void_p, heap, 50);
+ new_allocno_emit_data_vec.create (50);
}
ira_free (ira_allocno_emit_data);
FOR_EACH_ALLOCNO (a, ai)
ALLOCNO_ADD_DATA (a) = NULL;
- for (;VEC_length (void_p, new_allocno_emit_data_vec) != 0;)
+ for (;new_allocno_emit_data_vec.length () != 0;)
{
- p = VEC_pop (void_p, new_allocno_emit_data_vec);
+ p = new_allocno_emit_data_vec.pop ();
ira_free (p);
}
- VEC_free (void_p, heap, new_allocno_emit_data_vec);
+ new_allocno_emit_data_vec.release ();
}
/* Create and return a new allocno with given REGNO and
a = ira_create_allocno (regno, false, loop_tree_node);
ALLOCNO_ADD_DATA (a) = ira_allocate (sizeof (struct ira_emit_data));
memset (ALLOCNO_ADD_DATA (a), 0, sizeof (struct ira_emit_data));
- VEC_safe_push (void_p, heap, new_allocno_emit_data_vec, ALLOCNO_ADD_DATA (a));
+ new_allocno_emit_data_vec.safe_push (ALLOCNO_ADD_DATA (a));
return a;
}
loop_p loop;
ira_assert (current_loops != NULL);
- FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
+ FOR_EACH_VEC_SAFE_ELT (ira_loops.larray, i, loop)
if (ira_loop_nodes[i].regno_allocno_map != NULL)
ira_loop_nodes[i].entered_from_non_parent_p
= entered_from_non_parent_p (&ira_loop_nodes[i]);
/* Return TRUE if move lists on all edges given in vector VEC are
equal. */
static bool
-eq_edge_move_lists_p (VEC(edge,gc) *vec)
+eq_edge_move_lists_p (vec<edge, va_gc> *vec)
{
move_t list;
int i;
int i;
edge e;
move_t list;
- VEC(edge,gc) *vec;
+ vec<edge, va_gc> *vec;
vec = (start_p ? bb->preds : bb->succs);
if (EDGE_COUNT (vec) == 0 || ! eq_edge_move_lists_p (vec))
static int *allocno_last_set_check;
/* Definition of vector of moves. */
-DEF_VEC_P(move_t);
-DEF_VEC_ALLOC_P(move_t, heap);
/* This vec contains moves sorted topologically (depth-first) on their
dependency graph. */
-static VEC(move_t,heap) *move_vec;
+static vec<move_t> move_vec;
/* The variable value is used to check correctness of values of
elements of arrays `hard_regno_last_set' and
move->visited_p = true;
for (i = move->deps_num - 1; i >= 0; i--)
traverse_moves (move->deps[i]);
- VEC_safe_push (move_t, heap, move_vec, move);
+ move_vec.safe_push (move);
}
/* Remove unnecessary moves in the LIST, makes topological sorting,
}
}
/* Topological sorting: */
- VEC_truncate (move_t, move_vec, 0);
+ move_vec.truncate (0);
for (move = list; move != NULL; move = move->next)
traverse_moves (move);
last = NULL;
- for (i = (int) VEC_length (move_t, move_vec) - 1; i >= 0; i--)
+ for (i = (int) move_vec.length () - 1; i >= 0; i--)
{
- move = VEC_index (move_t, move_vec, i);
+ move = move_vec[i];
move->next = NULL;
if (last != NULL)
last->next = move;
last = move;
}
- first = VEC_last (move_t, move_vec);
+ first = move_vec.last ();
/* Removing cycles: */
curr_tick++;
- VEC_truncate (move_t, move_vec, 0);
+ move_vec.truncate (0);
for (move = first; move != NULL; move = move->next)
{
from = move->from;
new_move = create_move (set_move->to, new_allocno);
set_move->to = new_allocno;
- VEC_safe_push (move_t, heap, move_vec, new_move);
+ move_vec.safe_push (new_move);
ira_move_loops_num++;
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
fprintf (ira_dump_file,
hard_regno_last_set_check[hard_regno + i] = curr_tick;
}
}
- for (i = (int) VEC_length (move_t, move_vec) - 1; i >= 0; i--)
+ for (i = (int) move_vec.length () - 1; i >= 0; i--)
{
- move = VEC_index (move_t, move_vec, i);
+ move = move_vec[i];
move->next = NULL;
last->next = move;
last = move;
|| (ira_reg_equiv[regno].invariant == NULL_RTX
&& ira_reg_equiv[regno].constant == NULL_RTX))
continue; /* regno has no equivalence. */
- ira_assert ((int) VEC_length (reg_equivs_t, reg_equivs)
- > regno);
+ ira_assert ((int) reg_equivs->length () > regno);
reg_equiv_init (regno)
= gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
}
unify_moves (bb, true);
FOR_EACH_BB (bb)
unify_moves (bb, false);
- move_vec = VEC_alloc (move_t, heap, ira_allocnos_num);
+ move_vec.create (ira_allocnos_num);
emit_moves ();
add_ranges_and_copies ();
/* Clean up: */
e->aux = NULL;
}
}
- VEC_free (move_t, heap, move_vec);
+ move_vec.release ();
ira_free (allocno_last_set_check);
ira_free (allocno_last_set);
commit_edge_insertions ();
typedef struct ira_object *ira_object_t;
/* Definition of vector of allocnos and copies. */
-DEF_VEC_P(ira_allocno_t);
-DEF_VEC_ALLOC_P(ira_allocno_t, heap);
-DEF_VEC_P(ira_object_t);
-DEF_VEC_ALLOC_P(ira_object_t, heap);
-DEF_VEC_P(ira_copy_t);
-DEF_VEC_ALLOC_P(ira_copy_t, heap);
/* Typedef for pointer to the subsequent structure. */
typedef struct ira_loop_tree_node *ira_loop_tree_node_t;
int i, new_regno, max;
rtx x, prev, next, insn, set;
- if (VEC_length (reg_equivs_t, reg_equivs) < max_regno)
+ if (vec_safe_length (reg_equivs) < max_regno)
{
- max = VEC_length (reg_equivs_t, reg_equivs);
+ max = vec_safe_length (reg_equivs);
grow_reg_equivs ();
for (i = FIRST_PSEUDO_REGISTER; i < max; i++)
for (prev = NULL_RTX, x = reg_equiv_init (i);
find_movable_pseudos, with index 0 holding data for the
first_moveable_pseudo. */
/* The original home register. */
-static VEC (rtx, heap) *pseudo_replaced_reg;
+static vec<rtx> pseudo_replaced_reg;
/* Look for instances where we have an instruction that is known to increase
register pressure, and whose result is not used immediately. If it is
bitmap_initialize (&interesting, 0);
first_moveable_pseudo = max_regs;
- VEC_free (rtx, heap, pseudo_replaced_reg);
- VEC_safe_grow (rtx, heap, pseudo_replaced_reg, max_regs);
+ pseudo_replaced_reg.release ();
+ pseudo_replaced_reg.safe_grow_cleared (max_regs);
df_analyze ();
calculate_dominance_info (CDI_DOMINATORS);
unsigned nregno = REGNO (newreg);
emit_insn_before (gen_move_insn (def_reg, newreg), use_insn);
nregno -= max_regs;
- VEC_replace (rtx, pseudo_replaced_reg, nregno, def_reg);
+ pseudo_replaced_reg[nregno] = def_reg;
}
}
}
if (reg_renumber[i] < 0)
{
int idx = i - first_moveable_pseudo;
- rtx other_reg = VEC_index (rtx, pseudo_replaced_reg, idx);
+ rtx other_reg = pseudo_replaced_reg[idx];
rtx def_insn = DF_REF_INSN (DF_REG_DEF_CHAIN (i));
/* The use must follow all definitions of OTHER_REG, so we can
insert the new definition immediately after any of them. */
lra (ira_dump_file);
/* ???!!! Move it before lra () when we use ira_reg_equiv in
LRA. */
- VEC_free (reg_equivs_t, gc, reg_equivs);
+ vec_free (reg_equivs);
reg_equivs = NULL;
need_dce = false;
}
+2012-11-16 Diego Novillo <dnovillo@google.com>
+
+ Adjust for new vec API (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
+
+ * boehm.c: Use new vec API in vec.h.
+ * class.c: Likewise.
+ * constants.c: Likewise.
+ * decl.c: Likewise.
+ * expr.c: Likewise.
+ * java-tree.h: Likewise.
+ * jcf-parse.c: Likewise.
+ * resource.c: Likewise.
+ * verify-glue.c: Likewise.
+
2012-11-15 Jan Hubicka <jh@suse.cz>
* builtins.c (define_builtin): Accept ECF flags and
this function is only used with flag_reduced_reflection. No
point in asserting unless we hit the bad case. */
gcc_assert (!flag_reduced_reflection || TARGET_VTABLE_USES_DESCRIPTORS == 0);
- v = VEC_index (constructor_elt, CONSTRUCTOR_ELTS (dtable), 3).value;
+ v = (*CONSTRUCTOR_ELTS (dtable))[3].value;
return (PROCEDURE_OBJECT_DESCRIPTOR == TREE_INT_CST_LOW (v));
}
#include "ggc.h"
#include "cgraph.h"
#include "tree-iterator.h"
-#include "vecprim.h"
#include "target.h"
static tree make_method_value (tree);
#define class_list class_roots[2]
#define class_dtable_decl class_roots[3]
-static GTY(()) VEC(tree,gc) *registered_class;
+static GTY(()) vec<tree, va_gc> *registered_class;
/* A tree that returns the address of the class$ of the class
currently being compiled. */
/* A list of static class fields. This is to emit proper debug
info for them. */
-VEC(tree,gc) *pending_static_fields;
+vec<tree, va_gc> *pending_static_fields;
/* Return the node that most closely represents the class whose name
is IDENT. Start the search from NODE (followed by its siblings).
object file. */
DECL_EXTERNAL (field) = (is_compiled_class (klass) != 2);
if (!DECL_EXTERNAL (field))
- VEC_safe_push (tree, gc, pending_static_fields, field);
+ vec_safe_push (pending_static_fields, field);
}
return field;
int name_hash;
tree ref = IDENTIFIER_UTF8_REF (name);
tree decl;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
if (ref != NULL_TREE)
return ref;
int flags;
tree type = TREE_TYPE (fdecl);
int resolved = is_compiled_class (type) && ! flag_indirect_dispatch;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
START_RECORD_CONSTRUCTOR (v, field_type_node);
PUSH_FIELD_VALUE (v, "name", build_utf8_ref (DECL_NAME (fdecl)));
tree class_decl;
#define ACC_TRANSLATED 0x4000
int accflags = get_access_flags_from_decl (mdecl) | ACC_TRANSLATED;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
class_decl = DECL_CONTEXT (mdecl);
/* For interfaces, the index field contains the dispatch index. */
/* Compute the `throws' information for the method. */
tree table = null_pointer_node;
- if (!VEC_empty (tree, DECL_FUNCTION_THROWS (mdecl)))
+ if (!vec_safe_is_empty (DECL_FUNCTION_THROWS (mdecl)))
{
- int length = 1 + VEC_length (tree, DECL_FUNCTION_THROWS (mdecl));
+ int length = 1 + DECL_FUNCTION_THROWS (mdecl)->length ();
tree t, type, array;
char buf[60];
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
int idx = length - 1;
unsigned ix;
constructor_elt *e;
- v = VEC_alloc (constructor_elt, gc, length);
- VEC_safe_grow_cleared (constructor_elt, gc, v, length);
+ vec_alloc (v, length);
+ v->quick_grow_cleared (length);
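The quick_/safe_ split used here recurs throughout the patch; a sketch of
the contract, with v and length assumed in scope:

    vec_alloc (v, length);           /* reserve room for LENGTH elements */
    v->quick_grow_cleared (length);  /* no capacity check: room is reserved */
    /* vec_safe_grow_cleared (v, length) would also be correct: safe_
       operations (re)allocate on demand, quick_ operations assume the
       space is already there.  */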
- e = &VEC_index (constructor_elt, v, idx--);
+ e = &(*v)[idx--];
e->value = null_pointer_node;
- FOR_EACH_VEC_ELT (tree, DECL_FUNCTION_THROWS (mdecl), ix, t)
+ FOR_EACH_VEC_SAFE_ELT (DECL_FUNCTION_THROWS (mdecl), ix, t)
{
tree sig = DECL_NAME (TYPE_NAME (t));
tree utf8
= build_utf8_ref (unmangle_classname (IDENTIFIER_POINTER (sig),
IDENTIFIER_LENGTH (sig)));
- e = &VEC_index (constructor_elt, v, idx--);
+ e = &(*v)[idx--];
e->value = utf8;
}
gcc_assert (idx == -1);
int nvirtuals = TREE_VEC_LENGTH (vtable);
int arraysize;
tree gc_descr;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
constructor_elt *e;
tree arraytype;
arraysize *= TARGET_VTABLE_USES_DESCRIPTORS;
arraysize += 2;
- VEC_safe_grow_cleared (constructor_elt, gc, v, arraysize);
- e = &VEC_index (constructor_elt, v, arraysize - 1);
+ vec_safe_grow_cleared (v, arraysize);
+ e = &(*v)[arraysize - 1];
#define CONSTRUCTOR_PREPEND_VALUE(E, V) E->value = V, E--
for (i = nvirtuals; --i >= 0; )
/** Pointer to type_info object (to be implemented), according to g++ ABI. */
CONSTRUCTOR_PREPEND_VALUE (e, null_pointer_node);
/** Offset to start of whole object. Always (ptrdiff_t)0 for Java. */
- gcc_assert (e == VEC_address (constructor_elt, v));
+ gcc_assert (e == v->address ());
e->index = integer_zero_node;
e->value = null_pointer_node;
#undef CONSTRUCTOR_PREPEND_VALUE
}
static void
-add_table_and_syms (VEC(constructor_elt,gc) **v,
- VEC(method_entry,gc) *methods,
+add_table_and_syms (vec<constructor_elt, va_gc> **v,
+ vec<method_entry, va_gc> *methods,
const char *table_name, tree table_slot, tree table_type,
const char *syms_name, tree syms_slot)
{
/** Offset from start of virtual function table declaration
to where objects actually point at, following new g++ ABI. */
tree dtable_start_offset = size_int (2 * POINTER_SIZE / BITS_PER_UNIT);
- VEC(int, heap) *field_indexes;
+ vec<int> field_indexes;
tree first_real_field;
- VEC(constructor_elt,gc) *v1 = NULL, *v2 = NULL;
+ vec<constructor_elt, va_gc> *v1 = NULL, *v2 = NULL;
tree reflection_data;
- VEC(constructor_elt,gc) *static_fields = NULL;
- VEC(constructor_elt,gc) *instance_fields = NULL;
- VEC(constructor_elt,gc) *methods = NULL;
+ vec<constructor_elt, va_gc> *static_fields = NULL;
+ vec<constructor_elt, va_gc> *instance_fields = NULL;
+ vec<constructor_elt, va_gc> *methods = NULL;
this_class_addr = build_static_class_ref (type);
decl = TREE_OPERAND (this_class_addr, 0);
}
}
field_count = static_field_count + instance_field_count;
- field_indexes = VEC_alloc (int, heap, field_count);
+ field_indexes.create (field_count);
/* gcj sorts fields so that static fields come first, followed by
instance fields. Unfortunately, by the time this takes place we
field_index = instance_count++;
else
continue;
- VEC_quick_push (int, field_indexes, field_index);
+ field_indexes.quick_push (field_index);
}
}
}
}
}
- gcc_assert (static_field_count
- == (int) VEC_length (constructor_elt, static_fields));
- gcc_assert (instance_field_count
- == (int) VEC_length (constructor_elt, instance_fields));
+ gcc_assert (static_field_count == (int) vec_safe_length (static_fields));
+ gcc_assert (instance_field_count == (int) vec_safe_length (instance_fields));
if (field_count > 0)
{
- VEC_safe_splice (constructor_elt, gc, static_fields, instance_fields);
+ vec_safe_splice (static_fields, instance_fields);
field_array_type = build_prim_array_type (field_type_node, field_count);
fields_decl = build_decl (input_location,
VAR_DECL, mangled_classname ("_FL_", type),
{
int i;
tree interface_array_type, idecl;
- VEC(constructor_elt,gc) *init = VEC_alloc (constructor_elt, gc,
- interface_len);
+ vec<constructor_elt, va_gc> *init;
+ vec_alloc (init, interface_len);
interface_array_type
= build_prim_array_type (class_ptr_type, interface_len);
idecl = build_decl (input_location,
"itable_syms", TYPE_ITABLE_SYMS_DECL (type));
PUSH_FIELD_VALUE (v2, "catch_classes",
- build1 (ADDR_EXPR, ptr_type_node, TYPE_CTABLE_DECL (type)));
+ build1 (ADDR_EXPR, ptr_type_node, TYPE_CTABLE_DECL (type)));
PUSH_FIELD_VALUE (v2, "interfaces", interfaces);
PUSH_FIELD_VALUE (v2, "loader", null_pointer_node);
PUSH_FIELD_VALUE (v2, "interface_count",
{
int i;
int count = TYPE_REFLECTION_DATASIZE (current_class);
- VEC (constructor_elt, gc) *v
- = VEC_alloc (constructor_elt, gc, count);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, count);
unsigned char *data = TYPE_REFLECTION_DATA (current_class);
tree max_index = build_int_cst (sizetype, count);
tree index = build_index_type (max_index);
array = build_decl (input_location,
VAR_DECL, get_identifier (buf), type);
- rewrite_reflection_indexes (field_indexes);
+ rewrite_reflection_indexes (&field_indexes);
for (i = 0; i < count; i++)
{
constructor_elt elt;
elt.index = build_int_cst (sizetype, i);
elt.value = build_int_cstu (byte_type_node, data[i]);
- VEC_quick_push (constructor_elt, v, elt);
+ v->quick_push (elt);
}
DECL_INITIAL (array) = build_constructor (type, v);
tree node;
if (!registered_class)
- registered_class = VEC_alloc (tree, gc, 8);
+ vec_alloc (registered_class, 8);
if (flag_indirect_classes)
node = current_class;
else
node = TREE_OPERAND (build_class_ref (current_class), 0);
- VEC_safe_push (tree, gc, registered_class, node);
+ vec_safe_push (registered_class, node);
}
/* Emit a function that calls _Jv_RegisterNewClasses with a list of
tree klass, t, register_class_fn;
int i;
- int size = VEC_length (tree, registered_class) * 2 + 1;
- VEC(constructor_elt,gc) *init = VEC_alloc (constructor_elt, gc, size);
+ int size = vec_safe_length (registered_class) * 2 + 1;
+ vec<constructor_elt, va_gc> *init;
+ vec_alloc (init, size);
tree class_array_type
= build_prim_array_type (ptr_type_node, size);
tree cdecl = build_decl (input_location,
VAR_DECL, get_identifier ("_Jv_CLS"),
class_array_type);
tree reg_class_list;
- FOR_EACH_VEC_ELT (tree, registered_class, i, klass)
+ FOR_EACH_VEC_SAFE_ELT (registered_class, i, klass)
{
t = fold_convert (ptr_type_node, build_static_class_ref (klass));
CONSTRUCTOR_APPEND_ELT (init, NULL_TREE, t);
#ifdef JCR_SECTION_NAME
tree klass, cdecl, class_array_type;
int i;
- int size = VEC_length (tree, registered_class);
- VEC(constructor_elt,gc) *init = VEC_alloc (constructor_elt, gc, size);
+ int size = vec_safe_length (registered_class);
+ vec<constructor_elt, va_gc> *init;
+ vec_alloc (init, size);
- FOR_EACH_VEC_ELT (tree, registered_class, i, klass)
+ FOR_EACH_VEC_SAFE_ELT (registered_class, i, klass)
CONSTRUCTOR_APPEND_ELT (init, NULL_TREE, build_fold_addr_expr (klass));
/* ??? I would like to use tree_output_constant_def() but there is no way
DECL_EXTERNAL (t) = 1;
register_class_fn = t;
- FOR_EACH_VEC_ELT (tree, registered_class, i, klass)
+ FOR_EACH_VEC_SAFE_ELT (registered_class, i, klass)
{
t = build_fold_addr_expr (klass);
t = build_call_expr (register_class_fn, 1, t);
build_symbol_table_entry (tree clname, tree name, tree signature)
{
tree symbol;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
START_RECORD_CONSTRUCTOR (v, symbol_type);
PUSH_FIELD_VALUE (v, "clname", clname);
tree
emit_symbol_table (tree name, tree the_table,
- VEC(method_entry,gc) *decl_table,
+ vec<method_entry, va_gc> *decl_table,
tree the_syms_decl, tree the_array_element_type,
int element_size)
{
tree table, null_symbol, table_size, the_array_type;
unsigned index;
method_entry *e;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* Only emit a table if this translation unit actually made any
references via it. */
- if (decl_table == NULL)
+ if (!decl_table)
return the_table;
/* Build a list of _Jv_MethodSymbols for each entry in otable_methods. */
- FOR_EACH_VEC_ELT (method_entry, decl_table, index, e)
+ FOR_EACH_VEC_ELT (*decl_table, index, e)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
build_symbol_entry (e->method, e->special));
{
tree entry;
tree type = TREE_TYPE (TREE_TYPE (TYPE_CTABLE_DECL (output_class)));
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
START_RECORD_CONSTRUCTOR (v, type);
PUSH_FIELD_VALUE (v, "address", catch_class);
PUSH_FIELD_VALUE (v, "classname", classname);
int n_catch_classes;
constructor_elt *e;
/* Fill in the dummy entry that make_class created. */
- e = &VEC_index (constructor_elt, TYPE_CATCH_CLASSES (this_class), 0);
+ e = &(*TYPE_CATCH_CLASSES (this_class))[0];
e->value = make_catch_class_record (null_pointer_node, null_pointer_node);
CONSTRUCTOR_APPEND_ELT (TYPE_CATCH_CLASSES (this_class), NULL_TREE,
make_catch_class_record (null_pointer_node,
null_pointer_node));
- n_catch_classes = VEC_length (constructor_elt,
- TYPE_CATCH_CLASSES (this_class));
+ n_catch_classes = TYPE_CATCH_CLASSES (this_class)->length ();
table_size = build_index_type (build_int_cst (NULL_TREE, n_catch_classes));
array_type
= build_array_type (TREE_TYPE (TREE_TYPE (TYPE_CTABLE_DECL (this_class))),
static tree
build_assertion_table_entry (tree code, tree op1, tree op2)
{
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
tree entry;
START_RECORD_CONSTRUCTOR (v, assertion_entry_type);
{
tree entry;
tree code_val, op1_utf8, op2_utf8;
- VEC(constructor_elt,gc) **v = (VEC(constructor_elt,gc) **) ptr;
+ vec<constructor_elt, va_gc> **v
+ = ((vec<constructor_elt, va_gc> **) ptr);
type_assertion *as = (type_assertion *) *htab_entry;
code_val = build_int_cst (NULL_TREE, as->assertion_code);
{
tree null_entry, ctor, table_decl;
htab_t assertions_htab = TYPE_ASSERTIONS (klass);
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* Iterate through the hash table. */
htab_traverse (assertions_htab, add_assertion_table_entry, &v);
void
java_write_globals (void)
{
- tree *vec = VEC_address (tree, pending_static_fields);
- int len = VEC_length (tree, pending_static_fields);
+ tree *vec = vec_safe_address (pending_static_fields);
+ int len = vec_safe_length (pending_static_fields);
write_global_declarations ();
emit_debug_global_declarations (vec, len);
- VEC_free (tree, gc, pending_static_fields);
+ vec_free (pending_static_fields);
}
#include "gt-java-class.h"
CPool *outgoing_cpool = cpool_for_class (current_class);
tree tags_value, data_value;
tree cons;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
int i;
- VEC(constructor_elt,gc) *tags = NULL;
- VEC(constructor_elt,gc) *data = NULL;
+ vec<constructor_elt, va_gc> *tags = NULL;
+ vec<constructor_elt, va_gc> *data = NULL;
constructor_elt *t = NULL;
constructor_elt *d = NULL;
if (outgoing_cpool->count > 0)
{
int c = outgoing_cpool->count;
- VEC_safe_grow_cleared (constructor_elt, gc, tags, c);
- VEC_safe_grow_cleared (constructor_elt, gc, data, c);
- t = &VEC_index (constructor_elt, tags, c-1);
- d = &VEC_index (constructor_elt, data, c-1);
+ vec_safe_grow_cleared (tags, c);
+ vec_safe_grow_cleared (data, c);
+ t = &(*tags)[c-1];
+ d = &(*data)[c-1];
}
#define CONSTRUCTOR_PREPEND_VALUE(E, V) E->value = V, E--
tree tem;
/* Add dummy 0'th element of constant pool. */
- gcc_assert (t == VEC_address (constructor_elt, tags));
- gcc_assert (d == VEC_address (constructor_elt, data));
+ gcc_assert (t == tags->address ());
+ gcc_assert (d == data->address ());
t->value = get_tag_node (0);
d->value = null_pointer_node;
if (FIELD_STATIC (t))
{
if (DECL_EXTERNAL (t))
- VEC_safe_push (tree, gc, pending_static_fields, t);
+ vec_safe_push (pending_static_fields, t);
java_mark_decl_local (t);
}
static void expand_java_goto (int);
static tree expand_java_switch (tree, int);
static void expand_java_add_case (tree, int, int);
-static VEC(tree,gc) *pop_arguments (tree);
+static vec<tree, va_gc> *pop_arguments (tree);
static void expand_invoke (int, int, int);
static void expand_java_field_op (int, int, int);
static void java_push_constant_from_pool (struct JCF *, int);
int always_initialize_class_p = 0;
/* We store the stack state in two places:
- Within a basic block, we use the quick_stack, which is a VEC of expression
+ Within a basic block, we use the quick_stack, which is a vec of expression
nodes.
This is the top part of the stack; below that we use find_stack_slot.
At the end of a basic block, the quick_stack must be flushed
So dup cannot just add an extra element to the quick_stack, but iadd can.
*/
-static GTY(()) VEC(tree,gc) *quick_stack;
+static GTY(()) vec<tree, va_gc> *quick_stack;
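A sketch of the flow the comment above describes, using only calls from
this patch (value, type and stack_pointer assumed in scope):

    vec_safe_push (quick_stack, value);   /* push_value: defer the store */
    /* ... later, within the same basic block ... */
    if (vec_safe_length (quick_stack) != 0)
      value = quick_stack->pop ();        /* still cached on quick_stack */
    else
      value = find_stack_slot (stack_pointer, type);  /* flushed at block end */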
/* The physical memory page size used in this computer. See
build_field_ref(). */
tree t;
/* Count the number of slots the quick stack is holding. */
- for (ix = 0; VEC_iterate(tree, quick_stack, ix, t); ix++)
+ for (ix = 0; vec_safe_iterate (quick_stack, ix, &t); ix++)
stack_index -= 1 + TYPE_IS_WIDE (TREE_TYPE (t));
- for (ix = 0; VEC_iterate(tree, quick_stack, ix, t); ix++)
+ for (ix = 0; vec_safe_iterate (quick_stack, ix, &t); ix++)
{
tree decl, type = TREE_TYPE (t);
stack_index += 1 + TYPE_IS_WIDE (type);
}
- VEC_truncate (tree, quick_stack, 0);
+ vec_safe_truncate (quick_stack, 0);
}
/* Push TYPE on the type stack.
value = convert (type, value);
}
push_type (type);
- VEC_safe_push (tree, gc, quick_stack, value);
+ vec_safe_push (quick_stack, value);
/* If the value has a side effect, then we need to evaluate it
whether or not the result is used. If the value ends up on the
pop_value (tree type)
{
type = pop_type (type);
- if (VEC_length (tree, quick_stack) != 0)
- return VEC_pop (tree, quick_stack);
+ if (vec_safe_length (quick_stack) != 0)
+ return quick_stack->pop ();
else
return find_stack_slot (stack_pointer, promote_type (type));
}
expand_java_multianewarray (tree class_type, int ndim)
{
int i;
- VEC(tree,gc) *args = NULL;
+ vec<tree, va_gc> *args = NULL;
- VEC_safe_grow (tree, gc, args, 3 + ndim);
+ vec_safe_grow (args, 3 + ndim);
- VEC_replace (tree, args, 0, build_class_ref (class_type));
- VEC_replace (tree, args, 1, build_int_cst (NULL_TREE, ndim));
+ (*args)[0] = build_class_ref (class_type);
+ (*args)[1] = build_int_cst (NULL_TREE, ndim);
for (i = ndim - 1; i >= 0; i--)
- VEC_replace (tree, args, (unsigned)(2 + i), pop_value (int_type_node));
+ (*args)[(unsigned)(2 + i)] = pop_value (int_type_node);
- VEC_replace (tree, args, 2 + ndim, null_pointer_node);
+ (*args)[2 + ndim] = null_pointer_node;
push_value (build_call_vec (promote_type (class_type),
build_address_of (soft_multianewarray_node),
indexed by PC. Each element is a tree vector holding the type
state at that PC. We only note type states at basic block
boundaries. */
-VEC(tree, gc) *type_states;
+vec<tree, va_gc> *type_states;
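Because GC vectors remain pointers under the new API, element access goes
through the pointee; an illustrative sketch (pc and val assumed in scope):

    (*type_states)[pc] = val;          /* replaces VEC_replace (tree, ...) */
    tree state = (*type_states)[pc];   /* replaces VEC_index (tree, ...) */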
static void
note_label (int current_pc ATTRIBUTE_UNUSED, int target_pc)
append_to_statement_list (x, &SWITCH_BODY (switch_expr));
}
-static VEC(tree,gc) *
+static vec<tree, va_gc> *
pop_arguments (tree method_type)
{
function_args_iterator fnai;
tree type;
- VEC(tree,gc) *args = NULL;
+ vec<tree, va_gc> *args = NULL;
int arity;
FOREACH_FUNCTION_ARGS (method_type, type, fnai)
if (type == void_type_node)
break;
- VEC_safe_push (tree, gc, args, type);
+ vec_safe_push (args, type);
}
- arity = VEC_length (tree, args);
+ arity = vec_safe_length (args);
while (arity--)
{
- tree arg = pop_value (VEC_index (tree, args, arity));
+ tree arg = pop_value ((*args)[arity]);
/* We simply cast each argument to its proper type. This is
needed since we lose type information coming out of the
&& INTEGRAL_TYPE_P (type))
arg = convert (integer_type_node, arg);
- VEC_replace (tree, args, arity, arg);
+ (*args)[arity] = arg;
}
return args;
const char *new_classname;
const char *new_signature;
int flags;
- void (*rewrite_arglist) (VEC(tree,gc) **);
+ void (*rewrite_arglist) (vec<tree, va_gc> **);
} rewrite_rule;
/* Add __builtin_return_address(0) to the end of an arglist. */
static void
-rewrite_arglist_getcaller (VEC(tree,gc) **arglist)
+rewrite_arglist_getcaller (vec<tree, va_gc> **arglist)
{
tree retaddr
= build_call_expr (builtin_decl_explicit (BUILT_IN_RETURN_ADDRESS),
DECL_UNINLINABLE (current_function_decl) = 1;
- VEC_safe_push (tree, gc, *arglist, retaddr);
+ vec_safe_push (*arglist, retaddr);
}
/* Add this.class to the end of an arglist. */
static void
-rewrite_arglist_getclass (VEC(tree,gc) **arglist)
+rewrite_arglist_getclass (vec<tree, va_gc> **arglist)
{
- VEC_safe_push (tree, gc, *arglist, build_class_ref (output_class));
+ vec_safe_push (*arglist, build_class_ref (output_class));
}
static rewrite_rule rules[] =
method, update SPECIAL.*/
void
-maybe_rewrite_invocation (tree *method_p, VEC(tree,gc) **arg_list_p,
+maybe_rewrite_invocation (tree *method_p, vec<tree, va_gc> **arg_list_p,
tree *method_signature_p, tree *special)
{
tree context = DECL_NAME (TYPE_NAME (DECL_CONTEXT (*method_p)));
tree
build_known_method_ref (tree method, tree method_type ATTRIBUTE_UNUSED,
tree self_type, tree method_signature ATTRIBUTE_UNUSED,
- VEC(tree,gc) *arg_list ATTRIBUTE_UNUSED, tree special)
+ vec<tree, va_gc> *arg_list ATTRIBUTE_UNUSED, tree special)
{
tree func;
if (is_compiled_class (self_type))
}
tree
-invoke_build_dtable (int is_invoke_interface, VEC(tree,gc) *arg_list)
+invoke_build_dtable (int is_invoke_interface, vec<tree, va_gc> *arg_list)
{
tree dtable, objectref;
- tree saved = save_expr (VEC_index (tree, arg_list, 0));
+ tree saved = save_expr ((*arg_list)[0]);
- VEC_replace (tree, arg_list, 0, saved);
+ (*arg_list)[0] = saved;
/* If we're dealing with interfaces and if the objectref
argument is an array then get the dispatch table of the class
int
get_symbol_table_index (tree t, tree special,
- VEC(method_entry,gc) **symbol_table)
+ vec<method_entry, va_gc> **symbol_table)
{
method_entry *e;
unsigned i;
method_entry elem = {t, special};
- FOR_EACH_VEC_ELT (method_entry, *symbol_table, i, e)
+ FOR_EACH_VEC_SAFE_ELT (*symbol_table, i, e)
if (t == e->method && special == e->special)
goto done;
- VEC_safe_push (method_entry, gc, *symbol_table, elem);
+ vec_safe_push (*symbol_table, elem);
done:
return i + 1;
const char *const self_name
= IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (self_type)));
tree call, func, method, method_type;
- VEC(tree,gc) *arg_list;
+ vec<tree, va_gc> *arg_list;
tree check = NULL_TREE;
tree special = NULL_TREE;
We do omit the check if we're calling <init>. */
/* We use a SAVE_EXPR here to make sure we only evaluate
the new `self' expression once. */
- tree save_arg = save_expr (VEC_index (tree, arg_list, 0));
- VEC_replace (tree, arg_list, 0, save_arg);
+ tree save_arg = save_expr ((*arg_list)[0]);
+ (*arg_list)[0] = save_arg;
check = java_check_reference (save_arg, ! DECL_INIT_P (method));
func = build_known_method_ref (method, method_type, self_type,
method_signature, arg_list, special);
tree method_args;
tree meth_var;
tree bind;
- VEC(tree,gc) *args = NULL;
+ vec<tree, va_gc> *args = NULL;
int args_size = 0;
tree klass = DECL_CONTEXT (method);
/* The JNIEnv structure is the first argument to the JNI function. */
args_size += int_size_in_bytes (TREE_TYPE (env_var));
- VEC_safe_push (tree, gc, args, env_var);
+ vec_safe_push (args, env_var);
/* For a static method the second argument is the class. For a
non-static method the second argument is `this'; that is already
if (METHOD_STATIC (method))
{
args_size += int_size_in_bytes (TREE_TYPE (klass));
- VEC_safe_push (tree, gc, args, klass);
+ vec_safe_push (args, klass);
}
/* All the arguments to this method become arguments to the
#endif
args_size += (arg_bits / BITS_PER_UNIT);
- VEC_safe_push (tree, gc, args, tem);
+ vec_safe_push (args, tem);
}
arg_types = TYPE_ARG_TYPES (TREE_TYPE (method));
load_type_state (int pc)
{
int i;
- tree vec = VEC_index (tree, type_states, pc);
+ tree vec = (*type_states)[pc];
int cur_length = TREE_VEC_LENGTH (vec);
stack_pointer = cur_length - DECL_MAX_LOCALS(current_function_decl);
for (i = 0; i < cur_length; i++)
byte_ops = jcf->read_ptr;
instruction_bits = XRESIZEVAR (char, instruction_bits, length + 1);
memset (instruction_bits, 0, length + 1);
- type_states = VEC_alloc (tree, gc, length + 1);
- VEC_safe_grow_cleared (tree, gc, type_states, length + 1);
+ vec_alloc (type_states, length + 1);
+ type_states->quick_grow_cleared (length + 1);
/* This pass figures out which PC can be the targets of jumps. */
for (PC = 0; PC < length;)
int max_stack;
int arg_slot_count;
source_location last_line; /* End line number for a function decl */
- VEC(tree,gc) *throws_list; /* Exception specified by `throws' */
+ vec<tree, va_gc> *throws_list; /* Exception specified by `throws' */
tree exc_obj; /* Decl holding the exception object. */
/* Class initialization test variables */
tree special;
} method_entry;
-DEF_VEC_O(method_entry);
-DEF_VEC_ALLOC_O(method_entry,gc);
/* FIXME: the variable_size annotation here is needed because these types are
variable-sized in some other frontends. Due to gengtype deficiency the GTY
tree cpool_data_ref; /* Cached */
tree package_list; /* List of package names, progressive */
- VEC(method_entry,gc) *otable_methods; /* List of static decls referred
+ vec<method_entry, va_gc> *otable_methods; /* List of static decls referred
to by this class. */
tree otable_decl; /* The static address table. */
tree otable_syms_decl;
- VEC(method_entry,gc) *atable_methods; /* List of abstract methods
+ vec<method_entry, va_gc> *atable_methods; /* List of abstract methods
referred to by this class. */
tree atable_decl; /* The static address table. */
tree atable_syms_decl;
- VEC(method_entry,gc) *itable_methods; /* List of interface methods
+ vec<method_entry, va_gc> *itable_methods; /* List of interface methods
referred to by this class. */
tree itable_decl; /* The interfaces table. */
tree itable_syms_decl;
tree ctable_decl; /* The table of classes for the runtime
type matcher. */
- VEC(constructor_elt,gc) *catch_classes;
+ vec<constructor_elt, va_gc> *catch_classes;
htab_t GTY ((param_is (struct treetreehash_entry))) type_to_runtime_map;
/* The mapping of classes to exception region
extern tree lookup_name (tree);
extern bool special_method_p (tree);
-extern void maybe_rewrite_invocation (tree *, VEC(tree,gc) **, tree *, tree *);
-extern tree build_known_method_ref (tree, tree, tree, tree, VEC(tree,gc) *, tree);
+extern void maybe_rewrite_invocation (tree *, vec<tree, va_gc> **, tree *,
+ tree *);
+extern tree build_known_method_ref (tree, tree, tree, tree, vec<tree, va_gc> *,
+ tree);
extern tree build_class_init (tree, tree);
extern int attach_init_test_initialization_flags (void **, void *);
extern tree build_invokevirtual (tree, tree, tree);
extern tree build_invokeinterface (tree, tree);
extern tree build_jni_stub (tree);
-extern tree invoke_build_dtable (int, VEC(tree,gc) *);
+extern tree invoke_build_dtable (int, vec<tree, va_gc> *);
extern tree build_field_ref (tree, tree, tree);
extern tree java_modify_addr_for_volatile (tree);
extern void pushdecl_force_head (tree);
extern int alloc_name_constant (int, tree);
extern int alloc_constant_fieldref (tree, tree);
extern void emit_register_classes (tree *);
-extern tree emit_symbol_table (tree, tree, VEC(method_entry,gc) *,
+extern tree emit_symbol_table (tree, tree, vec<method_entry, va_gc> *,
tree, tree, int);
extern void lang_init_source (int);
extern void write_classfile (tree);
extern void finish_method (tree);
extern void java_expand_body (tree);
-extern int get_symbol_table_index (tree, tree, VEC(method_entry,gc) **);
+extern int get_symbol_table_index (tree, tree, vec<method_entry, va_gc> **);
extern tree make_catch_class_record (tree, tree);
extern tree emit_catch_table (tree);
int cxx_keyword_p (const char *name, int length);
-extern GTY(()) VEC(tree,gc) *pending_static_fields;
+extern GTY(()) vec<tree, va_gc> *pending_static_fields;
extern void java_write_globals (void);
#define CLASS_COMPLETE_P(DECL) DECL_LANG_FLAG_2 (DECL)
/* A vector used to track type states for the current method. */
-extern VEC(tree, gc) *type_states;
+extern vec<tree, va_gc> *type_states;
/* This maps a bytecode offset (PC) to various flags,
listed below (starting with BCODE_). */
#define START_RECORD_CONSTRUCTOR(V, CTYPE) \
do \
{ \
- V = VEC_alloc (constructor_elt, gc, 0); \
+ vec_alloc (V, 0); \
CONSTRUCTOR_APPEND_ELT (V, TYPE_FIELDS (CTYPE), NULL); \
} \
while (0)
#define PUSH_SUPER_VALUE(V, VALUE) \
do \
{ \
- constructor_elt *_elt___ = &VEC_last (constructor_elt, V); \
+ constructor_elt *_elt___ = &(V)->last (); \
tree _next___ = DECL_CHAIN (_elt___->index); \
gcc_assert (!DECL_NAME (_elt___->index)); \
_elt___->value = VALUE; \
#define PUSH_FIELD_VALUE(V, NAME, VALUE) \
do \
{ \
- constructor_elt *_elt___ = &VEC_last (constructor_elt, V); \
+ constructor_elt *_elt___ = &(V)->last (); \
tree _next___ = DECL_CHAIN (_elt___->index); \
gcc_assert (strcmp (IDENTIFIER_POINTER (DECL_NAME (_elt___->index)), \
NAME) == 0); \
#define FINISH_RECORD_CONSTRUCTOR(CONS, V, CTYPE) \
do \
{ \
- VEC_pop (constructor_elt, V); \
+ V->pop (); \
CONS = build_constructor (CTYPE, V); \
TREE_CONSTANT (CONS) = 0; \
} \
#include "ggc.h"
#include "debug.h"
#include "cgraph.h"
-#include "vecprim.h"
#include "bitmap.h"
#include "target.h"
static GTY(()) struct JCF * main_jcf;
/* A list of all the class DECLs seen so far. */
-static GTY(()) VEC(tree,gc) *all_class_list;
+static GTY(()) vec<tree, va_gc> *all_class_list;
/* The number of source files passed to us by -fsource-filename and an
array of pointers to each name. Used by find_sourcefile(). */
{
bitmap_iterator bi;
unsigned int offset;
- VEC(int, heap) *map = (VEC(int, heap) *) arg;
+ vec<int> *map = (vec<int> *) arg;
unsigned char *data = TYPE_REFLECTION_DATA (current_class);
if (map)
{
uint16 index = annotation_read_short (data + offset);
annotation_rewrite_short
- (VEC_index (int, map, index), data + offset);
+ ((*map)[index], data + offset);
}
}
}
#define HANDLE_EXCEPTIONS_ATTRIBUTE(COUNT) \
{ \
int n = COUNT; \
- VEC (tree,gc) *v = VEC_alloc (tree, gc, n); \
- gcc_assert (DECL_FUNCTION_THROWS (current_method) == NULL); \
+ vec<tree, va_gc> *v; \
+ vec_alloc (v, n); \
+ gcc_assert (!DECL_FUNCTION_THROWS (current_method)); \
while (--n >= 0) \
{ \
tree thrown_class = get_class_constant (jcf, JCF_readu2 (jcf)); \
- VEC_quick_push (tree, v, thrown_class); \
+ v->quick_push (thrown_class); \
} \
DECL_FUNCTION_THROWS (current_method) = v; \
}
if (current_class == object_type_node)
layout_class_methods (object_type_node);
else
- VEC_safe_push (tree, gc, all_class_list, TYPE_NAME (current_class));
+ vec_safe_push (all_class_list, TYPE_NAME (current_class));
}
/* If we came across inner classes, load them now. */
java_layout_seen_class_methods (void)
{
unsigned start = 0;
- unsigned end = VEC_length (tree, all_class_list);
+ unsigned end = vec_safe_length (all_class_list);
while (1)
{
for (ix = start; ix != end; ix++)
{
- tree decl = VEC_index (tree, all_class_list, ix);
+ tree decl = (*all_class_list)[ix];
tree cls = TREE_TYPE (decl);
input_location = DECL_SOURCE_LOCATION (decl);
/* Note that new classes might have been added while laying out
methods, changing the value of all_class_list. */
- new_length = VEC_length (tree, all_class_list);
+ new_length = vec_safe_length (all_class_list);
if (end != new_length)
{
start = end;
input_location = save_location;
}
-static VEC(tree,gc) *predefined_filenames;
+static vec<tree, va_gc> *predefined_filenames;
void
add_predefined_file (tree name)
{
- VEC_safe_push (tree, gc, predefined_filenames, name);
+ vec_safe_push (predefined_filenames, name);
}
int
unsigned ix;
tree f;
- FOR_EACH_VEC_ELT (tree, predefined_filenames, ix, f)
+ FOR_EACH_VEC_SAFE_ELT (predefined_filenames, ix, f)
if (f == node)
return 1;
const char *resource_filename;
/* Only one resource file may be compiled at a time. */
- gcc_assert (VEC_length (tree, all_translation_units) == 1);
+ gcc_assert (all_translation_units->length () == 1);
resource_filename
- = IDENTIFIER_POINTER
- (DECL_NAME (VEC_index (tree, all_translation_units, 0)));
+ = IDENTIFIER_POINTER (DECL_NAME ((*all_translation_units)[0]));
compile_resource_file (resource_name, resource_filename);
goto finish;
}
current_jcf = main_jcf;
- FOR_EACH_VEC_ELT (tree, all_translation_units, ix, node)
+ FOR_EACH_VEC_ELT (*all_translation_units, ix, node)
{
unsigned char magic_string[4];
char *real_path;
}
}
- FOR_EACH_VEC_ELT (tree, all_translation_units, ix, node)
+ FOR_EACH_VEC_ELT (*all_translation_units, ix, node)
{
input_location = DECL_SOURCE_LOCATION (node);
if (CLASS_FILE_P (node))
#include "cgraph.h"
/* A list of all the resources files. */
-static GTY(()) VEC(tree,gc) *resources;
+static GTY(()) vec<tree, va_gc> *resources;
void
compile_resource_data (const char *name, const char *buffer, int length)
{
tree rtype, field = NULL_TREE, data_type, rinit, data, decl;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
data_type = build_prim_array_type (unsigned_byte_type_node,
strlen (name) + length);
rest_of_decl_compilation (decl, global_bindings_p (), 0);
varpool_finalize_decl (decl);
- VEC_safe_push (tree, gc, resources, decl);
+ vec_safe_push (resources, decl);
}
void
register_resource_fn = t;
/* Write out entries in the same order in which they were defined. */
- FOR_EACH_VEC_ELT (tree, resources, ix, decl)
+ FOR_EACH_VEC_ELT (*resources, ix, decl)
{
t = build_fold_addr_expr (decl);
t = build_call_expr (register_resource_fn, 1, t);
vfy_note_stack_depth (vfy_method *method, int pc, int depth)
{
tree val = make_tree_vec (method->max_locals + depth);
- VEC_replace (tree, type_states, pc, val);
+ (*type_states)[pc] = val;
/* Called for side effects. */
lookup_label (pc);
}
if (type == object_type_node)
type = object_ptr_type_node;
- vec = VEC_index (tree, type_states, pc);
+ vec = (*type_states)[pc];
TREE_VEC_ELT (vec, slot) = type;
/* Called for side effects. */
lookup_label (pc);
if (type == object_type_node)
type = object_ptr_type_node;
- vec = VEC_index (tree, type_states, pc);
+ vec = (*type_states)[pc];
TREE_VEC_ELT (vec, slot) = type;
/* Called for side effects. */
lookup_label (pc);
typedef struct invariant *invariant_p;
-DEF_VEC_P(invariant_p);
-DEF_VEC_ALLOC_P(invariant_p, heap);
/* The invariants. */
-static VEC(invariant_p,heap) *invariants;
+static vec<invariant_p> invariants;
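Under the old API every element type needed instantiation macros before a
VEC could be declared; the template form needs none, which is why the
DEF_VEC_* lines above simply disappear rather than being replaced:

    /* Before: per-type boilerplate was mandatory.  */
    DEF_VEC_P (invariant_p);
    DEF_VEC_ALLOC_P (invariant_p, heap);
    static VEC(invariant_p,heap) *invariants;

    /* After: any type works directly as a template argument.  */
    static vec<invariant_p> invariants;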
/* Check the size of the invariant table and realloc if necessary. */
EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, bi)
{
- dep = VEC_index (invariant_p, invariants, depno);
+ dep = invariants[depno];
find_identical_invariants (eq, dep);
}
{
unsigned i;
struct invariant *inv;
- htab_t eq = htab_create (VEC_length (invariant_p, invariants),
+ htab_t eq = htab_create (invariants.length (),
hash_invariant_expr, eq_invariant_expr, free);
- FOR_EACH_VEC_ELT (invariant_p, invariants, i, inv)
+ FOR_EACH_VEC_ELT (invariants, i, inv)
find_identical_invariants (eq, inv);
htab_delete (eq);
inv->stamp = 0;
inv->insn = insn;
- inv->invno = VEC_length (invariant_p, invariants);
+ inv->invno = invariants.length ();
inv->eqto = ~0u;
if (def)
def->invno = inv->invno;
- VEC_safe_push (invariant_p, heap, invariants, inv);
+ invariants.safe_push (inv);
if (dump_file)
{
bitmap_iterator bi;
/* Find the representative of the class of the equivalent invariants. */
- inv = VEC_index (invariant_p, invariants, inv->eqto);
+ inv = invariants[inv->eqto];
*comp_cost = 0;
if (! flag_ira_loop_pressure)
{
bool check_p;
- dep = VEC_index (invariant_p, invariants, depno);
+ dep = invariants[depno];
get_inv_cost (dep, &acomp_cost, aregs_needed);
int i, gain = 0, again;
unsigned aregs_needed[N_REG_CLASSES], invno;
- FOR_EACH_VEC_ELT (invariant_p, invariants, invno, inv)
+ FOR_EACH_VEC_ELT (invariants, invno, inv)
{
if (inv->move)
continue;
static void
set_move_mark (unsigned invno, int gain)
{
- struct invariant *inv = VEC_index (invariant_p, invariants, invno);
+ struct invariant *inv = invariants[invno];
bitmap_iterator bi;
/* Find the representative of the class of the equivalent invariants. */
- inv = VEC_index (invariant_p, invariants, inv->eqto);
+ inv = invariants[inv->eqto];
if (inv->move)
return;
unsigned i, regs_used, regs_needed[N_REG_CLASSES], new_regs[N_REG_CLASSES];
struct invariant *inv = NULL;
- if (!VEC_length (invariant_p, invariants))
+ if (!invariants.length ())
return;
if (flag_ira_loop_pressure)
static bool
move_invariant_reg (struct loop *loop, unsigned invno)
{
- struct invariant *inv = VEC_index (invariant_p, invariants, invno);
- struct invariant *repr = VEC_index (invariant_p, invariants, inv->eqto);
+ struct invariant *inv = invariants[invno];
+ struct invariant *repr = invariants[inv->eqto];
unsigned i;
basic_block preheader = loop_preheader_edge (loop)->src;
rtx reg, set, dest, note;
struct invariant *inv;
unsigned i;
- FOR_EACH_VEC_ELT (invariant_p, invariants, i, inv)
+ FOR_EACH_VEC_ELT (invariants, i, inv)
move_invariant_reg (loop, i);
if (flag_ira_loop_pressure && resize_reg_info ())
{
- FOR_EACH_VEC_ELT (invariant_p, invariants, i, inv)
+ FOR_EACH_VEC_ELT (invariants, i, inv)
if (inv->reg != NULL_RTX)
{
if (inv->orig_regno >= 0)
{
actual_stamp = 1;
- invariants = VEC_alloc (invariant_p, heap, 100);
+ invariants.create (100);
}
/* Frees the data allocated by invariant motion. */
}
}
- FOR_EACH_VEC_ELT (invariant_p, invariants, i, inv)
+ FOR_EACH_VEC_ELT (invariants, i, inv)
{
BITMAP_FREE (inv->depends_on);
free (inv);
}
- VEC_free (invariant_p, heap, invariants);
+ invariants.release ();
}
/* Move the invariants out of the LOOP. */
{
rtx insn; /* The insn in which the variable expansion occurs. */
rtx reg; /* The accumulator which is expanded. */
- VEC(rtx,heap) *var_expansions; /* The copies of the accumulator which is expanded. */
+ vec<rtx> var_expansions; /* The copies of the accumulator which is expanded. */
struct var_to_expand *next; /* Next entry in walking order. */
enum rtx_code op; /* The type of the accumulation - addition, subtraction
or multiplication. */
sbitmap wont_exit;
unsigned HOST_WIDE_INT npeel;
unsigned i;
- VEC (edge, heap) *remove_edges;
+ vec<edge> remove_edges;
edge ein;
struct niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
if (desc->noloop_assumptions)
bitmap_clear_bit (wont_exit, 1);
- remove_edges = NULL;
+ remove_edges.create (0);
if (flag_split_ivs_in_unroller)
opt_info = analyze_insns_in_loop (loop);
}
/* Remove the exit edges. */
- FOR_EACH_VEC_ELT (edge, remove_edges, i, ein)
+ FOR_EACH_VEC_ELT (remove_edges, i, ein)
remove_path (ein);
- VEC_free (edge, heap, remove_edges);
+ remove_edges.release ();
}
ein = desc->in_edge;
unsigned exit_mod;
sbitmap wont_exit;
unsigned i;
- VEC (edge, heap) *remove_edges;
+ vec<edge> remove_edges;
edge e;
unsigned max_unroll = loop->lpt_decision.times;
struct niter_desc *desc = get_simple_loop_desc (loop);
wont_exit = sbitmap_alloc (max_unroll + 1);
bitmap_ones (wont_exit);
- remove_edges = NULL;
+ remove_edges.create (0);
if (flag_split_ivs_in_unroller
|| flag_variable_expansion_in_unroller)
opt_info = analyze_insns_in_loop (loop);
desc->niter_expr = GEN_INT (desc->niter);
/* Remove the edges. */
- FOR_EACH_VEC_ELT (edge, remove_edges, i, e)
+ FOR_EACH_VEC_ELT (remove_edges, i, e)
remove_path (e);
- VEC_free (edge, heap, remove_edges);
+ remove_edges.release ();
if (dump_file)
fprintf (dump_file,
rtx old_niter, niter, init_code, branch_code, tmp;
unsigned i, j, p;
basic_block preheader, *body, swtch, ezc_swtch;
- VEC (basic_block, heap) *dom_bbs;
+ vec<basic_block> dom_bbs;
sbitmap wont_exit;
int may_exit_copy;
unsigned n_peel;
- VEC (edge, heap) *remove_edges;
+ vec<edge> remove_edges;
edge e;
bool extra_zero_check, last_may_exit;
unsigned max_unroll = loop->lpt_decision.times;
opt_info = analyze_insns_in_loop (loop);
/* Remember blocks whose dominators will have to be updated. */
- dom_bbs = NULL;
+ dom_bbs.create (0);
body = get_loop_body (loop);
for (i = 0; i < loop->num_nodes; i++)
{
- VEC (basic_block, heap) *ldom;
+ vec<basic_block> ldom;
basic_block bb;
ldom = get_dominated_by (CDI_DOMINATORS, body[i]);
- FOR_EACH_VEC_ELT (basic_block, ldom, j, bb)
+ FOR_EACH_VEC_ELT (ldom, j, bb)
if (!flow_bb_inside_loop_p (loop, bb))
- VEC_safe_push (basic_block, heap, dom_bbs, bb);
+ dom_bbs.safe_push (bb);
- VEC_free (basic_block, heap, ldom);
+ ldom.release ();
}
free (body);
/* Precondition the loop. */
split_edge_and_insert (loop_preheader_edge (loop), init_code);
- remove_edges = NULL;
+ remove_edges.create (0);
wont_exit = sbitmap_alloc (max_unroll + 2);
}
/* Remove the edges. */
- FOR_EACH_VEC_ELT (edge, remove_edges, i, e)
+ FOR_EACH_VEC_ELT (remove_edges, i, e)
remove_path (e);
- VEC_free (edge, heap, remove_edges);
+ remove_edges.release ();
/* We must be careful when updating the number of iterations due to
preconditioning and the fact that the value must be valid at entry
"in runtime, %i insns\n",
max_unroll, num_loop_insns (loop));
- VEC_free (basic_block, heap, dom_bbs);
+ dom_bbs.release ();
}
/* Decide whether to simply peel LOOP and how much. */
ves = XNEW (struct var_to_expand);
ves->insn = insn;
ves->reg = copy_rtx (dest);
- ves->var_expansions = VEC_alloc (rtx, heap, 1);
+ ves->var_expansions.create (1);
ves->next = NULL;
ves->op = GET_CODE (src);
ves->expansion_count = 0;
struct var_to_expand *ves = NULL;
PTR *slot1;
PTR *slot2;
- VEC (edge, heap) *edges = get_loop_exit_edges (loop);
+ vec<edge> edges = get_loop_exit_edges (loop);
edge exit;
bool can_apply = false;
/* Record the loop exit bb and loop preheader before the unrolling. */
opt_info->loop_preheader = loop_preheader_edge (loop)->src;
- if (VEC_length (edge, edges) == 1)
+ if (edges.length () == 1)
{
- exit = VEC_index (edge, edges, 0);
+ exit = edges[0];
if (!(exit->flags & EDGE_COMPLEX))
{
opt_info->loop_exit = split_edge (exit);
}
}
- VEC_free (edge, heap, edges);
+ edges.release ();
free (body);
return opt_info;
}
if (ve->reuse_expansion == 0)
reg = ve->reg;
else
- reg = VEC_index (rtx, ve->var_expansions, ve->reuse_expansion - 1);
+ reg = ve->var_expansions[ve->reuse_expansion - 1];
- if (VEC_length (rtx, ve->var_expansions) == (unsigned) ve->reuse_expansion)
+ if (ve->var_expansions.length () == (unsigned) ve->reuse_expansion)
ve->reuse_expansion = 0;
else
ve->reuse_expansion++;
if (apply_change_group ())
if (really_new_expansion)
{
- VEC_safe_push (rtx, heap, ve->var_expansions, new_reg);
+ ve->var_expansions.safe_push (new_reg);
ve->expansion_count++;
}
}
enum machine_mode mode = GET_MODE (ve->reg);
bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode);
- if (VEC_length (rtx, ve->var_expansions) == 0)
+ if (ve->var_expansions.length () == 0)
return;
start_sequence ();
/* Note that we only accumulate FMA via the ADD operand. */
case PLUS:
case MINUS:
- FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var)
+ FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
{
if (honor_signed_zero_p)
zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode);
break;
case MULT:
- FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var)
+ FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
{
zero_init = CONST1_RTX (GET_MODE (var));
emit_move_insn (var, zero_init);
rtx expr, seq, var, insn;
unsigned i;
- if (VEC_length (rtx, ve->var_expansions) == 0)
+ if (ve->var_expansions.length () == 0)
return;
start_sequence ();
/* Note that we only accumulate FMA via the ADD operand. */
case PLUS:
case MINUS:
- FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var)
+ FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg), var, sum);
break;
case MULT:
- FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var)
+ FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
sum = simplify_gen_binary (MULT, GET_MODE (ve->reg), var, sum);
break;
struct var_to_expand *ves;
for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
- VEC_free (rtx, heap, ves->var_expansions);
+ ves->var_expansions.release ();
htab_delete (opt_info->insns_with_var_to_expand);
}
free (opt_info);
# define STACK_GROWS_DOWNWARD 0
#endif
-DEF_VEC_P (bitmap);
-DEF_VEC_ALLOC_P (bitmap,heap);
/* Decompose multi-word pseudo-registers into individual
pseudo-registers when possible and profitable. This is possible
/* Bit N in the bitmap in element M of this array is set if there is a
copy from reg M to reg N. */
-static VEC(bitmap,heap) *reg_copy_graph;
+static vec<bitmap> reg_copy_graph;
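Worth noting for vectors of pointers such as this one: release () frees
only the vector's own storage, never the pointees, so each bitmap must be
freed individually first, as the teardown later in this file does:

    bitmap b;
    unsigned i;
    FOR_EACH_VEC_ELT (reg_copy_graph, i, b)
      if (b)
        BITMAP_FREE (b);        /* free each pointee...  */
    reg_copy_graph.release ();  /* ...then the vector's own storage */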
struct target_lower_subreg default_target_lower_subreg;
#if SWITCHABLE_TARGET
if (HARD_REGISTER_NUM_P (rd) || HARD_REGISTER_NUM_P (rs))
return false;
- b = VEC_index (bitmap, reg_copy_graph, rs);
+ b = reg_copy_graph[rs];
if (b == NULL)
{
b = BITMAP_ALLOC (NULL);
- VEC_replace (bitmap, reg_copy_graph, rs, b);
+ reg_copy_graph[rs] = b;
}
bitmap_set_bit (b, rd);
EXECUTE_IF_SET_IN_BITMAP (queue, 0, i, iter)
{
- bitmap b = VEC_index (bitmap, reg_copy_graph, i);
+ bitmap b = reg_copy_graph[i];
if (b)
bitmap_ior_and_compl_into (propagate, b, non_decomposable_context);
}
non_decomposable_context = BITMAP_ALLOC (NULL);
subreg_context = BITMAP_ALLOC (NULL);
- reg_copy_graph = VEC_alloc (bitmap, heap, max);
- VEC_safe_grow (bitmap, heap, reg_copy_graph, max);
- memset (VEC_address (bitmap, reg_copy_graph), 0, sizeof (bitmap) * max);
+ reg_copy_graph.create (max);
+ reg_copy_graph.safe_grow_cleared (max);
+ memset (reg_copy_graph.address (), 0, sizeof (bitmap) * max);
speed_p = optimize_function_for_speed_p (cfun);
FOR_EACH_BB (bb)
unsigned int i;
bitmap b;
- FOR_EACH_VEC_ELT (bitmap, reg_copy_graph, i, b)
+ FOR_EACH_VEC_ELT (reg_copy_graph, i, b)
if (b)
BITMAP_FREE (b);
}
- VEC_free (bitmap, heap, reg_copy_graph);
+ reg_copy_graph.release ();
BITMAP_FREE (decomposable_context);
BITMAP_FREE (non_decomposable_context);
}
/* Vec containing execution frequencies of program points. */
-static VEC(int,heap) *point_freq_vec;
+static vec<int> point_freq_vec;
/* Pointer to the start of the elements of the above vector. */
int *lra_point_freq;
static void
next_program_point (int &point, int freq)
{
- VEC_safe_push (int, heap, point_freq_vec, freq);
- lra_point_freq = VEC_address (int, point_freq_vec);
+ point_freq_vec.safe_push (freq);
+ lra_point_freq = point_freq_vec.address ();
point++;
}
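The refresh above is load-bearing: safe_push may reallocate the heap
vector, invalidating any pointer previously obtained from address ().
A minimal sketch of the hazard (p and freq assumed in scope):

    int *p = point_freq_vec.address ();
    point_freq_vec.safe_push (freq);   /* may move the storage */
    p = point_freq_vec.address ();     /* re-fetch; P may be stale otherwise */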
dead_set = sparseset_alloc (max_regno);
unused_set = sparseset_alloc (max_regno);
curr_point = 0;
- point_freq_vec = VEC_alloc (int, heap, get_max_uid () * 2);
- lra_point_freq = VEC_address (int, point_freq_vec);
+ point_freq_vec.create (get_max_uid () * 2);
+ lra_point_freq = point_freq_vec.address ();
int *post_order_rev_cfg = XNEWVEC (int, last_basic_block);
int n_blocks_inverted = inverted_post_order_compute (post_order_rev_cfg);
lra_assert (n_blocks_inverted == n_basic_blocks);
for (i = 0; i < max_reg_num (); i++)
free_live_range_list (lra_reg_info[i].live_ranges);
- VEC_free (int, heap, point_freq_vec);
+ point_freq_vec.release ();
}
/* Initialize live ranges data once per function. */
/* Pools for copies. */
static alloc_pool copy_pool;
-DEF_VEC_P(lra_copy_t);
-DEF_VEC_ALLOC_P(lra_copy_t, heap);
-
/* Vec referring to pseudo copies. */
-static VEC(lra_copy_t,heap) *copy_vec;
+static vec<lra_copy_t> copy_vec;
/* Initialize I-th element of lra_reg_info. */
static inline void
initialize_lra_reg_info_element (i);
copy_pool
= create_alloc_pool ("lra copies", sizeof (struct lra_copy), 100);
- copy_vec = VEC_alloc (lra_copy_t, heap, 100);
+ copy_vec.create (100);
}
free (lra_reg_info);
reg_info_size = 0;
free_alloc_pool (copy_pool);
- VEC_free (lra_copy_t, heap, copy_vec);
+ copy_vec.release ();
}
/* Expand common reg info if it is necessary. */
{
lra_copy_t cp;
- while (VEC_length (lra_copy_t, copy_vec) != 0)
+ while (copy_vec.length () != 0)
{
- cp = VEC_pop (lra_copy_t, copy_vec);
+ cp = copy_vec.pop ();
lra_reg_info[cp->regno1].copies = lra_reg_info[cp->regno2].copies = NULL;
pool_free (copy_pool, cp);
}
regno1 = temp;
}
cp = (lra_copy_t) pool_alloc (copy_pool);
- VEC_safe_push (lra_copy_t, heap, copy_vec, cp);
+ copy_vec.safe_push (cp);
cp->regno1_dest_p = regno1_dest_p;
cp->freq = freq;
cp->regno1 = regno1;
lra_copy_t
lra_get_copy (int n)
{
- if (n >= (int) VEC_length (lra_copy_t, copy_vec))
+ if (n >= (int) copy_vec.length ())
return NULL;
- return VEC_index (lra_copy_t, copy_vec, n);
+ return copy_vec[n];
}
\f
static sbitmap lra_constraint_insn_stack_bitmap;
/* The stack itself. */
-VEC (rtx, heap) *lra_constraint_insn_stack;
+vec<rtx> lra_constraint_insn_stack;
/* Put INSN on the stack. If ALWAYS_UPDATE is true, always update the reg
info for INSN, otherwise only update it if INSN is not already on the
bitmap_set_bit (lra_constraint_insn_stack_bitmap, uid);
if (! always_update)
lra_update_insn_regno_info (insn);
- VEC_safe_push (rtx, heap, lra_constraint_insn_stack, insn);
+ lra_constraint_insn_stack.safe_push (insn);
}
/* Put INSN on the stack. */
rtx
lra_pop_insn (void)
{
- rtx insn = VEC_pop (rtx, lra_constraint_insn_stack);
+ rtx insn = lra_constraint_insn_stack.pop ();
bitmap_clear_bit (lra_constraint_insn_stack_bitmap, INSN_UID (insn));
return insn;
}
unsigned int
lra_insn_stack_length (void)
{
- return VEC_length (rtx, lra_constraint_insn_stack);
+ return lra_constraint_insn_stack.length ();
}
/* Push insns FROM to TO (excluding it) going in reverse order. */
typedef struct sloc *sloc_t;
-DEF_VEC_P(sloc_t);
-DEF_VEC_ALLOC_P(sloc_t, heap);
-
/* Locations of the former scratches. */
-static VEC (sloc_t, heap) *scratches;
+static vec<sloc_t> scratches;
/* Bitmap of scratch regnos. */
static bitmap_head scratch_bitmap;
lra_insn_recog_data_t id;
struct lra_static_insn_data *static_id;
- scratches = VEC_alloc (sloc_t, heap, get_max_uid ());
+ scratches.create (get_max_uid ());
bitmap_initialize (&scratch_bitmap, ®_obstack);
bitmap_initialize (&scratch_operand_bitmap, ®_obstack);
FOR_EACH_BB (bb)
loc = XNEW (struct sloc);
loc->insn = insn;
loc->nop = i;
- VEC_safe_push (sloc_t, heap, scratches, loc);
+ scratches.safe_push (loc);
bitmap_set_bit (&scratch_bitmap, REGNO (*id->operand_loc[i]));
bitmap_set_bit (&scratch_operand_bitmap,
INSN_UID (insn) * MAX_RECOG_OPERANDS + i);
static void
restore_scratches (void)
{
- int i, regno;
+ int regno;
+ unsigned i;
sloc_t loc;
rtx last = NULL_RTX;
lra_insn_recog_data_t id = NULL;
- for (i = 0; VEC_iterate (sloc_t, scratches, i, loc); i++)
+ for (i = 0; scratches.iterate (i, &loc); i++)
{
if (last != loc->insn)
{
INSN_UID (loc->insn), loc->nop);
}
}
- for (i = 0; VEC_iterate (sloc_t, scratches, i, loc); i++)
+ for (i = 0; scratches.iterate (i, &loc); i++)
free (loc);
- VEC_free (sloc_t, heap, scratches);
+ scratches.release ();
bitmap_clear (&scratch_bitmap);
bitmap_clear (&scratch_operand_bitmap);
}
/* We don't use DF from now on, and avoid its use, because it is too
expensive when a lot of RTL changes are made. */
df_set_flags (DF_NO_INSN_RESCAN);
- lra_constraint_insn_stack = VEC_alloc (rtx, heap, get_max_uid ());
+ lra_constraint_insn_stack.create (get_max_uid ());
lra_constraint_insn_stack_bitmap = sbitmap_alloc (get_max_uid ());
bitmap_clear (lra_constraint_insn_stack_bitmap);
lra_live_ranges_init ();
lra_constraints_finish ();
finish_reg_info ();
sbitmap_free (lra_constraint_insn_stack_bitmap);
- VEC_free (rtx, heap, lra_constraint_insn_stack);
+ lra_constraint_insn_stack.release ();
finish_insn_recog_data ();
regstat_free_n_sets_and_refs ();
regstat_free_ri ();
#include "tree-pass.h"
static void output_cgraph_opt_summary (void);
-static void input_cgraph_opt_summary (VEC (symtab_node, heap) * nodes);
+static void input_cgraph_opt_summary (vec<symtab_node> nodes);
/* Number of LDPR values known to GCC. */
#define LDPR_NUM_KNOWN (LDPR_PREVAILING_DEF_IRONLY_EXP + 1)
if (!for_input)
encoder->map = pointer_map_create ();
- encoder->nodes = NULL;
+ encoder->nodes.create (0);
return encoder;
}
void
lto_symtab_encoder_delete (lto_symtab_encoder_t encoder)
{
- VEC_free (lto_encoder_entry, heap, encoder->nodes);
+ encoder->nodes.release ();
if (encoder->map)
pointer_map_destroy (encoder->map);
free (encoder);
{
lto_encoder_entry entry = {node, false, false, false};
- ref = VEC_length (lto_encoder_entry, encoder->nodes);
- VEC_safe_push (lto_encoder_entry, heap, encoder->nodes, entry);
+ ref = encoder->nodes.length ();
+ encoder->nodes.safe_push (entry);
return ref;
}
if (!slot || !*slot)
{
lto_encoder_entry entry = {node, false, false, false};
- ref = VEC_length (lto_encoder_entry, encoder->nodes);
+ ref = encoder->nodes.length ();
if (!slot)
slot = pointer_map_insert (encoder->map, node);
*slot = (void *) (intptr_t) (ref + 1);
- VEC_safe_push (lto_encoder_entry, heap, encoder->nodes, entry);
+ encoder->nodes.safe_push (entry);
}
else
ref = (size_t) *slot - 1;
return false;
index = (size_t) *slot - 1;
- gcc_checking_assert (VEC_index (lto_encoder_entry,
- encoder->nodes, index).node
- == node);
+ gcc_checking_assert (encoder->nodes[index].node == node);
/* Remove from vector. We do this by swapping node with the last element
of the vector. */
- last_node = VEC_pop (lto_encoder_entry, encoder->nodes);
+ last_node = encoder->nodes.pop ();
if (last_node.node != node)
{
last_slot = pointer_map_contains (encoder->map, last_node.node);
*last_slot = (void *)(size_t) (index + 1);
/* Move the last element to the original spot of NODE. */
- VEC_replace (lto_encoder_entry, encoder->nodes, index,
- last_node);
+ encoder->nodes[index] = last_node;
}
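The swap-with-last removal above is the usual O(1) unordered-removal idiom;
in generic form, for an assumed vec<T> v and a slot index to delete:

    T last = v.pop ();         /* shrink the vector by one */
    if (index < v.length ())   /* the removed slot was not the last element */
      v[index] = last;         /* move the old tail into the hole */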
/* Remove element from hash table. */
struct cgraph_node *node)
{
int index = lto_symtab_encoder_lookup (encoder, (symtab_node)node);
- return VEC_index (lto_encoder_entry, encoder->nodes, index).body;
+ return encoder->nodes[index].body;
}
/* Return TRUE if we should encode body of NODE (if any). */
struct cgraph_node *node)
{
int index = lto_symtab_encoder_encode (encoder, (symtab_node)node);
- gcc_checking_assert (VEC_index (lto_encoder_entry, encoder->nodes,
- index).node == (symtab_node)node);
- VEC_index (lto_encoder_entry, encoder->nodes, index).body = true;
+ gcc_checking_assert (encoder->nodes[index].node == (symtab_node)node);
+ encoder->nodes[index].body = true;
}
/* Return TRUE if we should encode initializer of NODE (if any). */
int index = lto_symtab_encoder_lookup (encoder, (symtab_node)node);
if (index == LCC_NOT_FOUND)
return false;
- return VEC_index (lto_encoder_entry, encoder->nodes, index).initializer;
+ return encoder->nodes[index].initializer;
}
/* Return TRUE if we should encode initializer of NODE (if any). */
struct varpool_node *node)
{
int index = lto_symtab_encoder_lookup (encoder, (symtab_node)node);
- VEC_index (lto_encoder_entry, encoder->nodes, index).initializer = true;
+ encoder->nodes[index].initializer = true;
}
/* Return TRUE if we should encode initializer of NODE (if any). */
int index = lto_symtab_encoder_lookup (encoder, (symtab_node)node);
if (index == LCC_NOT_FOUND)
return false;
- return VEC_index (lto_encoder_entry, encoder->nodes, index).in_partition;
+ return encoder->nodes[index].in_partition;
}
/* Return TRUE if we should encode body of NODE (if any). */
symtab_node node)
{
int index = lto_symtab_encoder_encode (encoder, (symtab_node)node);
- VEC_index (lto_encoder_entry, encoder->nodes, index).in_partition = true;
+ encoder->nodes[index].in_partition = true;
}
/* Output the cgraph EDGE to OB using ENCODER. */
streamer_write_hwi_stream (ob->main_stream, node->count_materialization_scale);
streamer_write_hwi_stream (ob->main_stream,
- VEC_length (ipa_opt_pass,
- node->ipa_transforms_to_apply));
- FOR_EACH_VEC_ELT (ipa_opt_pass, node->ipa_transforms_to_apply, i, pass)
+ node->ipa_transforms_to_apply.length ());
+ FOR_EACH_VEC_ELT (node->ipa_transforms_to_apply, i, pass)
streamer_write_hwi_stream (ob->main_stream, pass->pass.static_pass_number);
if (tag == LTO_symtab_analyzed_node)
input_node (struct lto_file_decl_data *file_data,
struct lto_input_block *ib,
enum LTO_symtab_tags tag,
- VEC(symtab_node, heap) *nodes)
+ vec<symtab_node> nodes)
{
tree fn_decl;
struct cgraph_node *node;
if (clone_ref != LCC_NOT_FOUND)
{
- node = cgraph_clone_node (cgraph (VEC_index (symtab_node, nodes, clone_ref)), fn_decl,
- 0, CGRAPH_FREQ_BASE, false, NULL, false);
+ node = cgraph_clone_node (cgraph (nodes[clone_ref]), fn_decl,
+ 0, CGRAPH_FREQ_BASE, false,
+ vec<cgraph_edge_p>(), false);
}
else
node = cgraph_get_create_node (fn_decl);
node->count_materialization_scale = streamer_read_hwi (ib);
count = streamer_read_hwi (ib);
- node->ipa_transforms_to_apply = NULL;
+ node->ipa_transforms_to_apply = vec<ipa_opt_pass>();
for (i = 0; i < count; i++)
{
struct opt_pass *pass;
gcc_assert (pid < passes_by_id_size);
pass = passes_by_id[pid];
- VEC_safe_push (ipa_opt_pass, heap, node->ipa_transforms_to_apply,
- (struct ipa_opt_pass_d *) pass);
+ node->ipa_transforms_to_apply.safe_push ((struct ipa_opt_pass_d *) pass);
}
if (tag == LTO_symtab_analyzed_node)
static void
input_ref (struct lto_input_block *ib,
symtab_node referring_node,
- VEC(symtab_node, heap) *nodes)
+ vec<symtab_node> nodes)
{
symtab_node node = NULL;
struct bitpack_d bp;
bp = streamer_read_bitpack (ib);
use = (enum ipa_ref_use) bp_unpack_value (&bp, 2);
- node = VEC_index (symtab_node, nodes, streamer_read_hwi (ib));
+ node = nodes[streamer_read_hwi (ib)];
ipa_record_reference (referring_node, node, use, NULL);
}
indirect_unknown_callee set). */
static void
-input_edge (struct lto_input_block *ib, VEC(symtab_node, heap) *nodes,
+input_edge (struct lto_input_block *ib, vec<symtab_node> nodes,
bool indirect)
{
struct cgraph_node *caller, *callee;
struct bitpack_d bp;
int ecf_flags = 0;
- caller = cgraph (VEC_index (symtab_node, nodes, streamer_read_hwi (ib)));
+ caller = cgraph (nodes[streamer_read_hwi (ib)]);
if (caller == NULL || caller->symbol.decl == NULL_TREE)
internal_error ("bytecode stream: no caller found while reading edge");
if (!indirect)
{
- callee = cgraph (VEC_index (symtab_node, nodes, streamer_read_hwi (ib)));
+ callee = cgraph (nodes[streamer_read_hwi (ib)]);
if (callee == NULL || callee->symbol.decl == NULL_TREE)
internal_error ("bytecode stream: no callee found while reading edge");
}
/* Read a cgraph from IB using the info in FILE_DATA. */
-static VEC(symtab_node, heap) *
+static vec<symtab_node>
input_cgraph_1 (struct lto_file_decl_data *file_data,
struct lto_input_block *ib)
{
enum LTO_symtab_tags tag;
- VEC(symtab_node, heap) *nodes = NULL;
+ vec<symtab_node> nodes = vec<symtab_node>();
symtab_node node;
unsigned i;
else if (tag == LTO_symtab_variable)
{
node = (symtab_node)input_varpool_node (file_data, ib);
- VEC_safe_push (symtab_node, heap, nodes, node);
+ nodes.safe_push (node);
lto_symtab_encoder_encode (file_data->symtab_node_encoder, node);
}
else
node = (symtab_node)input_node (file_data, ib, tag, nodes);
if (node == NULL || node->symbol.decl == NULL_TREE)
internal_error ("bytecode stream: found empty cgraph node");
- VEC_safe_push (symtab_node, heap, nodes, node);
+ nodes.safe_push (node);
lto_symtab_encoder_encode (file_data->symtab_node_encoder, node);
}
/* AUX pointers should be all non-zero for function nodes read from the stream. */
#ifdef ENABLE_CHECKING
- FOR_EACH_VEC_ELT (symtab_node, nodes, i, node)
+ FOR_EACH_VEC_ELT (nodes, i, node)
gcc_assert (node->symbol.aux || !is_a <cgraph_node> (node));
#endif
- FOR_EACH_VEC_ELT (symtab_node, nodes, i, node)
+ FOR_EACH_VEC_ELT (nodes, i, node)
{
int ref;
if (cgraph_node *cnode = dyn_cast <cgraph_node> (node))
/* Fixup inlined_to from reference to pointer. */
if (ref != LCC_NOT_FOUND)
- cnode->global.inlined_to = cgraph (VEC_index (symtab_node, nodes, ref));
+ cnode->global.inlined_to = cgraph (nodes[ref]);
else
cnode->global.inlined_to = NULL;
}
/* Fixup same_comdat_group from reference to pointer. */
if (ref != LCC_NOT_FOUND)
- node->symbol.same_comdat_group = VEC_index (symtab_node, nodes, ref);
+ node->symbol.same_comdat_group = nodes[ref];
else
node->symbol.same_comdat_group = NULL;
}
- FOR_EACH_VEC_ELT (symtab_node, nodes, i, node)
+ FOR_EACH_VEC_ELT (nodes, i, node)
node->symbol.aux = is_a <cgraph_node> (node) ? (void *)1 : NULL;
return nodes;
}
static void
input_refs (struct lto_input_block *ib,
- VEC(symtab_node, heap) *nodes)
+ vec<symtab_node> nodes)
{
int count;
int idx;
if (!count)
break;
idx = streamer_read_uhwi (ib);
- node = VEC_index (symtab_node, nodes, idx);
+ node = nodes[idx];
while (count)
{
input_ref (ib, node, nodes);
const char *data;
size_t len;
struct lto_input_block *ib;
- VEC(symtab_node, heap) *nodes;
+ vec<symtab_node> nodes;
ib = lto_create_simple_input_block (file_data, LTO_section_symtab_nodes,
&data, &len);
ib, data, len);
if (flag_ltrans)
input_cgraph_opt_summary (nodes);
- VEC_free (symtab_node, heap, nodes);
+ nodes.release ();
}
merge_profile_summaries (file_data_vec);
}
else
streamer_write_uhwi (ob, 0);
- streamer_write_uhwi (ob, VEC_length (ipa_replace_map_p,
- node->clone.tree_map));
- FOR_EACH_VEC_ELT (ipa_replace_map_p, node->clone.tree_map, i, map)
+ streamer_write_uhwi (ob, vec_safe_length (node->clone.tree_map));
+ FOR_EACH_VEC_SAFE_ELT (node->clone.tree_map, i, map)
{
int parm_num;
tree parm;
{
struct ipa_replace_map *map = ggc_alloc_ipa_replace_map ();
- VEC_safe_push (ipa_replace_map_p, gc, node->clone.tree_map, map);
+ vec_safe_push (node->clone.tree_map, map);
map->parm_num = streamer_read_uhwi (ib_main);
map->old_tree = NULL;
map->new_tree = stream_read_tree (ib_main, data_in);
static void
input_cgraph_opt_section (struct lto_file_decl_data *file_data,
- const char *data, size_t len, VEC (symtab_node,
- heap) * nodes)
+ const char *data, size_t len,
+ vec<symtab_node> nodes)
{
const struct lto_function_header *header =
(const struct lto_function_header *) data;
data_in =
lto_data_in_create (file_data, (const char *) data + string_offset,
- header->string_size, NULL);
+ header->string_size,
+ vec<ld_plugin_symbol_resolution_t>());
count = streamer_read_uhwi (&ib_main);
for (i = 0; i < count; i++)
{
int ref = streamer_read_uhwi (&ib_main);
- input_node_opt_summary (cgraph (VEC_index (symtab_node, nodes, ref)),
+ input_node_opt_summary (cgraph (nodes[ref]),
&ib_main, data_in);
}
lto_free_section_data (file_data, LTO_section_cgraph_opt_sum, NULL, data,
/* Input optimization summary of cgraph. */
static void
-input_cgraph_opt_summary (VEC (symtab_node, heap) * nodes)
+input_cgraph_opt_summary (vec<symtab_node> nodes)
{
struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
struct lto_file_decl_data *file_data;
#include "lto-streamer.h"
#include "lto-compress.h"
-static VEC(lto_out_decl_state_ptr, heap) *decl_state_stack;
+static vec<lto_out_decl_state_ptr> decl_state_stack;
/* List of out decl states used by functions. We use this to
generate the decl directory later. */
-VEC(lto_out_decl_state_ptr, heap) *lto_function_decl_states;
+vec<lto_out_decl_state_ptr> lto_function_decl_states;
/* Returns a hash code for P. */
hashval_t
new_slot->t = name;
new_slot->slot_num = index;
*slot = new_slot;
- VEC_safe_push (tree, heap, encoder->trees, name);
+ encoder->trees.safe_push (name);
new_entry_p = TRUE;
}
else
struct lto_out_decl_state *
lto_get_out_decl_state (void)
{
- return VEC_last (lto_out_decl_state_ptr, decl_state_stack);
+ return decl_state_stack.last ();
}
/* Push STATE to top of out decl stack. */
void
lto_push_out_decl_state (struct lto_out_decl_state *state)
{
- VEC_safe_push (lto_out_decl_state_ptr, heap, decl_state_stack, state);
+ decl_state_stack.safe_push (state);
}
/* Pop the currently used out-decl state from top of stack. */
struct lto_out_decl_state *
lto_pop_out_decl_state (void)
{
- return VEC_pop (lto_out_decl_state_ptr, decl_state_stack);
+ return decl_state_stack.pop ();
}
/* Record STATE after it has been used in serializing the body of
state->streams[i].tree_hash_table = NULL;
}
state->fn_decl = fn_decl;
- VEC_safe_push (lto_out_decl_state_ptr, heap, lto_function_decl_states,
- state);
+ lto_function_decl_states.safe_push (state);
}
case LTO_ssa_name_ref:
ix_u = streamer_read_uhwi (ib);
- result = VEC_index (tree, SSANAMES (fn), ix_u);
+ result = (*SSANAMES (fn))[ix_u];
break;
case LTO_field_decl_ref:
fixup_eh_region_pointers (struct function *fn, HOST_WIDE_INT root_region)
{
unsigned i;
- VEC(eh_region,gc) *eh_array = fn->eh->region_array;
- VEC(eh_landing_pad,gc) *lp_array = fn->eh->lp_array;
+ vec<eh_region, va_gc> *eh_array = fn->eh->region_array;
+ vec<eh_landing_pad, va_gc> *lp_array = fn->eh->lp_array;
eh_region r;
eh_landing_pad lp;
gcc_assert (eh_array && lp_array);
gcc_assert (root_region >= 0);
- fn->eh->region_tree = VEC_index (eh_region, eh_array, root_region);
+ fn->eh->region_tree = (*eh_array)[root_region];
-#define FIXUP_EH_REGION(r) (r) = VEC_index (eh_region, eh_array, \
- (HOST_WIDE_INT) (intptr_t) (r))
-#define FIXUP_EH_LP(p) (p) = VEC_index (eh_landing_pad, lp_array, \
- (HOST_WIDE_INT) (intptr_t) (p))
+#define FIXUP_EH_REGION(r) (r) = (*eh_array)[(HOST_WIDE_INT) (intptr_t) (r)]
+#define FIXUP_EH_LP(p) (p) = (*lp_array)[(HOST_WIDE_INT) (intptr_t) (p)]
/* Convert all the index numbers stored in pointer fields into
pointers to the corresponding slots in the EH region array. */
- FOR_EACH_VEC_ELT (eh_region, eh_array, i, r)
+ FOR_EACH_VEC_ELT (*eh_array, i, r)
{
/* The array may contain NULL regions. */
if (r == NULL)
/* Convert all the index numbers stored in pointer fields into
pointers to the corresponding slots in the EH landing pad array. */
- FOR_EACH_VEC_ELT (eh_landing_pad, lp_array, i, lp)
+ FOR_EACH_VEC_ELT (*lp_array, i, lp)
{
/* The array may contain NULL landing pads. */
if (lp == NULL)
gcc_assert (len == (int) len);
if (len > 0)
{
- VEC_safe_grow (eh_region, gc, fn->eh->region_array, len);
+ vec_safe_grow_cleared (fn->eh->region_array, len);
for (i = 0; i < len; i++)
{
eh_region r = input_eh_region (ib, data_in, i);
- VEC_replace (eh_region, fn->eh->region_array, i, r);
+ (*fn->eh->region_array)[i] = r;
}
}
gcc_assert (len == (int) len);
if (len > 0)
{
- VEC_safe_grow (eh_landing_pad, gc, fn->eh->lp_array, len);
+ vec_safe_grow_cleared (fn->eh->lp_array, len);
for (i = 0; i < len; i++)
{
eh_landing_pad lp = input_eh_lp (ib, data_in, i);
- VEC_replace (eh_landing_pad, fn->eh->lp_array, i, lp);
+ (*fn->eh->lp_array)[i] = lp;
}
}
gcc_assert (len == (int) len);
if (len > 0)
{
- VEC_safe_grow (tree, gc, fn->eh->ttype_data, len);
+ vec_safe_grow_cleared (fn->eh->ttype_data, len);
for (i = 0; i < len; i++)
{
tree ttype = stream_read_tree (ib, data_in);
- VEC_replace (tree, fn->eh->ttype_data, i, ttype);
+ (*fn->eh->ttype_data)[i] = ttype;
}
}
{
if (targetm.arm_eabi_unwinder)
{
- VEC_safe_grow (tree, gc, fn->eh->ehspec_data.arm_eabi, len);
+ vec_safe_grow_cleared (fn->eh->ehspec_data.arm_eabi, len);
for (i = 0; i < len; i++)
{
tree t = stream_read_tree (ib, data_in);
- VEC_replace (tree, fn->eh->ehspec_data.arm_eabi, i, t);
+ (*fn->eh->ehspec_data.arm_eabi)[i] = t;
}
}
else
{
- VEC_safe_grow (uchar, gc, fn->eh->ehspec_data.other, len);
+ vec_safe_grow_cleared (fn->eh->ehspec_data.other, len);
for (i = 0; i < len; i++)
{
uchar c = streamer_read_uchar (ib);
- VEC_replace (uchar, fn->eh->ehspec_data.other, i, c);
+ (*fn->eh->ehspec_data.other)[i] = c;
}
}
}
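The EH hunks above illustrate the other half of the API: GC-allocated
vectors stay behind a vec<T, va_gc> pointer so the collector can manage
them, and are manipulated through the NULL-tolerant vec_safe_* helpers
plus an explicit dereference for indexing.  A hedged sketch (the types
come from the hunks above; some_region is a hypothetical placeholder):

  vec<eh_region, va_gc> *regions = NULL;  /* GC vecs start out NULL.  */
  vec_safe_grow_cleared (regions, 4);     /* allocates on first use.  */
  (*regions)[0] = some_region;            /* deref + [] replaces VEC_replace.  */
  if (vec_safe_length (regions) > 2)      /* length is safe even on NULL.  */
    (*regions)[2] = some_region;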
bb_count = streamer_read_uhwi (ib);
last_basic_block_for_function (fn) = bb_count;
- if (bb_count > VEC_length (basic_block, basic_block_info_for_function (fn)))
- VEC_safe_grow_cleared (basic_block, gc,
- basic_block_info_for_function (fn), bb_count);
+ if (bb_count > basic_block_info_for_function (fn)->length ())
+ vec_safe_grow_cleared (basic_block_info_for_function (fn), bb_count);
- if (bb_count > VEC_length (basic_block, label_to_block_map_for_function (fn)))
- VEC_safe_grow_cleared (basic_block, gc,
- label_to_block_map_for_function (fn), bb_count);
+ if (bb_count > label_to_block_map_for_function (fn)->length ())
+ vec_safe_grow_cleared (label_to_block_map_for_function (fn), bb_count);
index = streamer_read_hwi (ib);
while (index != -1)
bool is_default_def;
/* Skip over the elements that had been freed. */
- while (VEC_length (tree, SSANAMES (fn)) < i)
- VEC_quick_push (tree, SSANAMES (fn), NULL_TREE);
+ while (SSANAMES (fn)->length () < i)
+ SSANAMES (fn)->quick_push (NULL_TREE);
is_default_def = (streamer_read_uchar (ib) != 0);
name = stream_read_tree (ib, data_in);
if (len > 0)
{
int i;
- VEC_safe_grow (tree, gc, fn->local_decls, len);
+ vec_safe_grow_cleared (fn->local_decls, len);
for (i = 0; i < len; i++)
{
tree t = stream_read_tree (ib, data_in);
- VEC_replace (tree, fn->local_decls, i, t);
+ (*fn->local_decls)[i] = t;
}
}
header->main_size);
data_in = lto_data_in_create (file_data, data + string_offset,
- header->string_size, NULL);
+ header->string_size,
+ vec<ld_plugin_symbol_resolution_t>());
/* Make sure the file was generated by the exact same compiler. */
lto_check_version (header->lto_header.major_version,
input_cfg (&ib_cfg, fn, node->count_materialization_scale);
/* Set up the struct function. */
- from = VEC_length (tree, data_in->reader_cache->nodes);
+ from = data_in->reader_cache->nodes.length ();
input_function (fn_decl, data_in, &ib_main);
/* And fixup types we streamed locally. */
{
struct streamer_tree_cache_d *cache = data_in->reader_cache;
- unsigned len = VEC_length (tree, cache->nodes);
+ unsigned len = cache->nodes.length ();
unsigned i;
for (i = len; i-- > from;)
{
- tree t = VEC_index (tree, cache->nodes, i);
+ tree t = cache->nodes[i];
if (t == NULL_TREE)
continue;
header->main_size);
data_in = lto_data_in_create (file_data, data + string_offset,
- header->string_size, NULL);
+ header->string_size,
+ vec<ld_plugin_symbol_resolution_t>());
/* Make sure the file was generated by the exact same compiler. */
lto_check_version (header->lto_header.major_version,
struct data_in *
lto_data_in_create (struct lto_file_decl_data *file_data, const char *strings,
unsigned len,
- VEC(ld_plugin_symbol_resolution_t,heap) *resolutions)
+ vec<ld_plugin_symbol_resolution_t> resolutions)
{
struct data_in *data_in = XCNEW (struct data_in);
data_in->file_data = file_data;
void
lto_data_in_delete (struct data_in *data_in)
{
- VEC_free (ld_plugin_symbol_resolution_t, heap, data_in->globals_resolution);
+ data_in->globals_resolution.release ();
streamer_tree_cache_delete (data_in->reader_cache);
free (data_in->labels);
free (data_in);
streamer_write_hwi (ob, fn->eh->region_tree->index);
/* Emit all the EH regions in the region array. */
- streamer_write_hwi (ob, VEC_length (eh_region, fn->eh->region_array));
- FOR_EACH_VEC_ELT (eh_region, fn->eh->region_array, i, eh)
+ streamer_write_hwi (ob, vec_safe_length (fn->eh->region_array));
+ FOR_EACH_VEC_SAFE_ELT (fn->eh->region_array, i, eh)
output_eh_region (ob, eh);
/* Emit all landing pads. */
- streamer_write_hwi (ob, VEC_length (eh_landing_pad, fn->eh->lp_array));
- FOR_EACH_VEC_ELT (eh_landing_pad, fn->eh->lp_array, i, lp)
+ streamer_write_hwi (ob, vec_safe_length (fn->eh->lp_array));
+ FOR_EACH_VEC_SAFE_ELT (fn->eh->lp_array, i, lp)
output_eh_lp (ob, lp);
/* Emit all the runtime type data. */
- streamer_write_hwi (ob, VEC_length (tree, fn->eh->ttype_data));
- FOR_EACH_VEC_ELT (tree, fn->eh->ttype_data, i, ttype)
+ streamer_write_hwi (ob, vec_safe_length (fn->eh->ttype_data));
+ FOR_EACH_VEC_SAFE_ELT (fn->eh->ttype_data, i, ttype)
stream_write_tree (ob, ttype, true);
/* Emit the table of action chains. */
if (targetm.arm_eabi_unwinder)
{
tree t;
- streamer_write_hwi (ob, VEC_length (tree,
- fn->eh->ehspec_data.arm_eabi));
- FOR_EACH_VEC_ELT (tree, fn->eh->ehspec_data.arm_eabi, i, t)
+ streamer_write_hwi (ob, vec_safe_length (fn->eh->ehspec_data.arm_eabi));
+ FOR_EACH_VEC_SAFE_ELT (fn->eh->ehspec_data.arm_eabi, i, t)
stream_write_tree (ob, t, true);
}
else
{
uchar c;
- streamer_write_hwi (ob, VEC_length (uchar,
- fn->eh->ehspec_data.other));
- FOR_EACH_VEC_ELT (uchar, fn->eh->ehspec_data.other, i, c)
+ streamer_write_hwi (ob, vec_safe_length (fn->eh->ehspec_data.other));
+ FOR_EACH_VEC_SAFE_ELT (fn->eh->ehspec_data.other, i, c)
streamer_write_char_stream (ob->main_stream, c);
}
}
{
unsigned int i, len;
- len = VEC_length (tree, SSANAMES (fn));
+ len = vec_safe_length (SSANAMES (fn));
streamer_write_uhwi (ob, len);
for (i = 1; i < len; i++)
{
- tree ptr = VEC_index (tree, SSANAMES (fn), i);
+ tree ptr = (*SSANAMES (fn))[i];
if (ptr == NULL_TREE
|| SSA_NAME_IN_FREE_LIST (ptr)
stream_write_tree (ob, fn->nonlocal_goto_save_area, true);
/* Output all the local variables in the function. */
- streamer_write_hwi (ob, VEC_length (tree, fn->local_decls));
- FOR_EACH_VEC_ELT (tree, fn->local_decls, i, t)
+ streamer_write_hwi (ob, vec_safe_length (fn->local_decls));
+ FOR_EACH_VEC_SAFE_ELT (fn->local_decls, i, t)
stream_write_tree (ob, t, true);
/* Output current IL state of the function. */
must be empty when we reach here. */
gcc_assert (lto_tree_ref_encoder_size (encoder) == 0);
for (j = 0; j < n; j++)
- VEC_safe_push (tree, heap, encoder->trees, trees[j]);
+ encoder->trees.safe_push (trees[j]);
encoder->next_index = n;
}
/* Write the global symbols. */
out_state = lto_get_out_decl_state ();
- num_fns = VEC_length (lto_out_decl_state_ptr, lto_function_decl_states);
+ num_fns = lto_function_decl_states.length ();
lto_output_decl_state_streams (ob, out_state);
for (idx = 0; idx < num_fns; idx++)
{
fn_out_state =
- VEC_index (lto_out_decl_state_ptr, lto_function_decl_states, idx);
+ lto_function_decl_states[idx];
lto_output_decl_state_streams (ob, fn_out_state);
}
for (idx = 0; idx < num_fns; idx++)
{
fn_out_state =
- VEC_index (lto_out_decl_state_ptr, lto_function_decl_states, idx);
+ lto_function_decl_states[idx];
decl_state_size += lto_out_decl_state_written_size (fn_out_state);
}
header.decl_state_size = decl_state_size;
for (idx = 0; idx < num_fns; idx++)
{
fn_out_state =
- VEC_index (lto_out_decl_state_ptr, lto_function_decl_states, idx);
+ lto_function_decl_states[idx];
lto_output_decl_state_refs (ob, decl_state_stream, fn_out_state);
}
lto_write_stream (decl_state_stream);
for (idx = 0; idx < num_fns; idx++)
{
fn_out_state =
- VEC_index (lto_out_decl_state_ptr, lto_function_decl_states, idx);
+ lto_function_decl_states[idx];
lto_delete_out_decl_state (fn_out_state);
}
lto_symtab_encoder_delete (ob->decl_state->symtab_node_encoder);
- VEC_free (lto_out_decl_state_ptr, heap, lto_function_decl_states);
- lto_function_decl_states = NULL;
+ lto_function_decl_states.release ();
destroy_output_block (ob);
}
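Throughout the output routines above, length queries and element walks
over GC-owned vectors go through vec_safe_length and
FOR_EACH_VEC_SAFE_ELT, the NULL-tolerant counterparts of .length () and
FOR_EACH_VEC_ELT, since those vectors may legitimately still be NULL
when a function has no such data.  A short sketch of the streaming
idiom, reusing names from the hunks above (illustrative only):

  /* Writes 0 and iterates zero times when fn->local_decls is NULL.  */
  streamer_write_hwi (ob, vec_safe_length (fn->local_decls));
  unsigned ix;
  tree t;
  FOR_EACH_VEC_SAFE_ELT (fn->local_decls, ix, t)
    stream_write_tree (ob, t, true);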
#include "target.h"
#include "cgraph.h"
#include "vec.h"
-#include "vecprim.h"
#include "alloc-pool.h"
#include "gcov-io.h"
#include "diagnostic.h"
} lto_decl_stream_e_t;
typedef enum ld_plugin_symbol_resolution ld_plugin_symbol_resolution_t;
-DEF_VEC_I(ld_plugin_symbol_resolution_t);
-DEF_VEC_ALLOC_I(ld_plugin_symbol_resolution_t, heap);
/* Macro to define convenience functions for type and decl streams
unsigned int initializer:1;
} lto_encoder_entry;
-DEF_VEC_O(lto_encoder_entry);
-DEF_VEC_ALLOC_O(lto_encoder_entry, heap);
/* Encoder data structure used to stream callgraph nodes. */
struct lto_symtab_encoder_d
{
- VEC(lto_encoder_entry,heap) *nodes;
+ vec<lto_encoder_entry> nodes;
pointer_map_t *map;
};
{
htab_t tree_hash_table; /* Maps pointers to indices. */
unsigned int next_index; /* Next available index. */
- VEC(tree,heap) *trees; /* Maps indices to pointers. */
+ vec<tree> trees; /* Maps indices to pointers. */
};
typedef struct lto_out_decl_state *lto_out_decl_state_ptr;
-DEF_VEC_P(lto_out_decl_state_ptr);
-DEF_VEC_ALLOC_P(lto_out_decl_state_ptr, heap);
/* Compact representation of an index <-> resolution pair. Unpacked to a
vector later. */
};
typedef struct res_pair res_pair;
-DEF_VEC_O(res_pair);
-DEF_VEC_ALLOC_O(res_pair, heap);
/* One of these is allocated for each object file that is being compiled
by lto. This structure contains the tables that are needed by the
unsigned HOST_WIDE_INT id;
/* Symbol resolutions for this file */
- VEC(res_pair, heap) * GTY((skip)) respairs;
+ vec<res_pair> GTY((skip)) respairs;
unsigned max_index;
struct gcov_ctr_summary GTY((skip)) profile_info;
int current_col;
/* Maps each reference number to the resolution done by the linker. */
- VEC(ld_plugin_symbol_resolution_t,heap) *globals_resolution;
+ vec<ld_plugin_symbol_resolution_t> globals_resolution;
/* Cache of pickled nodes. */
struct streamer_tree_cache_d *reader_cache;
extern void lto_input_toplevel_asms (struct lto_file_decl_data *, int);
extern struct data_in *lto_data_in_create (struct lto_file_decl_data *,
const char *, unsigned,
- VEC(ld_plugin_symbol_resolution_t,heap) *);
+ vec<ld_plugin_symbol_resolution_t> );
extern void lto_data_in_delete (struct data_in *);
extern void lto_input_data_block (struct lto_input_block *, void *, size_t);
location_t lto_input_location (struct bitpack_d *, struct data_in *);
extern void lto_symtab_merge_decls (void);
extern void lto_symtab_merge_cgraph_nodes (void);
extern tree lto_symtab_prevailing_decl (tree decl);
-extern GTY(()) VEC(tree,gc) *lto_global_var_decls;
+extern GTY(()) vec<tree, va_gc> *lto_global_var_decls;
/* In lto-opts.c. */
/* Holds all the out decl states of functions output so far in the
current output file. */
-extern VEC(lto_out_decl_state_ptr, heap) *lto_function_decl_states;
+extern vec<lto_out_decl_state_ptr> lto_function_decl_states;
/* Return true if LTO tag TAG corresponds to a tree code. */
static inline bool
{
encoder->tree_hash_table = htab_create (37, hash_fn, eq_fn, free);
encoder->next_index = 0;
- encoder->trees = NULL;
+ encoder->trees.create (0);
}
/* The hash table may have been deleted already. */
if (encoder->tree_hash_table)
htab_delete (encoder->tree_hash_table);
- VEC_free (tree, heap, encoder->trees);
+ encoder->trees.release ();
}
/* Return the number of trees encoded in ENCODER. */
static inline unsigned int
lto_tree_ref_encoder_size (struct lto_tree_ref_encoder *encoder)
{
- return VEC_length (tree, encoder->trees);
+ return encoder->trees.length ();
}
/* Return the IDX-th tree in ENCODER. */
lto_tree_ref_encoder_get_tree (struct lto_tree_ref_encoder *encoder,
unsigned int idx)
{
- return VEC_index (tree, encoder->trees, idx);
+ return encoder->trees[idx];
}
static inline int
lto_symtab_encoder_size (lto_symtab_encoder_t encoder)
{
- return VEC_length (lto_encoder_entry, encoder->nodes);
+ return encoder->nodes.length ();
}
/* Value used to represent failure of lto_symtab_encoder_lookup. */
static inline symtab_node
lsei_node (lto_symtab_encoder_iterator lsei)
{
- return VEC_index (lto_encoder_entry,
- lsei.encoder->nodes, lsei.index).node;
+ return lsei.encoder->nodes[lsei.index].node;
}
/* Return the node pointed to by LSI. */
static inline struct cgraph_node *
lsei_cgraph_node (lto_symtab_encoder_iterator lsei)
{
- return cgraph (VEC_index (lto_encoder_entry,
- lsei.encoder->nodes, lsei.index).node);
+ return cgraph (lsei.encoder->nodes[lsei.index].node);
}
/* Return the node pointed to by LSI. */
static inline struct varpool_node *
lsei_varpool_node (lto_symtab_encoder_iterator lsei)
{
- return varpool (VEC_index (lto_encoder_entry,
- lsei.encoder->nodes, lsei.index).node);
+ return varpool (lsei.encoder->nodes[lsei.index].node);
}
/* Return the cgraph node corresponding to REF using ENCODER. */
if (ref == LCC_NOT_FOUND)
return NULL;
- return VEC_index (lto_encoder_entry, encoder->nodes, ref).node;
+ return encoder->nodes[ref].node;
}
/* Return an iterator to the first node in LSI. */
#include "lto-streamer.h"
/* Vector to keep track of external variables we've seen so far. */
-VEC(tree,gc) *lto_global_var_decls;
+vec<tree, va_gc> *lto_global_var_decls;
/* Replace the cgraph node NODE with PREVAILING_NODE in the cgraph, merging
all edges and removing the old node. */
lto_symtab_merge_decls_2 (symtab_node first, bool diagnosed_p)
{
symtab_node prevailing, e;
- VEC(tree, heap) *mismatches = NULL;
+ vec<tree> mismatches = vec<tree>();
unsigned i;
tree decl;
{
if (!lto_symtab_merge (prevailing, e)
&& !diagnosed_p)
- VEC_safe_push (tree, heap, mismatches, e->symbol.decl);
+ mismatches.safe_push (e->symbol.decl);
}
- if (VEC_empty (tree, mismatches))
+ if (mismatches.is_empty ())
return;
/* Diagnose all mismatched re-declarations. */
- FOR_EACH_VEC_ELT (tree, mismatches, i, decl)
+ FOR_EACH_VEC_ELT (mismatches, i, decl)
{
if (!types_compatible_p (TREE_TYPE (prevailing->symbol.decl),
TREE_TYPE (decl)))
inform (DECL_SOURCE_LOCATION (prevailing->symbol.decl),
"previously declared here");
- VEC_free (tree, heap, mismatches);
+ mismatches.release ();
}
/* Helper to process the decl chain for the symbol table entry *SLOT. */
/* Record the prevailing variable. */
if (TREE_CODE (prevailing->symbol.decl) == VAR_DECL)
- VEC_safe_push (tree, gc, lto_global_var_decls,
- prevailing->symbol.decl);
+ vec_safe_push (lto_global_var_decls, prevailing->symbol.decl);
/* Diagnose mismatched objects. */
for (e = prevailing->symbol.next_sharing_asm_name;
+2012-11-16 Diego Novillo <dnovillo@google.com>
+
+ Adjust for new vec API (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
+
+ * lto-lang.c: Use new vec API in vec.h.
+ * lto-partition.c: Likewise.
+ * lto-partition.h: Likewise.
+ * lto.c: Likewise.
+
2012-10-31 Lawrence Crowl <crowl@google.com>
* lto.c (lto_wpa_write_files): Change symtab checking to a checked
static void
lto_write_globals (void)
{
- tree *vec = VEC_address (tree, lto_global_var_decls);
- int len = VEC_length (tree, lto_global_var_decls);
+ tree *vec = lto_global_var_decls->address ();
+ int len = lto_global_var_decls->length ();
wrapup_global_declarations (vec, len);
emit_debug_global_declarations (vec, len);
- VEC_free (tree, gc, lto_global_var_decls);
+ vec_free (lto_global_var_decls);
}
static tree
lto_register_canonical_types (global_trees[i]);
/* Initialize LTO-specific data structures. */
- lto_global_var_decls = VEC_alloc (tree, gc, 256);
+ vec_alloc (lto_global_var_decls, 256);
in_lto_p = true;
return true;
SYMBOL_DUPLICATE
};
-VEC(ltrans_partition, heap) *ltrans_partitions;
+vec<ltrans_partition> ltrans_partitions;
static void add_symbol_to_partition (ltrans_partition part, symtab_node node);
part->encoder = lto_symtab_encoder_new (false);
part->name = name;
part->insns = 0;
- VEC_safe_push (ltrans_partition, heap, ltrans_partitions, part);
+ ltrans_partitions.safe_push (part);
return part;
}
{
unsigned int idx;
ltrans_partition part;
- for (idx = 0; VEC_iterate (ltrans_partition, ltrans_partitions, idx, part); idx++)
+ for (idx = 0; ltrans_partitions.iterate (idx, &part); idx++)
{
if (part->initializers_visited)
pointer_set_destroy (part->initializers_visited);
/* Symtab encoder is freed after streaming. */
free (part);
}
- VEC_free (ltrans_partition, heap, ltrans_partitions);
+ ltrans_partitions.release ();
}
/* Return true if symbol is already in some partition. */
npartitions++;
}
}
- else if (!file_data
- && VEC_length (ltrans_partition, ltrans_partitions))
- partition = VEC_index (ltrans_partition, ltrans_partitions, 0);
+ else if (!file_data && ltrans_partitions.length ())
+ partition = ltrans_partitions[0];
else
{
partition = new_partition ("");
gcc_assert (flag_wpa);
/* First compute boundaries. */
- n_sets = VEC_length (ltrans_partition, ltrans_partitions);
+ n_sets = ltrans_partitions.length ();
for (i = 0; i < n_sets; i++)
{
ltrans_partition part
- = VEC_index (ltrans_partition, ltrans_partitions, i);
+ = ltrans_partitions[i];
part->encoder = compute_ltrans_boundary (part->encoder);
}
lto_symtab_encoder_iterator lsei;
lto_symtab_encoder_t encoder;
ltrans_partition part
- = VEC_index (ltrans_partition, ltrans_partitions, i);
+ = ltrans_partitions[i];
encoder = part->encoder;
for (lsei = lsei_start (encoder); !lsei_end_p (lsei);
};
typedef struct ltrans_partition_def *ltrans_partition;
-DEF_VEC_P(ltrans_partition);
-DEF_VEC_ALLOC_P(ltrans_partition,heap);
-extern VEC(ltrans_partition, heap) *ltrans_partitions;
+extern vec<ltrans_partition> ltrans_partitions;
void lto_1_to_1_map (void);
void lto_max_map (void);
signed char same_p;
};
typedef struct type_pair_d *type_pair_t;
-DEF_VEC_P(type_pair_t);
-DEF_VEC_ALLOC_P(type_pair_t,heap);
#define GIMPLE_TYPE_PAIR_SIZE 16381
struct type_pair_d *type_pair_cache;
static bool
gimple_types_compatible_p_1 (tree, tree, type_pair_t,
- VEC(type_pair_t, heap) **,
+ vec<type_pair_t> *,
struct pointer_map_t *, struct obstack *);
/* DFS visit the edge from the callers type pair with state *STATE to
static bool
gtc_visit (tree t1, tree t2,
struct sccs *state,
- VEC(type_pair_t, heap) **sccstack,
+ vec<type_pair_t> *sccstack,
struct pointer_map_t *sccstate,
struct obstack *sccstate_obstack)
{
static bool
gimple_types_compatible_p_1 (tree t1, tree t2, type_pair_t p,
- VEC(type_pair_t, heap) **sccstack,
+ vec<type_pair_t> *sccstack,
struct pointer_map_t *sccstate,
struct obstack *sccstate_obstack)
{
state = XOBNEW (sccstate_obstack, struct sccs);
*pointer_map_insert (sccstate, p) = state;
- VEC_safe_push (type_pair_t, heap, *sccstack, p);
+ sccstack->safe_push (p);
state->dfsnum = gtc_next_dfs_num++;
state->low = state->dfsnum;
state->on_sccstack = true;
do
{
struct sccs *cstate;
- x = VEC_pop (type_pair_t, *sccstack);
+ x = sccstack->pop ();
cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
cstate->on_sccstack = false;
x->same_p = state->u.same_p;
static bool
gimple_types_compatible_p (tree t1, tree t2)
{
- VEC(type_pair_t, heap) *sccstack = NULL;
+ vec<type_pair_t> sccstack = vec<type_pair_t>();
struct pointer_map_t *sccstate;
struct obstack sccstate_obstack;
type_pair_t p = NULL;
gcc_obstack_init (&sccstate_obstack);
res = gimple_types_compatible_p_1 (t1, t2, p,
&sccstack, sccstate, &sccstate_obstack);
- VEC_free (type_pair_t, heap, sccstack);
+ sccstack.release ();
pointer_map_destroy (sccstate);
obstack_free (&sccstate_obstack, NULL);
}
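The type-comparison code above is a good example of a heap vec used as
an explicit DFS stack, passed by address so callees can push onto it.  A
minimal sketch of that worklist pattern (illustrative; p stands in for a
real type_pair_t):

  vec<type_pair_t> sccstack = vec<type_pair_t> ();
  sccstack.safe_push (p);              /* push may reallocate.  */
  while (!sccstack.is_empty ())
    {
      type_pair_t x = sccstack.pop (); /* pop never reallocates.  */
      /* ... process x, possibly pushing more entries ...  */
    }
  sccstack.release ();                 /* heap vecs are freed manually.  */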
static hashval_t
-iterative_hash_gimple_type (tree, hashval_t, VEC(tree, heap) **,
+iterative_hash_gimple_type (tree, hashval_t, vec<tree> *,
struct pointer_map_t *, struct obstack *);
/* DFS visit the edge from the callers type with state *STATE to T.
static hashval_t
visit (tree t, struct sccs *state, hashval_t v,
- VEC (tree, heap) **sccstack,
+ vec<tree> *sccstack,
struct pointer_map_t *sccstate,
struct obstack *sccstate_obstack)
{
static hashval_t
iterative_hash_gimple_type (tree type, hashval_t val,
- VEC(tree, heap) **sccstack,
+ vec<tree> *sccstack,
struct pointer_map_t *sccstate,
struct obstack *sccstate_obstack)
{
state = XOBNEW (sccstate_obstack, struct sccs);
*pointer_map_insert (sccstate, type) = state;
- VEC_safe_push (tree, heap, *sccstack, type);
+ sccstack->safe_push (type);
state->dfsnum = next_dfs_num++;
state->low = state->dfsnum;
state->on_sccstack = true;
struct tree_int_map *m;
/* Pop off the SCC and set its hash values. */
- x = VEC_pop (tree, *sccstack);
+ x = sccstack->pop ();
/* Optimize SCC size one. */
if (x == type)
{
unsigned first, i, size, j;
struct type_hash_pair *pairs;
/* Pop off the SCC and build an array of type, hash pairs. */
- first = VEC_length (tree, *sccstack) - 1;
- while (VEC_index (tree, *sccstack, first) != type)
+ first = sccstack->length () - 1;
+ while ((*sccstack)[first] != type)
--first;
- size = VEC_length (tree, *sccstack) - first + 1;
+ size = sccstack->length () - first + 1;
pairs = XALLOCAVEC (struct type_hash_pair, size);
i = 0;
cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
pairs[i].hash = cstate->u.hash;
do
{
- x = VEC_pop (tree, *sccstack);
+ x = sccstack->pop ();
cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
cstate->on_sccstack = false;
++i;
gimple_type_hash (const void *p)
{
const_tree t = (const_tree) p;
- VEC(tree, heap) *sccstack = NULL;
+ vec<tree> sccstack = vec<tree>();
struct pointer_map_t *sccstate;
struct obstack sccstate_obstack;
hashval_t val;
gcc_obstack_init (&sccstate_obstack);
val = iterative_hash_gimple_type (CONST_CAST_TREE (t), 0,
&sccstack, sccstate, &sccstate_obstack);
- VEC_free (tree, heap, sccstack);
+ sccstack.release ();
pointer_map_destroy (sccstate);
obstack_free (&sccstate_obstack, NULL);
LTO_FIXUP_TREE (BINFO_OFFSET (t));
LTO_FIXUP_TREE (BINFO_VIRTUALS (t));
LTO_FIXUP_TREE (BINFO_VPTR_FIELD (t));
- n = VEC_length (tree, BINFO_BASE_ACCESSES (t));
+ n = vec_safe_length (BINFO_BASE_ACCESSES (t));
for (i = 0; i < n; i++)
{
saved_base = base = BINFO_BASE_ACCESS (t, i);
LTO_FIXUP_TREE (base);
if (base != saved_base)
- VEC_replace (tree, BINFO_BASE_ACCESSES (t), i, base);
+ (*BINFO_BASE_ACCESSES (t))[i] = base;
}
LTO_FIXUP_TREE (BINFO_INHERITANCE_CHAIN (t));
LTO_FIXUP_TREE (BINFO_SUBVTT_INDEX (t));
saved_base = base = BINFO_BASE_BINFO (t, i);
LTO_FIXUP_TREE (base);
if (base != saved_base)
- VEC_replace (tree, BINFO_BASE_BINFOS (t), i, base);
+ (*BINFO_BASE_BINFOS (t))[i] = base;
}
}
lto_ft_typed (t);
- for (idx = 0;
- VEC_iterate(constructor_elt, CONSTRUCTOR_ELTS (t), idx, ce);
- idx++)
+ for (idx = 0; vec_safe_iterate (CONSTRUCTOR_ELTS (t), idx, &ce); idx++)
{
LTO_FIXUP_TREE (ce->index);
LTO_FIXUP_TREE (ce->value);
static enum ld_plugin_symbol_resolution
get_resolution (struct data_in *data_in, unsigned index)
{
- if (data_in->globals_resolution)
+ if (data_in->globals_resolution.exists ())
{
ld_plugin_symbol_resolution_t ret;
/* We can have references to functions that were not emitted, in
DECL_FUNCTION_PERSONALITY at least, so we may indeed have to
return LDPR_UNKNOWN in some cases. */
- if (VEC_length (ld_plugin_symbol_resolution_t,
- data_in->globals_resolution) <= index)
+ if (data_in->globals_resolution.length () <= index)
return LDPR_UNKNOWN;
- ret = VEC_index (ld_plugin_symbol_resolution_t,
- data_in->globals_resolution,
- index);
+ ret = data_in->globals_resolution[index];
return ret;
}
else
ASM_FORMAT_PRIVATE_NAME (label, name, DECL_UID (decl));
SET_DECL_ASSEMBLER_NAME (decl, get_identifier (label));
rest_of_decl_compilation (decl, 1, 0);
- VEC_safe_push (tree, gc, lto_global_var_decls, decl);
+ vec_safe_push (lto_global_var_decls, decl);
}
/* If this variable has already been declared, queue the
uniquify_nodes (struct data_in *data_in, unsigned from)
{
struct streamer_tree_cache_d *cache = data_in->reader_cache;
- unsigned len = VEC_length (tree, cache->nodes);
+ unsigned len = cache->nodes.length ();
unsigned i;
/* Go backwards because children streamed for the first time come
them and computing hashes. */
for (i = len; i-- > from;)
{
- tree t = VEC_index (tree, cache->nodes, i);
+ tree t = cache->nodes[i];
if (t && TYPE_P (t))
{
tree newt = gimple_register_type (t);
/* Second fixup all trees in the new cache entries. */
for (i = len; i-- > from;)
{
- tree t = VEC_index (tree, cache->nodes, i);
+ tree t = cache->nodes[i];
tree oldt = t;
if (!t)
continue;
make sure it is done last. */
for (i = len; i-- > from;)
{
- tree t = VEC_index (tree, cache->nodes, i);
+ tree t = cache->nodes[i];
if (t == NULL_TREE)
continue;
static void
lto_read_decls (struct lto_file_decl_data *decl_data, const void *data,
- VEC(ld_plugin_symbol_resolution_t,heap) *resolutions)
+ vec<ld_plugin_symbol_resolution_t> resolutions)
{
const struct lto_decl_header *header = (const struct lto_decl_header *) data;
const int decl_offset = sizeof (struct lto_decl_header);
while (ib_main.p < ib_main.len)
{
tree t;
- unsigned from = VEC_length (tree, data_in->reader_cache->nodes);
+ unsigned from = data_in->reader_cache->nodes.length ();
t = stream_read_tree (&ib_main, data_in);
gcc_assert (t && ib_main.p <= ib_main.len);
uniquify_nodes (data_in, from);
format that is only unpacked later when the subfile is processed. */
rp.res = r;
rp.index = index;
- VEC_safe_push (res_pair, heap, file_data->respairs, rp);
+ file_data->respairs.safe_push (rp);
if (file_data->max_index < index)
file_data->max_index = index;
}
{
const char *data;
size_t len;
- VEC(ld_plugin_symbol_resolution_t,heap) *resolutions = NULL;
+ vec<ld_plugin_symbol_resolution_t>
+ resolutions = vec<ld_plugin_symbol_resolution_t>();
int i;
res_pair *rp;
/* Create a vector for fast access to resolutions. We do this lazily
to save memory. */
- VEC_safe_grow_cleared (ld_plugin_symbol_resolution_t, heap,
- resolutions,
- file_data->max_index + 1);
- for (i = 0; VEC_iterate (res_pair, file_data->respairs, i, rp); i++)
- VEC_replace (ld_plugin_symbol_resolution_t, resolutions, rp->index, rp->res);
- VEC_free (res_pair, heap, file_data->respairs);
+ resolutions.safe_grow_cleared (file_data->max_index + 1);
+ for (i = 0; file_data->respairs.iterate (i, &rp); i++)
+ resolutions[rp->index] = rp->res;
+ file_data->respairs.release ();
file_data->renaming_hash_table = lto_create_renaming_table ();
file_data->file_name = file->filename;
/* Finalize FILE_DATA in FILE and increase COUNT. */
static int
-lto_create_files_from_ids (lto_file *file, struct lto_file_decl_data *file_data,
+lto_create_files_from_ids (lto_file *file, struct lto_file_decl_data *file_data,
int *count)
{
lto_file_finalize (file_data, file);
timevar_push (TV_WHOPR_WPA);
- FOR_EACH_VEC_ELT (ltrans_partition, ltrans_partitions, i, part)
+ FOR_EACH_VEC_ELT (ltrans_partitions, i, part)
lto_stats.num_output_symtab_nodes += lto_symtab_encoder_size (part->encoder);
/* Find out statics that need to be promoted
temp_filename[blen - sizeof (".out") + 1] = '\0';
blen = strlen (temp_filename);
- n_sets = VEC_length (ltrans_partition, ltrans_partitions);
+ n_sets = ltrans_partitions.length ();
/* Sort partitions by size so small ones are compiled last.
FIXME: Even when not reordering we may want to output one list for parallel make
and another for the final link command. */
- VEC_qsort (ltrans_partition, ltrans_partitions,
- flag_toplevel_reorder ? cmp_partitions_size : cmp_partitions_order);
+ ltrans_partitions.qsort (flag_toplevel_reorder
+ ? cmp_partitions_size
+ : cmp_partitions_order);
for (i = 0; i < n_sets; i++)
{
size_t len;
- ltrans_partition part = VEC_index (ltrans_partition, ltrans_partitions, i);
+ ltrans_partition part = ltrans_partitions[i];
/* Write all the nodes in SET. */
sprintf (temp_filename + blen, "%u.o", i);
this field into ltrans compilation. */
if (flag_ltrans)
FOR_EACH_DEFINED_FUNCTION (node)
- VEC_safe_push (ipa_opt_pass, heap,
- node->ipa_transforms_to_apply,
- (ipa_opt_pass)&pass_ipa_inline);
+ node->ipa_transforms_to_apply.safe_push ((ipa_opt_pass)&pass_ipa_inline);
timevar_pop (TV_IPA_LTO_CGRAPH_MERGE);
set_cfun (NULL);
/* Inform the middle end about the global variables we have seen. */
- FOR_EACH_VEC_ELT (tree, lto_global_var_decls, i, decl)
+ FOR_EACH_VEC_ELT (*lto_global_var_decls, i, decl)
rest_of_decl_compilation (decl, 1, 0);
if (!quiet_flag)
FOR_EACH_SYMBOL (node)
node->symbol.aux = NULL;
- lto_stats.num_cgraph_partitions += VEC_length (ltrans_partition,
- ltrans_partitions);
+ lto_stats.num_cgraph_partitions += ltrans_partitions.length ();
timevar_pop (TV_WHOPR_PARTITIONING);
timevar_stop (TV_PHASE_OPT_GEN);
typedef fixup_edge_type *fixup_edge_p;
-DEF_VEC_P (fixup_edge_p);
-DEF_VEC_ALLOC_P (fixup_edge_p, heap);
/* Structure to represent a vertex in the fixup graph. */
typedef struct fixup_vertex_d
{
- VEC (fixup_edge_p, heap) *succ_edges;
+ vec<fixup_edge_p> succ_edges;
} fixup_vertex_type;
typedef fixup_vertex_type *fixup_vertex_p;
{
pfvertex = fvertex_list + i;
fprintf (file, "vertex_list[%d]: %d succ fixup edges.\n",
- i, VEC_length (fixup_edge_p, pfvertex->succ_edges));
+ i, pfvertex->succ_edges.length ());
- for (j = 0; VEC_iterate (fixup_edge_p, pfvertex->succ_edges, j, pfedge);
+ for (j = 0; pfvertex->succ_edges.iterate (j, &pfedge);
j++)
{
/* Distinguish forward edges and backward edges in the residual flow
fixup_graph->num_edges++;
if (dump_file)
dump_fixup_edge (dump_file, fixup_graph, curr_edge);
- VEC_safe_push (fixup_edge_p, heap, curr_vertex->succ_edges, curr_edge);
+ curr_vertex->succ_edges.safe_push (curr_edge);
return curr_edge;
}
pfvertex = fixup_graph->vertex_list + src;
- for (j = 0; VEC_iterate (fixup_edge_p, pfvertex->succ_edges, j, pfedge);
+ for (j = 0; pfvertex->succ_edges.iterate (j, &pfedge);
j++)
if (pfedge->dest == dest)
return pfedge;
fixup_vertex_p pfvertex = fixup_graph->vertex_list;
for (i = 0; i < fnum_vertices; i++, pfvertex++)
- VEC_free (fixup_edge_p, heap, pfvertex->succ_edges);
+ pfvertex->succ_edges.release ();
free (fixup_graph->vertex_list);
free (fixup_graph->edge_list);
u = dequeue (queue_list);
is_visited[u] = 1;
pfvertex = fvertex_list + u;
- for (i = 0; VEC_iterate (fixup_edge_p, pfvertex->succ_edges, i, pfedge);
+ for (i = 0; pfvertex->succ_edges.iterate (i, &pfedge);
i++)
{
int dest = pfedge->dest;
/* Compute the sum of the edge counts in TO_EDGES. */
gcov_type
-sum_edge_counts (VEC (edge, gc) *to_edges)
+sum_edge_counts (vec<edge, va_gc> *to_edges)
{
gcov_type sum = 0;
edge e;
};
typedef struct ps_reg_move_info ps_reg_move_info;
-DEF_VEC_O (ps_reg_move_info);
-DEF_VEC_ALLOC_O (ps_reg_move_info, heap);
/* Holds the partial schedule as an array of II rows. Each entry of the
array points to a linked list of PS_INSNs, which represents the
/* All the moves added for this partial schedule. Index X has
a ps_insn id of X + g->num_nodes. */
- VEC (ps_reg_move_info, heap) *reg_moves;
+ vec<ps_reg_move_info> reg_moves;
/* rows_length[i] holds the number of instructions in the row.
It is used only (as an optimization) to back off quickly from
#define NODE_ASAP(node) ((node)->aux.count)
-#define SCHED_PARAMS(x) (&VEC_index (node_sched_params, node_sched_param_vec, x))
+#define SCHED_PARAMS(x) (&node_sched_param_vec[x])
#define SCHED_TIME(x) (SCHED_PARAMS (x)->time)
#define SCHED_ROW(x) (SCHED_PARAMS (x)->row)
#define SCHED_STAGE(x) (SCHED_PARAMS (x)->stage)
} *node_sched_params_ptr;
typedef struct node_sched_params node_sched_params;
-DEF_VEC_O (node_sched_params);
-DEF_VEC_ALLOC_O (node_sched_params, heap);
\f
/* The following three functions are copied from the current scheduler
code in order to use sched_analyze() for computing the dependencies.
ps_reg_move (partial_schedule_ptr ps, int id)
{
gcc_checking_assert (id >= ps->g->num_nodes);
- return &VEC_index (ps_reg_move_info, ps->reg_moves, id - ps->g->num_nodes);
+ return &ps->reg_moves[id - ps->g->num_nodes];
}
/* Return the rtl instruction that is being scheduled by partial schedule
/* A vector that contains the sched data for each ps_insn. */
-static VEC (node_sched_params, heap) *node_sched_param_vec;
+static vec<node_sched_params> node_sched_param_vec;
/* Allocate sched_params for each node and initialize it. */
static void
set_node_sched_params (ddg_ptr g)
{
- VEC_truncate (node_sched_params, node_sched_param_vec, 0);
- VEC_safe_grow_cleared (node_sched_params, heap,
- node_sched_param_vec, g->num_nodes);
+ node_sched_param_vec.truncate (0);
+ node_sched_param_vec.safe_grow_cleared (g->num_nodes);
}
/* Make sure that node_sched_param_vec has an entry for every move in PS. */
static void
extend_node_sched_params (partial_schedule_ptr ps)
{
- VEC_safe_grow_cleared (node_sched_params, heap, node_sched_param_vec,
- ps->g->num_nodes + VEC_length (ps_reg_move_info,
- ps->reg_moves));
+ node_sched_param_vec.safe_grow_cleared (ps->g->num_nodes
+ + ps->reg_moves.length ());
}
/* Update the sched_params (time, row and stage) for node U using the II,
continue;
/* Create NREG_MOVES register moves. */
- first_move = VEC_length (ps_reg_move_info, ps->reg_moves);
- VEC_safe_grow_cleared (ps_reg_move_info, heap, ps->reg_moves,
- first_move + nreg_moves);
+ first_move = ps->reg_moves.length ();
+ ps->reg_moves.safe_grow_cleared (first_move + nreg_moves);
extend_node_sched_params (ps);
/* Record the moves associated with this node. */
ps_reg_move_info *move;
int i;
- FOR_EACH_VEC_ELT (ps_reg_move_info, ps->reg_moves, i, move)
+ FOR_EACH_VEC_ELT (ps->reg_moves, i, move)
{
unsigned int i_use;
sbitmap_iterator sbi;
}
free_partial_schedule (ps);
- VEC_free (node_sched_params, heap, node_sched_param_vec);
+ node_sched_param_vec.release ();
free (node_order);
free_ddg (g);
}
partial_schedule_ptr ps = XNEW (struct partial_schedule);
ps->rows = (ps_insn_ptr *) xcalloc (ii, sizeof (ps_insn_ptr));
ps->rows_length = (int *) xcalloc (ii, sizeof (int));
- ps->reg_moves = NULL;
+ ps->reg_moves.create (0);
ps->ii = ii;
ps->history = history;
ps->min_cycle = INT_MAX;
if (!ps)
return;
- FOR_EACH_VEC_ELT (ps_reg_move_info, ps->reg_moves, i, move)
+ FOR_EACH_VEC_ELT (ps->reg_moves, i, move)
sbitmap_free (move->uses);
- VEC_free (ps_reg_move_info, heap, ps->reg_moves);
+ ps->reg_moves.release ();
free_ps_insns (ps);
free (ps->rows);
+2012-11-16 Diego Novillo <dnovillo@google.com>
+
+ Adjust for new vec API (http://gcc.gnu.org/wiki/cxx-conversion/cxx-vec)
+
+ * objc-act.c: Use new vec API in vec.h.
+ * objc-act.h: Likewise.
+ * objc-gnu-runtime-abi-01.c: Likewise.
+ * objc-next-runtime-abi-01.c: Likewise.
+ * objc-next-runtime-abi-02.c: Likewise.
+ * objc-runtime-hooks.h: Likewise.
+ * objc-runtime-shared-support.c: Likewise.
+ * objc-runtime-shared-support.h: Likewise.
+
2012-10-01 Lawrence Crowl <crowl@google.com>
* Make-lang.in (OBJC_OBJS): Add dependence on hash-table.o.
/* An array of all the local variables in the current function that
need to be marked as volatile. */
-VEC(tree,gc) *local_variables_to_volatilize = NULL;
+vec<tree, va_gc> *local_variables_to_volatilize = NULL;
/* Store all constructed constant strings in a hash table so that
they get uniqued properly. */
tree s = objc_start_struct (name);
tree super = (super_name ? xref_tag (RECORD_TYPE, super_name) : NULL_TREE);
tree t;
- VEC(tree,heap) *objc_info = NULL;
+ vec<tree> objc_info = vec<tree>();
int i;
if (super)
for (t = TYPE_MAIN_VARIANT (s); t; t = TYPE_NEXT_VARIANT (t))
{
INIT_TYPE_OBJC_INFO (t);
- VEC_safe_push (tree, heap, objc_info, TYPE_OBJC_INFO (t));
+ objc_info.safe_push (TYPE_OBJC_INFO (t));
}
s = objc_finish_struct (s, fields);
/* Replace TYPE_OBJC_INFO with the saved one. This restores any
protocol information that may have been associated with the
type. */
- TYPE_OBJC_INFO (t) = VEC_index (tree, objc_info, i);
+ TYPE_OBJC_INFO (t) = objc_info[i];
/* Replace the IDENTIFIER_NODE with an actual @interface now
that we have it. */
TYPE_OBJC_INTERFACE (t) = klass;
}
- VEC_free (tree, heap, objc_info);
+ objc_info.release ();
/* Use TYPE_BINFO structures to point at the super class, if any. */
objc_xref_basetypes (s, super);
|| TREE_CODE (decl) == PARM_DECL))
{
if (local_variables_to_volatilize == NULL)
- local_variables_to_volatilize = VEC_alloc (tree, gc, 8);
+ vec_alloc (local_variables_to_volatilize, 8);
- VEC_safe_push (tree, gc, local_variables_to_volatilize, decl);
+ vec_safe_push (local_variables_to_volatilize, decl);
}
}
{
int i;
tree decl;
- FOR_EACH_VEC_ELT (tree, local_variables_to_volatilize, i, decl)
+ FOR_EACH_VEC_ELT (*local_variables_to_volatilize, i, decl)
{
tree t = TREE_TYPE (decl);
}
/* Now we delete the vector. This sets it to NULL as well. */
- VEC_free (tree, gc, local_variables_to_volatilize);
+ vec_free (local_variables_to_volatilize);
}
}
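The two hunks above cover the whole lifecycle of a GTY-rooted vector
under the new API: it starts out NULL, is allocated lazily with
vec_alloc, grown with vec_safe_push, walked through an explicit
dereference, and freed (and reset to NULL) with vec_free.  A hedged
sketch of that lifecycle; to_volatilize and mark_volatile are
hypothetical stand-ins, not names from this patch:

  static GTY (()) vec<tree, va_gc> *to_volatilize;

  static void
  note_decl (tree decl)
  {
    if (to_volatilize == NULL)
      vec_alloc (to_volatilize, 8);      /* lazy initial allocation.  */
    vec_safe_push (to_volatilize, decl); /* NULL-safe by itself too.  */
  }

  static void
  flush_decls (void)
  {
    int i;
    tree decl;
    if (to_volatilize == NULL)
      return;                                  /* nothing registered.  */
    FOR_EACH_VEC_ELT (*to_volatilize, i, decl) /* note the dereference.  */
      mark_volatile (decl);                    /* hypothetical helper.  */
    vec_free (to_volatilize);  /* frees storage and NULLs the pointer.  */
  }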
tree base_binfo = objc_copy_binfo (TYPE_BINFO (basetype));
BINFO_INHERITANCE_CHAIN (base_binfo) = binfo;
- BINFO_BASE_ACCESSES (binfo) = VEC_alloc (tree, gc, 1);
+ vec_alloc (BINFO_BASE_ACCESSES (binfo), 1);
BINFO_BASE_APPEND (binfo, base_binfo);
BINFO_BASE_ACCESS_APPEND (binfo, access_public_node);
}
with type TYPE and elements ELTS. */
tree
-objc_build_constructor (tree type, VEC(constructor_elt,gc) *elts)
+objc_build_constructor (tree type, vec<constructor_elt, va_gc> *elts)
{
tree constructor = build_constructor (type, elts);
#ifdef OBJCPLUS
/* Adjust for impedance mismatch. We should figure out how to build
CONSTRUCTORs that consistently please both the C and C++ gods. */
- if (!VEC_index (constructor_elt, elts, 0).index)
+ if (!(*elts)[0].index)
TREE_TYPE (constructor) = init_list_type_node;
#endif
build_function_type_for_method (tree return_type, tree method,
int context, bool super_flag)
{
- VEC(tree,gc) *argtypes = make_tree_vector ();
+ vec<tree, va_gc> *argtypes = make_tree_vector ();
tree t, ftype;
bool is_varargs = false;
appropriate. */
arg_type = objc_decay_parm_type (arg_type);
- VEC_safe_push (tree, gc, argtypes, arg_type);
+ vec_safe_push (argtypes, arg_type);
}
if (METHOD_ADD_ARGS (method))
arg_type = objc_decay_parm_type (arg_type);
- VEC_safe_push (tree, gc, argtypes, arg_type);
+ vec_safe_push (argtypes, arg_type);
}
if (METHOD_ADD_ARGS_ELLIPSIS_P (method))
/* An array of all the local variables in the current function that
need to be marked as volatile. */
-extern GTY(()) VEC(tree,gc) *local_variables_to_volatilize;
+extern GTY(()) vec<tree, va_gc> *local_variables_to_volatilize;
/* Objective-C/Objective-C++ @implementation list. */
static tree gnu_runtime_abi_01_get_category_super_ref (location_t, struct imp_entry *, bool);
static tree gnu_runtime_abi_01_receiver_is_class_object (tree);
-static void gnu_runtime_abi_01_get_arg_type_list_base (VEC(tree,gc) **, tree,
- int, int);
+static void gnu_runtime_abi_01_get_arg_type_list_base (vec<tree, va_gc> **,
+ tree, int, int);
static tree gnu_runtime_abi_01_build_objc_method_call (location_t, tree, tree,
tree, tree, tree, int);
prototype. */
static void
-gnu_runtime_abi_01_get_arg_type_list_base (VEC(tree,gc) **argtypes, tree meth,
- int context,
+gnu_runtime_abi_01_get_arg_type_list_base (vec<tree, va_gc> **argtypes,
+ tree meth, int context,
int superflag ATTRIBUTE_UNUSED)
{
tree receiver_type;
else
receiver_type = objc_object_type;
- VEC_safe_push (tree, gc, *argtypes, receiver_type);
+ vec_safe_push (*argtypes, receiver_type);
/* Selector type - will eventually change to `int'. */
- VEC_safe_push (tree, gc, *argtypes, objc_selector_type);
+ vec_safe_push (*argtypes, objc_selector_type);
}
/* Unused for GNU runtime. */
: (flag_objc_direct_dispatch ? umsg_fast_decl
: umsg_decl));
tree rcv_p = (super_flag ? objc_super_type : objc_object_type);
- VEC(tree, gc) *parms;
- VEC(tree, gc) *tv;
+ vec<tree, va_gc> *parms;
+ vec<tree, va_gc> *tv;
unsigned nparm = (method_params ? list_length (method_params) : 0);
/* If a prototype for the method to be called exists, then cast
lookup_object = save_expr (lookup_object);
/* Param list + 2 slots for object and selector. */
- parms = VEC_alloc (tree, gc, nparm + 2);
- tv = VEC_alloc (tree, gc, 2);
+ vec_alloc (parms, nparm + 2);
+ vec_alloc (tv, 2);
/* First, call the lookup function to get a pointer to the method,
then cast the pointer, then call it with the method arguments. */
- VEC_quick_push (tree, tv, lookup_object);
- VEC_quick_push (tree, tv, selector);
+ tv->quick_push (lookup_object);
+ tv->quick_push (selector);
method = build_function_call_vec (loc, sender, tv, NULL);
- VEC_free (tree, gc, tv);
+ vec_free (tv);
/* Pass the appropriate object to the method. */
- VEC_quick_push (tree, parms, (super_flag ? self_decl : lookup_object));
+ parms->quick_push ((super_flag ? self_decl : lookup_object));
/* Pass the selector to the method. */
- VEC_quick_push (tree, parms, selector);
+ parms->quick_push (selector);
/* Now append the remainder of the parms. */
if (nparm)
for (; method_params; method_params = TREE_CHAIN (method_params))
- VEC_quick_push (tree, parms, TREE_VALUE (method_params));
+ parms->quick_push (TREE_VALUE (method_params));
/* Build an obj_type_ref, with the correct cast for the method call. */
t = build3 (OBJ_TYPE_REF, sender_cast, method, lookup_object, size_zero_node);
t = build_function_call_vec (loc, t, parms, NULL);
- VEC_free (tree, gc, parms);
+ vec_free (parms);
return t;
}
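The call-building code above reserves the exact parameter count with
vec_alloc and then fills the vector with quick_push, which (unlike
safe_push) assumes space is already available rather than checking
whether it must reallocate.  A minimal sketch of the distinction,
reusing names from the hunk above (illustrative only):

  vec<tree, va_gc> *parms;
  vec_alloc (parms, nparm + 2);       /* capacity reserved up front.  */
  parms->quick_push (lookup_object);  /* no growth check needed...  */
  parms->quick_push (selector);       /* ...capacity was preallocated.  */
  t = build_function_call_vec (loc, sender, parms, NULL);
  vec_free (parms);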
int length)
{
tree constructor, fields;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* GNU: (NXConstantString *) & ((__builtin_ObjCString) { NULL, string, length }) */
fields = TYPE_FIELDS (internal_const_str_type);
{
tree expr, ttyp;
location_t loc;
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
/* TODO: pass the loc in or find it from args. */
loc = input_location;
generate_protocol_list (tree i_or_p, tree klass_ctxt)
{
tree array_type, ptype, refs_decl, lproto, e, plist;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
char buf[BUFSIZE];
int size = 0;
{
tree method_list_template, initlist, decl;
int size;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
char buf[BUFSIZE];
if (!chain || !prefix)
generate_dispatch_table (tree chain, const char *name)
{
tree decl, method_list_template, initlist;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
int size = list_length (chain);
if (!objc_method_template)
{
tree expr, ltyp;
location_t loc;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* TODO: pass the loc in or find it from args. */
/* TODO: pass the loc in or find it from args. */
tree protocol_list)
{
tree expr, ltyp;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* isa = */
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, isa);
{
tree initlist, ivar_list_template, decl;
int size;
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
if (!chain)
return NULL_TREE;
build_gnu_selector_translation_table (void)
{
tree chain, expr;
- VEC(constructor_elt,gc) *inits = NULL;
- VEC(constructor_elt,gc) *v ;
+ vec<constructor_elt, va_gc> *inits = NULL;
+  vec<constructor_elt, va_gc> *v;
/* Cause the selector table (previously forward-declared)
to be actually output. */
= build_array_type (build_pointer_type (void_type_node), NULL_TREE);
int num_inst, num_class;
char buf[BUFSIZE];
- VEC(constructor_elt,gc) *decls = NULL;
+ vec<constructor_elt, va_gc> *decls = NULL;
/* FIXME: Remove NeXT runtime code. */
if (flag_next_runtime)
for (cl_chain = objc_static_instances, num_class = 0;
cl_chain; cl_chain = TREE_CHAIN (cl_chain), num_class++)
{
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
for (num_inst = 0, in_chain = TREE_PURPOSE (cl_chain);
in_chain; num_inst++, in_chain = TREE_CHAIN (in_chain));
tree expr;
struct imp_entry *impent;
location_t loc;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
if (imp_count)
for (impent = imp_list; impent; impent = impent->next)
{
tree field, expr, ltyp;
location_t loc;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
loc = UNKNOWN_LOCATION;
build_throw_stmt (location_t loc, tree throw_expr, bool rethrown ATTRIBUTE_UNUSED)
{
tree t;
- VEC(tree, gc) *parms = VEC_alloc (tree, gc, 1);
+ vec<tree, va_gc> *parms;
+ vec_alloc (parms, 1);
/* A throw is just a call to the runtime throw function with the
object as a parameter. */
- VEC_quick_push (tree, parms, throw_expr);
+ parms->quick_push (throw_expr);
t = build_function_call_vec (loc, objc_exception_throw_decl, parms, NULL);
- VEC_free (tree, gc, parms);
+ vec_free (parms);
return add_stmt (t);
}
static tree next_runtime_abi_01_get_category_super_ref (location_t, struct imp_entry *, bool);
static tree next_runtime_abi_01_receiver_is_class_object (tree);
-static void next_runtime_abi_01_get_arg_type_list_base (VEC(tree,gc) **, tree,
- int, int);
+static void next_runtime_abi_01_get_arg_type_list_base (vec<tree, va_gc> **,
+ tree, int, int);
static tree next_runtime_abi_01_build_objc_method_call (location_t, tree, tree,
tree, tree, tree, int);
static bool next_runtime_abi_01_setup_const_string_class_decl (void);
prototype. */
static void
-next_runtime_abi_01_get_arg_type_list_base (VEC(tree,gc) **argtypes, tree meth,
- int context, int superflag)
+next_runtime_abi_01_get_arg_type_list_base (vec<tree, va_gc> **argtypes,
+ tree meth, int context,
+ int superflag)
{
tree receiver_type;
else
receiver_type = objc_object_type;
- VEC_safe_push (tree, gc, *argtypes, receiver_type);
+ vec_safe_push (*argtypes, receiver_type);
/* Selector type - will eventually change to `int'. */
- VEC_safe_push (tree, gc, *argtypes, objc_selector_type);
+ vec_safe_push (*argtypes, objc_selector_type);
}
static tree
{
tree sender, sender_cast, method, t;
tree rcv_p = (super_flag ? objc_super_type : objc_object_type);
- VEC(tree, gc) *parms;
+ vec<tree, va_gc> *parms;
unsigned nparm = (method_params ? list_length (method_params) : 0);
/* If a prototype for the method to be called exists, then cast
lookup_object = save_expr (lookup_object);
/* Param list + 2 slots for object and selector. */
- parms = VEC_alloc (tree, gc, nparm + 2);
+ vec_alloc (parms, nparm + 2);
/* If we are returning a struct in memory, and the address
of that memory location is passed as a hidden first
method = build_fold_addr_expr_loc (loc, sender);
/* Pass the object to the method. */
- VEC_quick_push (tree, parms, lookup_object);
+ parms->quick_push (lookup_object);
/* Pass the selector to the method. */
- VEC_quick_push (tree, parms, selector);
+ parms->quick_push (selector);
/* Now append the remainder of the parms. */
if (nparm)
for (; method_params; method_params = TREE_CHAIN (method_params))
- VEC_quick_push (tree, parms, TREE_VALUE (method_params));
+ parms->quick_push (TREE_VALUE (method_params));
/* Build an obj_type_ref, with the correct cast for the method call. */
t = build3 (OBJ_TYPE_REF, sender_cast, method,
lookup_object, size_zero_node);
t = build_function_call_vec (loc, t, parms, NULL);
- VEC_free (tree, gc, parms);
+ vec_free (parms);
return t;
}
int length)
{
tree constructor, fields, var;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* NeXT: (NSConstantString *) & ((__builtin_ObjCString) { isa, string, length }) */
fields = TYPE_FIELDS (internal_const_str_type);
{
tree method_list_template, initlist, decl;
int size;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
char buf[BUFSIZE];
if (!chain || !prefix)
{
int size;
location_t loc;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
tree decl, expr;
char buf[BUFSIZE];
build_v1_property_table_initializer (tree type, tree context)
{
tree x;
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
if (TREE_CODE (context) == PROTOCOL_INTERFACE_TYPE)
x = CLASS_PROPERTY_DECL (context);
for (; x; x = TREE_CHAIN (x))
{
- VEC(constructor_elt,gc) *elemlist = NULL;
+ vec<constructor_elt, va_gc> *elemlist = NULL;
tree attribute, name_ident = PROPERTY_NAME (x);
CONSTRUCTOR_APPEND_ELT (elemlist, NULL_TREE,
{
tree x, decl, initlist, property_list_template;
bool is_proto = false;
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
int init_val, size = 0;
char buf[BUFSIZE];
{
tree array_type, ptype, refs_decl, lproto, e, plist, attr;
int size = 0;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
char buf[BUFSIZE];
switch (TREE_CODE (i_or_p))
{
tree expr, ttyp;
location_t loc;
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
if (!objc_protocol_extension_template)
build_v1_objc_protocol_extension_template ();
generate_dispatch_table (tree chain, const char *name, tree attr)
{
tree decl, method_list_template, initlist;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
int size;
if (!chain || !name || !(size = list_length (chain)))
location_t loc)
{
tree expr, ltyp;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, cat_name);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, class_name);
tree weak_ivar_layout_tree;
int size;
location_t loc;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
char buf[BUFSIZE];
/* TODO: pass the loc in or find it from args. */
{
tree expr, ltyp;
location_t loc;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* TODO: fish the location out of the input data. */
loc = UNKNOWN_LOCATION;
{
tree initlist, ivar_list_template, decl;
int size;
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
if (!chain)
return NULL_TREE;
tree expr;
location_t loc;
struct imp_entry *impent;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
if (imp_count)
for (impent = imp_list; impent; impent = impent->next)
static tree
init_objc_symtab (tree type)
{
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* sel_ref_cnt = { ..., 5, ... } */
int flags
= ((flag_replace_objc_classes && imp_count ? 1 : 0)
| (flag_objc_gc ? 2 : 0));
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
tree array_type;
array_type = build_sized_array_type (integer_type_node, 2);
build_throw_stmt (location_t loc, tree throw_expr, bool rethrown ATTRIBUTE_UNUSED)
{
tree t;
- VEC(tree, gc) *parms = VEC_alloc (tree, gc, 1);
+ vec<tree, va_gc> *parms;
+ vec_alloc (parms, 1);
/* A throw is just a call to the runtime throw function with the
object as a parameter. */
- VEC_quick_push (tree, parms, throw_expr);
+ parms->quick_push (throw_expr);
t = build_function_call_vec (loc, objc_exception_throw_decl, parms, NULL);
- VEC_free (tree, gc, parms);
+ vec_free (parms);
return add_stmt (t);
}
static tree next_runtime_abi_02_get_category_super_ref (location_t, struct imp_entry *, bool);
static tree next_runtime_abi_02_receiver_is_class_object (tree);
-static void next_runtime_abi_02_get_arg_type_list_base (VEC(tree,gc) **, tree,
- int, int);
+static void next_runtime_abi_02_get_arg_type_list_base (vec<tree, va_gc> **,
+ tree, int, int);
static tree next_runtime_abi_02_build_objc_method_call (location_t, tree, tree,
tree, tree, tree, int);
static bool next_runtime_abi_02_setup_const_string_class_decl (void);
tree ident;
tree data;
} ident_data_tuple;
-DEF_VEC_O(ident_data_tuple);
-DEF_VEC_ALLOC_O(ident_data_tuple, gc);
/* This routine creates a file scope static variable of type 'Class'
to hold the address of a class. */
ident is replaced with address of the class metadata (of type
'Class') in the output routine. */
-static GTY (()) VEC (ident_data_tuple, gc) * classrefs;
+static GTY (()) vec<ident_data_tuple, va_gc> *classrefs;
static tree
objc_v2_get_class_reference (tree ident)
{
int count;
ident_data_tuple *ref;
- FOR_EACH_VEC_ELT (ident_data_tuple, classrefs, count, ref)
+ FOR_EACH_VEC_ELT (*classrefs, count, ref)
{
if (ref->ident == ident)
{
}
else
/* Somewhat arbitrary initial provision. */
- classrefs = VEC_alloc (ident_data_tuple, gc, 16);
+ vec_alloc (classrefs, 16);
/* We come here if we don't find the entry - or if the table was yet
to be created. */
decl = build_v2_class_reference_decl (ident);
e.ident = ident;
e.data = decl;
- VEC_safe_push (ident_data_tuple, gc, classrefs, e);
+ vec_safe_push (classrefs, e);
return decl;
}
else
{
/* We fall back to using objc_getClass (). */
- VEC(tree,gc) *vec = VEC_alloc (tree, gc, 1);
+      vec<tree, va_gc> *v;
       tree t;
+      vec_alloc (v, 1);
/* ??? add_class_reference (ident); - is pointless, since the
system lib does not export the equivalent symbols. Maybe we
need to build a class ref anyway. */
t = my_build_string_pointer (IDENTIFIER_LENGTH (ident) + 1,
IDENTIFIER_POINTER (ident));
- VEC_quick_push (tree, vec, t);
- t = build_function_call_vec (input_location, objc_get_class_decl,
- vec, NULL);
- VEC_free (tree, gc, vec);
+ v->quick_push (t);
+ t = build_function_call_vec (input_location, objc_get_class_decl, v, 0);
+ vec_free (v);
return t;
}
}
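These cache conversions also show the one systematic change to iteration: FOR_EACH_VEC_ELT now takes the vector object itself, so a GTY-rooted vec<T, va_gc> * must be dereferenced at the use site. A sketch of the lookup-or-insert pattern shared by classrefs, msgrefs and protrefs (field names follow the surrounding code; the key comparison is illustrative):

    static GTY (()) vec<ident_data_tuple, va_gc> *classrefs;

    int ix;
    ident_data_tuple *ref;
    if (classrefs)
      FOR_EACH_VEC_ELT (*classrefs, ix, ref)   /* Note *classrefs, not classrefs.  */
        if (ref->ident == ident)
          return ref->data;
    ident_data_tuple e = { ident, decl };      /* Aggregate init, for brevity.  */
    vec_safe_push (classrefs, e);              /* Allocates the vector if null.  */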
prototype. */
static void
-next_runtime_abi_02_get_arg_type_list_base (VEC(tree,gc) **argtypes, tree meth,
- int context, int superflag)
+next_runtime_abi_02_get_arg_type_list_base (vec<tree, va_gc> **argtypes,
+ tree meth, int context,
+ int superflag)
{
tree receiver_type;
else
receiver_type = objc_object_type;
- VEC_safe_push (tree, gc, *argtypes, receiver_type);
+ vec_safe_push (*argtypes, receiver_type);
/* Selector type - will eventually change to `int'. */
- VEC_safe_push (tree, gc, *argtypes,
- (superflag
- ? objc_v2_super_selector_type
- : objc_v2_selector_type));
+ vec_safe_push (*argtypes,
+ superflag ? objc_v2_super_selector_type
+ : objc_v2_selector_type);
}
/* TODO: Merge this with the message refs. */
tree selname;
tree refdecl;
} msgref_entry;
-DEF_VEC_O(msgref_entry);
-DEF_VEC_ALLOC_O(msgref_entry, gc);
-static GTY (()) VEC (msgref_entry, gc) * msgrefs;
+static GTY (()) vec<msgref_entry, va_gc> *msgrefs;
/* Build the list of (objc_msgSend_fixup_xxx, selector name), used
later on to initialize the table of 'struct message_ref_t'
{
int count;
msgref_entry *ref;
- FOR_EACH_VEC_ELT (msgref_entry, msgrefs, count, ref)
+ FOR_EACH_VEC_ELT (*msgrefs, count, ref)
if (ref->func == message_func_decl && ref->selname == sel_name)
return ref->refdecl;
}
else
/* Somewhat arbitrary initial provision. */
- msgrefs = VEC_alloc (msgref_entry, gc, 32);
+ vec_alloc (msgrefs, 32);
/* We come here if we don't find a match or at the start. */
decl = build_v2_message_reference_decl (sel_name,
e.func = message_func_decl;
e.selname = sel_name;
e.refdecl = decl;
- VEC_safe_push (msgref_entry, gc, msgrefs, e);
+ vec_safe_push (msgrefs, e);
return decl;
}
tree id;
tree refdecl;
} prot_list_entry;
-DEF_VEC_O(prot_list_entry);
-DEF_VEC_ALLOC_O(prot_list_entry, gc);
-static GTY (()) VEC (prot_list_entry, gc) * protrefs;
+static GTY (()) vec<prot_list_entry, va_gc> *protrefs;
static tree
objc_v2_get_protocol_reference (tree ident)
{
int count;
prot_list_entry *ref;
- FOR_EACH_VEC_ELT (prot_list_entry, protrefs, count, ref)
+ FOR_EACH_VEC_ELT (*protrefs, count, ref)
{
if (ref->id == ident)
{
}
else
/* Somewhat arbitrary initial provision. */
- protrefs = VEC_alloc (prot_list_entry, gc, 32);
+ vec_alloc (protrefs, 32);
/* We come here if we don't find the entry - or if the table was yet
to be created. */
decl = build_v2_protocollist_ref_decl (ident);
e.id = ident;
e.refdecl = decl;
- VEC_safe_push (prot_list_entry, gc, protrefs, e);
+ vec_safe_push (protrefs, e);
return decl;
}
return decl;
}
-static GTY (()) VEC (ident_data_tuple, gc) * class_super_refs;
-static GTY (()) VEC (ident_data_tuple, gc) * metaclass_super_refs;
+static GTY (()) vec<ident_data_tuple, va_gc> *class_super_refs;
+static GTY (()) vec<ident_data_tuple, va_gc> *metaclass_super_refs;
static tree
next_runtime_abi_02_get_class_super_ref (location_t loc ATTRIBUTE_UNUSED,
tree decl;
ident_data_tuple e;
tree id = CLASS_NAME (imp->imp_context);
- VEC (ident_data_tuple, gc) *list = inst_meth ? class_super_refs
+ vec<ident_data_tuple, va_gc> *list = inst_meth ? class_super_refs
: metaclass_super_refs;
if (list)
{
int count;
ident_data_tuple *ref;
- FOR_EACH_VEC_ELT (ident_data_tuple, list, count, ref)
+ FOR_EACH_VEC_ELT (*list, count, ref)
{
if (ref->ident == id)
{
{
/* Somewhat arbitrary initial provision. */
if (inst_meth)
- list = class_super_refs = VEC_alloc (ident_data_tuple, gc, 16);
+ {
+ vec_alloc (class_super_refs, 16);
+ list = class_super_refs;
+ }
else
- list = metaclass_super_refs = VEC_alloc (ident_data_tuple, gc, 16);
+ {
+ vec_alloc (metaclass_super_refs, 16);
+ list = metaclass_super_refs;
+ }
}
/* We come here if we don't find the entry - or if the table was yet
to be created. */
decl = build_v2_superclass_ref_decl (id, inst_meth);
e.ident = id;
e.data = decl;
- VEC_safe_push (ident_data_tuple, gc, list, e);
+ vec_safe_push (list, e);
return decl;
}
{
if (TREE_CODE (receiver) == VAR_DECL
&& IS_CLASS (TREE_TYPE (receiver))
- && classrefs
- && VEC_length (ident_data_tuple, classrefs))
+ && vec_safe_length (classrefs))
{
int count;
ident_data_tuple *ref;
/* The receiver is a variable created by build_class_reference_decl. */
- FOR_EACH_VEC_ELT (ident_data_tuple, classrefs, count, ref)
+ FOR_EACH_VEC_ELT (*classrefs, count, ref)
if (ref->data == receiver)
return ref->ident;
}
if (TREE_CODE (ret_type) == RECORD_TYPE
|| TREE_CODE (ret_type) == UNION_TYPE)
{
- VEC(constructor_elt,gc) *rtt = NULL;
+ vec<constructor_elt, va_gc> *rtt = NULL;
/* ??? CHECKME. hmmm..... think we need something more
here. */
CONSTRUCTOR_APPEND_ELT (rtt, NULL_TREE, NULL_TREE);
int length)
{
tree constructor, fields, var;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* NeXT: (NSConstantString *) & ((__builtin_ObjCString) { isa, string, length }) */
fields = TYPE_FIELDS (internal_const_str_type);
int count;
msgref_entry *ref;
- if (!msgrefs || !VEC_length (msgref_entry,msgrefs))
+ if (!vec_safe_length (msgrefs))
return;
- FOR_EACH_VEC_ELT (msgref_entry, msgrefs, count, ref)
+ FOR_EACH_VEC_ELT (*msgrefs, count, ref)
{
- VEC(constructor_elt,gc) *initializer;
+ vec<constructor_elt, va_gc> *initializer;
tree expr, constructor;
tree struct_type = TREE_TYPE (ref->refdecl);
location_t loc = DECL_SOURCE_LOCATION (ref->refdecl);
int count;
ident_data_tuple *ref;
- if (!classrefs || !VEC_length (ident_data_tuple, classrefs))
+ if (!vec_safe_length (classrefs))
return;
- FOR_EACH_VEC_ELT (ident_data_tuple, classrefs, count, ref)
+ FOR_EACH_VEC_ELT (*classrefs, count, ref)
{
tree expr = ref->ident;
tree decl = ref->data;
{
int count;
ident_data_tuple *ref;
- VEC (ident_data_tuple, gc) *list = metaclass ? metaclass_super_refs
+ vec<ident_data_tuple, va_gc> *list = metaclass ? metaclass_super_refs
: class_super_refs;
- if (!list || !VEC_length (ident_data_tuple, list))
+ if (!vec_safe_length (list))
return;
- FOR_EACH_VEC_ELT (ident_data_tuple, list, count, ref)
+ FOR_EACH_VEC_ELT (*list, count, ref)
{
tree expr = ref->ident;
tree decl = ref->data;
/* Add the global class meta-data declaration to the list which later
on ends up in the __class_list section. */
-static GTY(()) VEC(tree,gc) *class_list;
+static GTY(()) vec<tree, va_gc> *class_list;
static void
objc_v2_add_to_class_list (tree global_class_decl)
{
- if (!class_list)
- class_list = VEC_alloc (tree, gc, imp_count?imp_count:1);
- VEC_safe_push (tree, gc, class_list, global_class_decl);
+ vec_safe_push (class_list, global_class_decl);
}
-static GTY(()) VEC(tree,gc) *nonlazy_class_list;
+static GTY(()) vec<tree, va_gc> *nonlazy_class_list;
/* Add the global class meta-data declaration to the list which later
on ends up in the __nonlazy_class section. */
static void
objc_v2_add_to_nonlazy_class_list (tree global_class_decl)
{
- if (!nonlazy_class_list)
- nonlazy_class_list = VEC_alloc (tree, gc, imp_count?imp_count:1);
- VEC_safe_push (tree, gc, nonlazy_class_list, global_class_decl);
+ vec_safe_push (nonlazy_class_list, global_class_decl);
}
-static GTY(()) VEC(tree,gc) *category_list;
+static GTY(()) vec<tree, va_gc> *category_list;
/* Add the category meta-data declaration to the list which later on
ends up in the __nonlazy_category section. */
static void
objc_v2_add_to_category_list (tree decl)
{
- if (!category_list)
- category_list = VEC_alloc (tree, gc, cat_count?cat_count:1);
- VEC_safe_push (tree, gc, category_list, decl);
+ vec_safe_push (category_list, decl);
}
-static GTY(()) VEC(tree,gc) *nonlazy_category_list;
+static GTY(()) vec<tree, va_gc> *nonlazy_category_list;
/* Add the category meta-data declaration to the list which later on
ends up in the __category_list section. */
static void
objc_v2_add_to_nonlazy_category_list (tree decl)
{
- if (!nonlazy_category_list)
- nonlazy_category_list = VEC_alloc (tree, gc, cat_count?cat_count:1);
- VEC_safe_push (tree, gc, nonlazy_category_list, decl);
+ vec_safe_push (nonlazy_category_list, decl);
}
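The four list builders above lose their first-use allocation preamble because the vec_safe_* free functions accept a null vector and allocate on demand (and vec_safe_length reports 0 for null). A sketch of the before/after, paraphrasing the removed lines:

    /* Before: explicit allocation on first use.  */
    if (!class_list)
      class_list = VEC_alloc (tree, gc, imp_count ? imp_count : 1);
    VEC_safe_push (tree, gc, class_list, global_class_decl);

    /* After: the null check and allocation live inside vec_safe_push.  */
    vec_safe_push (class_list, global_class_decl);

The only semantic change is that the initial capacity hint (imp_count) is dropped; the first push allocates whatever the default growth policy provides, which these call sites evidently did not consider worth preserving.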
static bool
all @implemented {class,category} meta-data. */
static void
-build_v2_address_table (VEC(tree,gc) *src, const char *nam, tree attr)
+build_v2_address_table (vec<tree, va_gc> *src, const char *nam, tree attr)
{
int count=0;
tree type, decl, expr;
- VEC(constructor_elt,gc) *initlist = NULL;
+ vec<constructor_elt, va_gc> *initlist = NULL;
- if (!src || !VEC_length(tree,src))
+ if (!vec_safe_length (src))
return;
- FOR_EACH_VEC_ELT (tree, src, count, decl)
+ FOR_EACH_VEC_ELT (*src, count, decl)
{
#ifndef OBJCPLUS
tree purpose = build_int_cst (NULL_TREE, count);
if (!protrefs)
return;
- FOR_EACH_VEC_ELT (prot_list_entry, protrefs, count, ref)
+ FOR_EACH_VEC_ELT (*protrefs, count, ref)
{
char buf[BUFSIZE];
tree expr;
/* TODO: Maybe we could explicitly delete the vec. now? */
}
-static GTY (()) VEC (prot_list_entry, gc) * protlist;
+static GTY (()) vec<prot_list_entry, va_gc> *protlist;
/* Add the local protocol meta-data declaration to the list which
later on ends up in the __protocol_list section. */
prot_list_entry e;
if (!protlist)
/* Arbitrary init count. */
- protlist = VEC_alloc (prot_list_entry, gc, 32);
+ vec_alloc (protlist, 32);
e.id = protocol_interface_decl;
e.refdecl = protocol_decl;
- VEC_safe_push (prot_list_entry, gc, protlist, e);
+ vec_safe_push (protlist, e);
}
/* Build the __protocol_list section table containing address of all
{
int count;
prot_list_entry *ref;
- if (!protlist || !VEC_length (prot_list_entry, protlist))
+ if (!vec_safe_length (protlist))
return;
- FOR_EACH_VEC_ELT (prot_list_entry, protlist, count, ref)
+ FOR_EACH_VEC_ELT (*protlist, count, ref)
{
tree decl, expr;
char buf[BUFSIZE];
{
tree refs_decl, lproto, e, plist, ptempl_p_t;
int size = 0;
- VEC(constructor_elt,gc) *initlist = NULL;
+ vec<constructor_elt, va_gc> *initlist = NULL;
char buf[BUFSIZE];
if (TREE_CODE (i_or_p) == CLASS_INTERFACE_TYPE
static tree
build_v2_descriptor_table_initializer (tree type, tree entries)
{
- VEC(constructor_elt,gc) *initlist = NULL;
+ vec<constructor_elt, va_gc> *initlist = NULL;
do
{
- VEC(constructor_elt,gc) *eltlist = NULL;
+ vec<constructor_elt, va_gc> *eltlist = NULL;
CONSTRUCTOR_APPEND_ELT (eltlist, NULL_TREE,
build_selector (METHOD_SEL_NAME (entries)));
CONSTRUCTOR_APPEND_ELT (eltlist, NULL_TREE,
{
tree method_list_template, initlist, decl, methods;
int size, entsize;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
char buf[BUFSIZE];
if (!chain || !prefix)
build_v2_property_table_initializer (tree type, tree context)
{
tree x;
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
if (TREE_CODE (context) == PROTOCOL_INTERFACE_TYPE)
x = CLASS_PROPERTY_DECL (context);
else
for (; x; x = TREE_CHAIN (x))
{
- VEC(constructor_elt,gc) *elemlist = NULL;
+ vec<constructor_elt, va_gc> *elemlist = NULL;
/* NOTE! sections where property name/attribute go MUST change
later. */
tree attribute, name_ident = PROPERTY_NAME (x);
{
tree x, decl, initlist, property_list_template;
bool is_proto = false;
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
int init_val, size = 0;
char buf[BUFSIZE];
{
tree expr, ttyp;
location_t loc;
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
/* TODO: find a better representation of location from the inputs. */
loc = UNKNOWN_LOCATION;
generate_v2_dispatch_table (tree chain, const char *name, tree attr)
{
tree decl, method_list_template, initlist;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
int size, init_val;
if (!chain || !name || !(size = list_length (chain)))
location_t loc)
{
tree expr, ltyp;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, cat_name);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, class_name);
tree decl;
tree offset;
} ivarref_entry;
-DEF_VEC_O(ivarref_entry);
-DEF_VEC_ALLOC_O(ivarref_entry, gc);
-static GTY (()) VEC (ivarref_entry, gc) * ivar_offset_refs;
+static GTY (()) vec<ivarref_entry, va_gc> *ivar_offset_refs;
static tree
ivar_offset_ref (tree class_name, tree field_decl)
{
int count;
ivarref_entry *ref;
- FOR_EACH_VEC_ELT (ivarref_entry, ivar_offset_refs, count, ref)
+ FOR_EACH_VEC_ELT (*ivar_offset_refs, count, ref)
if (DECL_NAME (ref->decl) == field_decl_id)
return ref->decl;
}
else
/* Somewhat arbitrary initial provision. */
- ivar_offset_refs = VEC_alloc (ivarref_entry, gc, 32);
+ vec_alloc (ivar_offset_refs, 32);
/* We come here if we don't find a match or at the start. */
global_var = (TREE_PUBLIC (field_decl) || TREE_PROTECTED (field_decl));
e.decl = decl;
e.offset = byte_position (field_decl);
- VEC_safe_push (ivarref_entry, gc, ivar_offset_refs, e);
+ vec_safe_push (ivar_offset_refs, e);
return decl;
}
static tree
build_v2_ivar_list_initializer (tree class_name, tree type, tree field_decl)
{
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
do
{
- VEC(constructor_elt,gc) *ivar = NULL;
+ vec<constructor_elt, va_gc> *ivar = NULL;
int val;
tree id;
generate_v2_ivars_list (tree chain, const char *name, tree attr, tree templ)
{
tree decl, initlist, ivar_list_template;
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
int size, ivar_t_size;
if (!chain || !name || !(size = ivar_list_length (chain)))
build_v2_class_t_initializer (tree type, tree isa, tree superclass,
tree ro, tree cache, tree vtable)
{
- VEC(constructor_elt,gc) *initlist = NULL;
+ vec<constructor_elt, va_gc> *initlist = NULL;
/* isa */
CONSTRUCTOR_APPEND_ELT (initlist, NULL_TREE, isa);
{
tree expr, unsigned_char_star, ltyp;
location_t loc;
- VEC(constructor_elt,gc) *initlist = NULL;
+ vec<constructor_elt, va_gc> *initlist = NULL;
/* TODO: fish out the real location from somewhere. */
loc = UNKNOWN_LOCATION;
return objc_build_constructor (type, initlist);
}
-static GTY (()) VEC (ident_data_tuple, gc) * ehtype_list;
+static GTY (()) vec<ident_data_tuple, va_gc> *ehtype_list;
/* Record a name as needing a catcher. */
static void
int count = 0;
ident_data_tuple *ref;
- FOR_EACH_VEC_ELT (ident_data_tuple, ehtype_list, count, ref)
+ FOR_EACH_VEC_ELT (*ehtype_list, count, ref)
if (ref->ident == name)
return; /* Already entered. */
}
else
/* Arbitrary initial count. */
- ehtype_list = VEC_alloc (ident_data_tuple, gc, 8);
+ vec_alloc (ehtype_list, 8);
/* Not found, or new list. */
e.ident = name;
e.data = NULL_TREE;
- VEC_safe_push (ident_data_tuple, gc, ehtype_list, e);
+ vec_safe_push (ehtype_list, e);
}
static void
int count;
ivarref_entry *ref;
- if (!ivar_offset_refs || !VEC_length (ivarref_entry, ivar_offset_refs))
+ if (!vec_safe_length (ivar_offset_refs))
return;
- FOR_EACH_VEC_ELT (ivarref_entry, ivar_offset_refs, count, ref)
+ FOR_EACH_VEC_ELT (*ivar_offset_refs, count, ref)
finish_var_decl (ref->decl, ref->offset);
}
generate_v2_objc_image_info (void)
{
tree decl, array_type;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
int flags =
((flag_replace_objc_classes && imp_count ? 1 : 0)
| (flag_objc_gc ? 2 : 0));
static tree
objc2_build_ehtype_initializer (tree name, tree cls)
{
- VEC(constructor_elt,gc) *initlist = NULL;
+ vec<constructor_elt, va_gc> *initlist = NULL;
tree addr, offs;
/* This is done the same way as c++, missing the two first entries
int count=0;
ident_data_tuple *ref;
- if (!ehtype_list || !VEC_length (ident_data_tuple, ehtype_list))
+ if (!vec_safe_length (ehtype_list))
return;
- FOR_EACH_VEC_ELT (ident_data_tuple, ehtype_list, count, ref)
+ FOR_EACH_VEC_ELT (*ehtype_list, count, ref)
{
char buf[BUFSIZE];
bool impl = is_implemented (ref->ident);
int count=0;
ident_data_tuple *ref;
- if (!ehtype_list || !VEC_length (ident_data_tuple, ehtype_list))
+ if (!vec_safe_length (ehtype_list))
return NULL_TREE;
- FOR_EACH_VEC_ELT (ident_data_tuple, ehtype_list, count, ref)
+ FOR_EACH_VEC_ELT (*ehtype_list, count, ref)
if (ref->ident == id)
return ref->data;
return NULL_TREE;
else
{
/* Throw like the others... */
- VEC(tree, gc) *parms = VEC_alloc (tree, gc, 1);
- VEC_quick_push (tree, parms, throw_expr);
- t = build_function_call_vec (loc, objc_exception_throw_decl, parms, NULL);
- VEC_free (tree, gc, parms);
+ vec<tree, va_gc> *parms;
+ vec_alloc (parms, 1);
+ parms->quick_push (throw_expr);
+ t = build_function_call_vec (loc, objc_exception_throw_decl, parms, 0);
+ vec_free (parms);
}
return add_stmt (t);
}
/* Receiver is class Object, check runtime-specific. */
tree (*receiver_is_class_object) (tree);
/* Get the start of a method argument type list (receiver, _cmd). */
- void (*get_arg_type_list_base) (VEC(tree,gc) **, tree, int, int);
+ void (*get_arg_type_list_base) (vec<tree, va_gc> **, tree, int, int);
/* Build method call. */
tree (*build_objc_method_call) (location_t, tree, tree, tree, tree, tree, int);
tree
build_descriptor_table_initializer (tree type, tree entries)
{
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
do
{
- VEC(constructor_elt,gc) *elts = NULL;
+ vec<constructor_elt, va_gc> *elts = NULL;
CONSTRUCTOR_APPEND_ELT (elts, NULL_TREE,
build_selector (METHOD_SEL_NAME (entries)));
tree
build_dispatch_table_initializer (tree type, tree entries)
{
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
do
{
- VEC(constructor_elt,gc) *elems = NULL;
+ vec<constructor_elt, va_gc> *elems = NULL;
tree expr;
CONSTRUCTOR_APPEND_ELT (elems, NULL_TREE,
{
tree expr, ltyp;
location_t loc;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
/* No really useful place to point to. */
loc = UNKNOWN_LOCATION;
tree
build_ivar_list_initializer (tree type, tree field_decl)
{
- VEC(constructor_elt,gc) *inits = NULL;
+ vec<constructor_elt, va_gc> *inits = NULL;
do
{
- VEC(constructor_elt,gc) *ivar = NULL;
+ vec<constructor_elt, va_gc> *ivar = NULL;
tree id;
/* Set name. */
extern tree build_sized_array_type (tree, int);
extern tree lookup_interface (tree);
-extern tree objc_build_constructor (tree, VEC(constructor_elt,gc) *);
+extern tree objc_build_constructor (tree, vec<constructor_elt, va_gc> *);
extern tree start_var_decl (tree, const char *);
extern void finish_var_decl (tree, tree);
parallel+workshare call. WS_STMT is the workshare directive being
expanded. */
-static VEC(tree,gc) *
+static vec<tree, va_gc> *
get_ws_args_for (gimple ws_stmt)
{
tree t;
location_t loc = gimple_location (ws_stmt);
- VEC(tree,gc) *ws_args;
+ vec<tree, va_gc> *ws_args;
if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
{
extract_omp_for_data (ws_stmt, &fd, NULL);
- ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));
+ vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
- VEC_quick_push (tree, ws_args, t);
+ ws_args->quick_push (t);
t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
- VEC_quick_push (tree, ws_args, t);
+ ws_args->quick_push (t);
t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
- VEC_quick_push (tree, ws_args, t);
+ ws_args->quick_push (t);
if (fd.chunk_size)
{
t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
- VEC_quick_push (tree, ws_args, t);
+ ws_args->quick_push (t);
}
return ws_args;
the exit of the sections region. */
basic_block bb = single_succ (gimple_bb (ws_stmt));
t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
- ws_args = VEC_alloc (tree, gc, 1);
- VEC_quick_push (tree, ws_args, t);
+ vec_alloc (ws_args, 1);
+ ws_args->quick_push (t);
return ws_args;
}
static void
expand_parallel_call (struct omp_region *region, basic_block bb,
- gimple entry_stmt, VEC(tree,gc) *ws_args)
+ gimple entry_stmt, vec<tree, va_gc> *ws_args)
{
tree t, t1, t2, val, cond, c, clauses;
gimple_stmt_iterator gsi;
enum built_in_function start_ix;
int start_ix2;
location_t clause_loc;
- VEC(tree,gc) *args;
+ vec<tree, va_gc> *args;
clauses = gimple_omp_parallel_clauses (entry_stmt);
t1 = build_fold_addr_expr (t);
t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
- args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
- VEC_quick_push (tree, args, t2);
- VEC_quick_push (tree, args, t1);
- VEC_quick_push (tree, args, val);
- VEC_splice (tree, args, ws_args);
+ vec_alloc (args, 3 + vec_safe_length (ws_args));
+ args->quick_push (t2);
+ args->quick_push (t1);
+ args->quick_push (val);
+ if (ws_args)
+ args->splice (*ws_args);
t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
builtin_decl_explicit (start_ix), args);
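VEC_length and VEC_splice tolerated a null vector argument; their member-function replacements need a real object, which is why the call site above pairs vec_safe_length with an explicit guard before splice. A compact sketch with hypothetical DST and SRC, where SRC may be null:

    vec<tree, va_gc> *dst, *src = NULL;
    vec_alloc (dst, 3 + vec_safe_length (src));  /* vec_safe_length (NULL) == 0.  */
    if (src)
      dst->splice (*src);                        /* Member call: needs non-null SRC.  */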
/* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
static tree
-vec2chain (VEC(tree,gc) *v)
+vec2chain (vec<tree, va_gc> *v)
{
tree chain = NULL_TREE, t;
unsigned ix;
- FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
+ FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
{
DECL_CHAIN (t) = chain;
chain = t;
gimple_stmt_iterator gsi;
gimple entry_stmt, stmt;
edge e;
- VEC(tree,gc) *ws_args;
+ vec<tree, va_gc> *ws_args;
entry_stmt = last_stmt (region->entry);
child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
/* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
- num = VEC_length (tree, child_cfun->local_decls);
+ num = vec_safe_length (child_cfun->local_decls);
for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
{
- t = VEC_index (tree, child_cfun->local_decls, srcidx);
+ t = (*child_cfun->local_decls)[srcidx];
if (DECL_CONTEXT (t) == cfun->decl)
continue;
if (srcidx != dstidx)
- VEC_replace (tree, child_cfun->local_decls, dstidx, t);
+ (*child_cfun->local_decls)[dstidx] = t;
dstidx++;
}
if (dstidx != num)
- VEC_truncate (tree, child_cfun->local_decls, dstidx);
+ vec_safe_truncate (child_cfun->local_decls, dstidx);
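VEC_index and VEC_replace both collapse into operator[], which works on either side of an assignment; for a possibly-null GC vector the subscript is applied to the dereferenced pointer. The loop above is the usual filter-in-place idiom, sketched here over a vec<tree, va_gc> *decls with a stand-in predicate keep_p:

    unsigned srcidx, dstidx, num = vec_safe_length (decls);
    for (srcidx = dstidx = 0; srcidx < num; srcidx++)
      {
        tree t = (*decls)[srcidx];         /* Read: the old VEC_index.  */
        if (!keep_p (t))                   /* keep_p is hypothetical.  */
          continue;
        if (srcidx != dstidx)
          (*decls)[dstidx] = t;            /* Write: the old VEC_replace.  */
        dstidx++;
      }
    if (dstidx != num)
      vec_safe_truncate (decls, dstidx);   /* Drop the rejected tail.  */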
/* Inform the callgraph about the new function. */
DECL_STRUCT_FUNCTION (child_fn)->curr_properties
gimple_stmt_iterator psi;
gimple phi;
edge re, ene;
- edge_var_map_vector head;
+ edge_var_map_vector *head;
edge_var_map *vm;
size_t i;
ene = single_succ_edge (entry_bb);
psi = gsi_start_phis (fin_bb);
- for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
+ for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
gsi_next (&psi), ++i)
{
gimple nphi;
locus = redirect_edge_var_map_location (vm);
add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
}
- gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
+ gcc_assert (!gsi_end_p (psi) && i == head->length ());
redirect_edge_var_map_clear (re);
while (1)
{
expand_omp_sections (struct omp_region *region)
{
tree t, u, vin = NULL, vmain, vnext, l2;
- VEC (tree,heap) *label_vec;
+ vec<tree> label_vec;
unsigned len;
basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
gimple_stmt_iterator si, switch_si;
and a default case to abort if something goes wrong. */
len = EDGE_COUNT (l0_bb->succs);
- /* Use VEC_quick_push on label_vec throughout, since we know the size
+ /* Use vec::quick_push on label_vec throughout, since we know the size
in advance. */
- label_vec = VEC_alloc (tree, heap, len);
+ label_vec.create (len);
/* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
GIMPLE_OMP_SECTIONS statement. */
}
t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
- VEC_quick_push (tree, label_vec, t);
+ label_vec.quick_push (t);
i = 1;
/* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
t = gimple_block_label (s_entry_bb);
u = build_int_cst (unsigned_type_node, casei);
u = build_case_label (u, NULL, t);
- VEC_quick_push (tree, label_vec, u);
+ label_vec.quick_push (u);
si = gsi_last_bb (s_entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
stmt = gimple_build_switch (vmain, u, label_vec);
gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
gsi_remove (&switch_si, true);
- VEC_free (tree, heap, label_vec);
+ label_vec.release ();
si = gsi_start_bb (default_bb);
stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
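label_vec illustrates the other half of the new API: a heap vector with the vl_ptr layout is held by value, so VEC_alloc/VEC_free become the create/release member calls and there is no pointer to compare against NULL. A minimal sketch (LEN and the pushed value are illustrative):

    vec<tree> labels;
    labels.create (len);                 /* Capacity LEN; length stays 0.  */
    for (unsigned i = 0; i < len; i++)
      labels.quick_push (NULL_TREE);     /* Placeholder elements for the sketch.  */
    /* ... consume LABELS ...  */
    labels.release ();                   /* By-value vectors are freed explicitly.  */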
case CLVC_DEFER:
{
- VEC(cl_deferred_option,heap) *vec
- = (VEC(cl_deferred_option,heap) *) *(void **) flag_var;
+ vec<cl_deferred_option> *v
+ = (vec<cl_deferred_option> *) *(void **) flag_var;
cl_deferred_option p = {opt_index, arg, value};
- VEC_safe_push (cl_deferred_option, heap, vec, p);
- *(void **) flag_var = vec;
+ if (!v)
+ v = XCNEW (vec<cl_deferred_option>);
+ v->safe_push (p);
+ *(void **) flag_var = v;
if (set_flag_var)
- *(void **) set_flag_var = vec;
+ *(void **) set_flag_var = v;
}
break;
}
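Here the vector object must itself live on the heap, because the option machinery only has a void * slot to store it in. The XCNEW idiom works because an all-zero vec object is a valid empty vector, so no create () call is needed before the first push; a sketch, with the fields of p as in the hunk:

    vec<cl_deferred_option> *v = XCNEW (vec<cl_deferred_option>);  /* Zeroed == empty.  */
    cl_deferred_option p = { opt_index, arg, value };
    v->safe_push (p);            /* First push allocates the backing store.  */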
#include "tree-pass.h"
typedef const char *const_char_p; /* For DEF_VEC_P. */
-DEF_VEC_P(const_char_p);
-DEF_VEC_ALLOC_P(const_char_p,heap);
-static VEC(const_char_p,heap) *ignored_options;
+static vec<const_char_p> ignored_options;
/* Input file names. */
const char **in_fnames;
static void
postpone_unknown_option_warning (const char *opt)
{
- VEC_safe_push (const_char_p, heap, ignored_options, opt);
+ ignored_options.safe_push (opt);
}
/* Produce a warning for each option previously buffered. */
void
print_ignored_options (void)
{
- while (!VEC_empty (const_char_p, ignored_options))
+ while (!ignored_options.is_empty ())
{
const char *opt;
- opt = VEC_pop (const_char_p, ignored_options);
+ opt = ignored_options.pop ();
warning_at (UNKNOWN_LOCATION, 0,
"unrecognized command line option \"%s\"", opt);
}
{
unsigned int i;
cl_deferred_option *opt;
- VEC(cl_deferred_option,heap) *vec
- = (VEC(cl_deferred_option,heap) *) common_deferred_options;
+ vec<cl_deferred_option> v;
+
+ if (common_deferred_options)
+ v = *((vec<cl_deferred_option> *) common_deferred_options);
+ else
+ v = vec<cl_deferred_option>();
if (flag_dump_all_passed)
enable_rtl_dump_file ();
if (flag_opt_info)
opt_info_switch_p (NULL);
- FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
+ FOR_EACH_VEC_ELT (v, i, opt)
{
switch (opt->opt_index)
{
static const char undocumented_msg[] = N_("This switch lacks documentation");
typedef char *char_p; /* For DEF_VEC_P. */
-DEF_VEC_P(char_p);
-DEF_VEC_ALLOC_P(char_p,heap);
static void handle_param (struct gcc_options *opts,
struct gcc_options *opts_set, location_t loc,
char *r;
char *w;
char *token_start;
- VEC(char_p,heap) *vec = (VEC(char_p,heap) *) *pvec;
+ vec<char_p> *v = (vec<char_p> *) *pvec;
+
+ vec_check_alloc (v, 1);
/* We never free this string. */
tmp = xstrdup (arg);
{
*w++ = '\0';
++r;
- VEC_safe_push (char_p, heap, vec, token_start);
+ v->safe_push (token_start);
token_start = w;
}
if (*r == '\\' && r[1] == ',')
*w++ = *r++;
}
if (*token_start != '\0')
- VEC_safe_push (char_p, heap, vec, token_start);
+ v->safe_push (token_start);
- *pvec = vec;
+ *pvec = v;
}
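vec_check_alloc covers the remaining gap: a heap vector reached through an out-parameter that may not have been allocated yet. It allocates V with the requested reserve only when V is null, after which the normal member calls apply. A sketch with a hypothetical TOKEN:

    vec<char_p> *v = (vec<char_p> *) *pvec;  /* Possibly null on first call.  */
    vec_check_alloc (v, 1);                  /* Allocate, reserving 1, iff null.  */
    v->safe_push (token);
    *pvec = v;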
/* Initialize OPTS and OPTS_SET before using them in parsing options. */
const char *arg;
int value;
} cl_deferred_option;
-DEF_VEC_O(cl_deferred_option);
-DEF_VEC_ALLOC_O(cl_deferred_option,heap);
/* Structure describing a single option-handling callback. */
/* Map from pass id to canonicalized pass name. */
typedef const char *char_ptr;
-DEF_VEC_P(char_ptr);
-DEF_VEC_ALLOC_P(char_ptr, heap);
-static VEC(char_ptr, heap) *pass_tab = NULL;
+static vec<char_ptr> pass_tab = vec<char_ptr>();
/* Callback function for traversing NAME_TO_PASS_MAP. */
struct opt_pass *pass = (*p)->pass;
gcc_assert (pass->static_pass_number > 0);
- gcc_assert (pass_tab);
+ gcc_assert (pass_tab.exists ());
- VEC_replace (char_ptr, pass_tab, pass->static_pass_number,
- (*p)->unique_name);
+ pass_tab[pass->static_pass_number] = (*p)->unique_name;
return 1;
}
if (!flag_dump_passes)
return;
- VEC_safe_grow_cleared (char_ptr, heap,
- pass_tab, passes_by_id_size + 1);
+ pass_tab.safe_grow_cleared (passes_by_id_size + 1);
htab_traverse (name_to_pass_map, pass_traverse, NULL);
}
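Tables indexed by an externally assigned id, such as pass_tab here, are sized with safe_grow_cleared: it extends the vector to the requested length and zero-fills the new slots, making plain subscripting valid for every id below that length. Sketch (the id and name variables are illustrative):

    static vec<char_ptr> pass_tab = vec<char_ptr>();

    pass_tab.safe_grow_cleared (passes_by_id_size + 1);  /* New slots are null.  */
    pass_tab[pass_number] = unique_name;                 /* Direct indexed write.  */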
if (pass->static_pass_number <= 0)
pn = pass->name;
else
- pn = VEC_index (char_ptr, pass_tab, pass->static_pass_number);
+ pn = pass_tab[pass->static_pass_number];
fprintf (stderr, "%*s%-40s%*s:%s%s\n", indent, " ", pn,
(15 - indent < 0 ? 0 : 15 - indent), " ",
typedef struct uid_range *uid_range_p;
-DEF_VEC_P(uid_range_p);
-DEF_VEC_ALLOC_P(uid_range_p, heap);
-static VEC(uid_range_p, heap) *enabled_pass_uid_range_tab = NULL;
-static VEC(uid_range_p, heap) *disabled_pass_uid_range_tab = NULL;
+static vec<uid_range_p>
+ enabled_pass_uid_range_tab = vec<uid_range_p>();
+static vec<uid_range_p>
+ disabled_pass_uid_range_tab = vec<uid_range_p>();
/* Parse option string for -fdisable- and -fenable-
struct opt_pass *pass;
char *range_str, *phase_name;
char *argstr = xstrdup (arg);
- VEC(uid_range_p, heap) **tab = 0;
+ vec<uid_range_p> *tab = 0;
range_str = strchr (argstr,'=');
if (range_str)
else
tab = &disabled_pass_uid_range_tab;
- if ((unsigned) pass->static_pass_number >= VEC_length (uid_range_p, *tab))
- VEC_safe_grow_cleared (uid_range_p, heap,
- *tab, pass->static_pass_number + 1);
+ if ((unsigned) pass->static_pass_number >= tab->length ())
+ tab->safe_grow_cleared (pass->static_pass_number + 1);
if (!range_str)
{
new_range->start = 0;
new_range->last = (unsigned)-1;
- slot = VEC_index (uid_range_p, *tab, pass->static_pass_number);
+ slot = (*tab)[pass->static_pass_number];
new_range->next = slot;
- VEC_replace (uid_range_p, *tab, pass->static_pass_number,
- new_range);
+ (*tab)[pass->static_pass_number] = new_range;
if (is_enable)
inform (UNKNOWN_LOCATION, "enable pass %s for functions in the range "
"of [%u, %u]", phase_name, new_range->start, new_range->last);
new_range->last = (unsigned) last;
}
- slot = VEC_index (uid_range_p, *tab, pass->static_pass_number);
+ slot = (*tab)[pass->static_pass_number];
new_range->next = slot;
- VEC_replace (uid_range_p, *tab, pass->static_pass_number,
- new_range);
+ (*tab)[pass->static_pass_number] = new_range;
if (is_enable)
{
if (new_range->assem_name)
static bool
is_pass_explicitly_enabled_or_disabled (struct opt_pass *pass,
tree func,
- VEC(uid_range_p, heap) *tab)
+ vec<uid_range_p> tab)
{
uid_range_p slot, range;
int cgraph_uid;
const char *aname = NULL;
- if (!tab
- || (unsigned) pass->static_pass_number >= VEC_length (uid_range_p, tab)
+ if (!tab.exists ()
+ || (unsigned) pass->static_pass_number >= tab.length ()
|| pass->static_pass_number == -1)
return false;
- slot = VEC_index (uid_range_p, tab, pass->static_pass_number);
+ slot = tab[pass->static_pass_number];
if (!slot)
return false;
return;
node = cgraph_get_node (current_function_decl);
- if (node->ipa_transforms_to_apply)
+ if (node->ipa_transforms_to_apply.exists ())
{
unsigned int i;
- for (i = 0; i < VEC_length (ipa_opt_pass, node->ipa_transforms_to_apply);
- i++)
- execute_one_ipa_transform_pass (node,
- VEC_index (ipa_opt_pass,
- node->ipa_transforms_to_apply,
- i));
- VEC_free (ipa_opt_pass, heap, node->ipa_transforms_to_apply);
- node->ipa_transforms_to_apply = NULL;
+ for (i = 0; i < node->ipa_transforms_to_apply.length (); i++)
+ execute_one_ipa_transform_pass (node, node->ipa_transforms_to_apply[i]);
+ node->ipa_transforms_to_apply.release ();
}
}
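ipa_transforms_to_apply is now an embedded by-value vector, so the old pointer tests become exists () and freeing becomes release (), which returns the member to the non-existent state. A self-contained sketch of those by-value semantics, assuming the vl_ptr behavior described in the new vec.h:

    vec<int> scratch = vec<int>();   /* Non-existent: no backing store.  */
    gcc_assert (!scratch.exists ());
    scratch.safe_push (1);           /* Allocates on demand.  */
    gcc_assert (scratch.exists () && scratch.length () == 1);
    scratch.release ();              /* Frees and resets to non-existent.  */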
apply_ipa_transforms (void *data)
{
struct cgraph_node *node = cgraph_get_node (current_function_decl);
- if (!node->global.inlined_to && node->ipa_transforms_to_apply)
+ if (!node->global.inlined_to && node->ipa_transforms_to_apply.exists ())
{
*(bool *)data = true;
execute_all_ipa_transforms();
{
struct cgraph_node *node;
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
- VEC_safe_push (ipa_opt_pass, heap, node->ipa_transforms_to_apply,
- (struct ipa_opt_pass_d *)pass);
+ node->ipa_transforms_to_apply.safe_push ((struct ipa_opt_pass_d *)pass);
}
if (!current_function_decl)
{
basic_block bb, *bbs;
unsigned j, n_exits;
- VEC (edge, heap) *exits;
+ vec<edge> exits;
struct tree_niter_desc niter_desc;
edge ex;
struct nb_iter_bound *nb_iter;
gimple stmt = NULL;
exits = get_loop_exit_edges (loop);
- n_exits = VEC_length (edge, exits);
+ n_exits = exits.length ();
if (!n_exits)
{
- VEC_free (edge, heap, exits);
+ exits.release ();
continue;
}
- FOR_EACH_VEC_ELT (edge, exits, j, ex)
+ FOR_EACH_VEC_ELT (exits, j, ex)
{
tree niter = NULL;
HOST_WIDE_INT nitercst;
probability = ((REG_BR_PROB_BASE + nitercst / 2) / nitercst);
predict_edge (ex, predictor, probability);
}
- VEC_free (edge, heap, exits);
+ exits.release ();
/* Find information about loop bound variables. */
for (nb_iter = loop->bounds; nb_iter;
down to a depth of six. */
DEBUG_FUNCTION void
-debug_vec_tree (VEC(tree,gc) *vec)
+debug_vec_tree (vec<tree, va_gc> *vec)
{
table = XCNEWVEC (struct bucket *, HASH_SIZE);
print_vec_tree (stderr, "", vec, 0);
{
unsigned HOST_WIDE_INT cnt;
tree index, value;
- len = VEC_length (constructor_elt, CONSTRUCTOR_ELTS (node));
+ len = vec_safe_length (CONSTRUCTOR_ELTS (node));
fprintf (file, " lngt %d", len);
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node),
cnt, index, value)
starting in column INDENT. */
void
-print_vec_tree (FILE *file, const char *prefix, VEC(tree,gc) *vec, int indent)
+print_vec_tree (FILE *file, const char *prefix, vec<tree, va_gc> *vec, int indent)
{
tree elt;
unsigned ix;
/* Print the slot this node is in, and its code, and address. */
fprintf (file, "%s <VEC", prefix);
- dump_addr (file, " ", vec);
+ dump_addr (file, " ", vec->address ());
- FOR_EACH_VEC_ELT (tree, vec, ix, elt)
+ FOR_EACH_VEC_ELT (*vec, ix, elt)
{
char temp[10];
sprintf (temp, "elt %d", ix);
/* Emit code to generate the histograms before the insns. */
- for (i = 0; i < VEC_length (histogram_value, values); i++)
+ for (i = 0; i < values.length (); i++)
{
- histogram_value hist = VEC_index (histogram_value, values, i);
+ histogram_value hist = values[i];
unsigned t = COUNTER_FOR_HIST_TYPE (hist->type);
if (!coverage_counter_alloc (t, hist->n_counters))
static bool
-is_edge_inconsistent (VEC(edge,gc) *edges)
+is_edge_inconsistent (vec<edge, va_gc> *edges)
{
edge e;
edge_iterator ei;
for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
n_histogram_counters[t] = 0;
- for (i = 0; i < VEC_length (histogram_value, values); i++)
+ for (i = 0; i < values.length (); i++)
{
- histogram_value hist = VEC_index (histogram_value, values, i);
+ histogram_value hist = values[i];
n_histogram_counters[(int) hist->type] += hist->n_counters;
}
if (!any)
return;
- for (i = 0; i < VEC_length (histogram_value, values); i++)
+ for (i = 0; i < values.length (); i++)
{
- histogram_value hist = VEC_index (histogram_value, values, i);
+ histogram_value hist = values[i];
gimple stmt = hist->hvalue.stmt;
t = (int) hist->type;
unsigned num_edges, ignored_edges;
unsigned num_instrumented;
struct edge_list *el;
- histogram_values values = NULL;
+ histogram_values values = histogram_values();
unsigned cfg_checksum, lineno_checksum;
total_num_times_called++;
free_aux_for_edges ();
- VEC_free (histogram_value, heap, values);
+ values.release ();
free_edge_list (el);
coverage_end_function (lineno_checksum, cfg_checksum);
}
a minimum cost flow algorithm. */
extern void mcf_smooth_cfg (void);
-extern gcov_type sum_edge_counts (VEC (edge, gc) *edges);
+extern gcov_type sum_edge_counts (vec<edge, va_gc> *edges);
extern void init_node_map (void);
extern void del_node_map (void);
/* Vector definitions for the above. */
typedef struct mapping *mapping_ptr;
-DEF_VEC_P (mapping_ptr);
-DEF_VEC_ALLOC_P (mapping_ptr, heap);
/* A structure for abstracting the common parts of iterators. */
struct iterator_group {
/* Vector definitions for the above. */
typedef struct iterator_use iterator_use;
-DEF_VEC_O (iterator_use);
-DEF_VEC_ALLOC_O (iterator_use, heap);
/* Records one use of an attribute (the "<[iterator:]attribute>" syntax)
in a non-string rtx field. */
/* Vector definitions for the above. */
typedef struct attribute_use attribute_use;
-DEF_VEC_O (attribute_use);
-DEF_VEC_ALLOC_O (attribute_use, heap);
static void validate_const_int (const char *);
static rtx read_rtx_code (const char *);
static struct iterator_group modes, codes, ints;
/* All iterators used in the current rtx. */
-static VEC (mapping_ptr, heap) *current_iterators;
+static vec<mapping_ptr> current_iterators;
/* The list of all iterator uses in the current rtx. */
-static VEC (iterator_use, heap) *iterator_uses;
+static vec<iterator_use> iterator_uses;
/* The list of all attribute uses in the current rtx. */
-static VEC (attribute_use, heap) *attribute_uses;
+static vec<attribute_use> attribute_uses;
/* Implementations of the iterator_group callbacks for modes. */
attr++;
}
- FOR_EACH_VEC_ELT (mapping_ptr, current_iterators, i, iterator)
+ FOR_EACH_VEC_ELT (current_iterators, i, iterator)
{
/* If an iterator name was specified, check that it matches. */
if (iterator_name_len >= 0
attribute_use *ause;
unsigned int i;
- FOR_EACH_VEC_ELT (attribute_use, attribute_uses, i, ause)
+ FOR_EACH_VEC_ELT (attribute_uses, i, ause)
{
v = map_attr_string (ause->value);
if (!v)
iterator = (struct mapping *) *slot;
if (iterator->current_value)
- VEC_safe_push (mapping_ptr, heap, current_iterators, iterator);
+ current_iterators.safe_push (iterator);
return 1;
}
struct map_value *v;
rtx x;
- if (VEC_empty (iterator_use, iterator_uses))
+ if (iterator_uses.is_empty ())
{
/* Raise an error if any attributes were used. */
apply_attribute_uses ();
}
/* Clear out the iterators from the previous run. */
- FOR_EACH_VEC_ELT (mapping_ptr, current_iterators, i, iterator)
+ FOR_EACH_VEC_ELT (current_iterators, i, iterator)
iterator->current_value = NULL;
- VEC_truncate (mapping_ptr, current_iterators, 0);
+ current_iterators.truncate (0);
/* Mark the iterators that we need this time. */
- FOR_EACH_VEC_ELT (iterator_use, iterator_uses, i, iuse)
+ FOR_EACH_VEC_ELT (iterator_uses, i, iuse)
iuse->iterator->current_value = iuse->iterator->values;
/* Get the list of iterators that are in use, preserving the
htab_traverse (modes.iterators, add_current_iterators, NULL);
htab_traverse (codes.iterators, add_current_iterators, NULL);
htab_traverse (ints.iterators, add_current_iterators, NULL);
- gcc_assert (!VEC_empty (mapping_ptr, current_iterators));
+ gcc_assert (!current_iterators.is_empty ());
for (;;)
{
/* Apply the current iterator values. Accumulate a condition to
say when the resulting rtx can be used. */
condition = NULL;
- FOR_EACH_VEC_ELT (iterator_use, iterator_uses, i, iuse)
+ FOR_EACH_VEC_ELT (iterator_uses, i, iuse)
{
v = iuse->iterator->current_value;
iuse->iterator->group->apply_iterator (iuse->ptr, v->number);
/* Lexicographically increment the iterator value sequence.
That is, cycle through iterator values, starting from the right,
and stopping when one of them doesn't wrap around. */
- i = VEC_length (mapping_ptr, current_iterators);
+ i = current_iterators.length ();
for (;;)
{
if (i == 0)
return;
i--;
- iterator = VEC_index (mapping_ptr, current_iterators, i);
+ iterator = current_iterators[i];
iterator->current_value = iterator->current_value->next;
if (iterator->current_value)
break;
record_iterator_use (struct mapping *iterator, void *ptr)
{
struct iterator_use iuse = {iterator, ptr};
- VEC_safe_push (iterator_use, heap, iterator_uses, iuse);
+ iterator_uses.safe_push (iuse);
}
/* Record that PTR uses attribute VALUE, which must match a built-in
const char *value)
{
struct attribute_use ause = {group, value, ptr};
- VEC_safe_push (attribute_use, heap, attribute_uses, ause);
+ attribute_uses.safe_push (ause);
}
/* Interpret NAME as either a built-in value, iterator or attribute
}
apply_iterators (read_rtx_code (rtx_name), &queue_head);
- VEC_truncate (iterator_use, iterator_uses, 0);
- VEC_truncate (attribute_use, attribute_uses, 0);
+ iterator_uses.truncate (0);
+ attribute_uses.truncate (0);
*x = queue_head;
return true;
rtx insn;
} ext_cand;
-DEF_VEC_O(ext_cand);
-DEF_VEC_ALLOC_O(ext_cand, heap);
static int max_insn_uid;
of the definitions onto DEST. */
static struct df_link *
-get_defs (rtx insn, rtx reg, VEC (rtx,heap) **dest)
+get_defs (rtx insn, rtx reg, vec<rtx> *dest)
{
df_ref reg_info, *uses;
struct df_link *ref_chain, *ref_link;
if (dest)
for (ref_link = ref_chain; ref_link; ref_link = ref_link->next)
- VEC_safe_push (rtx, heap, *dest, DF_REF_INSN (ref_link->ref));
+ dest->safe_push (DF_REF_INSN (ref_link->ref));
return ref_chain;
}
/* Vectors used by combine_reaching_defs and its helpers. */
typedef struct ext_state
{
- /* In order to avoid constant VEC_alloc/VEC_free, we keep these
+ /* In order to avoid constant alloc/free, we keep these
4 vectors live through the entire find_and_remove_re and just
- VEC_truncate them each time. */
- VEC (rtx, heap) *defs_list;
- VEC (rtx, heap) *copies_list;
- VEC (rtx, heap) *modified_list;
- VEC (rtx, heap) *work_list;
+ truncate them each time. */
+ vec<rtx> defs_list;
+ vec<rtx> copies_list;
+ vec<rtx> modified_list;
+ vec<rtx> work_list;
/* For instructions that have been successfully modified, this is
the original mode from which the insn is extending and
bool *is_insn_visited;
bool ret = true;
- VEC_truncate (rtx, state->work_list, 0);
+ state->work_list.truncate (0);
/* Initialize the work list. */
if (!get_defs (extend_insn, src_reg, &state->work_list))
is_insn_visited = XCNEWVEC (bool, max_insn_uid);
/* Perform transitive closure for conditional copies. */
- while (!VEC_empty (rtx, state->work_list))
+ while (!state->work_list.is_empty ())
{
- rtx def_insn = VEC_pop (rtx, state->work_list);
+ rtx def_insn = state->work_list.pop ();
rtx reg1, reg2;
gcc_assert (INSN_UID (def_insn) < max_insn_uid);
if (is_cond_copy_insn (def_insn, ®1, ®2))
{
/* Push it onto the copy list first. */
- VEC_safe_push (rtx, heap, state->copies_list, def_insn);
+ state->copies_list.safe_push (def_insn);
/* Now perform the transitive closure. */
if (!get_defs (def_insn, reg1, &state->work_list)
}
}
else
- VEC_safe_push (rtx, heap, state->defs_list, def_insn);
+ state->defs_list.safe_push (def_insn);
}
XDELETEVEC (is_insn_visited);
int defs_ix;
bool outcome;
- VEC_truncate (rtx, state->defs_list, 0);
- VEC_truncate (rtx, state->copies_list, 0);
+ state->defs_list.truncate (0);
+ state->copies_list.truncate (0);
outcome = make_defs_and_copies_lists (cand->insn, set_pat, state);
/* Go through the defs vector and try to merge all the definitions
in this vector. */
- VEC_truncate (rtx, state->modified_list, 0);
- FOR_EACH_VEC_ELT (rtx, state->defs_list, defs_ix, def_insn)
+ state->modified_list.truncate (0);
+ FOR_EACH_VEC_ELT (state->defs_list, defs_ix, def_insn)
{
if (merge_def_and_ext (cand, def_insn, state))
- VEC_safe_push (rtx, heap, state->modified_list, def_insn);
+ state->modified_list.safe_push (def_insn);
else
{
merge_successful = false;
the copies in this vector. */
if (merge_successful)
{
- FOR_EACH_VEC_ELT (rtx, state->copies_list, i, def_insn)
+ FOR_EACH_VEC_ELT (state->copies_list, i, def_insn)
{
if (transform_ifelse (cand, def_insn))
- VEC_safe_push (rtx, heap, state->modified_list, def_insn);
+ state->modified_list.safe_push (def_insn);
else
{
merge_successful = false;
if (dump_file)
fprintf (dump_file, "All merges were successful.\n");
- FOR_EACH_VEC_ELT (rtx, state->modified_list, i, def_insn)
+ FOR_EACH_VEC_ELT (state->modified_list, i, def_insn)
if (state->modified[INSN_UID (def_insn)].kind == EXT_MODIFIED_NONE)
state->modified[INSN_UID (def_insn)].kind
= (cand->code == ZERO_EXTEND
{
fprintf (dump_file,
"Merge cancelled, non-mergeable definitions:\n");
- FOR_EACH_VEC_ELT (rtx, state->modified_list, i, def_insn)
+ FOR_EACH_VEC_ELT (state->modified_list, i, def_insn)
print_rtl_single (dump_file, def_insn);
}
}
static void
add_removable_extension (const_rtx expr, rtx insn,
- VEC (ext_cand, heap) **insn_list,
+ vec<ext_cand> *insn_list,
unsigned *def_map)
{
enum rtx_code code;
different extension. FIXME: this obviously can be improved. */
for (def = defs; def; def = def->next)
if ((idx = def_map[INSN_UID(DF_REF_INSN (def->ref))])
- && (cand = &VEC_index (ext_cand, *insn_list, idx - 1))
+ && (cand = &(*insn_list)[idx - 1])
&& cand->code != code)
{
if (dump_file)
/* Then add the candidate to the list and insert the reaching definitions
into the definition map. */
ext_cand e = {expr, code, mode, insn};
- VEC_safe_push (ext_cand, heap, *insn_list, e);
- idx = VEC_length (ext_cand, *insn_list);
+ insn_list->safe_push (e);
+ idx = insn_list->length ();
for (def = defs; def; def = def->next)
def_map[INSN_UID(DF_REF_INSN (def->ref))] = idx;
/* Traverse the instruction stream looking for extensions and return the
list of candidates. */
-static VEC (ext_cand, heap)*
+static vec<ext_cand>
find_removable_extensions (void)
{
- VEC (ext_cand, heap) *insn_list = NULL;
+ vec<ext_cand> insn_list = vec<ext_cand>();
basic_block bb;
rtx insn, set;
unsigned *def_map = XCNEWVEC (unsigned, max_insn_uid);
ext_cand *curr_cand;
rtx curr_insn = NULL_RTX;
int num_re_opportunities = 0, num_realized = 0, i;
- VEC (ext_cand, heap) *reinsn_list;
- VEC (rtx, heap) *reinsn_del_list;
+ vec<ext_cand> reinsn_list;
+ vec<rtx> reinsn_del_list;
ext_state state;
/* Construct DU chain to get all reaching definitions of each
df_set_flags (DF_DEFER_INSN_RESCAN);
max_insn_uid = get_max_uid ();
- reinsn_del_list = NULL;
+ reinsn_del_list.create (0);
reinsn_list = find_removable_extensions ();
- state.defs_list = NULL;
- state.copies_list = NULL;
- state.modified_list = NULL;
- state.work_list = NULL;
- if (VEC_empty (ext_cand, reinsn_list))
+ state.defs_list.create (0);
+ state.copies_list.create (0);
+ state.modified_list.create (0);
+ state.work_list.create (0);
+ if (reinsn_list.is_empty ())
state.modified = NULL;
else
state.modified = XCNEWVEC (struct ext_modified, max_insn_uid);
- FOR_EACH_VEC_ELT (ext_cand, reinsn_list, i, curr_cand)
+ FOR_EACH_VEC_ELT (reinsn_list, i, curr_cand)
{
num_re_opportunities++;
if (dump_file)
fprintf (dump_file, "Eliminated the extension.\n");
num_realized++;
- VEC_safe_push (rtx, heap, reinsn_del_list, curr_cand->insn);
+ reinsn_del_list.safe_push (curr_cand->insn);
state.modified[INSN_UID (curr_cand->insn)].deleted = 1;
}
}
/* Delete all useless extensions here in one sweep. */
- FOR_EACH_VEC_ELT (rtx, reinsn_del_list, i, curr_insn)
+ FOR_EACH_VEC_ELT (reinsn_del_list, i, curr_insn)
delete_insn (curr_insn);
- VEC_free (ext_cand, heap, reinsn_list);
- VEC_free (rtx, heap, reinsn_del_list);
- VEC_free (rtx, heap, state.defs_list);
- VEC_free (rtx, heap, state.copies_list);
- VEC_free (rtx, heap, state.modified_list);
- VEC_free (rtx, heap, state.work_list);
+ reinsn_list.release ();
+ reinsn_del_list.release ();
+ state.defs_list.release ();
+ state.copies_list.release ();
+ state.modified_list.release ();
+ state.work_list.release ();
XDELETEVEC (state.modified);
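The ext_state comment above spells out the design: the four scratch vectors are created once, reset with truncate (0) at each use, and released once at the end, so the backing stores are recycled instead of reallocated per candidate. Sketched lifecycle for one of them (the pushed INSN is illustrative):

    ext_state state;
    state.defs_list.create (0);        /* Once, up front.  */
    /* Per candidate: empty it without freeing the storage.  */
    state.defs_list.truncate (0);
    state.defs_list.safe_push (insn);
    /* Once, at the end.  */
    state.defs_list.release ();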
if (dump_file && num_re_opportunities > 0)
#include "tree-pass.h"
#include "target.h"
#include "df.h"
-#include "vecprim.h"
#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
#ifdef STACK_REGS
Indexed by insn UIDs. A value of zero is uninitialized, one indicates
the insn uses stack registers, two indicates the insn does not use
stack registers. */
-static VEC(char,heap) *stack_regs_mentioned_data;
+static vec<char> stack_regs_mentioned_data;
#define REG_STACK_SIZE (LAST_STACK_REG - FIRST_STACK_REG + 1)
unsigned int uid, max;
int test;
- if (! INSN_P (insn) || !stack_regs_mentioned_data)
+ if (! INSN_P (insn) || !stack_regs_mentioned_data.exists ())
return 0;
uid = INSN_UID (insn);
- max = VEC_length (char, stack_regs_mentioned_data);
+ max = stack_regs_mentioned_data.length ();
if (uid >= max)
{
/* Allocate some extra size to avoid too many reallocs, but
do not grow too quickly. */
max = uid + uid / 20 + 1;
- VEC_safe_grow_cleared (char, heap, stack_regs_mentioned_data, max);
+ stack_regs_mentioned_data.safe_grow_cleared (max);
}
- test = VEC_index (char, stack_regs_mentioned_data, uid);
+ test = stack_regs_mentioned_data[uid];
if (test == 0)
{
/* This insn has yet to be examined. Do so now. */
test = stack_regs_mentioned_p (PATTERN (insn)) ? 1 : 2;
- VEC_replace (char, stack_regs_mentioned_data, uid, test);
+ stack_regs_mentioned_data[uid] = test;
}
return test == 1;
int max_uid;
/* Clean up previous run. */
- if (stack_regs_mentioned_data != NULL)
- VEC_free (char, heap, stack_regs_mentioned_data);
+ stack_regs_mentioned_data.release ();
/* See if there is something to do. Flow analysis is quite
expensive so we might save some compilation time. */
/* Allocate a cache for stack_regs_mentioned. */
max_uid = get_max_uid ();
- stack_regs_mentioned_data = VEC_alloc (char, heap, max_uid + 1);
- memset (VEC_address (char, stack_regs_mentioned_data),
+ stack_regs_mentioned_data.create (max_uid + 1);
+ memset (stack_regs_mentioned_data.address (),
0, sizeof (char) * (max_uid + 1));
convert_regs ();
/* If nonnull, the code calling into the register renamer requested
information about insn operands, and we store it here. */
-VEC(insn_rr_info, heap) *insn_rr;
+vec<insn_rr_info> insn_rr;
static void scan_rtx (rtx, rtx *, enum reg_class, enum scan_actions,
enum op_type);
static unsigned current_id;
/* A mapping of unique id numbers to chains. */
-static VEC(du_head_p, heap) *id_to_chain;
+static vec<du_head_p> id_to_chain;
/* List of currently open chains. */
static struct du_head *open_chains;
du_head_p
regrename_chain_from_id (unsigned int id)
{
- du_head_p first_chain = VEC_index (du_head_p, id_to_chain, id);
+ du_head_p first_chain = id_to_chain[id];
du_head_p chain = first_chain;
while (chain->id != id)
{
id = chain->id;
- chain = VEC_index (du_head_p, id_to_chain, id);
+ chain = id_to_chain[id];
}
first_chain->id = id;
return chain;
{
du_head_p head;
int i;
- FOR_EACH_VEC_ELT_FROM (du_head_p, id_to_chain, i, head, from)
+ FOR_EACH_VEC_ELT_FROM (id_to_chain, i, head, from)
{
struct du_chain *this_du = head->first;
{
int i;
du_head_p ptr;
- for (i = 0; VEC_iterate(du_head_p, id_to_chain, i, ptr); i++)
+ for (i = 0; id_to_chain.iterate (i, &ptr); i++)
bitmap_clear (&ptr->conflicts);
- VEC_free (du_head_p, heap, id_to_chain);
+ id_to_chain.release ();
}
/* Walk all chains starting with CHAINS and record that they conflict with
head->need_caller_save_reg = 0;
head->cannot_rename = 0;
- VEC_safe_push (du_head_p, heap, id_to_chain, head);
+ id_to_chain.safe_push (head);
head->id = current_id++;
bitmap_initialize (&head->conflicts, &bitmap_default_obstack);
#endif
}
- FOR_EACH_VEC_ELT (du_head_p, id_to_chain, i, this_head)
+ FOR_EACH_VEC_ELT (id_to_chain, i, this_head)
{
int best_new_reg;
int n_uses;
}
current_id = 0;
- id_to_chain = VEC_alloc (du_head_p, heap, 0);
+ id_to_chain.create (0);
bitmap_initialize (&open_chains_set, &bitmap_default_obstack);
/* The order in which we visit blocks ensures that whenever
bool success;
edge e;
edge_iterator ei;
- int old_length = VEC_length (du_head_p, id_to_chain);
+ int old_length = id_to_chain.length ();
this_info = (struct bb_rename_info *) bb1->aux;
if (this_info == NULL)
if (dump_file)
fprintf (dump_file, "failed\n");
bb1->aux = NULL;
- VEC_truncate (du_head_p, id_to_chain, old_length);
+ id_to_chain.truncate (old_length);
current_id = old_length;
bitmap_clear (&this_info->incoming_open_chains_set);
open_chains = NULL;
- if (insn_rr != NULL)
+ if (insn_rr.exists ())
{
rtx insn;
FOR_BB_INSNS (bb1, insn)
{
- insn_rr_info *p = &VEC_index (insn_rr_info, insn_rr,
- INSN_UID (insn));
+ insn_rr_info *p = &insn_rr[INSN_UID (insn)];
p->op_info = NULL;
}
}
n_ops = recog_data.n_operands;
untracked_operands = 0;
- if (insn_rr != NULL)
+ if (insn_rr.exists ())
{
- insn_info = &VEC_index (insn_rr_info, insn_rr, INSN_UID (insn));
+ insn_info = &insn_rr[INSN_UID (insn)];
insn_info->op_info = XOBNEWVEC (&rename_obstack, operand_rr_info,
recog_data.n_operands);
memset (insn_info->op_info, 0,
regrename_init (bool insn_info)
{
gcc_obstack_init (&rename_obstack);
- insn_rr = NULL;
+ insn_rr.create (0);
if (insn_info)
- VEC_safe_grow_cleared (insn_rr_info, heap, insn_rr, get_max_uid ());
+ insn_rr.safe_grow_cleared (get_max_uid ());
}
/* Free all global data used by the register renamer. */
void
regrename_finish (void)
{
- VEC_free (insn_rr_info, heap, insn_rr);
+ insn_rr.release ();
free_chain_data ();
obstack_free (&rename_obstack, NULL);
}
};
typedef struct du_head *du_head_p;
-DEF_VEC_P (du_head_p);
-DEF_VEC_ALLOC_P (du_head_p, heap);
/* This struct describes a single occurrence of a register. */
struct du_chain
operand_rr_info *op_info;
} insn_rr_info;
-DEF_VEC_O (insn_rr_info);
-DEF_VEC_ALLOC_O (insn_rr_info, heap);
-extern VEC(insn_rr_info, heap) *insn_rr;
+extern vec<insn_rr_info> insn_rr;
extern void regrename_init (bool);
extern void regrename_finish (void);
for (check_regno = 0; check_regno < max_regno; check_regno++)
{
#define CHECK_MODF(ARRAY) \
- gcc_assert (!VEC_index (reg_equivs_t, reg_equivs, check_regno).ARRAY \
+ gcc_assert (!reg_equivs[check_regno].ARRAY \
|| !loc_mentioned_in_p (r->where, \
- VEC_index (reg_equivs_t, reg_equivs, check_regno).ARRAY))
+    reg_equivs[check_regno].ARRAY))
CHECK_MODF (equiv_constant);
CHECK_MODF (equiv_memory_loc);
} reg_equivs_t;
#define reg_equiv_constant(ELT) \
- VEC_index (reg_equivs_t, reg_equivs, (ELT)).constant
+ (*reg_equivs)[(ELT)].constant
#define reg_equiv_invariant(ELT) \
- VEC_index (reg_equivs_t, reg_equivs, (ELT)).invariant
+ (*reg_equivs)[(ELT)].invariant
#define reg_equiv_memory_loc(ELT) \
- VEC_index (reg_equivs_t, reg_equivs, (ELT)).memory_loc
+ (*reg_equivs)[(ELT)].memory_loc
#define reg_equiv_address(ELT) \
- VEC_index (reg_equivs_t, reg_equivs, (ELT)).address
+ (*reg_equivs)[(ELT)].address
#define reg_equiv_mem(ELT) \
- VEC_index (reg_equivs_t, reg_equivs, (ELT)).mem
+ (*reg_equivs)[(ELT)].mem
#define reg_equiv_alt_mem_list(ELT) \
- VEC_index (reg_equivs_t, reg_equivs, (ELT)).alt_mem_list
+ (*reg_equivs)[(ELT)].alt_mem_list
#define reg_equiv_init(ELT) \
- VEC_index (reg_equivs_t, reg_equivs, (ELT)).init
+ (*reg_equivs)[(ELT)].init
-DEF_VEC_O(reg_equivs_t);
-DEF_VEC_ALLOC_O(reg_equivs_t, gc);
-extern VEC(reg_equivs_t,gc) *reg_equivs;
+extern vec<reg_equivs_t, va_gc> *reg_equivs;
/* All the "earlyclobber" operands of the current insn
are recorded here. */
static char *offsets_known_at;
static HOST_WIDE_INT (*offsets_at)[NUM_ELIMINABLE_REGS];
-VEC(reg_equivs_t,gc) *reg_equivs;
+vec<reg_equivs_t, va_gc> *reg_equivs;
/* Stack of addresses where an rtx has been changed. We can undo the
changes by popping items off the stack and restoring the original
rtx expression would be changed. See PR 42431. */
typedef rtx *rtx_p;
-DEF_VEC_P(rtx_p);
-DEF_VEC_ALLOC_P(rtx_p,heap);
-static VEC(rtx_p,heap) *substitute_stack;
+static vec<rtx_p> substitute_stack;
/* Number of labels in the current function. */
void
grow_reg_equivs (void)
{
- int old_size = VEC_length (reg_equivs_t, reg_equivs);
+ int old_size = vec_safe_length (reg_equivs);
int max_regno = max_reg_num ();
int i;
reg_equivs_t ze;
memset (&ze, 0, sizeof (reg_equivs_t));
- VEC_reserve (reg_equivs_t, gc, reg_equivs, max_regno);
+ vec_safe_reserve (reg_equivs, max_regno);
for (i = old_size; i < max_regno; i++)
- VEC_quick_insert (reg_equivs_t, reg_equivs, i, ze);
+ reg_equivs->quick_insert (i, ze);
}
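grow_reg_equivs pairs vec_safe_reserve with quick_insert: the reserve call guarantees capacity up front (allocating the GC vector if it is still null), after which the unchecked quick_insert is legitimate. The contract, restated as a sketch with the names from the hunk:

    vec_safe_reserve (reg_equivs, max_regno);  /* Ensure room; alloc if null.  */
    reg_equivs_t ze;
    memset (&ze, 0, sizeof (reg_equivs_t));
    for (i = old_size; i < max_regno; i++)
      reg_equivs->quick_insert (i, ze);        /* No per-call capacity check.  */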
\f
REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = BITS_PER_UNIT;
#endif
- VEC_free (rtx_p, heap, substitute_stack);
+ substitute_stack.release ();
gcc_assert (bitmap_empty_p (&spilled_pseudos));
}
else if (reg_renumber[regno] < 0
- && reg_equivs != 0
+ && reg_equivs
&& reg_equiv_constant (regno)
&& ! function_invariant_p (reg_equiv_constant (regno)))
elimination_effects (reg_equiv_constant (regno), mem_mode);
if (REG_P (SUBREG_REG (x))
&& (GET_MODE_SIZE (GET_MODE (x))
<= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
- && reg_equivs != 0
+ && reg_equivs
&& reg_equiv_memory_loc (REGNO (SUBREG_REG (x))) != 0)
return;
{
int i;
-
free (offsets_known_at);
free (offsets_at);
offsets_at = 0;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (reg_equiv_alt_mem_list (i))
free_EXPR_LIST_list (&reg_equiv_alt_mem_list (i));
- VEC_free (reg_equivs_t, gc, reg_equivs);
- reg_equivs = NULL;
-
+ vec_free (reg_equivs);
}
\f
/* Kick all pseudos out of hard register REGNO.
if (*where == what || rtx_equal_p (*where, what))
{
/* Record the location of the changed rtx. */
- VEC_safe_push (rtx_p, heap, substitute_stack, where);
+ substitute_stack.safe_push (where);
*where = repl;
return;
}
}
/* Restore the original value at each changed address within R1. */
- while (!VEC_empty (rtx_p, substitute_stack))
+ while (!substitute_stack.is_empty ())
{
- rtx *where = VEC_pop (rtx_p, substitute_stack);
+ rtx *where = substitute_stack.pop ();
*where = rld[r2].in;
}
#include "input.h"
#include "real.h"
#include "vec.h"
-#include "vecir.h"
#include "fixed-value.h"
#include "alias.h"
#include "hashtab.h"
!SYMBOL_REF_ANCHOR_P (X)
SYMBOL_REF_BLOCK (X) == [address of this structure]
SYMBOL_REF_BLOCK_OFFSET (X) >= 0. */
- VEC(rtx,gc) *objects;
+ vec<rtx, va_gc> *objects;
/* All the anchor SYMBOL_REFs used to address these objects, sorted
in order of increasing offset, and then increasing TLS model.
SYMBOL_REF_ANCHOR_P (X)
SYMBOL_REF_BLOCK (X) == [address of this structure]
SYMBOL_REF_BLOCK_OFFSET (X) >= 0. */
- VEC(rtx,gc) *anchors;
+ vec<rtx, va_gc> *anchors;
};
/* RTL expression ("rtx"). */
struct sched_deps_info_def *sched_deps_info;
/* The data is specific to the Haifa scheduler. */
-VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
+vec<haifa_deps_insn_data_def>
+ h_d_i_d = vec<haifa_deps_insn_data_def>();
/* Return the major type present in the DS. */
enum reg_note
static void
init_deps_data_vector (void)
{
- int reserve = (sched_max_luid + 1
- - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
- if (reserve > 0
- && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
- VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
- 3 * sched_max_luid / 2);
+ int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
+ if (reserve > 0 && ! h_d_i_d.space (reserve))
+ h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
}
/* If it is profitable to use them, initialize or extend (depending on
free_alloc_pool_if_empty (&dl_pool);
gcc_assert (dn_pool == NULL && dl_pool == NULL);
- VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
+ h_d_i_d.release ();
cache_size = 0;
if (true_dependency_cache)
#include "df.h"
#include "basic-block.h"
-/* For VEC (int, heap). */
-#include "vecprim.h"
-
/* Identificator of a scheduler pass. */
enum sched_pass_id_t { SCHED_PASS_UNKNOWN, SCHED_RGN_PASS, SCHED_EBB_PASS,
SCHED_SMS_PASS, SCHED_SEL_PASS };
SCHED_PRESSURE_MODEL
};
-typedef VEC (basic_block, heap) *bb_vec_t;
-typedef VEC (rtx, heap) *insn_vec_t;
-typedef VEC (rtx, heap) *rtx_vec_t;
+typedef vec<basic_block> bb_vec_t;
+typedef vec<rtx> insn_vec_t;
+typedef vec<rtx> rtx_vec_t;
extern void sched_init_bbs (void);
/* Mapping from INSN_UID to INSN_LUID. In the end all other per insn data
structures should be indexed by luid. */
-extern VEC (int, heap) *sched_luids;
-#define INSN_LUID(INSN) (VEC_index (int, sched_luids, INSN_UID (INSN)))
-#define LUID_BY_UID(UID) (VEC_index (int, sched_luids, UID))
+extern vec<int> sched_luids;
+#define INSN_LUID(INSN) (sched_luids[INSN_UID (INSN)])
+#define LUID_BY_UID(UID) (sched_luids[UID])
#define SET_INSN_LUID(INSN, LUID) \
-(VEC_replace (int, sched_luids, INSN_UID (INSN), (LUID)))
+(sched_luids[INSN_UID (INSN)] = (LUID))
/* The highest INSN_LUID. */
extern int sched_max_luid;
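VEC_index and VEC_replace both collapse into operator[], which returns a mutable reference; that is why SET_INSN_LUID becomes a plain assignment above. Sketch (assumes vec.h):

  vec<int> luids = vec<int>();
  luids.safe_grow_cleared (100);  /* length 100, zero-filled */
  luids[42] = 7;                  /* was: VEC_replace (int, luids, 42, 7) */
  gcc_assert (luids[42] == 7);    /* was: VEC_index (int, luids, 42) */
  luids.release ();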
typedef struct _haifa_insn_data haifa_insn_data_def;
typedef haifa_insn_data_def *haifa_insn_data_t;
-DEF_VEC_O (haifa_insn_data_def);
-DEF_VEC_ALLOC_O (haifa_insn_data_def, heap);
-extern VEC(haifa_insn_data_def, heap) *h_i_d;
+extern vec<haifa_insn_data_def> h_i_d;
-#define HID(INSN) (&VEC_index (haifa_insn_data_def, h_i_d, INSN_UID (INSN)))
+#define HID(INSN) (&h_i_d[INSN_UID (INSN)])
/* Accessor macros for h_i_d. There are more in haifa-sched.c and
sched-rgn.c. */
typedef struct _haifa_deps_insn_data haifa_deps_insn_data_def;
typedef haifa_deps_insn_data_def *haifa_deps_insn_data_t;
-DEF_VEC_O (haifa_deps_insn_data_def);
-DEF_VEC_ALLOC_O (haifa_deps_insn_data_def, heap);
-extern VEC(haifa_deps_insn_data_def, heap) *h_d_i_d;
+extern vec<haifa_deps_insn_data_def> h_d_i_d;
-#define HDID(INSN) (&VEC_index (haifa_deps_insn_data_def, h_d_i_d, \
- INSN_LUID (INSN)))
+#define HDID(INSN) (&h_d_i_d[INSN_LUID (INSN)])
#define INSN_DEP_COUNT(INSN) (HDID (INSN)->dep_count)
#define HAS_INTERNAL_DEP(INSN) (HDID (INSN)->has_internal_dep)
#define INSN_FORW_DEPS(INSN) (HDID (INSN)->forw_deps)
#define INSN_REVERSE_COND(INSN) (HDID (INSN)->reverse_cond)
#define INSN_COND_DEPS(INSN) (HDID (INSN)->cond_deps)
#define CANT_MOVE(INSN) (HDID (INSN)->cant_move)
-#define CANT_MOVE_BY_LUID(LUID) (VEC_index (haifa_deps_insn_data_def, h_d_i_d, \
- LUID).cant_move)
+#define CANT_MOVE_BY_LUID(LUID) (h_d_i_d[LUID].cant_move)
#define INSN_PRIORITY(INSN) (HID (INSN)->priority)
/* Declarations whose debug info was deferred till end of compilation. */
-static GTY(()) VEC(tree,gc) *deferred_global_decls;
+static GTY(()) vec<tree, va_gc> *deferred_global_decls;
/* The C front end may call sdbout_symbol before sdbout_init runs.
We save all such decls in this list and output them when we get
if (!DECL_INITIAL (decl) || !TREE_PUBLIC (decl))
sdbout_symbol (decl, 0);
else
- VEC_safe_push (tree, gc, deferred_global_decls, decl);
+ vec_safe_push (deferred_global_decls, decl);
/* Output COFF information for non-global file-scope initialized
variables. */
size_t i;
tree decl;
- FOR_EACH_VEC_ELT (tree, deferred_global_decls, i, decl)
+ FOR_EACH_VEC_SAFE_ELT (deferred_global_decls, i, decl)
sdbout_symbol (decl, 0);
}
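Note the macro split visible here: FOR_EACH_VEC_ELT iterates a vec value, while FOR_EACH_VEC_SAFE_ELT takes a pointer to a possibly-NULL GC'd vec, which is what deferred_global_decls is. Illustrative sketch (assumes vec.h):

  vec<tree, va_gc> *gv = NULL;
  unsigned i;
  tree t;
  FOR_EACH_VEC_SAFE_ELT (gv, i, t)  /* tolerates gv == NULL; zero iterations */
    gcc_unreachable ();
  vec<tree> hv = vec<tree>();
  hv.safe_push (integer_zero_node);
  FOR_EACH_VEC_ELT (hv, i, t)       /* value form: takes the vec itself */
    debug_tree (t);
  hv.release ();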
{
tree t;
- deferred_global_decls = VEC_alloc (tree, gc, 12);
+ vec_alloc (deferred_global_decls, 12);
/* Emit debug information which was queued by sdbout_symbol before
we got here. */
int i;
rtx succ;
- FOR_EACH_VEC_ELT (rtx, succs, i, succ)
+ FOR_EACH_VEC_ELT (succs, i, succ)
if (succ)
dump_insn (succ);
else
/* '+' before insn means it is a new cycle start and it's not been
scheduled yet. '>' - has been scheduled. */
- if (s_i_d && INSN_LUID (insn) > 0)
+ if (s_i_d.exists () && INSN_LUID (insn) > 0)
if (GET_MODE (insn) == TImode)
sprintf (buf, "%s %4d",
INSN_SCHED_TIMES (insn) > 0 ? "> " : "< ",
{
int insn_flags = DUMP_INSN_UID | DUMP_INSN_PATTERN;
- if (sched_luids != NULL && INSN_LUID (insn) > 0)
+ if (sched_luids.exists () && INSN_LUID (insn) > 0)
{
if (flags & SEL_DUMP_CFG_INSN_SEQNO)
insn_flags |= DUMP_INSN_SEQNO | DUMP_INSN_SCHED_CYCLE | DUMP_INSN_EXPR;
#include "sel-sched-dump.h"
/* A vector holding bb info for whole scheduling pass. */
-VEC(sel_global_bb_info_def, heap) *sel_global_bb_info = NULL;
+vec<sel_global_bb_info_def>
+ sel_global_bb_info = vec<sel_global_bb_info_def>();
/* A vector holding bb info. */
-VEC(sel_region_bb_info_def, heap) *sel_region_bb_info = NULL;
+vec<sel_region_bb_info_def>
+ sel_region_bb_info = vec<sel_region_bb_info_def>();
/* A pool for allocating all lists. */
alloc_pool sched_lists_pool;
/* LOOP_NESTS is a vector containing the corresponding loop nest for
each region. */
-static VEC(loop_p, heap) *loop_nests = NULL;
+static vec<loop_p> loop_nests = vec<loop_p>();
/* Saves blocks already in loop regions, indexed by bb->index. */
static sbitmap bbs_in_loop_rgns = NULL;
static void cfg_preds (basic_block, insn_t **, int *);
static void prepare_insn_expr (insn_t, int);
-static void free_history_vect (VEC (expr_history_def, heap) **);
+static void free_history_vect (vec<expr_history_def> &);
static void move_bb_info (basic_block, basic_block);
static void remove_empty_bb (basic_block, bool);
/* Add new fence consisting of INSN and STATE to the list pointed to by LP. */
static void
flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
- insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
+ insn_t last_scheduled_insn, vec<rtx, va_gc> *executing_insns,
int *ready_ticks, int ready_ticks_size, insn_t sched_next,
int cycle, int cycle_issued_insns, int issue_more,
bool starts_cycle_p, bool after_stall_p)
if (tc != NULL)
delete_target_context (tc);
- VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
+ vec_free (FENCE_EXECUTING_INSNS (f));
free (FENCE_READY_TICKS (f));
FENCE_READY_TICKS (f) = NULL;
}
static void
merge_fences (fence_t f, insn_t insn,
state_t state, deps_t dc, void *tc,
- rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
+ rtx last_scheduled_insn, vec<rtx, va_gc> *executing_insns,
int *ready_ticks, int ready_ticks_size,
rtx sched_next, int cycle, int issue_more, bool after_stall_p)
{
FENCE_LAST_SCHEDULED_INSN (f) = NULL;
FENCE_ISSUE_MORE (f) = issue_rate;
- VEC_free (rtx, gc, executing_insns);
+ vec_free (executing_insns);
free (ready_ticks);
if (FENCE_EXECUTING_INSNS (f))
- VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
- VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
+ FENCE_EXECUTING_INSNS (f)->block_remove (0,
+ FENCE_EXECUTING_INSNS (f)->length ());
if (FENCE_READY_TICKS (f))
memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
}
{
reset_deps_context (FENCE_DC (f));
delete_deps_context (dc);
- VEC_free (rtx, gc, executing_insns);
+ vec_free (executing_insns);
free (ready_ticks);
FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
if (FENCE_EXECUTING_INSNS (f))
- VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
- VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
+ FENCE_EXECUTING_INSNS (f)->block_remove (0,
+ FENCE_EXECUTING_INSNS (f)->length ());
if (FENCE_READY_TICKS (f))
memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
}
{
delete_deps_context (FENCE_DC (f));
FENCE_DC (f) = dc;
- VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
+ vec_free (FENCE_EXECUTING_INSNS (f));
FENCE_EXECUTING_INSNS (f) = executing_insns;
free (FENCE_READY_TICKS (f));
FENCE_READY_TICKS (f) = ready_ticks;
{
/* Leave DC and CYCLE untouched. */
delete_deps_context (dc);
- VEC_free (rtx, gc, executing_insns);
+ vec_free (executing_insns);
free (ready_ticks);
}
}
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
- VEC(rtx, gc) *executing_insns, int *ready_ticks,
+ vec<rtx, va_gc> *executing_insns, int *ready_ticks,
int ready_ticks_size, rtx sched_next, int cycle,
int cycle_issued_insns, int issue_rate,
bool starts_cycle_p, bool after_stall_p)
create_copy_of_deps_context (FENCE_DC (fence)),
create_copy_of_target_context (FENCE_TC (fence)),
FENCE_LAST_SCHEDULED_INSN (fence),
- VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
+ vec_safe_copy (FENCE_EXECUTING_INSNS (fence)),
new_ready_ticks,
FENCE_READY_TICKS_SIZE (fence),
FENCE_SCHED_NEXT (fence),
the search has stopped, such that inserting the new element at INDP will
retain VECT's sort order. */
static bool
-find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
+find_in_history_vect_1 (vec<expr_history_def> vect,
unsigned uid, vinsn_t new_vinsn,
bool compare_vinsns, int *indp)
{
expr_history_def *arr;
- int i, j, len = VEC_length (expr_history_def, vect);
+ int i, j, len = vect.length ();
if (len == 0)
{
return false;
}
- arr = VEC_address (expr_history_def, vect);
+ arr = vect.address ();
i = 0, j = len - 1;
while (i <= j)
the position found or -1, if no such value is in vector.
Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */
int
-find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
+find_in_history_vect (vec<expr_history_def> vect, rtx insn,
vinsn_t new_vinsn, bool originators_p)
{
int ind;
UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save
the history of a transformation. */
void
-insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
+insert_in_history_vect (vec<expr_history_def> *pvect,
unsigned uid, enum local_trans_type type,
vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
ds_t spec_ds)
{
- VEC(expr_history_def, heap) *vect = *pvect;
+ vec<expr_history_def> vect = *pvect;
expr_history_def temp;
bool res;
int ind;
if (res)
{
- expr_history_def *phist = &VEC_index (expr_history_def, vect, ind);
+ expr_history_def *phist = &vect[ind];
/* It is possible that speculation types of expressions that were
propagated through different paths will be different here. In this
vinsn_attach (old_expr_vinsn);
vinsn_attach (new_expr_vinsn);
- VEC_safe_insert (expr_history_def, heap, vect, ind, temp);
+ vect.safe_insert (ind, temp);
*pvect = vect;
}
/* Free history vector PVECT. */
static void
-free_history_vect (VEC (expr_history_def, heap) **pvect)
+free_history_vect (vec<expr_history_def> &pvect)
{
unsigned i;
expr_history_def *phist;
- if (! *pvect)
+ if (! pvect.exists ())
return;
- for (i = 0;
- VEC_iterate (expr_history_def, *pvect, i, phist);
- i++)
+ for (i = 0; pvect.iterate (i, &phist); i++)
{
vinsn_detach (phist->old_expr_vinsn);
vinsn_detach (phist->new_expr_vinsn);
}
- VEC_free (expr_history_def, heap, *pvect);
- *pvect = NULL;
+ pvect.release ();
}
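Because the new vec is a value type, free_history_vect can take a plain reference instead of the old pointer-to-pointer: release () both frees the storage and leaves the caller's vector in the empty state. The caller's view, sketched (not from the patch):

  vec<expr_history_def> h = vec<expr_history_def>();
  /* ... filled via insert_in_history_vect (&h, ...) ... */
  free_history_vect (h);
  gcc_assert (!h.exists ());  /* release () left the vec empty */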
/* Merge vector FROM to PVECT. */
static void
-merge_history_vect (VEC (expr_history_def, heap) **pvect,
- VEC (expr_history_def, heap) *from)
+merge_history_vect (vec<expr_history_def> *pvect,
+ vec<expr_history_def> from)
{
expr_history_def *phist;
int i;
/* We keep this vector sorted. */
- for (i = 0; VEC_iterate (expr_history_def, from, i, phist); i++)
+ for (i = 0; from.iterate (i, &phist); i++)
insert_in_history_vect (pvect, phist->uid, phist->type,
phist->old_expr_vinsn, phist->new_expr_vinsn,
phist->spec_ds);
init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
int sched_times, int orig_bb_index, ds_t spec_done_ds,
ds_t spec_to_check_ds, int orig_sched_cycle,
- VEC(expr_history_def, heap) *history, signed char target_available,
+ vec<expr_history_def> history,
+ signed char target_available,
bool was_substituted, bool was_renamed, bool needs_spec_check_p,
bool cant_move)
{
EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;
- if (history)
+ if (history.exists ())
EXPR_HISTORY_OF_CHANGES (expr) = history;
else
- EXPR_HISTORY_OF_CHANGES (expr) = NULL;
+ EXPR_HISTORY_OF_CHANGES (expr).create (0);
EXPR_TARGET_AVAILABLE (expr) = target_available;
EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
void
copy_expr (expr_t to, expr_t from)
{
- VEC(expr_history_def, heap) *temp = NULL;
+ vec<expr_history_def> temp = vec<expr_history_def>();
- if (EXPR_HISTORY_OF_CHANGES (from))
+ if (EXPR_HISTORY_OF_CHANGES (from).exists ())
{
unsigned i;
expr_history_def *phist;
- temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
+ temp = EXPR_HISTORY_OF_CHANGES (from).copy ();
for (i = 0;
- VEC_iterate (expr_history_def, temp, i, phist);
+ temp.iterate (i, &phist);
i++)
{
vinsn_attach (phist->old_expr_vinsn);
{
init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
- EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, NULL,
+ EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0,
+ vec<expr_history_def>(),
EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
EXPR_CANT_MOVE (from));
if (ds)
EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);
- free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
+ free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
}
/* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT
vinsn_detach (EXPR_VINSN (expr));
EXPR_VINSN (expr) = NULL;
- free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
+ free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
}
/* For a given LV_SET, mark EXPR having unavailable target register. */
ssi->extend_bb ();
if (ssi->init_bb)
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
ssi->init_bb (bb);
if (ssi->extend_insn)
ssi->extend_insn ();
if (ssi->init_insn)
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
{
rtx insn;
/* Initialize INSN's expr. */
init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
- spec_done_ds, 0, 0, NULL, true, false, false, false,
- CANT_MOVE (insn));
+ spec_done_ds, 0, 0, vec<expr_history_def>(), true,
+ false, false, false, CANT_MOVE (insn));
}
init_first_time_insn_data (insn);
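With the history argument passed by value, the old NULL sentinel becomes a default-constructed vec<expr_history_def>(), which init_expr detects via exists () rather than a NULL compare. Sketch of the equivalence (assumes vec.h):

  vec<expr_history_def> no_history = vec<expr_history_def>();
  gcc_assert (!no_history.exists ());  /* matches init_expr's history.exists () test */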
bb_vec_t bbs;
int i;
- bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
+ bbs.create (current_nr_blocks);
for (i = 0; i < current_nr_blocks; i++)
- VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
+ bbs.quick_push (BASIC_BLOCK (BB_TO_BLOCK (i)));
/* Clear AV_SETs and INSN_EXPRs. */
{
sched_scan (&ssi, bbs);
}
- VEC_free (basic_block, heap, bbs);
+ bbs.release ();
}
finish_insns ();
bool
sel_insn_is_speculation_check (rtx insn)
{
- return s_i_d && !! INSN_SPEC_CHECKED_DS (insn);
+ return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn);
}
/* Extracts machine mode MODE and destination location DST_LOC
maybe_tidy_empty_bb (basic_block bb)
{
basic_block succ_bb, pred_bb, note_bb;
- VEC (basic_block, heap) *dom_bbs;
+ vec<basic_block> dom_bbs;
edge e;
edge_iterator ei;
bool rescan_p;
succ_bb = single_succ (bb);
rescan_p = true;
pred_bb = NULL;
- dom_bbs = NULL;
+ dom_bbs.create (0);
/* Save a pred/succ from the current region to attach the notes to. */
note_bb = NULL;
sel_redirect_edge_and_branch will take care of it. */
if (e->dest != bb
&& single_pred_p (e->dest))
- VEC_safe_push (basic_block, heap, dom_bbs, e->dest);
+ dom_bbs.safe_push (e->dest);
sel_redirect_edge_and_branch (e, succ_bb);
rescan_p = true;
break;
remove_empty_bb (bb, true);
}
- if (!VEC_empty (basic_block, dom_bbs))
+ if (!dom_bbs.is_empty ())
{
- VEC_safe_push (basic_block, heap, dom_bbs, succ_bb);
+ dom_bbs.safe_push (succ_bb);
iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
- VEC_free (basic_block, heap, dom_bbs);
+ dom_bbs.release ();
}
return true;
void
sel_extend_global_bb_info (void)
{
- VEC_safe_grow_cleared (sel_global_bb_info_def, heap, sel_global_bb_info,
- last_basic_block);
+ sel_global_bb_info.safe_grow_cleared (last_basic_block);
}
/* Extend region-scope data structures for basic blocks. */
static void
extend_region_bb_info (void)
{
- VEC_safe_grow_cleared (sel_region_bb_info_def, heap, sel_region_bb_info,
- last_basic_block);
+ sel_region_bb_info.safe_grow_cleared (last_basic_block);
}
/* Extend all data structures to fit for all basic blocks. */
void
sel_finish_global_bb_info (void)
{
- VEC_free (sel_global_bb_info_def, heap, sel_global_bb_info);
+ sel_global_bb_info.release ();
}
/* Finalize region-scope data structures for basic blocks. */
static void
finish_region_bb_info (void)
{
- VEC_free (sel_region_bb_info_def, heap, sel_region_bb_info);
+ sel_region_bb_info.release ();
}
\f
/* Data for each insn in current region. */
-VEC (sel_insn_data_def, heap) *s_i_d = NULL;
+vec<sel_insn_data_def> s_i_d = vec<sel_insn_data_def>();
/* Extend data structures for insns from current region. */
static void
sched_deps_init (false);
/* Extend data structures for insns from current region. */
- reserve = (sched_max_luid + 1
- - VEC_length (sel_insn_data_def, s_i_d));
- if (reserve > 0
- && ! VEC_space (sel_insn_data_def, s_i_d, reserve))
+ reserve = (sched_max_luid + 1 - s_i_d.length ());
+ if (reserve > 0 && ! s_i_d.space (reserve))
{
int size;
size = 3 * sched_max_luid / 2;
- VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
+ s_i_d.safe_grow_cleared (size);
}
}
/* Clear here all dependence contexts that may have left from insns that were
removed during the scheduling. */
- for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++)
+ for (i = 0; i < s_i_d.length (); i++)
{
- sel_insn_data_def *sid_entry = &VEC_index (sel_insn_data_def, s_i_d, i);
+ sel_insn_data_def *sid_entry = &s_i_d[i];
if (sid_entry->live)
return_regset_to_pool (sid_entry->live);
}
}
- VEC_free (sel_insn_data_def, heap, s_i_d);
+ s_i_d.release ();
}
/* A proxy to pass initialization data to init_insn (). */
init_simplejump_data (insn_t insn)
{
init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
- REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false,
+ REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0,
+ vec<expr_history_def>(), true, false, false,
false, true);
INSN_SEQNO (insn) = get_seqno_for_a_jump (insn);
init_first_time_insn_data (insn);
/* The basic block that already has been processed by the sched_data_update (),
but hasn't been in sel_add_bb () yet. */
-static VEC (basic_block, heap) *last_added_blocks = NULL;
+static vec<basic_block>
+ last_added_blocks = vec<basic_block>();
/* A pool for allocating successor infos. */
static struct
gcc_unreachable ();
i = ++succs_info_pool.top;
- succs_info_pool.stack[i].succs_ok = VEC_alloc (rtx, heap, 10);
- succs_info_pool.stack[i].succs_other = VEC_alloc (rtx, heap, 10);
- succs_info_pool.stack[i].probs_ok = VEC_alloc (int, heap, 10);
+ succs_info_pool.stack[i].succs_ok.create (10);
+ succs_info_pool.stack[i].succs_other.create (10);
+ succs_info_pool.stack[i].probs_ok.create (10);
}
else
succs_info_pool.top++;
succs_info_pool.top--;
/* Clear stale info. */
- VEC_block_remove (rtx, sinfo->succs_ok,
- 0, VEC_length (rtx, sinfo->succs_ok));
- VEC_block_remove (rtx, sinfo->succs_other,
- 0, VEC_length (rtx, sinfo->succs_other));
- VEC_block_remove (int, sinfo->probs_ok,
- 0, VEC_length (int, sinfo->probs_ok));
+ sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ());
+ sinfo->succs_other.block_remove (0, sinfo->succs_other.length ());
+ sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ());
sinfo->all_prob = 0;
sinfo->succs_ok_n = 0;
sinfo->all_succs_n = 0;
if (current_flags & flags)
{
- VEC_safe_push (rtx, heap, sinfo->succs_ok, succ);
- VEC_safe_push (int, heap, sinfo->probs_ok,
- /* FIXME: Improve calculation when skipping
- inner loop to exits. */
- (si.bb_end
- ? si.e1->probability
- : REG_BR_PROB_BASE));
+ sinfo->succs_ok.safe_push (succ);
+ sinfo->probs_ok.safe_push (
+ /* FIXME: Improve calculation when skipping
+ inner loop to exits. */
+ si.bb_end ? si.e1->probability : REG_BR_PROB_BASE);
sinfo->succs_ok_n++;
}
else
- VEC_safe_push (rtx, heap, sinfo->succs_other, succ);
+ sinfo->succs_other.safe_push (succ);
/* Compute all_prob. */
if (!si.bb_end)
/* It turns out that current cfg infrastructure does not support
reuse of basic blocks. Don't bother for now. */
- /*VEC_safe_push (rtx, heap, bb_note_pool, note);*/
+ /*bb_note_pool.safe_push (note);*/
}
/* Get a bb_note from pool or return NULL_RTX if pool is empty. */
static rtx
get_bb_note_from_pool (void)
{
- if (VEC_empty (rtx, bb_note_pool))
+ if (bb_note_pool.is_empty ())
return NULL_RTX;
else
{
- rtx note = VEC_pop (rtx, bb_note_pool);
+ rtx note = bb_note_pool.pop ();
PREV_INSN (note) = NULL_RTX;
NEXT_INSN (note) = NULL_RTX;
void
free_bb_note_pool (void)
{
- VEC_free (rtx, heap, bb_note_pool);
+ bb_note_pool.release ();
}
/* Setup scheduler pool and successor structure. */
gcc_assert (succs_info_pool.top == -1);
for (i = 0; i < succs_info_pool.max_top; i++)
{
- VEC_free (rtx, heap, succs_info_pool.stack[i].succs_ok);
- VEC_free (rtx, heap, succs_info_pool.stack[i].succs_other);
- VEC_free (int, heap, succs_info_pool.stack[i].probs_ok);
+ succs_info_pool.stack[i].succs_ok.release ();
+ succs_info_pool.stack[i].succs_other.release ();
+ succs_info_pool.stack[i].probs_ok.release ();
}
free (succs_info_pool.stack);
}
/* When bb is passed explicitly, the vector should contain
the single element that equals bb; otherwise, the vector
must not be empty. */
- gcc_assert (last_added_blocks != NULL);
+ gcc_assert (last_added_blocks.exists ());
if (bb != NULL)
{
- gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
- && VEC_index (basic_block,
- last_added_blocks, 0) == bb);
+ gcc_assert (last_added_blocks.length () == 1
+ && last_added_blocks[0] == bb);
add_block_to_current_region (bb);
/* We associate creating/deleting data sets with the first insn
if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
create_initial_data_sets (bb);
- VEC_free (basic_block, heap, last_added_blocks);
+ last_added_blocks.release ();
}
else
/* BB is NULL - process LAST_ADDED_BLOCKS instead. */
basic_block temp_bb = NULL;
for (i = 0;
- VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
+ last_added_blocks.iterate (i, &bb); i++)
{
add_block_to_current_region (bb);
temp_bb = bb;
gcc_assert (temp_bb != NULL);
bb = temp_bb;
- VEC_free (basic_block, heap, last_added_blocks);
+ last_added_blocks.release ();
}
rgn_setup_region (CONTAINING_RGN (bb->index));
insn_t new_bb_note;
gcc_assert (flag_sel_sched_pipelining_outer_loops
- || last_added_blocks == NULL);
+ || !last_added_blocks.exists ());
new_bb_note = get_bb_note_from_pool ();
new_bb->aux = NULL;
}
- VEC_safe_push (basic_block, heap, last_added_blocks, new_bb);
+ last_added_blocks.safe_push (new_bb);
return new_bb;
}
/* Some of the basic blocks might not have been added to the loop.
Add them here, until this is fixed in force_fallthru. */
for (i = 0;
- VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
+ last_added_blocks.iterate (i, &bb); i++)
if (!bb->loop_father)
{
add_bb_to_loop (bb, e->dest->loop_father);
/* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
later. */
- gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
- && VEC_index (basic_block, last_added_blocks, 0) == new_bb);
+ gcc_assert (last_added_blocks.length () == 1
+ && last_added_blocks[0] == new_bb);
- VEC_free (basic_block, heap, last_added_blocks);
+ last_added_blocks.release ();
return new_bb;
}
redirected = redirect_edge_and_branch (e, to);
- gcc_assert (redirected && last_added_blocks == NULL);
+ gcc_assert (redirected && !last_added_blocks.exists ());
/* When we've redirected a latch edge, update the header. */
if (latch_edge_p)
/* Create a new region from preheader blocks LOOP_BLOCKS. */
void
-make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks)
+make_region_from_loop_preheader (vec<basic_block> *&loop_blocks)
{
unsigned int i;
int new_rgn_number = -1;
new_rgn_number = sel_create_new_region ();
- FOR_EACH_VEC_ELT (basic_block, *loop_blocks, i, bb)
+ FOR_EACH_VEC_ELT (*loop_blocks, i, bb)
{
gcc_assert (new_rgn_number >= 0);
sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
}
- VEC_free (basic_block, heap, *loop_blocks);
- gcc_assert (*loop_blocks == NULL);
+ vec_free (loop_blocks);
}
if (rgn_number < 0)
return false;
- VEC_safe_push (loop_p, heap, loop_nests, loop);
+ loop_nests.safe_push (loop);
return true;
}
{
/* Regions created with extend_rgns don't have corresponding loop nests,
because they don't represent loops. */
- if (rgn < VEC_length (loop_p, loop_nests))
- return VEC_index (loop_p, loop_nests, rgn);
+ if (rgn < loop_nests.length ())
+ return loop_nests[rgn];
else
return NULL;
}
{
int rgn = CONTAINING_RGN (loop->latch->index);
- gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
+ gcc_assert ((unsigned) rgn < loop_nests.length ());
return true;
}
loop_optimizer_finalize ();
- VEC_free (loop_p, heap, loop_nests);
+ loop_nests.release ();
free (rev_top_order_index);
rev_top_order_index = NULL;
{
int i;
basic_block bb;
- VEC(basic_block, heap) *preheader_blocks
+ vec<basic_block> *preheader_blocks
= LOOP_PREHEADER_BLOCKS (current_loop_nest);
- for (i = 0;
- VEC_iterate (basic_block, preheader_blocks, i, bb);
- i++)
+ if (!preheader_blocks)
+ return;
+
+ for (i = 0; preheader_blocks->iterate (i, &bb); i++)
{
- VEC_safe_push (basic_block, heap, *bbs, bb);
- VEC_safe_push (basic_block, heap, last_added_blocks, bb);
+ bbs->safe_push (bb);
+ last_added_blocks.safe_push (bb);
sel_add_bb (bb);
}
- VEC_free (basic_block, heap, preheader_blocks);
+ vec_free (preheader_blocks);
}
/* While pipelining outer loops, returns TRUE if BB is a loop preheader.
int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
basic_block bb;
bool all_empty_p = true;
- VEC(basic_block, heap) *preheader_blocks
+ vec<basic_block> *preheader_blocks
= LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
+ vec_check_alloc (preheader_blocks, 0);
+
gcc_assert (current_loop_nest);
- old_len = VEC_length (basic_block, preheader_blocks);
+ old_len = preheader_blocks->length ();
/* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */
for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
corresponding loop, then it should be a preheader. */
if (sel_is_loop_preheader_p (bb))
{
- VEC_safe_push (basic_block, heap, preheader_blocks, bb);
+ preheader_blocks->safe_push (bb);
if (BB_END (bb) != bb_note (bb))
all_empty_p = false;
}
}
/* Remove these blocks only after iterating over the whole region. */
- for (i = VEC_length (basic_block, preheader_blocks) - 1;
- i >= old_len;
- i--)
+ for (i = preheader_blocks->length () - 1; i >= old_len; i--)
{
- bb = VEC_index (basic_block, preheader_blocks, i);
+ bb = (*preheader_blocks)[i];
sel_remove_bb (bb, false);
}
{
if (!all_empty_p)
/* Immediately create new region from preheader. */
- make_region_from_loop_preheader (&preheader_blocks);
+ make_region_from_loop_preheader (preheader_blocks);
else
{
/* If all preheader blocks are empty - don't create a new empty region.
Instead, remove them completely. */
- FOR_EACH_VEC_ELT (basic_block, preheader_blocks, i, bb)
+ FOR_EACH_VEC_ELT (*preheader_blocks, i, bb)
{
edge e;
edge_iterator ei;
next_bb));
}
}
- VEC_free (basic_block, heap, preheader_blocks);
+ vec_free (preheader_blocks);
}
else
/* Store preheader within the father's loop structure. */
#include "rtl.h"
#include "ggc.h"
#include "bitmap.h"
-#include "vecprim.h"
#include "sched-int.h"
#include "cfgloop.h"
typedef struct expr_history_def_1 expr_history_def;
-DEF_VEC_O (expr_history_def);
-DEF_VEC_ALLOC_O (expr_history_def, heap);
/* Expression information. */
struct _expr
int orig_sched_cycle;
/* This vector contains the history of insn's transformations. */
- VEC(expr_history_def, heap) *history_of_changes;
+ vec<expr_history_def> history_of_changes;
/* True (1) when original target (register or memory) of this instruction
is available for scheduling, false otherwise. -1 means we're not sure;
#define EXPR_WAS_RENAMED(EXPR) ((EXPR)->was_renamed)
#define EXPR_CANT_MOVE(EXPR) ((EXPR)->cant_move)
-#define EXPR_WAS_CHANGED(EXPR) (VEC_length (expr_history_def, \
- EXPR_HISTORY_OF_CHANGES (EXPR)) > 0)
+#define EXPR_WAS_CHANGED(EXPR) (EXPR_HISTORY_OF_CHANGES (EXPR).length () > 0)
/* Insn definition for list of original insns in find_used_regs. */
struct _def
tc_t tc;
/* A vector of insns that are scheduled but not yet completed. */
- VEC (rtx,gc) *executing_insns;
+ vec<rtx, va_gc> *executing_insns;
/* A vector indexed by UIDs that caches the earliest cycle on which
an insn can be scheduled on this fence. */
typedef struct _sel_insn_data sel_insn_data_def;
typedef sel_insn_data_def *sel_insn_data_t;
-DEF_VEC_O (sel_insn_data_def);
-DEF_VEC_ALLOC_O (sel_insn_data_def, heap);
-extern VEC (sel_insn_data_def, heap) *s_i_d;
+extern vec<sel_insn_data_def> s_i_d;
/* Accessor macros for s_i_d. */
-#define SID(INSN) (&VEC_index (sel_insn_data_def, s_i_d, INSN_LUID (INSN)))
-#define SID_BY_UID(UID) (&VEC_index (sel_insn_data_def, s_i_d, LUID_BY_UID (UID)))
+#define SID(INSN) (&s_i_d[INSN_LUID (INSN)])
+#define SID_BY_UID(UID) (&s_i_d[LUID_BY_UID (UID)])
extern sel_insn_data_def insn_sid (insn_t);
/* Saved loop preheader to transfer when scheduling the loop. */
#define LOOP_PREHEADER_BLOCKS(LOOP) ((size_t)((LOOP)->aux) == 1 \
? NULL \
- : ((VEC(basic_block, heap) *) (LOOP)->aux))
+ : ((vec<basic_block> *) (LOOP)->aux))
#define SET_LOOP_PREHEADER_BLOCKS(LOOP,BLOCKS) ((LOOP)->aux \
= (BLOCKS != NULL \
? BLOCKS \
typedef sel_global_bb_info_def *sel_global_bb_info_t;
-DEF_VEC_O (sel_global_bb_info_def);
-DEF_VEC_ALLOC_O (sel_global_bb_info_def, heap);
/* Per basic block data. This array is indexed by basic block index. */
-extern VEC (sel_global_bb_info_def, heap) *sel_global_bb_info;
+extern vec<sel_global_bb_info_def> sel_global_bb_info;
extern void sel_extend_global_bb_info (void);
extern void sel_finish_global_bb_info (void);
/* Get data for BB. */
#define SEL_GLOBAL_BB_INFO(BB) \
- (&VEC_index (sel_global_bb_info_def, sel_global_bb_info, (BB)->index))
+ (&sel_global_bb_info[(BB)->index])
/* Access macros. */
#define BB_LV_SET(BB) (SEL_GLOBAL_BB_INFO (BB)->lv_set)
typedef sel_region_bb_info_def *sel_region_bb_info_t;
-DEF_VEC_O (sel_region_bb_info_def);
-DEF_VEC_ALLOC_O (sel_region_bb_info_def, heap);
/* Per basic block data. This array is indexed by basic block index. */
-extern VEC (sel_region_bb_info_def, heap) *sel_region_bb_info;
+extern vec<sel_region_bb_info_def> sel_region_bb_info;
/* Get data for BB. */
-#define SEL_REGION_BB_INFO(BB) (&VEC_index (sel_region_bb_info_def, \
- sel_region_bb_info, (BB)->index))
+#define SEL_REGION_BB_INFO(BB) (&sel_region_bb_info[(BB)->index])
/* Get BB's note_list.
A note_list is a list of various notes that was scattered across BB
/* If skip to loop exits, save here information about loop exits. */
int current_exit;
- VEC (edge, heap) *loop_exits;
+ vec<edge> loop_exits;
} succ_iterator;
/* A structure returning all successor's information. */
/* Their probabilities. As of now, we don't need this for other
successors. */
- VEC(int,heap) *probs_ok;
+ vec<int> probs_ok;
/* Other successors. */
insn_vec_t succs_other;
}
/* Return exit edges of LOOP, filtering out edges with the same dest bb. */
-static inline VEC (edge, heap) *
+static inline vec<edge>
get_loop_exit_edges_unique_dests (const struct loop *loop)
{
- VEC (edge, heap) *edges = NULL;
+ vec<edge> edges = vec<edge>();
struct loop_exit *exit;
gcc_assert (loop->latch != EXIT_BLOCK_PTR
edge e;
bool was_dest = false;
- for (i = 0; VEC_iterate (edge, edges, i, e); i++)
+ for (i = 0; edges.iterate (i, &e); i++)
if (e->dest == exit->e->dest)
{
was_dest = true;
}
if (!was_dest)
- VEC_safe_push (edge, heap, edges, exit->e);
+ edges.safe_push (exit->e);
}
return edges;
}
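Returning a vec by value is cheap here: the vl_ptr layout is a single pointer, so the heap buffer merely changes owners and the caller must release () it. Caller-side sketch (handle_exit is a hypothetical consumer):

  vec<edge> exits = get_loop_exit_edges_unique_dests (loop);
  int i;
  edge e;
  FOR_EACH_VEC_ELT (exits, i, e)
    handle_exit (e);  /* hypothetical; stands in for any use of E */
  exits.release ();   /* the caller owns the storage */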
traverse all of them and if any of them turns out to be another loop header
(after skipping empty BBs), add its loop exits to the resulting vector
as well. */
-static inline VEC(edge, heap) *
+static inline vec<edge>
get_all_loop_exits (basic_block bb)
{
- VEC(edge, heap) *exits = NULL;
+ vec<edge> exits = vec<edge>();
/* If bb is empty, and we're skipping to loop exits, then
consider bb as a possible gate to the inner loop now. */
exits = get_loop_exit_edges_unique_dests (this_loop);
/* Traverse all loop headers. */
- for (i = 0; VEC_iterate (edge, exits, i, e); i++)
+ for (i = 0; exits.iterate (i, &e); i++)
if (in_current_region_p (e->dest)
|| inner_loop_header_p (e->dest))
{
- VEC(edge, heap) *next_exits = get_all_loop_exits (e->dest);
+ vec<edge> next_exits = get_all_loop_exits (e->dest);
- if (next_exits)
+ if (next_exits.exists ())
{
int j;
edge ne;
/* Add all loop exits for the current edge into the
resulting vector. */
- for (j = 0; VEC_iterate (edge, next_exits, j, ne); j++)
- VEC_safe_push (edge, heap, exits, ne);
+ for (j = 0; next_exits.iterate (j, &ne); j++)
+ exits.safe_push (ne);
/* Remove the original edge. */
- VEC_ordered_remove (edge, exits, i);
+ exits.ordered_remove (i);
/* Decrease the loop counter so we won't skip anything. */
i--;
i.bb = bb;
i.current_flags = 0;
i.current_exit = -1;
- i.loop_exits = NULL;
+ i.loop_exits.create (0);
if (bb != EXIT_BLOCK_PTR && BB_END (bb) != insn)
{
/* Avoid 'uninitialized' warning. */
i.ei.index = 0;
- i.ei.container = NULL;
+ i.ei.container = 0;
}
else
{
edge e_tmp = NULL;
/* First, try loop exits, if we have them. */
- if (ip->loop_exits)
+ if (ip->loop_exits.exists ())
{
do
{
- VEC_iterate (edge, ip->loop_exits,
- ip->current_exit, e_tmp);
+ ip->loop_exits.iterate (ip->current_exit, &e_tmp);
ip->current_exit++;
}
while (e_tmp && !check (e_tmp, ip));
if (!e_tmp)
- VEC_free (edge, heap, ip->loop_exits);
+ ip->loop_exits.release ();
}
/* If we have found a successor, then great. */
/* Get all loop exits recursively. */
ip->loop_exits = get_all_loop_exits (bb);
- if (ip->loop_exits)
+ if (ip->loop_exits.exists ())
{
ip->current_exit = 0;
/* Move the iterator now, because we won't do
/* If loop_exits are non null, we have found an inner loop;
do one more iteration to fetch an edge from these exits. */
- if (ip->loop_exits)
+ if (ip->loop_exits.exists ())
continue;
/* Otherwise, we've found an edge in a usual way. Break now. */
{
gcc_assert (!ip->e2 || ip->e1);
- if (ip->bb_end && ip->e1 && !ip->loop_exits)
+ if (ip->bb_end && ip->e1 && !ip->loop_exits.exists ())
ei_next (&(ip->ei));
}
extern void clear_expr (expr_t);
extern unsigned expr_dest_regno (expr_t);
extern rtx expr_dest_reg (expr_t);
-extern int find_in_history_vect (VEC(expr_history_def, heap) *,
+extern int find_in_history_vect (vec<expr_history_def>,
rtx, vinsn_t, bool);
-extern void insert_in_history_vect (VEC(expr_history_def, heap) **,
+extern void insert_in_history_vect (vec<expr_history_def> *,
unsigned, enum local_trans_type,
vinsn_t, vinsn_t, ds_t);
extern void mark_unavailable_targets (av_set_t, av_set_t, regset);
extern void sel_sched_region (int);
extern loop_p get_loop_nest_for_rgn (unsigned int);
extern bool considered_for_pipelining_p (struct loop *);
-extern void make_region_from_loop_preheader (VEC(basic_block, heap) **);
+extern void make_region_from_loop_preheader (vec<basic_block> *&);
extern void sel_add_loop_preheaders (bb_vec_t *);
extern bool sel_is_loop_preheader_p (basic_block);
extern void clear_outdated_rtx_info (basic_block);
static int num_insns_scheduled;
/* A vector of expressions is used to be able to sort them. */
-DEF_VEC_P(expr_t);
-DEF_VEC_ALLOC_P(expr_t,heap);
-static VEC(expr_t, heap) *vec_av_set = NULL;
+static vec<expr_t> vec_av_set = vec<expr_t>();
/* A vector of vinsns is used to hold temporary lists of vinsns. */
-DEF_VEC_P(vinsn_t);
-DEF_VEC_ALLOC_P(vinsn_t,heap);
-typedef VEC(vinsn_t, heap) *vinsn_vec_t;
+typedef vec<vinsn_t> vinsn_vec_t;
/* This vector has the exprs which may still present in av_sets, but actually
can't be moved up due to bookkeeping created during code motion to another
fence. See comment near the call to update_and_record_unavailable_insns
for the detailed explanations. */
-static vinsn_vec_t vec_bookkeeping_blocked_vinsns = NULL;
+static vinsn_vec_t vec_bookkeeping_blocked_vinsns = vinsn_vec_t();
/* This vector has vinsns which are scheduled with renaming on the first fence
and then seen on the second. For expressions with such vinsns, target
availability information may be wrong. */
-static vinsn_vec_t vec_target_unavailable_vinsns = NULL;
+static vinsn_vec_t vec_target_unavailable_vinsns = vinsn_vec_t();
/* Vector to store temporary nops inserted in move_op to prevent removal
of empty bbs. */
-DEF_VEC_P(insn_t);
-DEF_VEC_ALLOC_P(insn_t,heap);
-static VEC(insn_t, heap) *vec_temp_moveop_nops = NULL;
+static vec<insn_t> vec_temp_moveop_nops = vec<insn_t>();
/* These bitmaps record original instructions scheduled on the current
iteration and bookkeeping copies created by them. */
can_issue_more = issue_rate;
FENCE_ISSUE_MORE (fence) = can_issue_more;
- for (i = 0; VEC_iterate (rtx, FENCE_EXECUTING_INSNS (fence), i, insn); )
+ for (i = 0; vec_safe_iterate (FENCE_EXECUTING_INSNS (fence), i, &insn); )
{
if (INSN_READY_CYCLE (insn) < cycle)
{
remove_from_deps (FENCE_DC (fence), insn);
- VEC_unordered_remove (rtx, FENCE_EXECUTING_INSNS (fence), i);
+ FENCE_EXECUTING_INSNS (fence)->unordered_remove (i);
continue;
}
i++;
{
expr_history_def *phist;
- phist = &VEC_index (expr_history_def,
- EXPR_HISTORY_OF_CHANGES (expr),
- index);
+ phist = &EXPR_HISTORY_OF_CHANGES (expr)[index];
switch (phist->type)
{
/* Add insn to the tail of current path. */
ilist_add (&p, insn);
- FOR_EACH_VEC_ELT (rtx, sinfo->succs_ok, is, succ)
+ FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
{
av_set_t succ_set;
succ_set = compute_av_set_inside_bb (succ, p, ws, true);
av_set_split_usefulness (succ_set,
- VEC_index (int, sinfo->probs_ok, is),
+ sinfo->probs_ok[is],
sinfo->all_prob);
if (sinfo->all_succs_n > 1)
/* Check liveness restrictions via hard way when there are more than
two successors. */
if (sinfo->succs_ok_n > 2)
- FOR_EACH_VEC_ELT (rtx, sinfo->succs_ok, is, succ)
+ FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
{
basic_block succ_bb = BLOCK_FOR_INSN (succ);
/* Finally, check liveness restrictions on paths leaving the region. */
if (sinfo->all_succs_n > sinfo->succs_ok_n)
- FOR_EACH_VEC_ELT (rtx, sinfo->succs_other, is, succ)
+ FOR_EACH_VEC_ELT (sinfo->succs_other, is, succ)
mark_unavailable_targets
(av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (succ)));
of expr taken from its history vector. */
for (i = 0, expr_vinsn = EXPR_VINSN (expr);
expr_vinsn;
- expr_vinsn = (i < VEC_length (expr_history_def,
- EXPR_HISTORY_OF_CHANGES (expr))
- ? VEC_index (expr_history_def,
- EXPR_HISTORY_OF_CHANGES (expr),
- i++).old_expr_vinsn
+ expr_vinsn = (i < EXPR_HISTORY_OF_CHANGES (expr).length ()
+ ? EXPR_HISTORY_OF_CHANGES (expr)[i++].old_expr_vinsn
: NULL))
- FOR_EACH_VEC_ELT (vinsn_t, vinsn_vec, n, vinsn)
+ FOR_EACH_VEC_ELT (vinsn_vec, n, vinsn)
if (VINSN_SEPARABLE_P (vinsn))
{
if (vinsn_equal_p (vinsn, expr_vinsn))
static void
vinsn_vec_clear (vinsn_vec_t *vinsn_vec)
{
- unsigned len = VEC_length (vinsn_t, *vinsn_vec);
+ unsigned len = vinsn_vec->length ();
if (len > 0)
{
vinsn_t vinsn;
int n;
- FOR_EACH_VEC_ELT (vinsn_t, *vinsn_vec, n, vinsn)
+ FOR_EACH_VEC_ELT (*vinsn_vec, n, vinsn)
vinsn_detach (vinsn);
- VEC_block_remove (vinsn_t, *vinsn_vec, 0, len);
+ vinsn_vec->block_remove (0, len);
}
}
vinsn_vec_add (vinsn_vec_t *vinsn_vec, expr_t expr)
{
vinsn_attach (EXPR_VINSN (expr));
- VEC_safe_push (vinsn_t, heap, *vinsn_vec, EXPR_VINSN (expr));
+ vinsn_vec->safe_push (EXPR_VINSN (expr));
}
/* Free the vector representing blocked expressions. */
static void
-vinsn_vec_free (vinsn_vec_t *vinsn_vec)
+vinsn_vec_free (vinsn_vec_t &vinsn_vec)
{
- if (*vinsn_vec)
- VEC_free (vinsn_t, heap, *vinsn_vec);
+ vinsn_vec.release ();
}
/* Increase EXPR_PRIORITY_ADJ for INSN by AMOUNT. */
return false;
/* Empty vector from the previous stuff. */
- if (VEC_length (expr_t, vec_av_set) > 0)
- VEC_block_remove (expr_t, vec_av_set, 0, VEC_length (expr_t, vec_av_set));
+ if (vec_av_set.length () > 0)
+ vec_av_set.block_remove (0, vec_av_set.length ());
/* Turn the set into a vector for sorting and call sel_target_adjust_priority
for each insn. */
- gcc_assert (VEC_empty (expr_t, vec_av_set));
+ gcc_assert (vec_av_set.is_empty ());
FOR_EACH_EXPR (expr, si, av)
{
- VEC_safe_push (expr_t, heap, vec_av_set, expr);
+ vec_av_set.safe_push (expr);
gcc_assert (EXPR_PRIORITY_ADJ (expr) == 0 || *pneed_stall);
}
/* Sort the vector. */
- VEC_qsort (expr_t, vec_av_set, sel_rank_for_schedule);
+ vec_av_set.qsort (sel_rank_for_schedule);
/* We record maximal priority of insns in av set for current instruction
group. */
av_max_prio = est_ticks_till_branch = INT_MIN;
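The qsort member wraps the C library sort over the live elements and keeps the classic comparator signature, so comparators such as sel_rank_for_schedule port unchanged. A standalone sketch (assumes vec.h):

  /* Comparator keeps the qsort-style signature.  */
  static int
  int_cmp (const void *pa, const void *pb)
  {
    return *(const int *) pa - *(const int *) pb;
  }

  /* In some function:  */
  vec<int> v = vec<int>();
  v.safe_push (3);
  v.safe_push (1);
  v.qsort (int_cmp);  /* was: VEC_qsort (int, v, int_cmp); v is now {1, 3} */
  v.release ();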
/* Filter out inappropriate expressions. Loop's direction is reversed to
- visit "best" instructions first. We assume that VEC_unordered_remove
+ visit "best" instructions first. We assume that vec::unordered_remove
moves last element in place of one being deleted. */
- for (n = VEC_length (expr_t, vec_av_set) - 1, stalled = 0; n >= 0; n--)
+ for (n = vec_av_set.length () - 1, stalled = 0; n >= 0; n--)
{
- expr_t expr = VEC_index (expr_t, vec_av_set, n);
+ expr_t expr = vec_av_set[n];
insn_t insn = EXPR_INSN_RTX (expr);
signed char target_available;
bool is_orig_reg_p = true;
/* Don't allow any insns other than from SCHED_GROUP if we have one. */
if (FENCE_SCHED_NEXT (fence) && insn != FENCE_SCHED_NEXT (fence))
{
- VEC_unordered_remove (expr_t, vec_av_set, n);
+ vec_av_set.unordered_remove (n);
continue;
}
we have to recompute the set of available registers for it. */
if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
{
- VEC_unordered_remove (expr_t, vec_av_set, n);
+ vec_av_set.unordered_remove (n);
if (sched_verbose >= 4)
sel_print ("Expr %d is blocked by bookkeeping inserted earlier\n",
INSN_UID (insn));
(target_available == false
&& !EXPR_SEPARABLE_P (expr))
/* Don't try to find a register for low-priority expression. */
- || (int) VEC_length (expr_t, vec_av_set) - 1 - n >= max_insns_to_rename
+ || (int) vec_av_set.length () - 1 - n >= max_insns_to_rename
/* ??? FIXME: Don't try to rename data speculation. */
|| (EXPR_SPEC_DONE_DS (expr) & BEGIN_DATA)
|| ! find_best_reg_for_expr (expr, bnds, &is_orig_reg_p))
{
- VEC_unordered_remove (expr_t, vec_av_set, n);
+ vec_av_set.unordered_remove (n);
if (sched_verbose >= 4)
sel_print ("Expr %d has no suitable target register\n",
INSN_UID (insn));
if ((int) current_loop_nest->ninsns < 9)
{
- VEC_unordered_remove (expr_t, vec_av_set, n);
+ vec_av_set.unordered_remove (n);
if (sched_verbose >= 4)
sel_print ("Pipelining expr %d will likely cause stall\n",
INSN_UID (insn));
< need_n_ticks_till_branch * issue_rate / 2
&& est_ticks_till_branch < need_n_ticks_till_branch)
{
- VEC_unordered_remove (expr_t, vec_av_set, n);
+ vec_av_set.unordered_remove (n);
if (sched_verbose >= 4)
sel_print ("Pipelining expr %d will likely cause stall\n",
INSN_UID (insn));
{
stalled++;
min_need_stall = min_need_stall < 0 ? 1 : MIN (min_need_stall, 1);
- VEC_unordered_remove (expr_t, vec_av_set, n);
+ vec_av_set.unordered_remove (n);
if (sched_verbose >= 4)
sel_print ("Delaying speculation check %d until its first use\n",
INSN_UID (insn));
min_need_stall = (min_need_stall < 0
? need_cycles
: MIN (min_need_stall, need_cycles));
- VEC_unordered_remove (expr_t, vec_av_set, n);
+ vec_av_set.unordered_remove (n);
if (sched_verbose >= 4)
sel_print ("Expr %d is not ready until cycle %d (cached)\n",
? need_cycles
: MIN (min_need_stall, need_cycles));
- VEC_unordered_remove (expr_t, vec_av_set, n);
+ vec_av_set.unordered_remove (n);
if (sched_verbose >= 4)
sel_print ("Expr %d is not ready yet until cycle %d\n",
if (min_need_stall < 0)
min_need_stall = 0;
- if (VEC_empty (expr_t, vec_av_set))
+ if (vec_av_set.is_empty ())
{
/* We need to set *pneed_stall here, because later we skip this code
when ready list is empty. */
gcc_assert (min_need_stall == 0);
/* Sort the vector. */
- VEC_qsort (expr_t, vec_av_set, sel_rank_for_schedule);
+ vec_av_set.qsort (sel_rank_for_schedule);
if (sched_verbose >= 4)
{
sel_print ("Total ready exprs: %d, stalled: %d\n",
- VEC_length (expr_t, vec_av_set), stalled);
- sel_print ("Sorted av set (%d): ", VEC_length (expr_t, vec_av_set));
- FOR_EACH_VEC_ELT (expr_t, vec_av_set, n, expr)
+ vec_av_set.length (), stalled);
+ sel_print ("Sorted av set (%d): ", vec_av_set.length ());
+ FOR_EACH_VEC_ELT (vec_av_set, n, expr)
dump_expr (expr);
sel_print ("\n");
}
expr_t expr;
/* Allocate and fill the ready list from the sorted vector. */
- ready.n_ready = VEC_length (expr_t, vec_av_set);
+ ready.n_ready = vec_av_set.length ();
ready.first = ready.n_ready - 1;
gcc_assert (ready.n_ready > 0);
sched_extend_ready_list (ready.n_ready);
}
- FOR_EACH_VEC_ELT (expr_t, vec_av_set, n, expr)
+ FOR_EACH_VEC_ELT (vec_av_set, n, expr)
{
vinsn_t vi = EXPR_VINSN (expr);
insn_t insn = VINSN_INSN_RTX (vi);
{
int i, j, n;
rtx *arr = ready.vec;
- expr_t *vec = VEC_address (expr_t, vec_av_set);
+ expr_t *vec = vec_av_set.address ();
for (i = 0, n = ready.n_ready; i < n; i++)
if (EXPR_INSN_RTX (vec[i]) != arr[i])
real_index = follow_ready_element ? ready.first - index : index;
- expr = VEC_index (expr_t, vec_av_set, real_index);
+ expr = vec_av_set[real_index];
gcc_assert (ready.vec[real_index] == EXPR_INSN_RTX (expr));
return expr;
int i;
insn_t insn;
- FOR_EACH_VEC_ELT (insn_t, vec_temp_moveop_nops, i, insn)
+ FOR_EACH_VEC_ELT (vec_temp_moveop_nops, i, insn)
{
gcc_assert (INSN_NOP_P (insn));
return_nop_to_pool (insn, full_tidying);
}
/* Empty the vector. */
- if (VEC_length (insn_t, vec_temp_moveop_nops) > 0)
- VEC_block_remove (insn_t, vec_temp_moveop_nops, 0,
- VEC_length (insn_t, vec_temp_moveop_nops));
+ if (vec_temp_moveop_nops.length () > 0)
+ vec_temp_moveop_nops.block_remove (0, vec_temp_moveop_nops.length ());
}
/* Records the maximal UID before moving up an instruction. Used for
/* First, reflect that something is scheduled on this fence. */
asm_p = advance_state_on_fence (fence, insn);
FENCE_LAST_SCHEDULED_INSN (fence) = insn;
- VEC_safe_push (rtx, gc, FENCE_EXECUTING_INSNS (fence), insn);
+ vec_safe_push (FENCE_EXECUTING_INSNS (fence), insn);
if (SCHED_GROUP_P (insn))
{
FENCE_SCHED_NEXT (fence) = INSN_SCHED_NEXT (insn);
{
insn_t nop = get_nop_from_pool (insn);
gcc_assert (INSN_NOP_P (nop));
- VEC_safe_push (insn_t, heap, vec_temp_moveop_nops, nop);
+ vec_temp_moveop_nops.safe_push (nop);
}
sel_remove_insn (insn, only_disconnect, false);
if (current_region_empty_p ())
return true;
- bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
+ bbs.create (current_nr_blocks);
for (i = 0; i < current_nr_blocks; i++)
- VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
+ bbs.quick_push (BASIC_BLOCK (BB_TO_BLOCK (i)));
sel_init_bbs (bbs);
sel_setup_sched_infos ();
sel_init_global_and_expr (bbs);
- VEC_free (basic_block, heap, bbs);
+ bbs.release ();
blocks_to_reschedule = BITMAP_ALLOC (NULL);
free_nop_pool ();
/* Free the vectors. */
- if (vec_av_set)
- VEC_free (expr_t, heap, vec_av_set);
+ vec_av_set.release ();
BITMAP_FREE (current_copies);
BITMAP_FREE (current_originators);
BITMAP_FREE (code_motion_visited_blocks);
- vinsn_vec_free (&vec_bookkeeping_blocked_vinsns);
- vinsn_vec_free (&vec_target_unavailable_vinsns);
+ vinsn_vec_free (vec_bookkeeping_blocked_vinsns);
+ vinsn_vec_free (vec_target_unavailable_vinsns);
/* If LV_SET of the region head should be updated, do it now because
there will be no other chance. */
finish_deps_global ();
sched_finish_luids ();
- VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
+ h_d_i_d.release ();
sel_finish_bbs ();
BITMAP_FREE (blocks_to_reschedule);
return;
bitmap_set_bit (SESE_LOOPS (region), loop->num);
- VEC_safe_push (loop_p, heap, SESE_LOOP_NEST (region), loop);
+ SESE_LOOP_NEST (region).safe_push (loop);
}
/* Build the loop nests contained in REGION. Returns true when the
/* Make sure that the loops in the SESE_LOOP_NEST are ordered. It
can be the case that an inner loop is inserted before an outer
loop. To avoid this, semi-sort once. */
- FOR_EACH_VEC_ELT (loop_p, SESE_LOOP_NEST (region), i, loop0)
+ FOR_EACH_VEC_ELT (SESE_LOOP_NEST (region), i, loop0)
{
- if (VEC_length (loop_p, SESE_LOOP_NEST (region)) == i + 1)
+ if (SESE_LOOP_NEST (region).length () == i + 1)
break;
- loop1 = VEC_index (loop_p, SESE_LOOP_NEST (region), i + 1);
+ loop1 = SESE_LOOP_NEST (region)[i + 1];
if (loop0->num > loop1->num)
{
- VEC_replace (loop_p, SESE_LOOP_NEST (region), i, loop1);
- VEC_replace (loop_p, SESE_LOOP_NEST (region), i + 1, loop0);
+ SESE_LOOP_NEST (region)[i] = loop1;
+ SESE_LOOP_NEST (region)[i + 1] = loop0;
}
}
}
SESE_ENTRY (region) = entry;
SESE_EXIT (region) = exit;
SESE_LOOPS (region) = BITMAP_ALLOC (NULL);
- SESE_LOOP_NEST (region) = VEC_alloc (loop_p, heap, 3);
+ SESE_LOOP_NEST (region).create (3);
SESE_ADD_PARAMS (region) = true;
- SESE_PARAMS (region) = VEC_alloc (tree, heap, 3);
+ SESE_PARAMS (region).create (3);
return region;
}
if (SESE_LOOPS (region))
SESE_LOOPS (region) = BITMAP_ALLOC (NULL);
- VEC_free (tree, heap, SESE_PARAMS (region));
- VEC_free (loop_p, heap, SESE_LOOP_NEST (region));
+ SESE_PARAMS (region).release ();
+ SESE_LOOP_NEST (region).release ();
XDELETE (region);
}
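The SESE struct now holds its vectors by value: a zero-initialized struct yields empty vecs, create () sizes them, release () frees them, and no per-type DEF_VEC declarations are needed. Hedged sketch (struct demo is hypothetical):

  struct demo { vec<tree> params; };
  struct demo *d = XCNEW (struct demo);  /* zeroed, so d->params is empty */
  d->params.create (3);                  /* reserve room for 3 elements */
  d->params.safe_push (integer_zero_node);
  d->params.release ();
  XDELETE (d);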
static bool
rename_uses (gimple copy, htab_t rename_map, gimple_stmt_iterator *gsi_tgt,
- sese region, loop_p loop, VEC (tree, heap) *iv_map,
+ sese region, loop_p loop, vec<tree> iv_map,
bool *gloog_error)
{
use_operand_p use_p;
static void
graphite_copy_stmts_from_block (basic_block bb, basic_block new_bb,
htab_t rename_map,
- VEC (tree, heap) *iv_map, sese region,
+ vec<tree> iv_map, sese region,
bool *gloog_error)
{
gimple_stmt_iterator gsi, gsi_tgt;
edge
copy_bb_and_scalar_dependences (basic_block bb, sese region,
- edge next_e, VEC (tree, heap) *iv_map,
+ edge next_e, vec<tree> iv_map,
bool *gloog_error)
{
basic_block new_bb = split_edge (next_e);
edge entry, exit;
/* Parameters used within the SCOP. */
- VEC (tree, heap) *params;
+ vec<tree> params;
/* Loops completely contained in the SCOP. */
bitmap loops;
- VEC (loop_p, heap) *loop_nest;
+ vec<loop_p> loop_nest;
/* Are we allowed to add more params? This is for debugging purpose. We
can only add new params before generating the bb domains, otherwise they
extern void sese_insert_phis_for_liveouts (sese, basic_block, edge, edge);
extern void build_sese_loop_nests (sese);
extern edge copy_bb_and_scalar_dependences (basic_block, sese, edge,
- VEC (tree, heap) *, bool *);
+ vec<tree>, bool *);
extern struct loop *outermost_loop_in_sese (sese, basic_block);
extern void insert_loop_close_phis (htab_t, loop_p);
extern void insert_guard_phis (basic_block, edge, edge, htab_t, htab_t);
static inline unsigned
sese_nb_params (sese region)
{
- return VEC_length (tree, SESE_PARAMS (region));
+ return SESE_PARAMS (region).length ();
}
/* Checks whether BB is contained in the region delimited by ENTRY and
tree old_name, expr;
} *rename_map_elt;
-DEF_VEC_P(rename_map_elt);
-DEF_VEC_ALLOC_P (rename_map_elt, heap);
extern void debug_rename_map (htab_t);
extern hashval_t rename_map_elt_info (const void *);
corresponding element in CONDITION_CASES is not NULL_TREE. For a
SWITCH_EXPR the corresponding element in CONDITION_CASES is a
CASE_LABEL_EXPR. */
- VEC (gimple, heap) *conditions;
- VEC (gimple, heap) *condition_cases;
- VEC (data_reference_p, heap) *data_refs;
+ vec<gimple> conditions;
+ vec<gimple> condition_cases;
+ vec<data_reference_p> data_refs;
} *gimple_bb_p;
#define GBB_BB(GBB) (GBB)->bb
#define FINAL_PASS_MEM_STAT , ALONE_FINAL_PASS_MEM_STAT
#define MEM_STAT_INFO , ALONE_MEM_STAT_INFO
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
-#define CXX_MEM_STAT_INFO , const char * _loc_name = __builtin_FILE (), int _loc_line = __builtin_LINE (), const char * _loc_function = __builtin_FUNCTION ()
+#define ALONE_CXX_MEM_STAT_INFO const char * _loc_name = __builtin_FILE (), int _loc_line = __builtin_LINE (), const char * _loc_function = __builtin_FUNCTION ()
#else
-#define CXX_MEM_STAT_INFO , const char * _loc_name = __FILE__, int _loc_line = __LINE__, const char * _loc_function = NULL
+#define ALONE_CXX_MEM_STAT_INFO const char * _loc_name = __FILE__, int _loc_line = __LINE__, const char * _loc_function = NULL
#endif
+#define CXX_MEM_STAT_INFO , ALONE_CXX_MEM_STAT_INFO
#else
#define ALONE_MEM_STAT_DECL void
#define ALONE_FINAL_MEM_STAT_DECL GCC_MEM_STAT_ARGUMENTS
#define PASS_MEM_STAT
#define FINAL_PASS_MEM_STAT , ALONE_FINAL_PASS_MEM_STAT
#define MEM_STAT_INFO ALONE_MEM_STAT_INFO
+#define ALONE_CXX_MEM_STAT_INFO
#define CXX_MEM_STAT_INFO
#endif
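The new ALONE_ variant mirrors the ALONE_MEM_STAT_DECL convention already in this header: the plain macro carries a leading comma for appending to a non-empty parameter list, while the ALONE_ form stands on its own. Intended use, sketched (both function names are hypothetical):

  /* Appended to existing parameters; the leading comma comes from the macro.  */
  extern void record_alloc (size_t nbytes CXX_MEM_STAT_INFO);
  /* Sole parameter: the ALONE_ form omits the comma.  */
  extern void record_event (ALONE_CXX_MEM_STAT_INFO);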
void
expand_sjlj_dispatch_table (rtx dispatch_index,
- VEC(tree,heap) *dispatch_table)
+ vec<tree> dispatch_table)
{
tree index_type = integer_type_node;
enum machine_mode index_mode = TYPE_MODE (index_type);
- int ncases = VEC_length (tree, dispatch_table);
+ int ncases = dispatch_table.length ();
do_pending_stack_adjust ();
rtx before_case = get_last_insn ();
and seems to be a reasonable compromise between the "old way"
of expanding as a decision tree or dispatch table vs. the "new
way" with decrement chain or dispatch table. */
- if (VEC_length (tree, dispatch_table) <= 5
+ if (dispatch_table.length () <= 5
|| (!HAVE_casesi && !HAVE_tablejump)
|| !flag_jump_tables)
{
rtx zero = CONST0_RTX (index_mode);
for (int i = 0; i < ncases; i++)
{
- tree elt = VEC_index (tree, dispatch_table, i);
+ tree elt = dispatch_table[i];
rtx lab = label_rtx (CASE_LABEL (elt));
do_jump_if_equal (index_mode, index, zero, lab, 0, -1);
force_expand_binop (index_mode, sub_optab,
ncases);
tree index_expr = make_tree (index_type, dispatch_index);
tree minval = build_int_cst (index_type, 0);
- tree maxval = CASE_LOW (VEC_last (tree, dispatch_table));
+ tree maxval = CASE_LOW (dispatch_table.last ());
tree range = maxval;
rtx default_label = gen_label_rtx ();
for (int i = ncases - 1; i > 0; --i)
{
- tree elt = VEC_index (tree, dispatch_table, i);
+ tree elt = dispatch_table[i];
tree low = CASE_LOW (elt);
tree lab = CASE_LABEL (elt);
case_list = add_case_node (case_list, low, low, lab, 0, case_node_pool);
}
/* An array of functions used for self-referential size computation. */
-static GTY(()) VEC (tree, gc) *size_functions;
+static GTY(()) vec<tree, va_gc> *size_functions;
/* Look inside EXPR into simple arithmetic operations involving constants.
Return the outermost non-arithmetic or non-constant node. */
self_referential_size (tree size)
{
static unsigned HOST_WIDE_INT fnno = 0;
- VEC (tree, heap) *self_refs = NULL;
+ vec<tree> self_refs = vec<tree>();
tree param_type_list = NULL, param_decl_list = NULL;
tree t, ref, return_type, fntype, fnname, fndecl;
unsigned int i;
char buf[128];
- VEC(tree,gc) *args = NULL;
+ vec<tree, va_gc> *args = NULL;
/* Do not factor out simple operations. */
t = skip_simple_constant_arithmetic (size);
/* Collect the list of self-references in the expression. */
find_placeholder_in_expr (size, &self_refs);
- gcc_assert (VEC_length (tree, self_refs) > 0);
+ gcc_assert (self_refs.length () > 0);
/* Obtain a private copy of the expression. */
t = size;
/* Build the parameter and argument lists in parallel; also
substitute the former for the latter in the expression. */
- args = VEC_alloc (tree, gc, VEC_length (tree, self_refs));
- FOR_EACH_VEC_ELT (tree, self_refs, i, ref)
+ vec_alloc (args, self_refs.length ());
+ FOR_EACH_VEC_ELT (self_refs, i, ref)
{
tree subst, param_name, param_type, param_decl;
param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
param_decl_list = chainon (param_decl, param_decl_list);
- VEC_quick_push (tree, args, ref);
+ args->quick_push (ref);
}
- VEC_free (tree, heap, self_refs);
+ self_refs.release ();
/* Append 'void' to indicate that the number of parameters is fixed. */
param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);
TREE_STATIC (fndecl) = 1;
/* Put it onto the list of size functions. */
- VEC_safe_push (tree, gc, size_functions, fndecl);
+ vec_safe_push (size_functions, fndecl);
/* Replace the original expression with a call to the size function. */
return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
unsigned int i;
tree fndecl;
- for (i = 0; VEC_iterate(tree, size_functions, i, fndecl); i++)
+ for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
{
dump_function (TDI_original, fndecl);
gimplify_function_tree (fndecl);
cgraph_finalize_function (fndecl, false);
}
- VEC_free (tree, gc, size_functions);
+ vec_free (size_functions);
}
\f
/* Return the machine mode to use for a nonscalar of SIZE bits. The
rli->offset = size_zero_node;
rli->bitpos = bitsize_zero_node;
rli->prev_field = 0;
- rli->pending_statics = NULL;
+ rli->pending_statics = 0;
rli->packed_maybe_necessary = 0;
rli->remaining_in_alignment = 0;
if (rli->packed_maybe_necessary)
fprintf (stderr, "packed may be necessary\n");
- if (!VEC_empty (tree, rli->pending_statics))
+ if (!vec_safe_is_empty (rli->pending_statics))
{
fprintf (stderr, "pending statics:\n");
debug_vec_tree (rli->pending_statics);
it *after* the record is laid out. */
if (TREE_CODE (field) == VAR_DECL)
{
- VEC_safe_push (tree, gc, rli->pending_statics, field);
+ vec_safe_push (rli->pending_statics, field);
return;
}
/* Lay out any static members. This is done now because their type
may use the record's type. */
- while (!VEC_empty (tree, rli->pending_statics))
- layout_decl (VEC_pop (tree, rli->pending_statics), 0);
+ while (!vec_safe_is_empty (rli->pending_statics))
+ layout_decl (rli->pending_statics->pop (), 0);
/* Clean up. */
if (free_p)
{
- VEC_free (tree, gc, rli->pending_statics);
+ vec_free (rli->pending_statics);
free (rli);
}
}
invalidate REG_EQUAL/REG_EQUIV notes for?).
- pattern_regs in st_expr should be a regset (on its own obstack).
- antic_stores and avail_stores should be VECs instead of lists.
- - store_motion_mems should be a VEC instead of a list.
+ - store_motion_mems should be a vec instead of a list.
- there should be an alloc pool for struct st_expr objects.
- investigate whether it is helpful to make the address of an st_expr
a cselib VALUE.
} file;
typedef const char *str;
-DEF_VEC_P(str);
-DEF_VEC_ALLOC_P(str,heap);
typedef struct demangled_hash_entry
{
const char *key;
- VEC(str,heap) *mangled;
+ vec<str> mangled;
} demangled;
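Worth noting in this hunk: the DEF_VEC_P / DEF_VEC_ALLOC_P instantiation boilerplate is deleted outright rather than translated, because the vec template instantiates itself at the point of use. Before/after, condensed from the lines above:

    /* Before: each element type needed explicit macro instantiations.  */
    DEF_VEC_P(str);
    DEF_VEC_ALLOC_P(str,heap);
    VEC(str,heap) *mangled;

    /* After: a one-line field declaration suffices.  */
    vec<str> mangled;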
/* Hash and comparison functions for these hash tables. */
continue;
dem = demangled_hash_lookup (p, true);
- VEC_safe_push (str, heap, dem->mangled, sym->key);
+ dem->mangled.safe_push (sym->key);
}
}
on the next attempt we will switch all of them the other way
and that will cause it to succeed. */
int chosen = 0;
- int len = VEC_length (str, dem->mangled);
+ int len = dem->mangled.length ();
ok = true;
- FOR_EACH_VEC_ELT (str, dem->mangled, ix, s)
+ FOR_EACH_VEC_ELT (dem->mangled, ix, s)
{
sym = symbol_hash_lookup (s, false);
if (ix == 0)
"%q+D defined but not used", decl);
}
-/* Issue appropriate warnings for the global declarations in VEC (of
+/* Issue appropriate warnings for the global declarations in V (of
which there are LEN). */
void
-check_global_declarations (tree *vec, int len)
+check_global_declarations (tree *v, int len)
{
int i;
for (i = 0; i < len; i++)
- check_global_declaration_1 (vec[i]);
+ check_global_declaration_1 (v[i]);
}
/* Emit debugging information for all global declarations in VEC. */
/* Addresses to log with a save/restore sequence. These should be in
dominator order. */
-static VEC(tree,heap) *tm_log_save_addresses;
+static vec<tree> tm_log_save_addresses;
/* Map for an SSA_NAME originally pointing to a non aliased new piece
of memory (malloc, alloc, etc). */
tm_log_free (void *p)
{
struct tm_log_entry *lp = (struct tm_log_entry *) p;
- VEC_free (gimple, heap, lp->stmts);
+ lp->stmts.release ();
free (lp);
}
{
tm_log = htab_create (10, tm_log_hash, tm_log_eq, tm_log_free);
tm_new_mem_hash = htab_create (5, struct_ptr_hash, struct_ptr_eq, free);
- tm_log_save_addresses = VEC_alloc (tree, heap, 5);
+ tm_log_save_addresses.create (5);
}
/* Free logging data structures. */
{
htab_delete (tm_log);
htab_delete (tm_new_mem_hash);
- VEC_free (tree, heap, tm_log_save_addresses);
+ tm_log_save_addresses.release ();
}
/* Return true if MEM is a transaction invariant memory for the TM
&& !TREE_ADDRESSABLE (type))
{
lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
- lp->stmts = NULL;
+ lp->stmts.create (0);
lp->entry_block = entry_block;
/* Save addresses separately in dominator order so we don't
get confused by overlapping addresses in the save/restore
sequence. */
- VEC_safe_push (tree, heap, tm_log_save_addresses, lp->addr);
+ tm_log_save_addresses.safe_push (lp->addr);
}
else
{
/* Use the logging functions. */
- lp->stmts = VEC_alloc (gimple, heap, 5);
- VEC_quick_push (gimple, lp->stmts, stmt);
+ lp->stmts.create (5);
+ lp->stmts.quick_push (stmt);
lp->save_var = NULL;
}
}
if (lp->save_var)
return;
- for (i = 0; VEC_iterate (gimple, lp->stmts, i, oldstmt); ++i)
+ for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
{
if (stmt == oldstmt)
return;
gimple_bb (oldstmt), gimple_bb (stmt)));
}
/* Store is on a different code path. */
- VEC_safe_push (gimple, heap, lp->stmts, stmt);
+ lp->stmts.safe_push (stmt);
}
}
{
if (dump_file)
fprintf (dump_file, "DUMPING with logging functions\n");
- for (i = 0; VEC_iterate (gimple, lp->stmts, i, stmt); ++i)
+ for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
tm_log_emit_stmt (lp->addr, stmt);
}
}
gimple stmt;
struct tm_log_entry l, *lp;
- for (i = 0; i < VEC_length (tree, tm_log_save_addresses); ++i)
+ for (i = 0; i < tm_log_save_addresses.length (); ++i)
{
- l.addr = VEC_index (tree, tm_log_save_addresses, i);
+ l.addr = tm_log_save_addresses[i];
lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
gcc_assert (lp->save_var != NULL);
gimple_stmt_iterator gsi;
gimple stmt;
- for (i = VEC_length (tree, tm_log_save_addresses) - 1; i >= 0; i--)
+ for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
{
- l.addr = VEC_index (tree, tm_log_save_addresses, i);
+ l.addr = tm_log_save_addresses[i];
lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
gcc_assert (lp->save_var != NULL);
};
typedef struct tm_region *tm_region_p;
-DEF_VEC_P (tm_region_p);
-DEF_VEC_ALLOC_P (tm_region_p, heap);
/* True if there are pending edge statements to be committed for the
current function being scanned in the tmmark pass. */
edge_iterator ei;
edge e;
basic_block bb;
- VEC(basic_block, heap) *queue = NULL;
+ vec<basic_block> queue = vec<basic_block>();
bitmap visited_blocks = BITMAP_ALLOC (NULL);
struct tm_region *old_region;
- VEC(tm_region_p, heap) *bb_regions = NULL;
+ vec<tm_region_p> bb_regions = vec<tm_region_p>();
all_tm_regions = region;
bb = single_succ (ENTRY_BLOCK_PTR);
/* We could store this information in bb->aux, but we may get called
 through get_all_tm_blocks() from another pass that may already be
using bb->aux. */
- VEC_safe_grow_cleared (tm_region_p, heap, bb_regions, last_basic_block);
+ bb_regions.safe_grow_cleared (last_basic_block);
- VEC_safe_push (basic_block, heap, queue, bb);
- VEC_replace (tm_region_p, bb_regions, bb->index, region);
+ queue.safe_push (bb);
+ bb_regions[bb->index] = region;
do
{
- bb = VEC_pop (basic_block, queue);
- region = VEC_index (tm_region_p, bb_regions, bb->index);
- VEC_replace (tm_region_p, bb_regions, bb->index, NULL);
+ bb = queue.pop ();
+ region = bb_regions[bb->index];
+ bb_regions[bb->index] = NULL;
/* Record exit and irrevocable blocks. */
region = tm_region_init_1 (region, bb);
if (!bitmap_bit_p (visited_blocks, e->dest->index))
{
bitmap_set_bit (visited_blocks, e->dest->index);
- VEC_safe_push (basic_block, heap, queue, e->dest);
+ queue.safe_push (e->dest);
/* If the current block started a new region, make sure that only
the entry block of the new region is associated with this region.
Other successors are still part of the old region. */
if (old_region != region && e->dest != region->entry_block)
- VEC_replace (tm_region_p, bb_regions, e->dest->index, old_region);
+ bb_regions[e->dest->index] = old_region;
else
- VEC_replace (tm_region_p, bb_regions, e->dest->index, region);
+ bb_regions[e->dest->index] = region;
}
}
- while (!VEC_empty (basic_block, queue));
- VEC_free (basic_block, heap, queue);
+ while (!queue.is_empty ());
+ queue.release ();
BITMAP_FREE (visited_blocks);
- VEC_free (tm_region_p, heap, bb_regions);
+ bb_regions.release ();
}
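tm_region_init's walk above is the canonical worklist shape under the new API: a by-value vec<basic_block> used as a stack, a visited bitmap, and an explicit release () at the end. Condensed to its skeleton, with a hypothetical visit () standing in for the per-block work:

    vec<basic_block> queue = vec<basic_block>();
    bitmap visited = BITMAP_ALLOC (NULL);
    queue.safe_push (entry_bb);
    bitmap_set_bit (visited, entry_bb->index);
    do
      {
        basic_block bb = queue.pop ();
        visit (bb);                     /* Hypothetical per-block work.  */
        edge e;
        edge_iterator ei;
        FOR_EACH_EDGE (e, ei, bb->succs)
          if (!bitmap_bit_p (visited, e->dest->index))
            {
              bitmap_set_bit (visited, e->dest->index);
              queue.safe_push (e->dest);
            }
      }
    while (!queue.is_empty ());
    queue.release ();
    BITMAP_FREE (visited);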
/* The "gate" function for all transactional memory expansion and optimization
if (TREE_CODE (rhs) == CONSTRUCTOR)
{
/* Handle the easy initialization to zero. */
- if (CONSTRUCTOR_ELTS (rhs) == 0)
+ if (!CONSTRUCTOR_ELTS (rhs))
rhs = build_int_cst (simple_type, 0);
else
{
STOP_AT_IRREVOCABLE_P is true if caller is uninterested in blocks
following a TM_IRREVOCABLE call. */
-static VEC (basic_block, heap) *
+static vec<basic_block>
get_tm_region_blocks (basic_block entry_block,
bitmap exit_blocks,
bitmap irr_blocks,
bitmap all_region_blocks,
bool stop_at_irrevocable_p)
{
- VEC(basic_block, heap) *bbs = NULL;
+ vec<basic_block> bbs = vec<basic_block>();
unsigned i;
edge e;
edge_iterator ei;
bitmap visited_blocks = BITMAP_ALLOC (NULL);
i = 0;
- VEC_safe_push (basic_block, heap, bbs, entry_block);
+ bbs.safe_push (entry_block);
bitmap_set_bit (visited_blocks, entry_block->index);
do
{
- basic_block bb = VEC_index (basic_block, bbs, i++);
+ basic_block bb = bbs[i++];
if (exit_blocks &&
bitmap_bit_p (exit_blocks, bb->index))
if (!bitmap_bit_p (visited_blocks, e->dest->index))
{
bitmap_set_bit (visited_blocks, e->dest->index);
- VEC_safe_push (basic_block, heap, bbs, e->dest);
+ bbs.safe_push (e->dest);
}
}
- while (i < VEC_length (basic_block, bbs));
+ while (i < bbs.length ());
if (all_region_blocks)
bitmap_ior_into (all_region_blocks, visited_blocks);
static void *
collect_bb2reg (struct tm_region *region, void *data)
{
- VEC(tm_region_p, heap) *bb2reg = (VEC(tm_region_p, heap) *) data;
- VEC (basic_block, heap) *queue;
+ vec<tm_region_p> *bb2reg = (vec<tm_region_p> *) data;
+ vec<basic_block> queue;
unsigned int i;
basic_block bb;
// We expect expand_region to perform a post-order traversal of the region
// tree. Therefore the last region seen for any bb is the innermost.
- FOR_EACH_VEC_ELT (basic_block, queue, i, bb)
- VEC_replace (tm_region_p, bb2reg, bb->index, region);
+ FOR_EACH_VEC_ELT (queue, i, bb)
+ (*bb2reg)[bb->index] = region;
- VEC_free (basic_block, heap, queue);
+ queue.release ();
return NULL;
}
// ??? There is currently a hack inside tree-ssa-pre.c to work around the
// only known instance of this block sharing.
-static VEC(tm_region_p, heap) *
+static vec<tm_region_p>
get_bb_regions_instrumented (void)
{
unsigned n = last_basic_block;
- VEC(tm_region_p, heap) *ret;
+ vec<tm_region_p> ret;
- ret = VEC_alloc (tm_region_p, heap, n);
- VEC_safe_grow_cleared (tm_region_p, heap, ret, n);
- expand_regions (all_tm_regions, collect_bb2reg, ret);
+ ret.create (n);
+ ret.safe_grow_cleared (n);
+ expand_regions (all_tm_regions, collect_bb2reg, &ret);
return ret;
}
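Because vec<tm_region_p> is now passed and returned by value, get_bb_regions_instrumented must hand expand_regions the address of ret (&ret, above) so the callback can mutate the caller's vector through the void *data cookie; under the old API the VEC pointer itself served as the cookie. A sketch of the receiving side, mirroring collect_bb2reg; fill_cb is a hypothetical name:

    static void *
    fill_cb (struct tm_region *region, void *data)
    {
      vec<tm_region_p> *out = (vec<tm_region_p> *) data;
      (*out)[region->entry_block->index] = region;  /* Writes caller's vec.  */
      return NULL;
    }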
compute_transaction_bits (void)
{
struct tm_region *region;
- VEC (basic_block, heap) *queue;
+ vec<basic_block> queue;
unsigned int i;
basic_block bb;
region->irr_blocks,
NULL,
/*stop_at_irr_p=*/true);
- for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
+ for (i = 0; queue.iterate (i, &bb); ++i)
bb->flags |= BB_IN_TRANSACTION;
- VEC_free (basic_block, heap, queue);
+ queue.release ();
}
if (all_tm_regions)
}
// Generate log saves.
- if (!VEC_empty (tree, tm_log_save_addresses))
+ if (!tm_log_save_addresses.is_empty ())
tm_log_emit_saves (region->entry_block, transaction_bb);
// In the beginning, we've no tests to perform on transaction restart.
region->restart_block = region->entry_block;
// Generate log restores.
- if (!VEC_empty (tree, tm_log_save_addresses))
+ if (!tm_log_save_addresses.is_empty ())
{
basic_block test_bb = create_empty_bb (transaction_bb);
basic_block code_bb = create_empty_bb (test_bb);
tm_log_init ();
- VEC(tm_region_p, heap) *bb_regions = get_bb_regions_instrumented ();
+ vec<tm_region_p> bb_regions = get_bb_regions_instrumented ();
struct tm_region *r;
unsigned i;
// Expand memory operations into calls into the runtime.
// This collects log entries as well.
- FOR_EACH_VEC_ELT (tm_region_p, bb_regions, i, r)
+ FOR_EACH_VEC_ELT (bb_regions, i, r)
if (r != NULL)
expand_block_tm (r, BASIC_BLOCK (i));
static unsigned int
execute_tm_edges (void)
{
- VEC(tm_region_p, heap) *bb_regions = get_bb_regions_instrumented ();
+ vec<tm_region_p> bb_regions = get_bb_regions_instrumented ();
struct tm_region *r;
unsigned i;
- FOR_EACH_VEC_ELT (tm_region_p, bb_regions, i, r)
+ FOR_EACH_VEC_ELT (bb_regions, i, r)
if (r != NULL)
expand_block_edges (r, BASIC_BLOCK (i));
- VEC_free (tm_region_p, heap, bb_regions);
+ bb_regions.release ();
/* We've got to release the dominance info now, to indicate that it
must be rebuilt completely. Otherwise we'll crash trying to update
/* Prettily dump all of the memopt sets in BLOCKS. */
static void
-dump_tm_memopt_sets (VEC (basic_block, heap) *blocks)
+dump_tm_memopt_sets (vec<basic_block> blocks)
{
size_t i;
basic_block bb;
- for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
+ for (i = 0; blocks.iterate (i, &bb); ++i)
{
fprintf (dump_file, "------------BB %d---------\n", bb->index);
dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
static void
tm_memopt_compute_available (struct tm_region *region,
- VEC (basic_block, heap) *blocks)
+ vec<basic_block> blocks)
{
edge e;
basic_block *worklist, *qin, *qout, *qend, bb;
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks in the region. */
- qlen = VEC_length (basic_block, blocks) - 1;
+ qlen = blocks.length () - 1;
qin = qout = worklist =
XNEWVEC (basic_block, qlen);
/* Put every block in the region on the worklist. */
- for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
+ for (i = 0; blocks.iterate (i, &bb); ++i)
{
/* Seed AVAIL_OUT with the LOCAL set. */
bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
static void
tm_memopt_compute_antic (struct tm_region *region,
- VEC (basic_block, heap) *blocks)
+ vec<basic_block> blocks)
{
edge e;
basic_block *worklist, *qin, *qout, *qend, bb;
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks in the region. */
- qin = qout = worklist =
- XNEWVEC (basic_block, VEC_length (basic_block, blocks));
+ qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());
- for (qlen = 0, i = VEC_length (basic_block, blocks) - 1; i >= 0; --i)
+ for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
{
- bb = VEC_index (basic_block, blocks, i);
+ bb = blocks[i];
/* Seed ANTIC_OUT with the LOCAL set. */
bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
basic blocks in BLOCKS. */
static void
-tm_memopt_transform_blocks (VEC (basic_block, heap) *blocks)
+tm_memopt_transform_blocks (vec<basic_block> blocks)
{
size_t i;
basic_block bb;
gimple_stmt_iterator gsi;
- for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
+ for (i = 0; blocks.iterate (i, &bb); ++i)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
/* Free sets computed for each BB. */
static void
-tm_memopt_free_sets (VEC (basic_block, heap) *blocks)
+tm_memopt_free_sets (vec<basic_block> blocks)
{
size_t i;
basic_block bb;
- for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
+ for (i = 0; blocks.iterate (i, &bb); ++i)
bb->aux = NULL;
}
/* Clear the visited bit for every basic block in BLOCKS. */
static void
-tm_memopt_clear_visited (VEC (basic_block, heap) *blocks)
+tm_memopt_clear_visited (vec<basic_block> blocks)
{
size_t i;
basic_block bb;
- for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
+ for (i = 0; blocks.iterate (i, &bb); ++i)
BB_VISITED_P (bb) = false;
}
execute_tm_memopt (void)
{
struct tm_region *region;
- VEC (basic_block, heap) *bbs;
+ vec<basic_block> bbs;
tm_memopt_value_id = 0;
tm_memopt_value_numbers = htab_create (10, tm_memop_hash, tm_memop_eq, free);
false);
/* Collect all the memory operations. */
- for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
+ for (i = 0; bbs.iterate (i, &bb); ++i)
{
bb->aux = tm_memopt_init_sets ();
tm_memopt_accumulate_memops (bb);
tm_memopt_transform_blocks (bbs);
tm_memopt_free_sets (bbs);
- VEC_free (basic_block, heap, bbs);
+ bbs.release ();
bitmap_obstack_release (&tm_memopt_obstack);
htab_empty (tm_memopt_value_numbers);
}
bool want_irr_scan_normal;
};
-typedef VEC (cgraph_node_p, heap) *cgraph_node_queue;
+typedef vec<cgraph_node_ptr> cgraph_node_queue;
/* Return the ipa data associated with NODE, allocating zeroed memory
if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
if (!*in_queue_p)
{
*in_queue_p = true;
- VEC_safe_push (cgraph_node_p, heap, *queue_p, node);
+ queue_p->safe_push (node);
}
}
static void
ipa_uninstrument_transaction (struct tm_region *region,
- VEC (basic_block, heap) *queue)
+ vec<basic_block> queue)
{
gimple transaction = region->transaction_stmt;
basic_block transaction_bb = gimple_bb (transaction);
- int n = VEC_length (basic_block, queue);
+ int n = queue.length ();
basic_block *new_bbs = XNEWVEC (basic_block, n);
- copy_bbs (VEC_address (basic_block, queue), n, new_bbs,
- NULL, 0, NULL, NULL, transaction_bb);
+ copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb);
edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED);
add_phi_args_after_copy (new_bbs, n, e);
for (r = all_tm_regions; r; r = r->next)
{
- VEC (basic_block, heap) *bbs;
+ vec<basic_block> bbs;
basic_block bb;
unsigned i;
// Generate the uninstrumented code path for this transaction.
ipa_uninstrument_transaction (r, bbs);
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
ipa_tm_scan_calls_block (callees_p, bb, false);
- VEC_free (basic_block, heap, bbs);
+ bbs.release ();
}
// ??? copy_bbs should maintain cgraph edges for the blocks as it is
scanning past OLD_IRR or EXIT_BLOCKS. */
static bool
-ipa_tm_scan_irr_blocks (VEC (basic_block, heap) **pqueue, bitmap new_irr,
+ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
bitmap old_irr, bitmap exit_blocks)
{
bool any_new_irr = false;
do
{
- basic_block bb = VEC_pop (basic_block, *pqueue);
+ basic_block bb = pqueue->pop ();
/* Don't re-scan blocks we know already are irrevocable. */
if (old_irr && bitmap_bit_p (old_irr, bb->index))
if (!bitmap_bit_p (visited_blocks, e->dest->index))
{
bitmap_set_bit (visited_blocks, e->dest->index);
- VEC_safe_push (basic_block, heap, *pqueue, e->dest);
+ pqueue->safe_push (e->dest);
}
}
}
- while (!VEC_empty (basic_block, *pqueue));
+ while (!pqueue->is_empty ());
BITMAP_FREE (visited_blocks);
ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
bitmap old_irr, bitmap exit_blocks)
{
- VEC (basic_block, heap) *bbs;
+ vec<basic_block> bbs;
bitmap all_region_blocks;
/* If this block is in the old set, no need to rescan. */
all_region_blocks, false);
do
{
- basic_block bb = VEC_pop (basic_block, bbs);
+ basic_block bb = bbs.pop ();
bool this_irr = bitmap_bit_p (new_irr, bb->index);
bool all_son_irr = false;
edge_iterator ei;
}
}
}
- while (!VEC_empty (basic_block, bbs));
+ while (!bbs.is_empty ());
BITMAP_FREE (all_region_blocks);
- VEC_free (basic_block, heap, bbs);
+ bbs.release ();
}
static void
{
struct tm_ipa_cg_data *d;
bitmap new_irr, old_irr;
- VEC (basic_block, heap) *queue;
+ vec<basic_block> queue;
bool ret = false;
/* Builtin operators (operator new, and such). */
calculate_dominance_info (CDI_DOMINATORS);
d = get_cg_data (&node, true);
- queue = VEC_alloc (basic_block, heap, 10);
+ queue.create (10);
new_irr = BITMAP_ALLOC (&tm_obstack);
/* Scan each tm region, propagating irrevocable status through the tree. */
if (for_clone)
{
old_irr = d->irrevocable_blocks_clone;
- VEC_quick_push (basic_block, queue, single_succ (ENTRY_BLOCK_PTR));
+ queue.quick_push (single_succ (ENTRY_BLOCK_PTR));
if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
{
ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR), new_irr,
old_irr = d->irrevocable_blocks_normal;
for (region = d->all_tm_regions; region; region = region->next)
{
- VEC_quick_push (basic_block, queue, region->entry_block);
+ queue.quick_push (region->entry_block);
if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
region->exit_blocks))
ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
else
BITMAP_FREE (new_irr);
- VEC_free (basic_block, heap, queue);
+ queue.release ();
pop_cfun ();
return ret;
}
else
{
- VEC (basic_block, heap) *bbs;
+ vec<basic_block> bbs;
gimple_stmt_iterator gsi;
basic_block bb;
size_t i;
bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
r->irr_blocks, NULL, false);
- for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
+ for (i = 0; bbs.iterate (i, &bb); ++i)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
"atomic transaction", fndecl);
}
- VEC_free (basic_block, heap, bbs);
+ bbs.release ();
}
}
if (DECL_ONE_ONLY (new_decl))
DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
- new_node = cgraph_copy_node_for_versioning (old_node, new_decl, NULL, NULL);
+ new_node = cgraph_copy_node_for_versioning (old_node, new_decl,
+ vec<cgraph_edge_p>(),
+ NULL);
new_node->symbol.externally_visible = old_node->symbol.externally_visible;
new_node->lowered = true;
new_node->tm_clone = 1;
DECL_WEAK (new_decl) = 0;
}
- tree_function_versioning (old_decl, new_decl, NULL, false, NULL, false,
- NULL, NULL);
+ tree_function_versioning (old_decl, new_decl,
+ NULL, false, NULL,
+ false, NULL, NULL);
}
record_tm_clone_pair (old_decl, new_decl);
bool need_ssa_rename = false;
edge e;
edge_iterator ei;
- VEC(basic_block, heap) *queue = NULL;
+ vec<basic_block> queue = vec<basic_block>();
bitmap visited_blocks = BITMAP_ALLOC (NULL);
- VEC_safe_push (basic_block, heap, queue, bb);
+ queue.safe_push (bb);
do
{
- bb = VEC_pop (basic_block, queue);
+ bb = queue.pop ();
need_ssa_rename |=
ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
if (!bitmap_bit_p (visited_blocks, e->dest->index))
{
bitmap_set_bit (visited_blocks, e->dest->index);
- VEC_safe_push (basic_block, heap, queue, e->dest);
+ queue.safe_push (e->dest);
}
}
- while (!VEC_empty (basic_block, queue));
+ while (!queue.is_empty ());
- VEC_free (basic_block, heap, queue);
+ queue.release ();
BITMAP_FREE (visited_blocks);
return need_ssa_rename;
static unsigned int
ipa_tm_execute (void)
{
- cgraph_node_queue tm_callees = NULL;
+ cgraph_node_queue tm_callees = cgraph_node_queue();
/* List of functions that will go irrevocable. */
- cgraph_node_queue irr_worklist = NULL;
+ cgraph_node_queue irr_worklist = cgraph_node_queue();
struct cgraph_node *node;
struct tm_ipa_cg_data *d;
/* For every local function on the callee list, scan as if we will be
creating a transactional clone, queueing all new functions we find
along the way. */
- for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
+ for (i = 0; i < tm_callees.length (); ++i)
{
- node = VEC_index (cgraph_node_p, tm_callees, i);
+ node = tm_callees[i];
a = cgraph_function_body_availability (node);
d = get_cg_data (&node, true);
}
/* Iterate scans until no more work to be done. Prefer not to use
- VEC_pop because the worklist tends to follow a breadth-first
+ vec::pop because the worklist tends to follow a breadth-first
    search of the callgraph, which should allow convergence with a
minimum number of scans. But we also don't want the worklist
array to grow without bound, so we shift the array up periodically. */
- for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
+ for (i = 0; i < irr_worklist.length (); ++i)
{
- if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
+ if (i > 256 && i == irr_worklist.length () / 8)
{
- VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
+ irr_worklist.block_remove (0, i);
i = 0;
}
- node = VEC_index (cgraph_node_p, irr_worklist, i);
+ node = irr_worklist[i];
d = get_cg_data (&node, true);
d->in_worklist = false;
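The shift above is what the comment before this loop describes: processed entries are left in place so the scan preserves breadth-first order, and once the cursor is past 256 entries and one eighth of the array, block_remove drops the consumed prefix and the index restarts, keeping growth bounded. The idiom in isolation, with a hypothetical process ():

    for (unsigned i = 0; i < worklist.length (); ++i)
      {
        if (i > 256 && i == worklist.length () / 8)
          {
            worklist.block_remove (0, i);  /* Drop the processed prefix.  */
            i = 0;                         /* Former element i is now at 0.  */
          }
        process (worklist[i]);
      }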
/* For every function on the callee list, collect the tm_may_enter_irr
bit on the node. */
- VEC_truncate (cgraph_node_p, irr_worklist, 0);
- for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
+ irr_worklist.truncate (0);
+ for (i = 0; i < tm_callees.length (); ++i)
{
- node = VEC_index (cgraph_node_p, tm_callees, i);
+ node = tm_callees[i];
if (ipa_tm_mayenterirr_function (node))
{
d = get_cg_data (&node, true);
}
/* Propagate the tm_may_enter_irr bit to callers until stable. */
- for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
+ for (i = 0; i < irr_worklist.length (); ++i)
{
struct cgraph_node *caller;
struct cgraph_edge *e;
struct ipa_ref *ref;
unsigned j;
- if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
+ if (i > 256 && i == irr_worklist.length () / 8)
{
- VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
+ irr_worklist.block_remove (0, i);
i = 0;
}
- node = VEC_index (cgraph_node_p, irr_worklist, i);
+ node = irr_worklist[i];
d = get_cg_data (&node, true);
d->in_worklist = false;
node->local.tm_may_enter_irr = true;
/* Create clones. Do those that are not irrevocable and have a
positive call count. Do those publicly visible functions that
the user directed us to clone. */
- for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
+ for (i = 0; i < tm_callees.length (); ++i)
{
bool doit = false;
- node = VEC_index (cgraph_node_p, tm_callees, i);
+ node = tm_callees[i];
if (node->same_body_alias)
continue;
}
/* Redirect calls to the new clones, and insert irrevocable marks. */
- for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
+ for (i = 0; i < tm_callees.length (); ++i)
{
- node = VEC_index (cgraph_node_p, tm_callees, i);
+ node = tm_callees[i];
if (node->analyzed)
{
d = get_cg_data (&node, true);
}
/* Free and clear all data structures. */
- VEC_free (cgraph_node_p, heap, tm_callees);
- VEC_free (cgraph_node_p, heap, irr_worklist);
+ tm_callees.release ();
+ irr_worklist.release ();
bitmap_obstack_release (&tm_obstack);
free_original_copy_tables ();
/* Static variables. */
static htab_t TB_up_ht;
-static VEC(tree,gc) *TB_history_stack;
+static vec<tree, va_gc> *TB_history_stack;
static int TB_verbose = 1;
fprintf (TB_OUT_FILE, "\nTree Browser\n");
#define TB_SET_HEAD(N) do { \
- VEC_safe_push (tree, gc, TB_history_stack, N); \
+ vec_safe_push (TB_history_stack, N); \
head = N; \
if (TB_verbose) \
if (head) \
static tree
TB_history_prev (void)
{
- if (!VEC_empty (tree, TB_history_stack))
+ if (!vec_safe_is_empty (TB_history_stack))
{
- tree last = VEC_last (tree, TB_history_stack);
- VEC_pop (tree, TB_history_stack);
+ tree last = TB_history_stack->last ();
+ TB_history_stack->pop ();
return last;
}
return NULL_TREE;
enum tree_code tcode,
const char *temp_name1,
const char *temp_name2,
- VEC (gimple, heap) *conds,
+ vec<gimple> conds,
unsigned *nconds)
{
tree lbub_real_cst, lbub_cst, float_type;
gimple_assign_set_lhs (stmt2, tempcn);
stmt3 = gimple_build_cond_from_tree (tempcn, NULL_TREE, NULL_TREE);
- VEC_quick_push (gimple, conds, stmt1);
- VEC_quick_push (gimple, conds, stmt2);
- VEC_quick_push (gimple, conds, stmt3);
+ conds.quick_push (stmt1);
+ conds.quick_push (stmt2);
+ conds.quick_push (stmt3);
(*nconds)++;
}
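gen_one_condition contributes a group of up to three statements to conds; groups belonging to different logical conditions are separated by a NULL entry (pushed by the callers below), and *nconds counts groups, not statements. The encoding in miniature, with hypothetical stmt_* values:

    conds.quick_push (stmt_a1);  /* Group for logical condition 1.  */
    conds.quick_push (stmt_a2);
    conds.quick_push (NULL);     /* Separator.  */
    conds.quick_push (stmt_b1);  /* Group for logical condition 2.  */
    /* nconds == 2; consumers walk each group up to the NULL entry.  */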
static void
gen_conditions_for_domain (tree arg, inp_domain domain,
- VEC (gimple, heap) *conds,
+ vec<gimple> conds,
unsigned *nconds)
{
if (domain.has_lb)
{
/* Now push a separator. */
if (domain.has_lb)
- VEC_quick_push (gimple, conds, NULL);
+ conds.quick_push (NULL);
gen_one_condition (arg, domain.ub,
(domain.is_ub_inclusive
static void
gen_conditions_for_pow_cst_base (tree base, tree expn,
- VEC (gimple, heap) *conds,
+ vec<gimple> conds,
unsigned *nconds)
{
inp_domain exp_domain;
static void
gen_conditions_for_pow_int_base (tree base, tree expn,
- VEC (gimple, heap) *conds,
+ vec<gimple> conds,
unsigned *nconds)
{
gimple base_def;
type is integer. */
/* Push a separator. */
- VEC_quick_push (gimple, conds, NULL);
+ conds.quick_push (NULL);
temp = create_tmp_var (int_type, "DCE_COND1");
cst0 = build_int_cst (int_type, 0);
gimple_assign_set_lhs (stmt1, tempn);
stmt2 = gimple_build_cond (LE_EXPR, tempn, cst0, NULL_TREE, NULL_TREE);
- VEC_quick_push (gimple, conds, stmt1);
- VEC_quick_push (gimple, conds, stmt2);
+ conds.quick_push (stmt1);
+ conds.quick_push (stmt2);
(*nconds)++;
}
/* Method to generate conditional statements for guarding conditionally
dead calls to pow. One or more statements can be generated for
each logical condition. Statement groups of different conditions
- are separated by a NULL tree and they are stored in the VEC
+ are separated by a NULL tree and they are stored in the vec
conds. The number of logical conditions are stored in *nconds.
See C99 standard, 7.12.7.4:2, for description of pow (x, y).
and *NCONDS is the number of logical conditions. */
static void
-gen_conditions_for_pow (gimple pow_call, VEC (gimple, heap) *conds,
+gen_conditions_for_pow (gimple pow_call, vec<gimple> conds,
unsigned *nconds)
{
tree base, expn;
condition are separated by NULL tree in the vector. */
static void
-gen_shrink_wrap_conditions (gimple bi_call, VEC (gimple, heap) *conds,
+gen_shrink_wrap_conditions (gimple bi_call, vec<gimple> conds,
unsigned int *nconds)
{
gimple call;
tree fn;
enum built_in_function fnc;
- gcc_assert (nconds && conds);
- gcc_assert (VEC_length (gimple, conds) == 0);
+ gcc_assert (nconds && conds.exists ());
+ gcc_assert (conds.length () == 0);
gcc_assert (is_gimple_call (bi_call));
call = bi_call;
basic_block bi_call_bb, join_tgt_bb, guard_bb, guard_bb0;
edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
edge bi_call_in_edge0, guard_bb_in_edge;
- VEC (gimple, heap) *conds;
+ vec<gimple> conds;
unsigned tn_cond_stmts, nconds;
unsigned ci;
gimple cond_expr = NULL;
tree bi_call_label_decl;
gimple bi_call_label;
- conds = VEC_alloc (gimple, heap, 12);
+ conds.create (12);
gen_shrink_wrap_conditions (bi_call, conds, &nconds);
/* This can happen if the condition generator decides
/* Now it is time to insert the first conditional expression
into bi_call_bb and split this bb so that bi_call is
shrink-wrapped. */
- tn_cond_stmts = VEC_length (gimple, conds);
+ tn_cond_stmts = conds.length ();
cond_expr = NULL;
- cond_expr_start = VEC_index (gimple, conds, 0);
+ cond_expr_start = conds[0];
for (ci = 0; ci < tn_cond_stmts; ci++)
{
- gimple c = VEC_index (gimple, conds, ci);
+ gimple c = conds[ci];
gcc_assert (c || ci != 0);
if (!c)
break;
edge bi_call_in_edge;
gimple_stmt_iterator guard_bsi = gsi_for_stmt (cond_expr_start);
ci0 = ci;
- cond_expr_start = VEC_index (gimple, conds, ci0);
+ cond_expr_start = conds[ci0];
for (; ci < tn_cond_stmts; ci++)
{
- gimple c = VEC_index (gimple, conds, ci);
+ gimple c = conds[ci];
gcc_assert (c || ci != ci0);
if (!c)
break;
guard_bb_in_edge->count = guard_bb->count - bi_call_in_edge->count;
}
- VEC_free (gimple, heap, conds);
+ conds.release ();
if (dump_file && (dump_flags & TDF_DETAILS))
{
location_t loc;
wrapping transformation. */
static bool
-shrink_wrap_conditional_dead_built_in_calls (VEC (gimple, heap) *calls)
+shrink_wrap_conditional_dead_built_in_calls (vec<gimple> calls)
{
bool changed = false;
unsigned i = 0;
- unsigned n = VEC_length (gimple, calls);
+ unsigned n = calls.length ();
if (n == 0)
return false;
for (; i < n ; i++)
{
- gimple bi_call = VEC_index (gimple, calls, i);
+ gimple bi_call = calls[i];
changed |= shrink_wrap_one_built_in_call (bi_call);
}
basic_block bb;
gimple_stmt_iterator i;
bool something_changed = false;
- VEC (gimple, heap) *cond_dead_built_in_calls = NULL;
+ vec<gimple> cond_dead_built_in_calls = vec<gimple>();
FOR_EACH_BB (bb)
{
/* Collect dead call candidates. */
print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
fprintf (dump_file, "\n");
}
- if (cond_dead_built_in_calls == NULL)
- cond_dead_built_in_calls = VEC_alloc (gimple, heap, 64);
- VEC_safe_push (gimple, heap, cond_dead_built_in_calls, stmt);
+ if (!cond_dead_built_in_calls.exists ())
+ cond_dead_built_in_calls.create (64);
+ cond_dead_built_in_calls.safe_push (stmt);
}
}
}
- if (cond_dead_built_in_calls == NULL)
+ if (!cond_dead_built_in_calls.exists ())
return 0;
something_changed
= shrink_wrap_conditional_dead_built_in_calls (cond_dead_built_in_calls);
- VEC_free (gimple, heap, cond_dead_built_in_calls);
+ cond_dead_built_in_calls.release ();
if (something_changed)
{
profile_status_for_function (fn) = PROFILE_ABSENT;
n_basic_blocks_for_function (fn) = NUM_FIXED_BLOCKS;
last_basic_block_for_function (fn) = NUM_FIXED_BLOCKS;
- basic_block_info_for_function (fn)
- = VEC_alloc (basic_block, gc, initial_cfg_capacity);
- VEC_safe_grow_cleared (basic_block, gc,
- basic_block_info_for_function (fn),
+ vec_alloc (basic_block_info_for_function (fn), initial_cfg_capacity);
+ vec_safe_grow_cleared (basic_block_info_for_function (fn),
initial_cfg_capacity);
/* Build a mapping of labels to their associated blocks. */
- label_to_block_map_for_function (fn)
- = VEC_alloc (basic_block, gc, initial_cfg_capacity);
- VEC_safe_grow_cleared (basic_block, gc,
- label_to_block_map_for_function (fn),
+ vec_alloc (label_to_block_map_for_function (fn), initial_cfg_capacity);
+ vec_safe_grow_cleared (label_to_block_map_for_function (fn),
initial_cfg_capacity);
SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK,
create_empty_bb (ENTRY_BLOCK_PTR);
/* Adjust the size of the array. */
- if (VEC_length (basic_block, basic_block_info) < (size_t) n_basic_blocks)
- VEC_safe_grow_cleared (basic_block, gc, basic_block_info, n_basic_blocks);
+ if (basic_block_info->length () < (size_t) n_basic_blocks)
+ vec_safe_grow_cleared (basic_block_info, n_basic_blocks);
/* To speed up statement iterator walks, we first purge dead labels. */
cleanup_dead_labels ();
link_block (bb, after);
/* Grow the basic block array if needed. */
- if ((size_t) last_basic_block == VEC_length (basic_block, basic_block_info))
+ if ((size_t) last_basic_block == basic_block_info->length ())
{
size_t new_size = last_basic_block + (last_basic_block + 3) / 4;
- VEC_safe_grow_cleared (basic_block, gc, basic_block_info, new_size);
+ vec_safe_grow_cleared (basic_block_info, new_size);
}
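The growth step reserves roughly 25% headroom, so repeated block creation costs amortized O(1) reallocations; vec_safe_grow_cleared both resizes and zeroes the new tail, and tolerates a NULL vector. For instance:

    size_t new_size = last_basic_block + (last_basic_block + 3) / 4;
    /* e.g. last_basic_block == 100  =>  new_size == 125.  */
    vec_safe_grow_cleared (basic_block_info, new_size);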
/* Add the newly created block to the array. */
gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
uid = LABEL_DECL_UID (dest);
}
- if (VEC_length (basic_block, ifun->cfg->x_label_to_block_map)
- <= (unsigned int) uid)
+ if (vec_safe_length (ifun->cfg->x_label_to_block_map) <= (unsigned int) uid)
return NULL;
- return VEC_index (basic_block, ifun->cfg->x_label_to_block_map, uid);
+ return (*ifun->cfg->x_label_to_block_map)[uid];
}
/* Create edges for an abnormal goto statement at block BB. If FOR_CALL
if (cfun->eh == NULL)
return;
- for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
if (lp && lp->post_landing_pad)
{
lab = main_block_label (lp->post_landing_pad);
void
delete_tree_cfg_annotations (void)
{
- label_to_block_map = NULL;
+ vec_free (label_to_block_map);
}
static void
reinstall_phi_args (edge new_edge, edge old_edge)
{
- edge_var_map_vector v;
+ edge_var_map_vector *v;
edge_var_map *vm;
int i;
gimple_stmt_iterator phis;
return;
for (i = 0, phis = gsi_start_phis (new_edge->dest);
- VEC_iterate (edge_var_map, v, i, vm) && !gsi_end_p (phis);
+ v->iterate (i, &vm) && !gsi_end_p (phis);
i++, gsi_next (&phis))
{
gimple phi = gsi_stmt (phis);
uid = LABEL_DECL_UID (decl);
if (cfun->cfg
- && (uid == -1
- || VEC_index (basic_block,
- label_to_block_map, uid) != gimple_bb (stmt)))
+ && (uid == -1 || (*label_to_block_map)[uid] != gimple_bb (stmt)))
{
error ("incorrect entry in label_to_block_map");
err |= true;
bool free_region_copy = false, copying_header = false;
struct loop *loop = entry->dest->loop_father;
edge exit_copy;
- VEC (basic_block, heap) *doms;
+ vec<basic_block> doms;
edge redirected;
int total_freq = 0, entry_freq = 0;
gcov_type total_count = 0, entry_count = 0;
/* Record blocks outside the region that are dominated by something
inside. */
- doms = NULL;
+ doms.create (0);
initialize_original_copy_tables ();
doms = get_dominated_by_region (CDI_DOMINATORS, region, n_region);
region, but was dominated by something inside needs recounting as
well. */
set_immediate_dominator (CDI_DOMINATORS, entry->dest, entry->src);
- VEC_safe_push (basic_block, heap, doms, get_bb_original (entry->dest));
+ doms.safe_push (get_bb_original (entry->dest));
iterate_fix_dominators (CDI_DOMINATORS, doms, false);
- VEC_free (basic_block, heap, doms);
+ doms.release ();
/* Add the other PHI node arguments. */
add_phi_args_after_copy (region_copy, n_region, NULL);
struct loop *loop = exit->dest->loop_father;
struct loop *orig_loop = entry->dest->loop_father;
basic_block switch_bb, entry_bb, nentry_bb;
- VEC (basic_block, heap) *doms;
+ vec<basic_block> doms;
int total_freq = 0, exit_freq = 0;
gcov_type total_count = 0, exit_count = 0;
edge exits[2], nexits[2], e;
/* Anything that is outside of the region, but was dominated by something
inside needs to update dominance info. */
iterate_fix_dominators (CDI_DOMINATORS, doms, false);
- VEC_free (basic_block, heap, doms);
+ doms.release ();
/* Update the SSA web. */
update_ssa (TODO_update_ssa);
void
gather_blocks_in_sese_region (basic_block entry, basic_block exit,
- VEC(basic_block,heap) **bbs_p)
+ vec<basic_block> *bbs_p)
{
basic_block son;
son;
son = next_dom_son (CDI_DOMINATORS, son))
{
- VEC_safe_push (basic_block, heap, *bbs_p, son);
+ bbs_p->safe_push (son);
if (son != exit)
gather_blocks_in_sese_region (son, exit, bbs_p);
}
}
/* Remove BB from the original basic block array. */
- VEC_replace (basic_block, cfun->cfg->x_basic_block_info, bb->index, NULL);
+ (*cfun->cfg->x_basic_block_info)[bb->index] = NULL;
cfun->cfg->x_n_basic_blocks--;
/* Grow DEST_CFUN's basic block array if needed. */
if (bb->index >= cfg->x_last_basic_block)
cfg->x_last_basic_block = bb->index + 1;
- old_len = VEC_length (basic_block, cfg->x_basic_block_info);
+ old_len = vec_safe_length (cfg->x_basic_block_info);
if ((unsigned) cfg->x_last_basic_block >= old_len)
{
new_len = cfg->x_last_basic_block + (cfg->x_last_basic_block + 3) / 4;
- VEC_safe_grow_cleared (basic_block, gc, cfg->x_basic_block_info,
- new_len);
+ vec_safe_grow_cleared (cfg->x_basic_block_info, new_len);
}
- VEC_replace (basic_block, cfg->x_basic_block_info,
- bb->index, bb);
+ (*cfg->x_basic_block_info)[bb->index] = bb;
/* Remap the variables in phi nodes. */
for (si = gsi_start_phis (bb); !gsi_end_p (si); )
gcc_assert (uid > -1);
- old_len = VEC_length (basic_block, cfg->x_label_to_block_map);
+ old_len = vec_safe_length (cfg->x_label_to_block_map);
if (old_len <= (unsigned) uid)
{
new_len = 3 * uid / 2 + 1;
- VEC_safe_grow_cleared (basic_block, gc,
- cfg->x_label_to_block_map, new_len);
+ vec_safe_grow_cleared (cfg->x_label_to_block_map, new_len);
}
- VEC_replace (basic_block, cfg->x_label_to_block_map, uid, bb);
- VEC_replace (basic_block, cfun->cfg->x_label_to_block_map, uid, NULL);
+ (*cfg->x_label_to_block_map)[uid] = bb;
+ (*cfun->cfg->x_label_to_block_map)[uid] = NULL;
gcc_assert (DECL_CONTEXT (label) == dest_cfun->decl);
move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
basic_block exit_bb, tree orig_block)
{
- VEC(basic_block,heap) *bbs, *dom_bbs;
+ vec<basic_block> bbs, dom_bbs;
basic_block dom_entry = get_immediate_dominator (CDI_DOMINATORS, entry_bb);
basic_block after, bb, *entry_pred, *exit_succ, abb;
struct function *saved_cfun = cfun;
/* Collect all the blocks in the region. Manually add ENTRY_BB
because it won't be added by dfs_enumerate_from. */
- bbs = NULL;
- VEC_safe_push (basic_block, heap, bbs, entry_bb);
+ bbs.create (0);
+ bbs.safe_push (entry_bb);
gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);
/* The blocks that used to be dominated by something in BBS will now be
dominated by the new block. */
dom_bbs = get_dominated_by_region (CDI_DOMINATORS,
- VEC_address (basic_block, bbs),
- VEC_length (basic_block, bbs));
+ bbs.address (),
+ bbs.length ());
/* Detach ENTRY_BB and EXIT_BB from CFUN->CFG. We need to remember
the predecessor edges to ENTRY_BB and the successor edges to
{
eh_region region = NULL;
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
region = find_outermost_region_in_block (saved_cfun, bb, region);
init_eh_for_function ();
pop_cfun ();
/* Move blocks from BBS into DEST_CFUN. */
- gcc_assert (VEC_length (basic_block, bbs) >= 2);
+ gcc_assert (bbs.length () >= 2);
after = dest_cfun->cfg->x_entry_block_ptr;
vars_map = pointer_map_create ();
d.eh_map = eh_map;
d.remap_decls_p = true;
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
{
/* No need to update edge counts on the last block. It has
already been updated earlier when we detached the region from
}
set_immediate_dominator (CDI_DOMINATORS, bb, dom_entry);
- FOR_EACH_VEC_ELT (basic_block, dom_bbs, i, abb)
+ FOR_EACH_VEC_ELT (dom_bbs, i, abb)
set_immediate_dominator (CDI_DOMINATORS, abb, bb);
- VEC_free (basic_block, heap, dom_bbs);
+ dom_bbs.release ();
if (exit_bb)
{
free (entry_prob);
free (entry_flag);
free (entry_pred);
- VEC_free (basic_block, heap, bbs);
+ bbs.release ();
return bb;
}
ignore_topmost_bind = true;
fprintf (file, "{\n");
- if (!VEC_empty (tree, fun->local_decls))
+ if (!vec_safe_is_empty (fun->local_decls))
FOR_EACH_LOCAL_DECL (fun, ix, var)
{
print_generic_decl (file, var, flags);
}
}
- if (fun && fun->decl == fndecl && fun->cfg
+ if (fun && fun->decl == fndecl
+ && fun->cfg
&& basic_block_info_for_function (fun))
{
/* If the CFG has been built, emit a CFG-based dump. */
void
remove_edge_and_dominated_blocks (edge e)
{
- VEC (basic_block, heap) *bbs_to_remove = NULL;
- VEC (basic_block, heap) *bbs_to_fix_dom = NULL;
+ vec<basic_block> bbs_to_remove = vec<basic_block>();
+ vec<basic_block> bbs_to_fix_dom = vec<basic_block>();
bitmap df, df_idom;
edge f;
edge_iterator ei;
else
{
bbs_to_remove = get_all_dominated_blocks (CDI_DOMINATORS, e->dest);
- FOR_EACH_VEC_ELT (basic_block, bbs_to_remove, i, bb)
+ FOR_EACH_VEC_ELT (bbs_to_remove, i, bb)
{
FOR_EACH_EDGE (f, ei, bb->succs)
{
bitmap_set_bit (df, f->dest->index);
}
}
- FOR_EACH_VEC_ELT (basic_block, bbs_to_remove, i, bb)
+ FOR_EACH_VEC_ELT (bbs_to_remove, i, bb)
bitmap_clear_bit (df, bb->index);
EXECUTE_IF_SET_IN_BITMAP (df, 0, i, bi)
released DEFs into debug stmts. See
eliminate_unnecessary_stmts() in tree-ssa-dce.c for more
details. */
- for (i = VEC_length (basic_block, bbs_to_remove); i-- > 0; )
- delete_basic_block (VEC_index (basic_block, bbs_to_remove, i));
+ for (i = bbs_to_remove.length (); i-- > 0; )
+ delete_basic_block (bbs_to_remove[i]);
}
/* Update the dominance information. The immediate dominator may change only
for (dbb = first_dom_son (CDI_DOMINATORS, bb);
dbb;
dbb = next_dom_son (CDI_DOMINATORS, dbb))
- VEC_safe_push (basic_block, heap, bbs_to_fix_dom, dbb);
+ bbs_to_fix_dom.safe_push (dbb);
}
iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
BITMAP_FREE (df);
BITMAP_FREE (df_idom);
- VEC_free (basic_block, heap, bbs_to_remove);
- VEC_free (basic_block, heap, bbs_to_fix_dom);
+ bbs_to_remove.release ();
+ bbs_to_fix_dom.release ();
}
/* Purge dead EH edges from basic block BB. */
/* Remove any fallthru edge from EV. Return true if an edge was removed. */
static bool
-remove_fallthru_edge (VEC(edge,gc) *ev)
+remove_fallthru_edge (vec<edge, va_gc> *ev)
{
edge_iterator ei;
edge e;
/* Detect cases where a mid-block call is now known not to return. */
if (cfun->gimple_df)
- while (VEC_length (gimple, MODIFIED_NORETURN_CALLS (cfun)))
+ while (vec_safe_length (MODIFIED_NORETURN_CALLS (cfun)))
{
- stmt = VEC_pop (gimple, MODIFIED_NORETURN_CALLS (cfun));
+ stmt = MODIFIED_NORETURN_CALLS (cfun)->pop ();
bb = gimple_bb (stmt);
/* BB might be deleted at this point, so verify first
BB is present in the cfg. */
if (TREE_CODE (def) == SSA_NAME)
{
- edge_var_map_vector head;
+ edge_var_map_vector *head;
edge_var_map *vm;
size_t i;
redirection, replace it with the PHI argument that used
to be on E. */
head = redirect_edge_var_map_vector (e);
- FOR_EACH_VEC_ELT (edge_var_map, head, i, vm)
+ FOR_EACH_VEC_ELT (*head, i, vm)
{
tree old_arg = redirect_edge_var_map_result (vm);
tree new_arg = redirect_edge_var_map_def (vm);
expression, calls chrec_apply when the expression is not NULL. */
tree
-chrec_apply_map (tree chrec, VEC (tree, heap) *iv_map)
+chrec_apply_map (tree chrec, vec<tree> iv_map)
{
int i;
tree expr;
- FOR_EACH_VEC_ELT (tree, iv_map, i, expr)
+ FOR_EACH_VEC_ELT (iv_map, i, expr)
if (expr)
chrec = chrec_apply (i, chrec, expr);
/* Operations. */
extern tree chrec_apply (unsigned, tree, tree);
-extern tree chrec_apply_map (tree, VEC (tree, heap) *);
+extern tree chrec_apply_map (tree, vec<tree>);
extern tree chrec_replace_initial_condition (tree, tree);
extern tree initial_condition (tree);
extern tree initial_condition_in_loop_num (tree, unsigned);
#define PAIR(a, b) ((a) << 2 | (b))
-DEF_VEC_I(complex_lattice_t);
-DEF_VEC_ALLOC_I(complex_lattice_t, heap);
-static VEC(complex_lattice_t, heap) *complex_lattice_values;
+static vec<complex_lattice_t> complex_lattice_values;
/* For each complex variable, a pair of variables for the components exists in
the hashtable. */
static htab_t complex_variable_components;
/* For each complex SSA_NAME, a pair of ssa names for the components. */
-static VEC(tree, heap) *complex_ssa_name_components;
+static vec<tree> complex_ssa_name_components;
/* Lookup UID in the complex_variable_components hashtable and return the
associated tree. */
switch (TREE_CODE (t))
{
case SSA_NAME:
- return VEC_index (complex_lattice_t, complex_lattice_values,
- SSA_NAME_VERSION (t));
+ return complex_lattice_values[SSA_NAME_VERSION (t)];
case COMPLEX_CST:
real = TREE_REALPART (t);
for (parm = DECL_ARGUMENTS (cfun->decl); parm ; parm = DECL_CHAIN (parm))
if (is_complex_reg (parm)
&& (ssa_name = ssa_default_def (cfun, parm)) != NULL_TREE)
- VEC_replace (complex_lattice_t, complex_lattice_values,
- SSA_NAME_VERSION (ssa_name), VARYING);
+ complex_lattice_values[SSA_NAME_VERSION (ssa_name)] = VARYING;
}
/* Initialize simulation state for each statement. Return false if we
*result_p = lhs;
ver = SSA_NAME_VERSION (lhs);
- old_l = VEC_index (complex_lattice_t, complex_lattice_values, ver);
+ old_l = complex_lattice_values[ver];
switch (gimple_expr_code (stmt))
{
if (new_l == old_l)
return SSA_PROP_NOT_INTERESTING;
- VEC_replace (complex_lattice_t, complex_lattice_values, ver, new_l);
+ complex_lattice_values[ver] = new_l;
return new_l == VARYING ? SSA_PROP_VARYING : SSA_PROP_INTERESTING;
}
new_l |= find_lattice_value (gimple_phi_arg_def (phi, i));
ver = SSA_NAME_VERSION (lhs);
- old_l = VEC_index (complex_lattice_t, complex_lattice_values, ver);
+ old_l = complex_lattice_values[ver];
if (new_l == old_l)
return SSA_PROP_NOT_INTERESTING;
- VEC_replace (complex_lattice_t, complex_lattice_values, ver, new_l);
+ complex_lattice_values[ver] = new_l;
return new_l == VARYING ? SSA_PROP_VARYING : SSA_PROP_INTERESTING;
}
}
ssa_name_index = SSA_NAME_VERSION (ssa_name) * 2 + imag_p;
- ret = VEC_index (tree, complex_ssa_name_components, ssa_name_index);
+ ret = complex_ssa_name_components[ssa_name_index];
if (ret == NULL)
{
if (SSA_NAME_VAR (ssa_name))
set_ssa_default_def (cfun, SSA_NAME_VAR (ret), ret);
}
- VEC_replace (tree, complex_ssa_name_components, ssa_name_index, ret);
+ complex_ssa_name_components[ssa_name_index] = ret;
}
return ret;
This is fine. Now we should create an initialization for the value
we created earlier. */
ssa_name_index = SSA_NAME_VERSION (ssa_name) * 2 + imag_p;
- comp = VEC_index (tree, complex_ssa_name_components, ssa_name_index);
+ comp = complex_ssa_name_components[ssa_name_index];
if (comp)
;
else if (is_gimple_min_invariant (value)
&& !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ssa_name))
{
- VEC_replace (tree, complex_ssa_name_components, ssa_name_index, value);
+ complex_ssa_name_components[ssa_name_index] = value;
return NULL;
}
else if (TREE_CODE (value) == SSA_NAME
replace_ssa_name_symbol (value, comp);
}
- VEC_replace (tree, complex_ssa_name_components, ssa_name_index, value);
+ complex_ssa_name_components[ssa_name_index] = value;
return NULL;
}
if (!init_dont_simulate_again ())
return 0;
- complex_lattice_values = VEC_alloc (complex_lattice_t, heap, num_ssa_names);
- VEC_safe_grow_cleared (complex_lattice_t, heap,
- complex_lattice_values, num_ssa_names);
+ complex_lattice_values.create (num_ssa_names);
+ complex_lattice_values.safe_grow_cleared (num_ssa_names);
init_parameter_lattice_values ();
ssa_propagate (complex_visit_stmt, complex_visit_phi);
complex_variable_components = htab_create (10, int_tree_map_hash,
int_tree_map_eq, free);
- complex_ssa_name_components = VEC_alloc (tree, heap, 2*num_ssa_names);
- VEC_safe_grow_cleared (tree, heap, complex_ssa_name_components,
- 2 * num_ssa_names);
+ complex_ssa_name_components.create (2 * num_ssa_names);
+ complex_ssa_name_components.safe_grow_cleared (2 * num_ssa_names);
update_parameter_components ();
gsi_commit_edge_inserts ();
htab_delete (complex_variable_components);
- VEC_free (tree, heap, complex_ssa_name_components);
- VEC_free (complex_lattice_t, heap, complex_lattice_values);
+ complex_ssa_name_components.release ();
+ complex_lattice_values.release ();
return 0;
}
/* Dump into FILE all the data references from DATAREFS. */
static void
-dump_data_references (FILE *file, VEC (data_reference_p, heap) *datarefs)
+dump_data_references (FILE *file, vec<data_reference_p> datarefs)
{
unsigned int i;
struct data_reference *dr;
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
dump_data_reference (file, dr);
}
/* Dump into STDERR all the data references from DATAREFS. */
DEBUG_FUNCTION void
-debug_data_references (VEC (data_reference_p, heap) *datarefs)
+debug_data_references (vec<data_reference_p> datarefs)
{
dump_data_references (stderr, datarefs);
}
unsigned i;
tree coef;
- print_generic_expr (outf, VEC_index (tree, fn, 0), TDF_SLIM);
- for (i = 1; VEC_iterate (tree, fn, i, coef); i++)
+ print_generic_expr (outf, fn[0], TDF_SLIM);
+ for (i = 1; fn.iterate (i, &coef); i++)
{
fprintf (outf, " + ");
print_generic_expr (outf, coef, TDF_SLIM);
/* Print a vector of direction vectors. */
static void
-print_dir_vectors (FILE *outf, VEC (lambda_vector, heap) *dir_vects,
+print_dir_vectors (FILE *outf, vec<lambda_vector> dir_vects,
int length)
{
unsigned j;
lambda_vector v;
- FOR_EACH_VEC_ELT (lambda_vector, dir_vects, j, v)
+ FOR_EACH_VEC_ELT (dir_vects, j, v)
print_direction_vector (outf, v, length);
}
/* Print a vector of distance vectors. */
static void
-print_dist_vectors (FILE *outf, VEC (lambda_vector, heap) *dist_vects,
+print_dist_vectors (FILE *outf, vec<lambda_vector> dist_vects,
int length)
{
unsigned j;
lambda_vector v;
- FOR_EACH_VEC_ELT (lambda_vector, dist_vects, j, v)
+ FOR_EACH_VEC_ELT (dist_vects, j, v)
print_lambda_vector (outf, v, length);
}
fprintf (outf, " inner loop index: %d\n", DDR_INNER_LOOP (ddr));
fprintf (outf, " loop nest: (");
- FOR_EACH_VEC_ELT (loop_p, DDR_LOOP_NEST (ddr), i, loopi)
+ FOR_EACH_VEC_ELT (DDR_LOOP_NEST (ddr), i, loopi)
fprintf (outf, "%d ", loopi->num);
fprintf (outf, ")\n");
void
dump_data_dependence_relations (FILE *file,
- VEC (ddr_p, heap) *ddrs)
+ vec<ddr_p> ddrs)
{
unsigned int i;
struct data_dependence_relation *ddr;
- FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
+ FOR_EACH_VEC_ELT (ddrs, i, ddr)
dump_data_dependence_relation (file, ddr);
}
/* Dump to STDERR all the dependence relations from DDRS. */
DEBUG_FUNCTION void
-debug_data_dependence_relations (VEC (ddr_p, heap) *ddrs)
+debug_data_dependence_relations (vec<ddr_p> ddrs)
{
dump_data_dependence_relations (stderr, ddrs);
}
considered nest. */
static void
-dump_dist_dir_vectors (FILE *file, VEC (ddr_p, heap) *ddrs)
+dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs)
{
unsigned int i, j;
struct data_dependence_relation *ddr;
lambda_vector v;
- FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
+ FOR_EACH_VEC_ELT (ddrs, i, ddr)
if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_AFFINE_P (ddr))
{
- FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), j, v)
+ FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), j, v)
{
fprintf (file, "DISTANCE_V (");
print_lambda_vector (file, v, DDR_NB_LOOPS (ddr));
fprintf (file, ")\n");
}
- FOR_EACH_VEC_ELT (lambda_vector, DDR_DIR_VECTS (ddr), j, v)
+ FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), j, v)
{
fprintf (file, "DIRECTION_V (");
print_direction_vector (file, v, DDR_NB_LOOPS (ddr));
/* Dumps the data dependence relations DDRS in FILE. */
static void
-dump_ddrs (FILE *file, VEC (ddr_p, heap) *ddrs)
+dump_ddrs (FILE *file, vec<ddr_p> ddrs)
{
unsigned int i;
struct data_dependence_relation *ddr;
- FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
+ FOR_EACH_VEC_ELT (ddrs, i, ddr)
dump_data_dependence_relation (file, ddr);
fprintf (file, "\n\n");
}
DEBUG_FUNCTION void
-debug_ddrs (VEC (ddr_p, heap) *ddrs)
+debug_ddrs (vec<ddr_p> ddrs)
{
dump_ddrs (stderr, ddrs);
}
static void
dr_analyze_indices (struct data_reference *dr, loop_p nest, loop_p loop)
{
- VEC (tree, heap) *access_fns = NULL;
+ vec<tree> access_fns = vec<tree>();
tree ref, op;
tree base, off, access_fn;
basic_block before_loop;
if (!nest)
{
DR_BASE_OBJECT (dr) = DR_REF (dr);
- DR_ACCESS_FNS (dr) = NULL;
+ DR_ACCESS_FNS (dr).create (0);
return;
}
if (TREE_CODE (ref) == REALPART_EXPR)
{
ref = TREE_OPERAND (ref, 0);
- VEC_safe_push (tree, heap, access_fns, integer_zero_node);
+ access_fns.safe_push (integer_zero_node);
}
else if (TREE_CODE (ref) == IMAGPART_EXPR)
{
ref = TREE_OPERAND (ref, 0);
- VEC_safe_push (tree, heap, access_fns, integer_one_node);
+ access_fns.safe_push (integer_one_node);
}
/* Analyze access functions of dimensions we know to be independent. */
op = TREE_OPERAND (ref, 1);
access_fn = analyze_scalar_evolution (loop, op);
access_fn = instantiate_scev (before_loop, loop, access_fn);
- VEC_safe_push (tree, heap, access_fns, access_fn);
+ access_fns.safe_push (access_fn);
}
else if (TREE_CODE (ref) == COMPONENT_REF
&& TREE_CODE (TREE_TYPE (TREE_OPERAND (ref, 0))) == RECORD_TYPE)
fold_convert (bitsizetype, off),
bitsize_int (BITS_PER_UNIT)),
DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)));
- VEC_safe_push (tree, heap, access_fns, off);
+ access_fns.safe_push (off);
}
else
/* If we have an unhandled component we could not translate
MEM_REF, TREE_TYPE (ref),
base, memoff);
DR_UNCONSTRAINED_BASE (dr) = true;
- VEC_safe_push (tree, heap, access_fns, access_fn);
+ access_fns.safe_push (access_fn);
}
}
else if (DECL_P (ref))
void
free_data_ref (data_reference_p dr)
{
- VEC_free (tree, heap, DR_ACCESS_FNS (dr));
+ DR_ACCESS_FNS (dr).release ();
free (dr);
}
static bool
affine_function_equal_p (affine_fn fna, affine_fn fnb)
{
- unsigned i, n = VEC_length (tree, fna);
+ unsigned i, n = fna.length ();
- if (n != VEC_length (tree, fnb))
+ if (n != fnb.length ())
return false;
for (i = 0; i < n; i++)
- if (!operand_equal_p (VEC_index (tree, fna, i),
- VEC_index (tree, fnb, i), 0))
+ if (!operand_equal_p (fna[i], fnb[i], 0))
return false;
return true;
affine_fn comm;
if (!CF_NONTRIVIAL_P (cf))
- return NULL;
+ return affine_fn();
comm = cf->fns[0];
for (i = 1; i < cf->n; i++)
if (!affine_function_equal_p (comm, cf->fns[i]))
- return NULL;
+ return affine_fn();
return comm;
}
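With affine_fn now a by-value vec<tree>, the old NULL sentinel becomes a default-constructed, unallocated vector: common_affine_function returns affine_fn() on failure, and callers test .exists () instead of comparing against NULL, as the fn_a / fn_b check further down does. The idiom in brief:

    affine_fn fn = common_affine_function (cf);
    if (!fn.exists ())     /* Was: if (!fn) on the old VEC pointer.  */
      return;              /* No common affine function.  */
    tree base = fn[0];     /* affine_function_base; safe once allocated.  */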
static tree
affine_function_base (affine_fn fn)
{
- return VEC_index (tree, fn, 0);
+ return fn[0];
}
/* Returns true if FN is a constant. */
unsigned i;
tree coef;
- for (i = 1; VEC_iterate (tree, fn, i, coef); i++)
+ for (i = 1; fn.iterate (i, &coef); i++)
if (!integer_zerop (coef))
return false;
affine_fn ret;
tree coef;
- if (VEC_length (tree, fnb) > VEC_length (tree, fna))
+ if (fnb.length () > fna.length ())
{
- n = VEC_length (tree, fna);
- m = VEC_length (tree, fnb);
+ n = fna.length ();
+ m = fnb.length ();
}
else
{
- n = VEC_length (tree, fnb);
- m = VEC_length (tree, fna);
+ n = fnb.length ();
+ m = fna.length ();
}
- ret = VEC_alloc (tree, heap, m);
+ ret.create (m);
for (i = 0; i < n; i++)
{
- tree type = signed_type_for_types (TREE_TYPE (VEC_index (tree, fna, i)),
- TREE_TYPE (VEC_index (tree, fnb, i)));
-
- VEC_quick_push (tree, ret,
- fold_build2 (op, type,
- VEC_index (tree, fna, i),
- VEC_index (tree, fnb, i)));
+ tree type = signed_type_for_types (TREE_TYPE (fna[i]),
+ TREE_TYPE (fnb[i]));
+ ret.quick_push (fold_build2 (op, type, fna[i], fnb[i]));
}
- for (; VEC_iterate (tree, fna, i, coef); i++)
- VEC_quick_push (tree, ret,
- fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
+ for (; fna.iterate (i, &coef); i++)
+ ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
coef, integer_zero_node));
- for (; VEC_iterate (tree, fnb, i, coef); i++)
- VEC_quick_push (tree, ret,
- fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
+ for (; fnb.iterate (i, &coef); i++)
+ ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
integer_zero_node, coef));
return ret;
static void
affine_fn_free (affine_fn fn)
{
- VEC_free (tree, heap, fn);
+ fn.release ();
}
/* Determine for each subscript in the data dependence relation DDR
fn_a = common_affine_function (cf_a);
fn_b = common_affine_function (cf_b);
- if (!fn_a || !fn_b)
+ if (!fn_a.exists () || !fn_b.exists ())
{
SUB_DISTANCE (subscript) = chrec_dont_know;
return;
struct data_dependence_relation *
initialize_data_dependence_relation (struct data_reference *a,
struct data_reference *b,
- VEC (loop_p, heap) *loop_nest)
+ vec<loop_p> loop_nest)
{
struct data_dependence_relation *res;
unsigned int i;
res = XNEW (struct data_dependence_relation);
DDR_A (res) = a;
DDR_B (res) = b;
- DDR_LOOP_NEST (res) = NULL;
+ DDR_LOOP_NEST (res).create (0);
DDR_REVERSED_P (res) = false;
- DDR_SUBSCRIPTS (res) = NULL;
- DDR_DIR_VECTS (res) = NULL;
- DDR_DIST_VECTS (res) = NULL;
+ DDR_SUBSCRIPTS (res).create (0);
+ DDR_DIR_VECTS (res).create (0);
+ DDR_DIST_VECTS (res).create (0);
if (a == NULL || b == NULL)
{
}
/* If the data references do not alias, then they are independent. */
- if (!dr_may_alias_p (a, b, loop_nest != NULL))
+ if (!dr_may_alias_p (a, b, loop_nest.exists ()))
{
DDR_ARE_DEPENDENT (res) = chrec_known;
return res;
/* The case where the references are exactly the same. */
if (operand_equal_p (DR_REF (a), DR_REF (b), 0))
{
- if (loop_nest
- && !object_address_invariant_in_loop_p (VEC_index (loop_p, loop_nest, 0),
+ if (loop_nest.exists ()
+ && !object_address_invariant_in_loop_p (loop_nest[0],
DR_BASE_OBJECT (a)))
{
DDR_ARE_DEPENDENT (res) = chrec_dont_know;
}
DDR_AFFINE_P (res) = true;
DDR_ARE_DEPENDENT (res) = NULL_TREE;
- DDR_SUBSCRIPTS (res) = VEC_alloc (subscript_p, heap, DR_NUM_DIMENSIONS (a));
+ DDR_SUBSCRIPTS (res).create (DR_NUM_DIMENSIONS (a));
DDR_LOOP_NEST (res) = loop_nest;
DDR_INNER_LOOP (res) = 0;
DDR_SELF_REFERENCE (res) = true;
SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known ();
SUB_LAST_CONFLICT (subscript) = chrec_dont_know;
SUB_DISTANCE (subscript) = chrec_dont_know;
- VEC_safe_push (subscript_p, heap, DDR_SUBSCRIPTS (res), subscript);
+ DDR_SUBSCRIPTS (res).safe_push (subscript);
}
return res;
}
/* If the base of the object is not invariant in the loop nest, we cannot
analyze it. TODO -- in fact, it would suffice to record that there may
be arbitrary dependences in the loops where the base object varies. */
- if (loop_nest
- && !object_address_invariant_in_loop_p (VEC_index (loop_p, loop_nest, 0),
+ if (loop_nest.exists ()
+ && !object_address_invariant_in_loop_p (loop_nest[0],
DR_BASE_OBJECT (a)))
{
DDR_ARE_DEPENDENT (res) = chrec_dont_know;
DDR_AFFINE_P (res) = true;
DDR_ARE_DEPENDENT (res) = NULL_TREE;
- DDR_SUBSCRIPTS (res) = VEC_alloc (subscript_p, heap, DR_NUM_DIMENSIONS (a));
+ DDR_SUBSCRIPTS (res).create (DR_NUM_DIMENSIONS (a));
DDR_LOOP_NEST (res) = loop_nest;
DDR_INNER_LOOP (res) = 0;
DDR_SELF_REFERENCE (res) = false;
SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known ();
SUB_LAST_CONFLICT (subscript) = chrec_dont_know;
SUB_DISTANCE (subscript) = chrec_dont_know;
- VEC_safe_push (subscript_p, heap, DDR_SUBSCRIPTS (res), subscript);
+ DDR_SUBSCRIPTS (res).safe_push (subscript);
}
return res;
/* Frees memory used by SUBSCRIPTS. */
static void
-free_subscripts (VEC (subscript_p, heap) *subscripts)
+free_subscripts (vec<subscript_p> subscripts)
{
unsigned i;
subscript_p s;
- FOR_EACH_VEC_ELT (subscript_p, subscripts, i, s)
+ FOR_EACH_VEC_ELT (subscripts, i, s)
{
free_conflict_function (s->conflicting_iterations_in_a);
free_conflict_function (s->conflicting_iterations_in_b);
free (s);
}
- VEC_free (subscript_p, heap, subscripts);
+ subscripts.release ();
}
/* Set DDR_ARE_DEPENDENT to CHREC and finalize the subscript overlap
DDR_ARE_DEPENDENT (ddr) = chrec;
free_subscripts (DDR_SUBSCRIPTS (ddr));
- DDR_SUBSCRIPTS (ddr) = NULL;
+ DDR_SUBSCRIPTS (ddr).create (0);
}
/* The dependence relation DDR cannot be represented by a distance
static affine_fn
affine_fn_cst (tree cst)
{
- affine_fn fn = VEC_alloc (tree, heap, 1);
- VEC_quick_push (tree, fn, cst);
+ affine_fn fn;
+ fn.create (1);
+ fn.quick_push (cst);
return fn;
}
static affine_fn
affine_fn_univar (tree cst, unsigned dim, tree coef)
{
- affine_fn fn = VEC_alloc (tree, heap, dim + 1);
+ affine_fn fn;
+ fn.create (dim + 1);
unsigned i;
gcc_assert (dim > 0);
- VEC_quick_push (tree, fn, cst);
+ fn.quick_push (cst);
for (i = 1; i < dim; i++)
- VEC_quick_push (tree, fn, integer_zero_node);
- VEC_quick_push (tree, fn, coef);
+ fn.quick_push (integer_zero_node);
+ fn.quick_push (coef);
return fn;
}
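
Since affine_fn is now a typedef for vec<tree> (see the header changes later
in this patch), these functions build and return the vector by value: copies
share the same heap block, a default-constructed vec stands in for the old
NULL pointer, and exactly one copy must be released.  A hedged sketch of those
value semantics, assuming GCC's vec.h:

  /* Sketch: vec<T> is a small value type whose copies alias one heap
     block; "no vector" is a default-constructed vec, tested with
     exists () instead of a NULL comparison.  */
  affine_fn fn = affine_fn ();
  if (!fn.exists ())                 /* True: nothing created yet.  */
    fn.create (1);
  fn.quick_push (integer_zero_node);
  affine_fn alias = fn;              /* Shallow copy; shares storage.  */
  gcc_assert (alias[0] == fn[0]);
  fn.release ();                     /* Release once, via either copy.  */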
unsigned i;
lambda_vector v;
- FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, v)
+ FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, v)
if (lambda_vector_equal (v, dist_v, DDR_NB_LOOPS (ddr)))
return;
- VEC_safe_push (lambda_vector, heap, DDR_DIST_VECTS (ddr), dist_v);
+ DDR_DIST_VECTS (ddr).safe_push (dist_v);
}
/* Helper function for uniquely inserting direction vectors. */
unsigned i;
lambda_vector v;
- FOR_EACH_VEC_ELT (lambda_vector, DDR_DIR_VECTS (ddr), i, v)
+ FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), i, v)
if (lambda_vector_equal (v, dir_v, DDR_NB_LOOPS (ddr)))
return;
- VEC_safe_push (lambda_vector, heap, DDR_DIR_VECTS (ddr), dir_v);
+ DDR_DIR_VECTS (ddr).safe_push (dir_v);
}
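
Both unique-insertion helpers above follow the same scan-then-push shape; a
condensed sketch of the idiom (push_unique is a hypothetical name,
lambda_vector_equal is the predicate already used here):

  /* Sketch: append V to *VECS unless an equal vector is present.
     safe_push may reallocate, which is why VECS is taken by address.  */
  static void
  push_unique (vec<lambda_vector> *vecs, lambda_vector v, int size)
  {
    unsigned i;
    lambda_vector other;
    FOR_EACH_VEC_ELT (*vecs, i, other)
      if (lambda_vector_equal (other, v, size))
        return;                      /* Already recorded.  */
    vecs->safe_push (v);
  }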
/* Add a distance of 1 on all the loops outer than INDEX. If we
unsigned i, j;
lambda_vector dist_v;
- FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
+ FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
{
lambda_vector dir_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
struct subscript *subscript;
tree res = NULL_TREE;
- for (i = 0; VEC_iterate (subscript_p, DDR_SUBSCRIPTS (ddr), i, subscript);
- i++)
+ for (i = 0; DDR_SUBSCRIPTS (ddr).iterate (i, &subscript); i++)
{
conflict_function *overlaps_a, *overlaps_b;
const struct loop *loop_nest)
{
unsigned int i;
- VEC(tree,heap) *fns = DR_ACCESS_FNS (a);
+ vec<tree> fns = DR_ACCESS_FNS (a);
tree t;
- FOR_EACH_VEC_ELT (tree, fns, i, t)
+ FOR_EACH_VEC_ELT (fns, i, t)
if (!evolution_function_is_invariant_p (t, loop_nest->num)
&& !evolution_function_is_affine_multivariate_p (t, loop_nest->num))
return false;
problem that we have initialized until now. On top of this we
add new constraints. */
for (i = 0; i <= DDR_INNER_LOOP (ddr)
- && VEC_iterate (loop_p, DDR_LOOP_NEST (ddr), i, loopi); i++)
+ && DDR_LOOP_NEST (ddr).iterate (i, &loopi); i++)
{
int dist = 0;
omega_pb copy = omega_alloc_problem (2 * DDR_NB_LOOPS (ddr),
omega_copy_problem (copy, pb);
/* For all the outer loops "loop_j", add "dj = 0". */
- for (j = 0;
- j < i && VEC_iterate (loop_p, DDR_LOOP_NEST (ddr), j, loopj); j++)
+ for (j = 0; j < i && DDR_LOOP_NEST (ddr).iterate (j, &loopj); j++)
{
eq = omega_add_zero_eq (copy, omega_black);
copy->eqs[eq].coef[j + 1] = 1;
{
/* Reinitialize problem... */
omega_copy_problem (copy, pb);
- for (j = 0;
- j < i && VEC_iterate (loop_p, DDR_LOOP_NEST (ddr), j, loopj); j++)
+ for (j = 0; j < i && DDR_LOOP_NEST (ddr).iterate (j, &loopj); j++)
{
eq = omega_add_zero_eq (copy, omega_black);
copy->eqs[eq].coef[j + 1] = 1;
- coef[nb_loops + 1, 2*nb_loops] are the loop variables: "loop_x".
*/
for (i = 0; i <= DDR_INNER_LOOP (ddr)
- && VEC_iterate (loop_p, DDR_LOOP_NEST (ddr), i, loopi); i++)
+ && DDR_LOOP_NEST (ddr).iterate (i, &loopi); i++)
{
HOST_WIDE_INT nbi = max_stmt_executions_int (loopi);
static bool
ddr_consistent_p (FILE *file,
struct data_dependence_relation *ddr,
- VEC (lambda_vector, heap) *dist_vects,
- VEC (lambda_vector, heap) *dir_vects)
+ vec<lambda_vector> dist_vects,
+ vec<lambda_vector> dir_vects)
{
unsigned int i, j;
if (dump_file && (dump_flags & TDF_DETAILS))
file = dump_file;
- if (VEC_length (lambda_vector, dist_vects) != DDR_NUM_DIST_VECTS (ddr))
+ if (dist_vects.length () != DDR_NUM_DIST_VECTS (ddr))
{
lambda_vector b_dist_v;
fprintf (file, "\n(Number of distance vectors differ: Banerjee has %d, Omega has %d.\n",
- VEC_length (lambda_vector, dist_vects),
+ dist_vects.length (),
DDR_NUM_DIST_VECTS (ddr));
fprintf (file, "Banerjee dist vectors:\n");
- FOR_EACH_VEC_ELT (lambda_vector, dist_vects, i, b_dist_v)
+ FOR_EACH_VEC_ELT (dist_vects, i, b_dist_v)
print_lambda_vector (file, b_dist_v, DDR_NB_LOOPS (ddr));
fprintf (file, "Omega dist vectors:\n");
return false;
}
- if (VEC_length (lambda_vector, dir_vects) != DDR_NUM_DIR_VECTS (ddr))
+ if (dir_vects.length () != DDR_NUM_DIR_VECTS (ddr))
{
fprintf (file, "\n(Number of direction vectors differ: Banerjee has %d, Omega has %d.)\n",
- VEC_length (lambda_vector, dir_vects),
+ dir_vects.length (),
DDR_NUM_DIR_VECTS (ddr));
return false;
}
/* Distance vectors are not ordered in the same way in the DDR
and in the DIST_VECTS: search for a matching vector. */
- FOR_EACH_VEC_ELT (lambda_vector, dist_vects, j, a_dist_v)
+ FOR_EACH_VEC_ELT (dist_vects, j, a_dist_v)
if (lambda_vector_equal (a_dist_v, b_dist_v, DDR_NB_LOOPS (ddr)))
break;
- if (j == VEC_length (lambda_vector, dist_vects))
+ if (j == dist_vects.length ())
{
fprintf (file, "\n(Dist vectors from the first dependence analyzer:\n");
print_dist_vectors (file, dist_vects, DDR_NB_LOOPS (ddr));
/* Direction vectors are not ordered in the same way in the DDR
and in the DIR_VECTS: search for a matching vector. */
- FOR_EACH_VEC_ELT (lambda_vector, dir_vects, j, a_dir_v)
+ FOR_EACH_VEC_ELT (dir_vects, j, a_dir_v)
if (lambda_vector_equal (a_dir_v, b_dir_v, DDR_NB_LOOPS (ddr)))
break;
- if (j == VEC_length (lambda_vector, dist_vects))
+ if (j == dir_vects.length ())
{
fprintf (file, "\n(Dir vectors from the first dependence analyzer:\n");
print_dir_vectors (file, dir_vects, DDR_NB_LOOPS (ddr));
if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
{
bool maybe_dependent;
- VEC (lambda_vector, heap) *dir_vects, *dist_vects;
+ vec<lambda_vector> dir_vects, dist_vects;
/* Save the result of the first DD analyzer. */
dist_vects = DDR_DIST_VECTS (ddr);
dir_vects = DDR_DIR_VECTS (ddr);
/* Reset the information. */
- DDR_DIST_VECTS (ddr) = NULL;
- DDR_DIR_VECTS (ddr) = NULL;
+ DDR_DIST_VECTS (ddr).create (0);
+ DDR_DIR_VECTS (ddr).create (0);
/* Compute the same information using Omega. */
if (!init_omega_for_ddr (ddr, &maybe_dependent))
is small enough to be handled. */
bool
-compute_all_dependences (VEC (data_reference_p, heap) *datarefs,
- VEC (ddr_p, heap) **dependence_relations,
- VEC (loop_p, heap) *loop_nest,
+compute_all_dependences (vec<data_reference_p> datarefs,
+ vec<ddr_p> *dependence_relations,
+ vec<loop_p> loop_nest,
bool compute_self_and_rr)
{
struct data_dependence_relation *ddr;
struct data_reference *a, *b;
unsigned int i, j;
- if ((int) VEC_length (data_reference_p, datarefs)
+ if ((int) datarefs.length ()
> PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
{
struct data_dependence_relation *ddr;
/* Insert a single relation into dependence_relations:
chrec_dont_know. */
ddr = initialize_data_dependence_relation (NULL, NULL, loop_nest);
- VEC_safe_push (ddr_p, heap, *dependence_relations, ddr);
+ dependence_relations->safe_push (ddr);
return false;
}
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, a)
- for (j = i + 1; VEC_iterate (data_reference_p, datarefs, j, b); j++)
+ FOR_EACH_VEC_ELT (datarefs, i, a)
+ for (j = i + 1; datarefs.iterate (j, &b); j++)
if (DR_IS_WRITE (a) || DR_IS_WRITE (b) || compute_self_and_rr)
{
ddr = initialize_data_dependence_relation (a, b, loop_nest);
- VEC_safe_push (ddr_p, heap, *dependence_relations, ddr);
- if (loop_nest)
- compute_affine_dependence (ddr, VEC_index (loop_p, loop_nest, 0));
+ dependence_relations->safe_push (ddr);
+ if (loop_nest.exists ())
+ compute_affine_dependence (ddr, loop_nest[0]);
}
if (compute_self_and_rr)
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, a)
+ FOR_EACH_VEC_ELT (datarefs, i, a)
{
ddr = initialize_data_dependence_relation (a, a, loop_nest);
- VEC_safe_push (ddr_p, heap, *dependence_relations, ddr);
- if (loop_nest)
- compute_affine_dependence (ddr, VEC_index (loop_p, loop_nest, 0));
+ dependence_relations->safe_push (ddr);
+ if (loop_nest.exists ())
+ compute_affine_dependence (ddr, loop_nest[0]);
}
return true;
bool is_read;
} data_ref_loc;
-DEF_VEC_O (data_ref_loc);
-DEF_VEC_ALLOC_O (data_ref_loc, heap);
/* Stores the locations of memory references in STMT to REFERENCES. Returns
true if STMT clobbers memory, false otherwise. */
static bool
-get_references_in_stmt (gimple stmt, VEC (data_ref_loc, heap) **references)
+get_references_in_stmt (gimple stmt, vec<data_ref_loc> *references)
{
bool clobbers_memory = false;
data_ref_loc ref;
tree *op0, *op1;
enum gimple_code stmt_code = gimple_code (stmt);
- *references = NULL;
+ references->create (0);
/* ASM_EXPR and CALL_EXPR may embed arbitrary side effects.
As we cannot model data-references to not spelled out
{
ref.pos = op1;
ref.is_read = true;
- VEC_safe_push (data_ref_loc, heap, *references, ref);
+ references->safe_push (ref);
}
}
else if (stmt_code == GIMPLE_CALL)
{
ref.pos = op1;
ref.is_read = true;
- VEC_safe_push (data_ref_loc, heap, *references, ref);
+ references->safe_push (ref);
}
}
}
{
ref.pos = op0;
ref.is_read = false;
- VEC_safe_push (data_ref_loc, heap, *references, ref);
+ references->safe_push (ref);
}
return clobbers_memory;
}
bool
find_data_references_in_stmt (struct loop *nest, gimple stmt,
- VEC (data_reference_p, heap) **datarefs)
+ vec<data_reference_p> *datarefs)
{
unsigned i;
- VEC (data_ref_loc, heap) *references;
+ vec<data_ref_loc> references;
data_ref_loc *ref;
bool ret = true;
data_reference_p dr;
if (get_references_in_stmt (stmt, &references))
{
- VEC_free (data_ref_loc, heap, references);
+ references.release ();
return false;
}
- FOR_EACH_VEC_ELT (data_ref_loc, references, i, ref)
+ FOR_EACH_VEC_ELT (references, i, ref)
{
dr = create_data_ref (nest, loop_containing_stmt (stmt),
*ref->pos, stmt, ref->is_read);
gcc_assert (dr != NULL);
- VEC_safe_push (data_reference_p, heap, *datarefs, dr);
+ datarefs->safe_push (dr);
}
- VEC_free (data_ref_loc, heap, references);
+ references.release ();
return ret;
}
bool
graphite_find_data_references_in_stmt (loop_p nest, loop_p loop, gimple stmt,
- VEC (data_reference_p, heap) **datarefs)
+ vec<data_reference_p> *datarefs)
{
unsigned i;
- VEC (data_ref_loc, heap) *references;
+ vec<data_ref_loc> references;
data_ref_loc *ref;
bool ret = true;
data_reference_p dr;
if (get_references_in_stmt (stmt, &references))
{
- VEC_free (data_ref_loc, heap, references);
+ references.release ();
return false;
}
- FOR_EACH_VEC_ELT (data_ref_loc, references, i, ref)
+ FOR_EACH_VEC_ELT (references, i, ref)
{
dr = create_data_ref (nest, loop, *ref->pos, stmt, ref->is_read);
gcc_assert (dr != NULL);
- VEC_safe_push (data_reference_p, heap, *datarefs, dr);
+ datarefs->safe_push (dr);
}
- VEC_free (data_ref_loc, heap, references);
+ references.release ();
return ret;
}
tree
find_data_references_in_bb (struct loop *loop, basic_block bb,
- VEC (data_reference_p, heap) **datarefs)
+ vec<data_reference_p> *datarefs)
{
gimple_stmt_iterator bsi;
{
struct data_reference *res;
res = XCNEW (struct data_reference);
- VEC_safe_push (data_reference_p, heap, *datarefs, res);
+ datarefs->safe_push (res);
return chrec_dont_know;
}
static tree
find_data_references_in_loop (struct loop *loop,
- VEC (data_reference_p, heap) **datarefs)
+ vec<data_reference_p> *datarefs)
{
basic_block bb, *bbs;
unsigned int i;
/* Recursive helper function. */
static bool
-find_loop_nest_1 (struct loop *loop, VEC (loop_p, heap) **loop_nest)
+find_loop_nest_1 (struct loop *loop, vec<loop_p> *loop_nest)
{
/* Inner loops of the nest should not contain siblings. Example:
when there are two consecutive loops,
if (loop->next)
return false;
- VEC_safe_push (loop_p, heap, *loop_nest, loop);
+ loop_nest->safe_push (loop);
if (loop->inner)
return find_loop_nest_1 (loop->inner, loop_nest);
return true;
appear in the classic distance vector. */
bool
-find_loop_nest (struct loop *loop, VEC (loop_p, heap) **loop_nest)
+find_loop_nest (struct loop *loop, vec<loop_p> *loop_nest)
{
- VEC_safe_push (loop_p, heap, *loop_nest, loop);
+ loop_nest->safe_push (loop);
if (loop->inner)
return find_loop_nest_1 (loop->inner, loop_nest);
return true;
bool
compute_data_dependences_for_loop (struct loop *loop,
bool compute_self_and_read_read_dependences,
- VEC (loop_p, heap) **loop_nest,
- VEC (data_reference_p, heap) **datarefs,
- VEC (ddr_p, heap) **dependence_relations)
+ vec<loop_p> *loop_nest,
+ vec<data_reference_p> *datarefs,
+ vec<ddr_p> *dependence_relations)
{
bool res = true;
bool
compute_data_dependences_for_bb (basic_block bb,
bool compute_self_and_read_read_dependences,
- VEC (data_reference_p, heap) **datarefs,
- VEC (ddr_p, heap) **dependence_relations)
+ vec<data_reference_p> *datarefs,
+ vec<ddr_p> *dependence_relations)
{
if (find_data_references_in_bb (NULL, bb, datarefs) == chrec_dont_know)
return false;
- return compute_all_dependences (*datarefs, dependence_relations, NULL,
+ return compute_all_dependences (*datarefs, dependence_relations,
+ vec<loop_p>(),
compute_self_and_read_read_dependences);
}
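
The vec<loop_p>() argument above is the by-value replacement for the old NULL
loop nest; callees such as compute_all_dependences test it with exists ().
A small sketch of that convention:

  /* Sketch: a default-constructed vec owns no storage, so it is the
     new spelling of "no loop nest"; release () on it is a no-op.  */
  vec<loop_p> no_nest = vec<loop_p> ();
  gcc_assert (!no_nest.exists ());
  no_nest.release ();                /* Safe: nothing was allocated.  */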
{
unsigned int i;
int nb_data_refs = 10;
- VEC (data_reference_p, heap) *datarefs =
- VEC_alloc (data_reference_p, heap, nb_data_refs);
- VEC (ddr_p, heap) *dependence_relations =
- VEC_alloc (ddr_p, heap, nb_data_refs * nb_data_refs);
- VEC (loop_p, heap) *loop_nest = VEC_alloc (loop_p, heap, 3);
+ vec<data_reference_p> datarefs;
+ datarefs.create (nb_data_refs);
+ vec<ddr_p> dependence_relations;
+ dependence_relations.create (nb_data_refs * nb_data_refs);
+ vec<loop_p> loop_nest;
+ loop_nest.create (3);
/* Compute DDs on the whole function. */
compute_data_dependences_for_loop (loop, false, &loop_nest, &datarefs,
unsigned nb_chrec_relations = 0;
struct data_dependence_relation *ddr;
- FOR_EACH_VEC_ELT (ddr_p, dependence_relations, i, ddr)
+ FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
{
if (chrec_contains_undetermined (DDR_ARE_DEPENDENT (ddr)))
nb_top_relations++;
}
}
- VEC_free (loop_p, heap, loop_nest);
+ loop_nest.release ();
free_dependence_relations (dependence_relations);
free_data_refs (datarefs);
}
if (ddr == NULL)
return;
- if (DDR_SUBSCRIPTS (ddr))
+ if (DDR_SUBSCRIPTS (ddr).exists ())
free_subscripts (DDR_SUBSCRIPTS (ddr));
- if (DDR_DIST_VECTS (ddr))
- VEC_free (lambda_vector, heap, DDR_DIST_VECTS (ddr));
- if (DDR_DIR_VECTS (ddr))
- VEC_free (lambda_vector, heap, DDR_DIR_VECTS (ddr));
+ DDR_DIST_VECTS (ddr).release ();
+ DDR_DIR_VECTS (ddr).release ();
free (ddr);
}
DEPENDENCE_RELATIONS. */
void
-free_dependence_relations (VEC (ddr_p, heap) *dependence_relations)
+free_dependence_relations (vec<ddr_p> dependence_relations)
{
unsigned int i;
struct data_dependence_relation *ddr;
- FOR_EACH_VEC_ELT (ddr_p, dependence_relations, i, ddr)
+ FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
if (ddr)
free_dependence_relation (ddr);
- VEC_free (ddr_p, heap, dependence_relations);
+ dependence_relations.release ();
}
/* Free the memory used by the data references from DATAREFS. */
void
-free_data_refs (VEC (data_reference_p, heap) *datarefs)
+free_data_refs (vec<data_reference_p> datarefs)
{
unsigned int i;
struct data_reference *dr;
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
free_data_ref (dr);
- VEC_free (data_reference_p, heap, datarefs);
+ datarefs.release ();
}
\f
/* Creates the edges of the reduced dependence graph RDG. */
static void
-create_rdg_edges (struct graph *rdg, VEC (ddr_p, heap) *ddrs)
+create_rdg_edges (struct graph *rdg, vec<ddr_p> ddrs)
{
int i;
struct data_dependence_relation *ddr;
def_operand_p def_p;
ssa_op_iter iter;
- FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
+ FOR_EACH_VEC_ELT (ddrs, i, ddr)
if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
create_rdg_edge_for_ddr (rdg, ddr);
/* Build the vertices of the reduced dependence graph RDG. */
static void
-create_rdg_vertices (struct graph *rdg, VEC (gimple, heap) *stmts, loop_p loop)
+create_rdg_vertices (struct graph *rdg, vec<gimple> stmts, loop_p loop)
{
int i, j;
gimple stmt;
- FOR_EACH_VEC_ELT (gimple, stmts, i, stmt)
+ FOR_EACH_VEC_ELT (stmts, i, stmt)
{
- VEC (data_ref_loc, heap) *references;
+ vec<data_ref_loc> references;
data_ref_loc *ref;
struct vertex *v = &(rdg->vertices[i]);
v->data = XNEW (struct rdg_vertex);
RDGV_STMT (v) = stmt;
- RDGV_DATAREFS (v) = NULL;
+ RDGV_DATAREFS (v).create (0);
RDGV_HAS_MEM_WRITE (v) = false;
RDGV_HAS_MEM_READS (v) = false;
if (gimple_code (stmt) == GIMPLE_PHI)
continue;
get_references_in_stmt (stmt, &references);
- FOR_EACH_VEC_ELT (data_ref_loc, references, j, ref)
+ FOR_EACH_VEC_ELT (references, j, ref)
{
data_reference_p dr;
if (!ref->is_read)
dr = create_data_ref (loop, loop_containing_stmt (stmt),
*ref->pos, stmt, ref->is_read);
if (dr)
- VEC_safe_push (data_reference_p, heap, RDGV_DATAREFS (v), dr);
+ RDGV_DATAREFS (v).safe_push (dr);
}
- VEC_free (data_ref_loc, heap, references);
+ references.release ();
}
}
identifying statements. */
static void
-stmts_from_loop (struct loop *loop, VEC (gimple, heap) **stmts)
+stmts_from_loop (struct loop *loop, vec<gimple> *stmts)
{
unsigned int i;
basic_block *bbs = get_loop_body_in_dom_order (loop);
gimple stmt;
for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
- VEC_safe_push (gimple, heap, *stmts, gsi_stmt (bsi));
+ stmts->safe_push (gsi_stmt (bsi));
for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
{
stmt = gsi_stmt (bsi);
if (gimple_code (stmt) != GIMPLE_LABEL && !is_gimple_debug (stmt))
- VEC_safe_push (gimple, heap, *stmts, stmt);
+ stmts->safe_push (stmt);
}
}
/* Returns true when all the dependences are computable. */
static bool
-known_dependences_p (VEC (ddr_p, heap) *dependence_relations)
+known_dependences_p (vec<ddr_p> dependence_relations)
{
ddr_p ddr;
unsigned int i;
- FOR_EACH_VEC_ELT (ddr_p, dependence_relations, i, ddr)
+ FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
return false;
struct graph *
build_rdg (struct loop *loop,
- VEC (loop_p, heap) **loop_nest,
- VEC (ddr_p, heap) **dependence_relations,
- VEC (data_reference_p, heap) **datarefs)
+ vec<loop_p> *loop_nest,
+ vec<ddr_p> *dependence_relations,
+ vec<data_reference_p> *datarefs)
{
struct graph *rdg = NULL;
dependence_relations)
&& known_dependences_p (*dependence_relations))
{
- VEC (gimple, heap) *stmts = VEC_alloc (gimple, heap, 10);
+ vec<gimple> stmts;
+ stmts.create (10);
stmts_from_loop (loop, &stmts);
- rdg = build_empty_rdg (VEC_length (gimple, stmts));
+ rdg = build_empty_rdg (stmts.length ());
create_rdg_vertices (rdg, stmts, loop);
create_rdg_edges (rdg, *dependence_relations);
- VEC_free (gimple, heap, stmts);
+ stmts.release ();
}
return rdg;
tree base_object;
/* A list of chrecs. Access functions of the indices. */
- VEC(tree,heap) *access_fns;
+ vec<tree> access_fns;
/* Whether BASE_OBJECT is an access representing the whole object
or whether the access could not be constrained. */
and scalar multiplication. In this vector space, an element is a list of
integers. */
typedef int *lambda_vector;
-DEF_VEC_P(lambda_vector);
-DEF_VEC_ALLOC_P(lambda_vector,heap);
-DEF_VEC_ALLOC_P(lambda_vector,gc);
/* An integer matrix. A matrix consists of m vectors of length n (IE
all vectors are the same length). */
*/
struct access_matrix
{
- VEC (loop_p, heap) *loop_nest;
+ vec<loop_p> loop_nest;
int nb_induction_vars;
- VEC (tree, heap) *parameters;
- VEC (lambda_vector, gc) *matrix;
+ vec<tree> parameters;
+ vec<lambda_vector, va_gc> *matrix;
};
#define AM_LOOP_NEST(M) (M)->loop_nest
#define AM_NB_INDUCTION_VARS(M) (M)->nb_induction_vars
#define AM_PARAMETERS(M) (M)->parameters
#define AM_MATRIX(M) (M)->matrix
-#define AM_NB_PARAMETERS(M) (VEC_length (tree, AM_PARAMETERS(M)))
+#define AM_NB_PARAMETERS(M) (AM_PARAMETERS(M)).length ()
#define AM_CONST_COLUMN_INDEX(M) (AM_NB_INDUCTION_VARS (M) + AM_NB_PARAMETERS (M))
#define AM_NB_COLUMNS(M) (AM_NB_INDUCTION_VARS (M) + AM_NB_PARAMETERS (M) + 1)
-#define AM_GET_SUBSCRIPT_ACCESS_VECTOR(M, I) VEC_index (lambda_vector, AM_MATRIX (M), I)
+#define AM_GET_SUBSCRIPT_ACCESS_VECTOR(M, I) AM_MATRIX (M)[I]
#define AM_GET_ACCESS_MATRIX_ELEMENT(M, I, J) AM_GET_SUBSCRIPT_ACCESS_VECTOR (M, I)[J]
/* Return the column in the access matrix of LOOP_NUM. */
int i;
loop_p l;
- for (i = 0; VEC_iterate (loop_p, AM_LOOP_NEST (access_matrix), i, l); i++)
+ for (i = 0; AM_LOOP_NEST (access_matrix).iterate (i, &l); i++)
if (l->num == loop_num)
return i;
#define DR_BASE_OBJECT(DR) (DR)->indices.base_object
#define DR_UNCONSTRAINED_BASE(DR) (DR)->indices.unconstrained_base
#define DR_ACCESS_FNS(DR) (DR)->indices.access_fns
-#define DR_ACCESS_FN(DR, I) VEC_index (tree, DR_ACCESS_FNS (DR), I)
-#define DR_NUM_DIMENSIONS(DR) VEC_length (tree, DR_ACCESS_FNS (DR))
+#define DR_ACCESS_FN(DR, I) DR_ACCESS_FNS (DR)[I]
+#define DR_NUM_DIMENSIONS(DR) DR_ACCESS_FNS (DR).length ()
#define DR_IS_READ(DR) (DR)->is_read
#define DR_IS_WRITE(DR) (!DR_IS_READ (DR))
#define DR_BASE_ADDRESS(DR) (DR)->innermost.base_address
#define DR_ACCESS_MATRIX(DR) (DR)->access_matrix
typedef struct data_reference *data_reference_p;
-DEF_VEC_P(data_reference_p);
-DEF_VEC_ALLOC_P (data_reference_p, heap);
enum data_dependence_direction {
dir_positive,
#define CF_NOT_KNOWN_P(CF) ((CF)->n == NOT_KNOWN)
#define CF_NO_DEPENDENCE_P(CF) ((CF)->n == NO_DEPENDENCE)
-typedef VEC (tree, heap) *affine_fn;
+typedef vec<tree> affine_fn;
typedef struct
{
};
typedef struct subscript *subscript_p;
-DEF_VEC_P(subscript_p);
-DEF_VEC_ALLOC_P (subscript_p, heap);
#define SUB_CONFLICTS_IN_A(SUB) SUB->conflicting_iterations_in_a
#define SUB_CONFLICTS_IN_B(SUB) SUB->conflicting_iterations_in_b
/* For each subscript in the dependence test, there is an element in
this array. This is the attribute that labels the edge A->B of
the data_dependence_relation. */
- VEC (subscript_p, heap) *subscripts;
+ vec<subscript_p> subscripts;
/* The analyzed loop nest. */
- VEC (loop_p, heap) *loop_nest;
+ vec<loop_p> loop_nest;
/* The classic direction vector. */
- VEC (lambda_vector, heap) *dir_vects;
+ vec<lambda_vector> dir_vects;
/* The classic distance vector. */
- VEC (lambda_vector, heap) *dist_vects;
+ vec<lambda_vector> dist_vects;
/* An index in loop_nest for the innermost loop that varies for
this data dependence relation. */
};
typedef struct data_dependence_relation *ddr_p;
-DEF_VEC_P(ddr_p);
-DEF_VEC_ALLOC_P(ddr_p,heap);
#define DDR_A(DDR) DDR->a
#define DDR_B(DDR) DDR->b
#define DDR_AFFINE_P(DDR) DDR->affine_p
#define DDR_ARE_DEPENDENT(DDR) DDR->are_dependent
#define DDR_SUBSCRIPTS(DDR) DDR->subscripts
-#define DDR_SUBSCRIPT(DDR, I) VEC_index (subscript_p, DDR_SUBSCRIPTS (DDR), I)
-#define DDR_NUM_SUBSCRIPTS(DDR) VEC_length (subscript_p, DDR_SUBSCRIPTS (DDR))
+#define DDR_SUBSCRIPT(DDR, I) DDR_SUBSCRIPTS (DDR)[I]
+#define DDR_NUM_SUBSCRIPTS(DDR) DDR_SUBSCRIPTS (DDR).length ()
#define DDR_LOOP_NEST(DDR) DDR->loop_nest
/* The size of the direction/distance vectors: the number of loops in
the loop nest. */
-#define DDR_NB_LOOPS(DDR) (VEC_length (loop_p, DDR_LOOP_NEST (DDR)))
+#define DDR_NB_LOOPS(DDR) (DDR_LOOP_NEST (DDR).length ())
#define DDR_INNER_LOOP(DDR) DDR->inner_loop
#define DDR_SELF_REFERENCE(DDR) DDR->self_reference_p
#define DDR_DIST_VECTS(DDR) ((DDR)->dist_vects)
#define DDR_DIR_VECTS(DDR) ((DDR)->dir_vects)
#define DDR_NUM_DIST_VECTS(DDR) \
- (VEC_length (lambda_vector, DDR_DIST_VECTS (DDR)))
+ (DDR_DIST_VECTS (DDR).length ())
#define DDR_NUM_DIR_VECTS(DDR) \
- (VEC_length (lambda_vector, DDR_DIR_VECTS (DDR)))
+ (DDR_DIR_VECTS (DDR).length ())
#define DDR_DIR_VECT(DDR, I) \
- VEC_index (lambda_vector, DDR_DIR_VECTS (DDR), I)
+ DDR_DIR_VECTS (DDR)[I]
#define DDR_DIST_VECT(DDR, I) \
- VEC_index (lambda_vector, DDR_DIST_VECTS (DDR), I)
+ DDR_DIST_VECTS (DDR)[I]
#define DDR_REVERSED_P(DDR) DDR->reversed_p
\f
bool dr_analyze_innermost (struct data_reference *, struct loop *);
extern bool compute_data_dependences_for_loop (struct loop *, bool,
- VEC (loop_p, heap) **,
- VEC (data_reference_p, heap) **,
- VEC (ddr_p, heap) **);
+ vec<loop_p> *,
+ vec<data_reference_p> *,
+ vec<ddr_p> *);
extern bool compute_data_dependences_for_bb (basic_block, bool,
- VEC (data_reference_p, heap) **,
- VEC (ddr_p, heap) **);
-extern void debug_ddrs (VEC (ddr_p, heap) *);
+ vec<data_reference_p> *,
+ vec<ddr_p> *);
+extern void debug_ddrs (vec<ddr_p>);
extern void dump_data_reference (FILE *, struct data_reference *);
extern void debug_data_reference (struct data_reference *);
-extern void debug_data_references (VEC (data_reference_p, heap) *);
+extern void debug_data_references (vec<data_reference_p>);
extern void debug_data_dependence_relation (struct data_dependence_relation *);
-extern void dump_data_dependence_relations (FILE *, VEC (ddr_p, heap) *);
-extern void debug_data_dependence_relations (VEC (ddr_p, heap) *);
+extern void dump_data_dependence_relations (FILE *, vec<ddr_p>);
+extern void debug_data_dependence_relations (vec<ddr_p>);
extern void free_dependence_relation (struct data_dependence_relation *);
-extern void free_dependence_relations (VEC (ddr_p, heap) *);
+extern void free_dependence_relations (vec<ddr_p>);
extern void free_data_ref (data_reference_p);
-extern void free_data_refs (VEC (data_reference_p, heap) *);
+extern void free_data_refs (vec<data_reference_p>);
extern bool find_data_references_in_stmt (struct loop *, gimple,
- VEC (data_reference_p, heap) **);
+ vec<data_reference_p> *);
extern bool graphite_find_data_references_in_stmt (loop_p, loop_p, gimple,
- VEC (data_reference_p, heap) **);
+ vec<data_reference_p> *);
struct data_reference *create_data_ref (loop_p, loop_p, tree, gimple, bool);
-extern bool find_loop_nest (struct loop *, VEC (loop_p, heap) **);
+extern bool find_loop_nest (struct loop *, vec<loop_p> *);
extern struct data_dependence_relation *initialize_data_dependence_relation
- (struct data_reference *, struct data_reference *, VEC (loop_p, heap) *);
+ (struct data_reference *, struct data_reference *, vec<loop_p>);
extern void compute_affine_dependence (struct data_dependence_relation *,
loop_p);
extern void compute_self_dependence (struct data_dependence_relation *);
-extern bool compute_all_dependences (VEC (data_reference_p, heap) *,
- VEC (ddr_p, heap) **, VEC (loop_p, heap) *,
- bool);
+extern bool compute_all_dependences (vec<data_reference_p>,
+ vec<ddr_p> *,
+ vec<loop_p>, bool);
extern tree find_data_references_in_bb (struct loop *, basic_block,
- VEC (data_reference_p, heap) **);
+ vec<data_reference_p> *);
extern bool dr_may_alias_p (const struct data_reference *,
const struct data_reference *, bool);
/* Return true when DEPENDENCE_RELATIONS contains an anti-dependence. */
static inline bool
-ddrs_have_anti_deps (VEC (ddr_p, heap) *dependence_relations)
+ddrs_have_anti_deps (vec<ddr_p> dependence_relations)
{
unsigned i;
ddr_p ddr;
- for (i = 0; VEC_iterate (ddr_p, dependence_relations, i, ddr); i++)
+ for (i = 0; dependence_relations.iterate (i, &ddr); i++)
if (ddr_is_anti_dependent (ddr))
return true;
unsigned vector;
unsigned level = 0;
- if (DDR_DIST_VECTS (ddr))
+ if (DDR_DIST_VECTS (ddr).exists ())
level = dependence_level (DDR_DIST_VECT (ddr, 0), DDR_NB_LOOPS (ddr));
for (vector = 1; vector < DDR_NUM_DIST_VECTS (ddr); vector++)
gimple stmt;
/* Vector of data-references in this statement. */
- VEC(data_reference_p, heap) *datarefs;
+ vec<data_reference_p> datarefs;
/* True when the statement contains a write to memory. */
bool has_mem_write;
#define RDGE_RELATION(E) ((struct rdg_edge *) ((E)->data))->relation
struct graph *build_rdg (struct loop *,
- VEC (loop_p, heap) **,
- VEC (ddr_p, heap) **,
- VEC (data_reference_p, heap) **);
+ vec<loop_p> *,
+ vec<ddr_p> *,
+ vec<data_reference_p> *);
struct graph *build_empty_rdg (int);
void free_rdg (struct graph *);
/* Return the index of the variable VAR in the LOOP_NEST array. */
static inline int
-index_in_loop_nest (int var, VEC (loop_p, heap) *loop_nest)
+index_in_loop_nest (int var, vec<loop_p> loop_nest)
{
struct loop *loopi;
int var_index;
- for (var_index = 0; VEC_iterate (loop_p, loop_nest, var_index, loopi);
+ for (var_index = 0; loop_nest.iterate (var_index, &loopi);
var_index++)
if (loopi->num == var)
break;
typedef struct rdg_component
{
int num;
- VEC (int, heap) *vertices;
+ vec<int> vertices;
} *rdgc;
-DEF_VEC_P (rdgc);
-DEF_VEC_ALLOC_P (rdgc, heap);
-DEF_VEC_P (bitmap);
-DEF_VEC_ALLOC_P (bitmap, heap);
/* Compute the greatest common divisor of a VECTOR of SIZE numbers. */
};
typedef struct numbered_tree_d numbered_tree;
-DEF_VEC_O (numbered_tree);
-DEF_VEC_ALLOC_O (numbered_tree, heap);
/* Compare two declarations references by their DECL_UID / sequence number.
Called via qsort. */
dump_enumerated_decls_push (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
- VEC (numbered_tree, heap) **list = (VEC (numbered_tree, heap) **) &wi->info;
+ vec<numbered_tree> *list = (vec<numbered_tree> *) wi->info;
numbered_tree nt;
if (!DECL_P (*tp))
return NULL_TREE;
nt.t = *tp;
- nt.num = VEC_length (numbered_tree, *list);
- VEC_safe_push (numbered_tree, heap, *list, nt);
+ nt.num = list->length ();
+ list->safe_push (nt);
*walk_subtrees = 0;
return NULL_TREE;
}
{
basic_block bb;
struct walk_stmt_info wi;
- VEC (numbered_tree, heap) *decl_list = VEC_alloc (numbered_tree, heap, 40);
+ vec<numbered_tree> decl_list;
+ decl_list.create (40);
memset (&wi, '\0', sizeof (wi));
- wi.info = (void*) decl_list;
+ wi.info = (void *) &decl_list;
FOR_EACH_BB (bb)
{
gimple_stmt_iterator gsi;
if (!is_gimple_debug (gsi_stmt (gsi)))
walk_gimple_stmt (&gsi, NULL, dump_enumerated_decls_push, &wi);
}
- decl_list = (VEC (numbered_tree, heap) *) wi.info;
- VEC_qsort (numbered_tree, decl_list, compare_decls_by_uid);
- if (VEC_length (numbered_tree, decl_list))
+ decl_list.qsort (compare_decls_by_uid);
+ if (decl_list.length ())
{
unsigned ix;
numbered_tree *ntp;
fprintf (file, "Declarations used by %s, sorted by DECL_UID:\n",
current_function_name ());
- FOR_EACH_VEC_ELT (numbered_tree, decl_list, ix, ntp)
+ FOR_EACH_VEC_ELT (decl_list, ix, ntp)
{
if (ntp->t == last)
continue;
last = ntp->t;
}
}
- VEC_free (numbered_tree, heap, decl_list);
+ decl_list.release ();
}
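
One consequence of the by-value vec shows up in this function: where the old
code stored the VEC pointer itself in wi.info, the new code must store the
vector's address, since safe_push can move the underlying storage while every
user still reaches it through the same address.  A sketch of that sharing
pattern (the element values are placeholders):

  /* Sketch: share a by-value vec through a void * by address, so a
     reallocating safe_push stays visible to the owner.  */
  vec<numbered_tree> decl_list;
  decl_list.create (40);
  void *info = (void *) &decl_list;                /* As in wi.info.  */
  vec<numbered_tree> *list = (vec<numbered_tree> *) info;
  numbered_tree nt = { NULL_TREE, 0 };             /* {t, num}, placeholder.  */
  list->safe_push (nt);                            /* May reallocate.  */
  gcc_assert (decl_list.length () == 1);           /* Owner sees the push.  */
  decl_list.release ();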
-
source_location where;
} loc_map_pair;
-DEF_VEC_O (loc_map_pair);
-DEF_VEC_ALLOC_O (loc_map_pair, heap);
/* Unwind the different macro expansions that lead to the token which
location is WHERE and emit diagnostics showing the resulting
source_location where)
{
const struct line_map *map;
- VEC(loc_map_pair,heap) *loc_vec = NULL;
+ vec<loc_map_pair> loc_vec = vec<loc_map_pair>();
unsigned ix;
loc_map_pair loc, *iter;
loc.where = where;
loc.map = map;
- VEC_safe_push (loc_map_pair, heap, loc_vec, loc);
+ loc_vec.safe_push (loc);
/* WHERE is the location of a token inside the expansion of a
macro. MAP is the map holding the locations of that macro
expand_location_to_spelling_point (diagnostic->location).line;
if (!LINEMAP_SYSP (map))
- FOR_EACH_VEC_ELT (loc_map_pair, loc_vec, ix, iter)
+ FOR_EACH_VEC_ELT (loc_vec, ix, iter)
{
/* Sometimes, in the unwound macro expansion trace, we want to
print a part of the context that shows where, in the
linemap_map_get_macro_name (iter->map));
}
- VEC_free (loc_map_pair, heap, loc_vec);
+ loc_vec.release ();
}
/* This is a diagnostic finalizer implementation that is aware of
{
unsigned ix;
tree base;
- VEC(tree,gc) *accesses = BINFO_BASE_ACCESSES (t);
+ vec<tree, va_gc> *accesses = BINFO_BASE_ACCESSES (t);
dump_child ("type", BINFO_TYPE (t));
dump_int (di, "bases", BINFO_N_BASE_BINFOS (t));
for (ix = 0; BINFO_BASE_ITERATE (t, ix, base); ix++)
{
- tree access = (accesses ? VEC_index (tree, accesses, ix)
- : access_public_node);
+ tree access = (accesses ? (*accesses)[ix] : access_public_node);
const char *string = NULL;
if (access == access_public_node)
{
unsigned HOST_WIDE_INT cnt;
tree index, value;
- dump_int (di, "lngt", VEC_length (constructor_elt,
- CONSTRUCTOR_ELTS (t)));
+ dump_int (di, "lngt", vec_safe_length (CONSTRUCTOR_ELTS (t)));
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (t), cnt, index, value)
{
dump_child ("idx", index);
struct pointer_map_t *goto_queue_map;
/* The set of unique labels seen as entries in the goto queue. */
- VEC(tree,heap) *dest_array;
+ vec<tree> dest_array;
/* A label to be added at the end of the completed transformed
sequence. It will be set if may_fallthru was true *at one time*,
if (!outside_finally_tree (temp, tf->try_finally_expr))
return;
- if (! tf->dest_array)
+ if (! tf->dest_array.exists ())
{
- tf->dest_array = VEC_alloc (tree, heap, 10);
- VEC_quick_push (tree, tf->dest_array, label);
+ tf->dest_array.create (10);
+ tf->dest_array.quick_push (label);
index = 0;
}
else
{
- int n = VEC_length (tree, tf->dest_array);
+ int n = tf->dest_array.length ();
for (index = 0; index < n; ++index)
- if (VEC_index (tree, tf->dest_array, index) == label)
+ if (tf->dest_array[index] == label)
break;
if (index == n)
- VEC_safe_push (tree, heap, tf->dest_array, label);
+ tf->dest_array.safe_push (label);
}
/* In the case of a GOTO we want to record the destination label,
gcc_assert (q->is_label);
- q->cont_stmt = gimple_build_goto (VEC_index (tree, tf->dest_array, q->index));
+ q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
if (mod)
gimple_seq_add_seq (&q->repl_stmt, mod);
do_goto_redirection (q, finally_label, NULL, tf);
replace_goto_queue (tf);
- if (VEC_index (tree, tf->dest_array, 0) == tf->fallthru_label)
+ if (tf->dest_array[0] == tf->fallthru_label)
{
/* Reachable by goto to fallthru label only. Redirect it
to the new label (already created, sadly), and do not
tree label;
} *labels;
- return_index = VEC_length (tree, tf->dest_array);
+ return_index = tf->dest_array.length ();
labels = XCNEWVEC (struct labels_s, return_index + 1);
q = tf->goto_queue;
int return_index, eh_index, fallthru_index;
int nlabels, ndests, j, last_case_index;
tree last_case;
- VEC (tree,heap) *case_label_vec;
+ vec<tree> case_label_vec;
gimple_seq switch_body = NULL;
gimple x, eh_else;
tree tmp;
lower_eh_constructs_1 (state, &finally);
/* Prepare for switch statement generation. */
- nlabels = VEC_length (tree, tf->dest_array);
+ nlabels = tf->dest_array.length ();
return_index = nlabels;
eh_index = return_index + tf->may_return;
fallthru_index = eh_index + (tf->may_throw && !eh_else);
finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
finally_label = create_artificial_label (finally_loc);
- /* We use VEC_quick_push on case_label_vec throughout this function,
+ /* We use vec::quick_push on case_label_vec throughout this function,
since we know the size in advance and allocate precisely as much
space as needed. */
- case_label_vec = VEC_alloc (tree, heap, ndests);
+ case_label_vec.create (ndests);
last_case = NULL;
last_case_index = 0;
tmp = build_int_cst (integer_type_node, fallthru_index);
last_case = build_case_label (tmp, NULL,
create_artificial_label (tf_loc));
- VEC_quick_push (tree, case_label_vec, last_case);
+ case_label_vec.quick_push (last_case);
last_case_index++;
x = gimple_build_label (CASE_LABEL (last_case));
tmp = build_int_cst (integer_type_node, eh_index);
last_case = build_case_label (tmp, NULL,
create_artificial_label (tf_loc));
- VEC_quick_push (tree, case_label_vec, last_case);
+ case_label_vec.quick_push (last_case);
last_case_index++;
x = gimple_build_label (CASE_LABEL (last_case));
}
case_index = j + q->index;
- if (VEC_length (tree, case_label_vec) <= case_index
- || !VEC_index (tree, case_label_vec, case_index))
+ if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
{
tree case_lab;
void **slot;
cont_map = pointer_map_create ();
slot = pointer_map_insert (cont_map, case_lab);
*slot = q->cont_stmt;
- VEC_quick_push (tree, case_label_vec, case_lab);
+ case_label_vec.quick_push (case_lab);
}
}
for (j = last_case_index; j < last_case_index + nlabels; j++)
gimple cont_stmt;
void **slot;
- last_case = VEC_index (tree, case_label_vec, j);
+ last_case = case_label_vec[j];
gcc_assert (last_case);
gcc_assert (cont_map);
how many destinations are reached by the finally block. Use this to
determine how we process the finally block itself. */
- ndests = VEC_length (tree, this_tf.dest_array);
+ ndests = this_tf.dest_array.length ();
ndests += this_tf.may_fallthru;
ndests += this_tf.may_return;
ndests += this_tf.may_throw;
gimple_seq_add_stmt (&this_tf.top_p_seq, x);
}
- VEC_free (tree, heap, this_tf.dest_array);
+ this_tf.dest_array.release ();
free (this_tf.goto_queue);
if (this_tf.goto_queue_map)
pointer_map_destroy (this_tf.goto_queue_map);
{
eh_landing_pad old_lp, new_lp;
- old_lp = VEC_index (eh_landing_pad, old_fun->eh->lp_array, old_lp_nr);
+ old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
slot = pointer_map_contains (map, old_lp);
new_lp = (eh_landing_pad) *slot;
new_lp_nr = new_lp->index;
{
eh_region old_r, new_r;
- old_r = VEC_index (eh_region, old_fun->eh->region_array, -old_lp_nr);
+ old_r = (*old_fun->eh->region_array)[-old_lp_nr];
slot = pointer_map_contains (map, old_r);
new_r = (eh_region) *slot;
new_lp_nr = -new_r->index;
{
case ERT_TRY:
{
- VEC (tree, heap) *labels = NULL;
+ vec<tree> labels = vec<tree>();
tree default_label = NULL;
eh_catch c;
edge_iterator ei;
{
tree t = build_case_label (TREE_VALUE (flt_node),
NULL, lab);
- VEC_safe_push (tree, heap, labels, t);
+ labels.safe_push (t);
pointer_set_insert (seen_values, TREE_VALUE (flt_node));
have_label = true;
}
/* Don't generate a switch if there's only a default case.
This is common in the form of try { A; } catch (...) { B; }. */
- if (labels == NULL)
+ if (!labels.exists ())
{
e = single_succ_edge (src);
e->flags |= EDGE_FALLTHRU;
x = gimple_build_switch (filter, default_label, labels);
gsi_insert_before (&gsi, x, GSI_SAME_STMT);
- VEC_free (tree, heap, labels);
+ labels.release ();
}
pointer_set_destroy (seen_values);
}
basic_block bb;
int lp_nr, r_nr;
- r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array));
- lp_reachable
- = sbitmap_alloc (VEC_length (eh_landing_pad, cfun->eh->lp_array));
+ r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
+ lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
bitmap_clear (r_reachable);
bitmap_clear (lp_reachable);
}
for (r_nr = 1;
- VEC_iterate (eh_region, cfun->eh->region_array, r_nr, region); ++r_nr)
+ vec_safe_iterate (cfun->eh->region_array, r_nr, &region); ++r_nr)
if (region && !bitmap_bit_p (r_reachable, r_nr))
{
if (dump_file)
}
for (lp_nr = 1;
- VEC_iterate (eh_landing_pad, cfun->eh->lp_array, lp_nr, lp); ++lp_nr)
+ vec_safe_iterate (cfun->eh->lp_array, lp_nr, &lp); ++lp_nr)
if (lp && !bitmap_bit_p (lp_reachable, lp_nr))
{
if (dump_file)
if (cfun->eh == NULL)
return;
- for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
if (lp && lp->post_landing_pad)
{
if (label_to_block (lp->post_landing_pad) == NULL)
sbitmap r_reachable;
basic_block bb;
- r_reachable = sbitmap_alloc (VEC_length (eh_region, cfun->eh->region_array));
+ r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
bitmap_clear (r_reachable);
FOR_EACH_BB (bb)
}
}
- for (i = 1; VEC_iterate (eh_region, cfun->eh->region_array, i, r); ++i)
+ for (i = 1; cfun->eh->region_array->iterate (i, &r); ++i)
if (r && r->landing_pads == NULL && r->type != ERT_MUST_NOT_THROW
&& !bitmap_bit_p (r_reachable, i))
{
eh_landing_pad lp;
int i;
- for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
if (lp)
changed |= unsplit_eh (lp);
eh_landing_pad lp;
int i;
- for (i = 1; VEC_iterate (eh_landing_pad, cfun->eh->lp_array, i, lp); ++i)
+ for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
if (lp)
changed |= cleanup_empty_eh (lp);
the index of a TLS variable equals the index of its control variable in
the other vector. */
static varpool_node_set tls_vars;
-static VEC(varpool_node_ptr, heap) *control_vars;
+static vec<varpool_node_ptr> control_vars;
/* For the current basic block, an SSA_NAME that has computed the address
of the TLS variable at the corresponding index. */
-static VEC(tree, heap) *access_vars;
+static vec<tree> access_vars;
/* The type of the control structure, shared with the emutls.c runtime. */
static tree emutls_object_type;
tree
default_emutls_var_init (tree to, tree decl, tree proxy)
{
- VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 4);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, 4);
constructor_elt elt;
tree type = TREE_TYPE (to);
tree field = TYPE_FIELDS (type);
elt.index = field;
elt.value = fold_convert (TREE_TYPE (field), DECL_SIZE_UNIT (decl));
- VEC_quick_push (constructor_elt, v, elt);
+ v->quick_push (elt);
field = DECL_CHAIN (field);
elt.index = field;
elt.value = build_int_cst (TREE_TYPE (field),
DECL_ALIGN_UNIT (decl));
- VEC_quick_push (constructor_elt, v, elt);
+ v->quick_push (elt);
field = DECL_CHAIN (field);
elt.index = field;
elt.value = null_pointer_node;
- VEC_quick_push (constructor_elt, v, elt);
+ v->quick_push (elt);
field = DECL_CHAIN (field);
elt.index = field;
elt.value = proxy;
- VEC_quick_push (constructor_elt, v, elt);
+ v->quick_push (elt);
return build_constructor (type, v);
}
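
GC-allocated vectors keep a pointer-based interface: the vector lives behind a
vec<T, va_gc> * that vec_alloc creates in GC memory, and members are reached
through ->.  A hedged sketch of the allocation pattern used above:

  /* Sketch: allocate a GC vector of two constructor_elt entries.
     No explicit free; the garbage collector reclaims the storage.  */
  vec<constructor_elt, va_gc> *v;
  vec_alloc (v, 2);                        /* GC storage, capacity 2.  */
  constructor_elt elt = { NULL_TREE, integer_zero_node };  /* {index, value}  */
  v->quick_push (elt);
  v->quick_push (elt);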
unsigned int i;
i = emutls_index (decl);
- var = VEC_index (varpool_node_ptr, control_vars, i);
+ var = control_vars[i];
return var->symbol.decl;
}
/* Compute the address of the TLS variable with help from runtime. */
index = emutls_index (decl);
- addr = VEC_index (tree, access_vars, index);
+ addr = access_vars[index];
if (addr == NULL)
{
struct varpool_node *cvar;
tree cdecl;
gimple x;
- cvar = VEC_index (varpool_node_ptr, control_vars, index);
+ cvar = control_vars[index];
cdecl = cvar->symbol.decl;
TREE_ADDRESSABLE (cdecl) = 1;
ipa_record_reference ((symtab_node)d->cfun_node, (symtab_node)cvar, IPA_REF_ADDR, x);
/* Record this ssa_name for possible use later in the basic block. */
- VEC_replace (tree, access_vars, index, addr);
+ access_vars[index] = addr;
}
return addr;
static inline void
clear_access_vars (void)
{
- memset (VEC_address (tree, access_vars), 0,
- VEC_length (tree, access_vars) * sizeof(tree));
+ memset (access_vars.address (), 0,
+ access_vars.length () * sizeof(tree));
}
/* Lower the entire function NODE. */
cdecl = new_emutls_decl (var->symbol.decl, var->alias_of);
cvar = varpool_get_node (cdecl);
- VEC_quick_push (varpool_node_ptr, control_vars, cvar);
+ control_vars.quick_push (cvar);
if (!var->alias)
{
}
/* If we found no TLS variables, then there is no further work to do. */
- if (tls_vars->nodes == NULL)
+ if (!tls_vars->nodes.exists ())
{
tls_vars = NULL;
if (dump_file)
}
/* Allocate the on-the-side arrays that share indices with the TLS vars. */
- n_tls = VEC_length (varpool_node_ptr, tls_vars->nodes);
- control_vars = VEC_alloc (varpool_node_ptr, heap, n_tls);
- access_vars = VEC_alloc (tree, heap, n_tls);
- VEC_safe_grow (tree, heap, access_vars, n_tls);
+ n_tls = tls_vars->nodes.length ();
+ control_vars.create (n_tls);
+ access_vars.create (n_tls);
+ access_vars.safe_grow_cleared (n_tls);
/* Create the control variables for each TLS variable. */
- FOR_EACH_VEC_ELT (varpool_node_ptr, tls_vars->nodes, i, var)
+ FOR_EACH_VEC_ELT (tls_vars->nodes, i, var)
{
- var = VEC_index (varpool_node_ptr, tls_vars->nodes, i);
+ var = tls_vars->nodes[i];
if (var->alias && !var->alias_of)
any_aliases = true;
if (any_aliases)
{
alias_pair *p;
- FOR_EACH_VEC_ELT (alias_pair, alias_pairs, i, p)
+ FOR_EACH_VEC_SAFE_ELT (alias_pairs, i, p)
if (DECL_THREAD_LOCAL_P (p->decl))
{
p->decl = emutls_decl (p->decl);
if (ctor_body)
cgraph_build_static_cdtor ('I', ctor_body, DEFAULT_INIT_PRIORITY);
- VEC_free (varpool_node_ptr, heap, control_vars);
- VEC_free (tree, heap, access_vars);
+ control_vars.release ();
+ access_vars.release ();
free_varpool_node_set (tls_vars);
return TODO_ggc_collect | TODO_verify_all;
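
The create/safe_grow_cleared pairing used for access_vars above separates
capacity from length: create reserves space but leaves the vector empty, while
safe_grow_cleared extends the length with zeroed elements so they can be
assigned through operator[] (the replacement for VEC_replace).  A sketch with
a literal size:

  /* Sketch: reserve, then materialize zeroed elements.  */
  vec<tree> access;
  access.create (8);              /* Capacity 8, length 0.  */
  access.safe_grow_cleared (8);   /* Length 8, all NULL_TREE.  */
  access[3] = error_mark_node;    /* Replaces VEC_replace.  */
  gcc_assert (access[0] == NULL_TREE);
  access.release ();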
indirect call has been turned into a noreturn call. When this
happens, all the instructions after the call are no longer
reachable and must be deleted as dead. */
- VEC(gimple,gc) *modified_noreturn_calls;
+ vec<gimple, va_gc> *modified_noreturn_calls;
/* Array of all SSA_NAMEs used in the function. */
- VEC(tree,gc) *ssa_names;
+ vec<tree, va_gc> *ssa_names;
/* Artificial variable used for the virtual operand FUD chain. */
tree vop;
struct pointer_map_t * GTY((skip(""))) decls_to_pointers;
/* Free list of SSA_NAMEs. */
- VEC(tree,gc) *free_ssanames;
+ vec<tree, va_gc> *free_ssanames;
/* Hashtable holding definition for symbol. If this field is not NULL, it
means that the first reference to this variable in the function is a
extern unsigned int uid_decl_map_hash (const void *);
extern int uid_decl_map_eq (const void *, const void *);
-#define num_ssa_names (VEC_length (tree, cfun->gimple_df->ssa_names))
-#define ssa_name(i) (VEC_index (tree, cfun->gimple_df->ssa_names, (i)))
+#define num_ssa_names (vec_safe_length (cfun->gimple_df->ssa_names))
+#define ssa_name(i) ((*cfun->gimple_df->ssa_names)[(i)])
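
These macros lean on the vec_safe_* free functions, which tolerate a null
vec<T, va_gc> *, so callers need no explicit guard before the table is
allocated.  A short sketch:

  /* Sketch: vec_safe_length is 0 for a null vector, and vec_safe_push
     allocates the GC vector on first use.  */
  vec<tree, va_gc> *names = NULL;
  gcc_assert (vec_safe_length (names) == 0);  /* NULL-tolerant.  */
  vec_safe_push (names, error_mark_node);     /* Allocates on demand.  */
  gcc_assert ((*names)[0] == error_mark_node);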
/* Macros for showing usage statistics. */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
/* If this is a combined parallel+workshare region, this is a list
of additional arguments needed by the combined parallel+workshare
library call. */
- VEC(tree,gc) *ws_args;
+ vec<tree, va_gc> *ws_args;
/* The code for the omp directive of this region. */
enum gimple_code type;
extern bool gimple_duplicate_sese_tail (edge, edge, basic_block *, unsigned,
basic_block *);
extern void gather_blocks_in_sese_region (basic_block entry, basic_block exit,
- VEC(basic_block,heap) **bbs_p);
+ vec<basic_block> *bbs_p);
extern void add_phi_args_after_copy_bb (basic_block);
extern void add_phi_args_after_copy (basic_block *, unsigned, edge);
extern bool gimple_purge_dead_eh_edges (basic_block);
};
typedef struct _edge_var_map edge_var_map;
-DEF_VEC_O(edge_var_map);
-DEF_VEC_ALLOC_O(edge_var_map, heap);
/* A vector of var maps. */
-typedef VEC(edge_var_map, heap) *edge_var_map_vector;
+typedef vec<edge_var_map> edge_var_map_vector;
extern void init_tree_ssa (struct function *);
extern void redirect_edge_var_map_add (edge, tree, tree, source_location);
extern void redirect_edge_var_map_clear (edge);
extern void redirect_edge_var_map_dup (edge, edge);
-extern edge_var_map_vector redirect_edge_var_map_vector (edge);
+extern edge_var_map_vector *redirect_edge_var_map_vector (edge);
extern void redirect_edge_var_map_destroy (void);
extern edge ssa_redirect_edge (edge, basic_block);
basic_block ip_normal_pos (struct loop *);
bool gimple_duplicate_loop_to_header_edge (struct loop *, edge,
unsigned int, sbitmap,
- edge, VEC (edge, heap) **,
+ edge, vec<edge> *,
int);
struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *, edge);
void rename_variables_in_loop (struct loop *);
/* In tree-ssa-threadedge.c */
extern void threadedge_initialize_values (void);
extern void threadedge_finalize_values (void);
-extern VEC(tree,heap) *ssa_name_values;
+extern vec<tree> ssa_name_values;
#define SSA_NAME_VALUE(x) \
- (SSA_NAME_VERSION(x) < VEC_length(tree, ssa_name_values) \
- ? VEC_index(tree, ssa_name_values, SSA_NAME_VERSION(x)) \
+ (SSA_NAME_VERSION(x) < ssa_name_values.length () \
+ ? ssa_name_values[SSA_NAME_VERSION(x)] \
: NULL_TREE)
extern void set_ssa_name_value (tree, tree);
extern bool potentially_threadable_block (basic_block);
extern void thread_across_edge (gimple, edge, bool,
- VEC(tree, heap) **, tree (*) (gimple, gimple));
+ vec<tree> *, tree (*) (gimple, gimple));
extern void propagate_threaded_block_debug_into (basic_block, basic_block);
/* In tree-ssa-loop-im.c */
static bool
memrefs_read_or_written_unconditionally (gimple stmt,
- VEC (data_reference_p, heap) *drs)
+ vec<data_reference_p> drs)
{
int i, j;
data_reference_p a, b;
tree ca = bb_predicate (gimple_bb (stmt));
- for (i = 0; VEC_iterate (data_reference_p, drs, i, a); i++)
+ for (i = 0; drs.iterate (i, &a); i++)
if (DR_STMT (a) == stmt)
{
bool found = false;
if (x == 1)
continue;
- for (j = 0; VEC_iterate (data_reference_p, drs, j, b); j++)
+ for (j = 0; drs.iterate (j, &b); j++)
{
tree ref_base_a = DR_REF (a);
tree ref_base_b = DR_REF (b);
static bool
write_memrefs_written_at_least_once (gimple stmt,
- VEC (data_reference_p, heap) *drs)
+ vec<data_reference_p> drs)
{
int i, j;
data_reference_p a, b;
tree ca = bb_predicate (gimple_bb (stmt));
- for (i = 0; VEC_iterate (data_reference_p, drs, i, a); i++)
+ for (i = 0; drs.iterate (i, &a); i++)
if (DR_STMT (a) == stmt
&& DR_IS_WRITE (a))
{
if (x == 1)
continue;
- for (j = 0; VEC_iterate (data_reference_p, drs, j, b); j++)
+ for (j = 0; drs.iterate (j, &b); j++)
if (DR_STMT (b) != stmt
&& DR_IS_WRITE (b)
&& same_data_refs_base_objects (a, b))
iteration unconditionally. */
static bool
-ifcvt_memrefs_wont_trap (gimple stmt, VEC (data_reference_p, heap) *refs)
+ifcvt_memrefs_wont_trap (gimple stmt, vec<data_reference_p> refs)
{
return write_memrefs_written_at_least_once (stmt, refs)
&& memrefs_read_or_written_unconditionally (stmt, refs);
not trap in the innermost loop containing STMT. */
static bool
-ifcvt_could_trap_p (gimple stmt, VEC (data_reference_p, heap) *refs)
+ifcvt_could_trap_p (gimple stmt, vec<data_reference_p> refs)
{
if (gimple_vuse (stmt)
&& !gimple_could_trap_p_1 (stmt, false, false)
static bool
if_convertible_gimple_assign_stmt_p (gimple stmt,
- VEC (data_reference_p, heap) *refs)
+ vec<data_reference_p> refs)
{
tree lhs = gimple_assign_lhs (stmt);
basic_block bb;
- it is a GIMPLE_LABEL or a GIMPLE_COND. */
static bool
-if_convertible_stmt_p (gimple stmt, VEC (data_reference_p, heap) *refs)
+if_convertible_stmt_p (gimple stmt, vec<data_reference_p> refs)
{
switch (gimple_code (stmt))
{
static bool
if_convertible_loop_p_1 (struct loop *loop,
- VEC (loop_p, heap) **loop_nest,
- VEC (data_reference_p, heap) **refs,
- VEC (ddr_p, heap) **ddrs)
+ vec<loop_p> *loop_nest,
+ vec<data_reference_p> *refs,
+ vec<ddr_p> *ddrs)
{
bool res;
unsigned int i;
{
data_reference_p dr;
- for (i = 0; VEC_iterate (data_reference_p, *refs, i, dr); i++)
+ for (i = 0; refs->iterate (i, &dr); i++)
{
dr->aux = XNEW (struct ifc_dr);
DR_WRITTEN_AT_LEAST_ONCE (dr) = -1;
edge e;
edge_iterator ei;
bool res = false;
- VEC (data_reference_p, heap) *refs;
- VEC (ddr_p, heap) *ddrs;
- VEC (loop_p, heap) *loop_nest;
+ vec<data_reference_p> refs;
+ vec<ddr_p> ddrs;
+ vec<loop_p> loop_nest;
/* Handle only innermost loop. */
if (!loop || loop->inner)
if (loop_exit_edge_p (loop, e))
return false;
- refs = VEC_alloc (data_reference_p, heap, 5);
- ddrs = VEC_alloc (ddr_p, heap, 25);
- loop_nest = VEC_alloc (loop_p, heap, 3);
+ refs.create (5);
+ ddrs.create (25);
+ loop_nest.create (3);
res = if_convertible_loop_p_1 (loop, &loop_nest, &refs, &ddrs);
if (flag_tree_loop_if_convert_stores)
data_reference_p dr;
unsigned int i;
- for (i = 0; VEC_iterate (data_reference_p, refs, i, dr); i++)
+ for (i = 0; refs.iterate (i, &dr); i++)
free (dr->aux);
}
- VEC_free (loop_p, heap, loop_nest);
+ loop_nest.release ();
free_data_refs (refs);
free_dependence_relations (ddrs);
return res;
}
static tree
-remap_decls (tree decls, VEC(tree,gc) **nonlocalized_list, copy_body_data *id)
+remap_decls (tree decls, vec<tree, va_gc> *nonlocalized_list,
+ copy_body_data *id)
{
tree old_var;
tree new_decls = NULL_TREE;
if ((!optimize || debug_info_level > DINFO_LEVEL_TERSE)
&& !DECL_IGNORED_P (old_var)
&& nonlocalized_list)
- VEC_safe_push (tree, gc, *nonlocalized_list, old_var);
+ vec_safe_push (*nonlocalized_list, old_var);
continue;
}
if ((!optimize || debug_info_level > DINFO_LEVEL_TERSE)
&& !DECL_IGNORED_P (old_var)
&& nonlocalized_list)
- VEC_safe_push (tree, gc, *nonlocalized_list, old_var);
+ vec_safe_push (*nonlocalized_list, old_var);
}
else
{
BLOCK_ABSTRACT_ORIGIN (new_block) = old_block;
BLOCK_SOURCE_LOCATION (new_block) = BLOCK_SOURCE_LOCATION (old_block);
BLOCK_NONLOCALIZED_VARS (new_block)
- = VEC_copy (tree, gc, BLOCK_NONLOCALIZED_VARS (old_block));
+ = vec_safe_copy (BLOCK_NONLOCALIZED_VARS (old_block));
*block = new_block;
/* Remap its variables. */
BLOCK_VARS (new_block) = remap_decls (BLOCK_VARS (old_block),
&BLOCK_NONLOCALIZED_VARS (new_block),
id);
if (id->transform_lang_insert_block)
copy = gimple_build_debug_bind (gimple_debug_bind_get_var (stmt),
gimple_debug_bind_get_value (stmt),
stmt);
- VEC_safe_push (gimple, heap, id->debug_stmts, copy);
+ id->debug_stmts.safe_push (copy);
return copy;
}
if (gimple_debug_source_bind_p (stmt))
copy = gimple_build_debug_source_bind
(gimple_debug_source_bind_get_var (stmt),
gimple_debug_source_bind_get_value (stmt), stmt);
- VEC_safe_push (gimple, heap, id->debug_stmts, copy);
+ id->debug_stmts.safe_push (copy);
return copy;
}
all arguments corresponding to ... in the caller. */
tree p;
gimple new_call;
- VEC(tree, heap) *argarray;
+ vec<tree> argarray;
size_t nargs = gimple_call_num_args (id->gimple_call);
size_t n;
/* Create the new array of arguments. */
n = nargs + gimple_call_num_args (stmt);
- argarray = VEC_alloc (tree, heap, n);
- VEC_safe_grow (tree, heap, argarray, n);
+ argarray.create (n);
+ argarray.safe_grow_cleared (n);
/* Copy all the arguments before '...' */
- memcpy (VEC_address (tree, argarray),
+ memcpy (argarray.address (),
gimple_call_arg_ptr (stmt, 0),
gimple_call_num_args (stmt) * sizeof (tree));
/* Append the arguments passed in '...' */
- memcpy (VEC_address(tree, argarray) + gimple_call_num_args (stmt),
+ memcpy (argarray.address () + gimple_call_num_args (stmt),
gimple_call_arg_ptr (id->gimple_call, 0)
+ (gimple_call_num_args (id->gimple_call) - nargs),
nargs * sizeof (tree));
new_call = gimple_build_call_vec (gimple_call_fn (stmt),
argarray);
- VEC_free (tree, heap, argarray);
+ argarray.release ();
/* Copy all GIMPLE_CALL flags, location and block, except
GF_CALL_VA_ARG_PACK. */
else
gcc_unreachable ();
gsi_insert_before (&dsi, new_stmt, GSI_SAME_STMT);
- VEC_safe_push (gimple, heap, id->debug_stmts, new_stmt);
+ id->debug_stmts.safe_push (new_stmt);
gsi_prev (&ssi);
}
}
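The argarray hunk above leans on two members that replace their VEC_* spellings one for one: safe_grow_cleared sets the length (zero-filling the new slots) and address () exposes the underlying storage for a bulk memcpy. The same pattern in isolation, as a sketch assuming vec.h and system.h (copy_args is hypothetical):

  /* Build a heap vector holding N trees copied from SRC.  */
  static vec<tree>
  copy_args (tree *src, size_t n)
  {
    vec<tree> v;
    v.create (n);              /* reserve room for N elements  */
    v.safe_grow_cleared (n);   /* length becomes N, zero-filled  */
    memcpy (v.address (), src, n * sizeof (tree));
    /* address () is only valid until the next reallocation; the
       caller owns V and must release () it.  */
    return v;
  }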
&& TREE_CODE (t) == PARM_DECL
&& id->gimple_call)
{
- VEC(tree, gc) **debug_args = decl_debug_args_lookup (id->src_fn);
+ vec<tree, va_gc> **debug_args = decl_debug_args_lookup (id->src_fn);
unsigned int i;
if (debug_args != NULL)
{
- for (i = 0; i < VEC_length (tree, *debug_args); i += 2)
- if (VEC_index (tree, *debug_args, i) == DECL_ORIGIN (t)
- && TREE_CODE (VEC_index (tree, *debug_args, i + 1))
- == DEBUG_EXPR_DECL)
+ for (i = 0; i < vec_safe_length (*debug_args); i += 2)
+ if ((**debug_args)[i] == DECL_ORIGIN (t)
+ && TREE_CODE ((**debug_args)[i + 1]) == DEBUG_EXPR_DECL)
{
- t = VEC_index (tree, *debug_args, i + 1);
+ t = (**debug_args)[i + 1];
stmt->gsbase.subcode = GIMPLE_DEBUG_BIND;
gimple_debug_bind_set_value (stmt, t);
break;
size_t i;
gimple stmt;
- if (!id->debug_stmts)
+ if (!id->debug_stmts.exists ())
return;
- FOR_EACH_VEC_ELT (gimple, id->debug_stmts, i, stmt)
+ FOR_EACH_VEC_ELT (id->debug_stmts, i, stmt)
copy_debug_stmt (stmt, id);
- VEC_free (gimple, heap, id->debug_stmts);
+ id->debug_stmts.release ();
}
/* Make a copy of the body of SRC_FN so that it can be inserted inline in
fold_marked_statements (last, id.statements_to_fold);
pointer_set_destroy (id.statements_to_fold);
- gcc_assert (!id.debug_stmts);
+ gcc_assert (!id.debug_stmts.exists ());
/* If we didn't inline into the function there is nothing to do. */
if (!inlined_p)
if (flag_mudflap && mf_marked_p (*tp))
mf_mark (new_tree);
- CONSTRUCTOR_ELTS (new_tree) = VEC_copy (constructor_elt, gc,
- CONSTRUCTOR_ELTS (*tp));
+ CONSTRUCTOR_ELTS (new_tree) = vec_safe_copy (CONSTRUCTOR_ELTS (*tp));
*tp = new_tree;
}
else if (code == STATEMENT_LIST)
/* This will remap a lot of the same decls again, but this should be
harmless. */
if (gimple_bind_vars (stmt))
- gimple_bind_set_vars (stmt, remap_decls (gimple_bind_vars (stmt), NULL, id));
+ gimple_bind_set_vars (stmt, remap_decls (gimple_bind_vars (stmt),
+ NULL, id));
}
/* Keep iterating. */
if (node->clone.tree_map)
{
unsigned int i;
- for (i = 0; i < VEC_length (ipa_replace_map_p, node->clone.tree_map); i++)
+ for (i = 0; i < vec_safe_length (node->clone.tree_map); i++)
{
struct ipa_replace_map *replace_info;
- replace_info = VEC_index (ipa_replace_map_p, node->clone.tree_map, i);
+ replace_info = (*node->clone.tree_map)[i];
walk_tree (&replace_info->old_tree, copy_tree_body_r, id, NULL);
walk_tree (&replace_info->new_tree, copy_tree_body_r, id, NULL);
}
*/
void
tree_function_versioning (tree old_decl, tree new_decl,
- VEC(ipa_replace_map_p,gc)* tree_map,
+ vec<ipa_replace_map_p, va_gc> *tree_map,
bool update_clones, bitmap args_to_skip,
bool skip_return, bitmap blocks_to_copy,
basic_block new_entry)
unsigned i;
struct ipa_replace_map *replace_info;
basic_block old_entry_block, bb;
- VEC (gimple, heap) *init_stmts = VEC_alloc (gimple, heap, 10);
+ vec<gimple> init_stmts;
+ init_stmts.create (10);
tree vars = NULL_TREE;
gcc_assert (TREE_CODE (old_decl) == FUNCTION_DECL
/* Copy over debug args. */
if (DECL_HAS_DEBUG_ARGS_P (old_decl))
{
- VEC(tree, gc) **new_debug_args, **old_debug_args;
+ vec<tree, va_gc> **new_debug_args, **old_debug_args;
gcc_checking_assert (decl_debug_args_lookup (new_decl) == NULL);
DECL_HAS_DEBUG_ARGS_P (new_decl) = 0;
old_debug_args = decl_debug_args_lookup (old_decl);
if (old_debug_args)
{
new_debug_args = decl_debug_args_insert (new_decl);
- *new_debug_args = VEC_copy (tree, gc, *old_debug_args);
+ *new_debug_args = vec_safe_copy (*old_debug_args);
}
}
id.src_node = old_version_node;
id.dst_node = new_version_node;
id.src_cfun = DECL_STRUCT_FUNCTION (old_decl);
- if (id.src_node->ipa_transforms_to_apply)
+ if (id.src_node->ipa_transforms_to_apply.exists ())
{
- VEC(ipa_opt_pass,heap) * old_transforms_to_apply = id.dst_node->ipa_transforms_to_apply;
+ vec<ipa_opt_pass> old_transforms_to_apply
+ = id.dst_node->ipa_transforms_to_apply;
unsigned int i;
- id.dst_node->ipa_transforms_to_apply = VEC_copy (ipa_opt_pass, heap,
- id.src_node->ipa_transforms_to_apply);
- for (i = 0; i < VEC_length (ipa_opt_pass, old_transforms_to_apply); i++)
- VEC_safe_push (ipa_opt_pass, heap, id.dst_node->ipa_transforms_to_apply,
- VEC_index (ipa_opt_pass,
- old_transforms_to_apply,
- i));
- VEC_free (ipa_opt_pass, heap, old_transforms_to_apply);
+ id.dst_node->ipa_transforms_to_apply
+ = id.src_node->ipa_transforms_to_apply.copy ();
+ for (i = 0; i < old_transforms_to_apply.length (); i++)
+ id.dst_node->ipa_transforms_to_apply.safe_push (old_transforms_to_apply[i]);
+ old_transforms_to_apply.release ();
}
id.copy_decl = copy_decl_no_change;
/* If there's a tree_map, prepare for substitution. */
if (tree_map)
- for (i = 0; i < VEC_length (ipa_replace_map_p, tree_map); i++)
+ for (i = 0; i < tree_map->length (); i++)
{
gimple init;
- replace_info = VEC_index (ipa_replace_map_p, tree_map, i);
+ replace_info = (*tree_map)[i];
if (replace_info->replace_p)
{
tree op = replace_info->new_tree;
NULL,
&vars);
if (init)
- VEC_safe_push (gimple, heap, init_stmts, init);
+ init_stmts.safe_push (init);
}
}
/* Copy the function's arguments. */
declare_inline_vars (DECL_INITIAL (new_decl), vars);
- if (!VEC_empty (tree, DECL_STRUCT_FUNCTION (old_decl)->local_decls))
+ if (!vec_safe_is_empty (DECL_STRUCT_FUNCTION (old_decl)->local_decls))
/* Add local vars. */
add_local_variables (DECL_STRUCT_FUNCTION (old_decl), cfun, &id);
debug stmts doesn't affect BB count, which may in the end cause
codegen differences. */
bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
- while (VEC_length (gimple, init_stmts))
- insert_init_stmt (&id, bb, VEC_pop (gimple, init_stmts));
+ while (init_stmts.length ())
+ insert_init_stmt (&id, bb, init_stmts.pop ());
update_clone_info (&id);
/* Remap the nonlocal_goto_save_area, if any. */
free_dominance_info (CDI_DOMINATORS);
free_dominance_info (CDI_POST_DOMINATORS);
- gcc_assert (!id.debug_stmts);
- VEC_free (gimple, heap, init_stmts);
+ gcc_assert (!id.debug_stmts.exists ());
+ init_stmts.release ();
pop_cfun ();
return;
}
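tree_function_versioning mixes both allocation strategies: heap vectors (init_stmts) are plain objects with member calls, while GC vectors (the debug args, tree_map) stay behind a pointer and go through the vec_safe_* free functions, which treat a NULL pointer as the empty vector. A sketch of the GC side, assuming vec.h with GGC support (decl_cache and remember_decl are hypothetical):

  /* GC-allocated vector; NULL means empty, nothing allocated yet.  */
  static GTY (()) vec<tree, va_gc> *decl_cache;

  static void
  remember_decl (tree t)
  {
    /* Allocates the vector on first push; no explicit create ().  */
    vec_safe_push (decl_cache, t);
    gcc_assert (!vec_safe_is_empty (decl_cache));
  }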
#ifndef GCC_TREE_INLINE_H
#define GCC_TREE_INLINE_H
-#include "vecir.h" /* For VEC(gimple,heap). */
-
struct cgraph_edge;
/* Indicate the desired behavior wrt call graph edges. We can either
basic_block entry_bb;
/* Debug statements that need processing. */
- VEC(gimple,heap) *debug_stmts;
+ vec<gimple> debug_stmts;
/* A map from local declarations in the inlined function to
equivalents in the function into which it is being inlined, where
#include "cfgloop.h"
#include "domwalk.h"
#include "params.h"
-#include "vecprim.h"
#include "diagnostic-core.h"
- A NULL node at the top entry is used to mark the last slot
associated with the current block. */
-static VEC(tree,heap) *block_defs_stack;
+static vec<tree> block_defs_stack;
/* Set of existing SSA names being replaced by update_ssa. */
released after we finish updating the SSA web. */
static bitmap names_to_release;
-/* VEC of VECs of PHIs to rewrite in a basic block. Element I corresponds
+/* vec of vec of PHIs to rewrite in a basic block. Element I corresponds
to the basic block with index I. Allocated once per compilation, *not*
released between different functions. */
-static VEC(gimple_vec, heap) *phis_to_rewrite;
+static vec<gimple_vec> phis_to_rewrite;
/* The bitmap of non-NULL elements of PHIS_TO_REWRITE. */
static bitmap blocks_with_phis_to_rewrite;
/* The information associated with decls. */
typedef struct var_info_d *var_info_p;
-DEF_VEC_P(var_info_p);
-DEF_VEC_ALLOC_P(var_info_p,heap);
/* Each entry in VAR_INFOS contains an element of type STRUCT
VAR_INFO_D. */
/* The information associated with names. */
typedef struct ssa_name_info *ssa_name_info_p;
-DEF_VEC_P (ssa_name_info_p);
-DEF_VEC_ALLOC_P (ssa_name_info_p, heap);
-static VEC(ssa_name_info_p, heap) *info_for_ssa_name;
+static vec<ssa_name_info_p> info_for_ssa_name;
static unsigned current_info_for_ssa_name_age;
static bitmap_obstack update_ssa_obstack;
/* The set of symbols we ought to re-write into SSA form in update_ssa. */
static bitmap symbols_to_rename_set;
-static VEC(tree,heap) *symbols_to_rename;
+static vec<tree> symbols_to_rename;
/* Mark SYM for renaming. */
if (!symbols_to_rename_set)
symbols_to_rename_set = BITMAP_ALLOC (NULL);
if (bitmap_set_bit (symbols_to_rename_set, DECL_UID (sym)))
- VEC_safe_push (tree, heap, symbols_to_rename, sym);
+ symbols_to_rename.safe_push (sym);
}
/* Return true if SYM is marked for renaming. */
get_ssa_name_ann (tree name)
{
unsigned ver = SSA_NAME_VERSION (name);
- unsigned len = VEC_length (ssa_name_info_p, info_for_ssa_name);
+ unsigned len = info_for_ssa_name.length ();
struct ssa_name_info *info;
/* Re-allocate the vector at most once per update/into-SSA. */
if (ver >= len)
- VEC_safe_grow_cleared (ssa_name_info_p, heap,
- info_for_ssa_name, num_ssa_names);
+ info_for_ssa_name.safe_grow_cleared (num_ssa_names);
/* But allocate infos lazily. */
- info = VEC_index (ssa_name_info_p, info_for_ssa_name, ver);
+ info = info_for_ssa_name[ver];
if (!info)
{
info = XCNEW (struct ssa_name_info);
info->age = current_info_for_ssa_name_age;
info->info.need_phi_state = NEED_PHI_STATE_UNKNOWN;
- VEC_replace (ssa_name_info_p, info_for_ssa_name, ver, info);
+ info_for_ssa_name[ver] = info;
}
if (info->age < current_info_for_ssa_name_age)
static void
prune_unused_phi_nodes (bitmap phis, bitmap kills, bitmap uses)
{
- VEC(int, heap) *worklist;
+ vec<int> worklist;
bitmap_iterator bi;
unsigned i, b, p, u, top;
bitmap live_phis;
dfs_out numbers, increase the dfs number by one (so that it corresponds
to the start of the following interval, not to the end of the current
one). We use WORKLIST as a stack. */
- worklist = VEC_alloc (int, heap, n_defs + 1);
- VEC_quick_push (int, worklist, 1);
+ worklist.create (n_defs + 1);
+ worklist.quick_push (1);
top = 1;
n_defs = 1;
for (i = 1; i < adef; i++)
{
/* This is a closing element. Interval corresponding to the top
of the stack after removing it follows. */
- VEC_pop (int, worklist);
- top = VEC_index (int, worklist, VEC_length (int, worklist) - 1);
+ worklist.pop ();
+ top = worklist[worklist.length () - 1];
defs[n_defs].bb_index = top;
defs[n_defs].dfs_num = defs[i].dfs_num + 1;
}
it to the correct position. */
defs[n_defs].bb_index = defs[i].bb_index;
defs[n_defs].dfs_num = defs[i].dfs_num;
- VEC_quick_push (int, worklist, b);
+ worklist.quick_push (b);
top = b;
}
else
n_defs++;
}
- VEC_pop (int, worklist);
- gcc_assert (VEC_empty (int, worklist));
+ worklist.pop ();
+ gcc_assert (worklist.is_empty ());
/* Now process the uses. */
live_phis = BITMAP_ALLOC (NULL);
EXECUTE_IF_SET_IN_BITMAP (uses, 0, i, bi)
{
- VEC_safe_push (int, heap, worklist, i);
+ worklist.safe_push (i);
}
- while (!VEC_empty (int, worklist))
+ while (!worklist.is_empty ())
{
- b = VEC_pop (int, worklist);
+ b = worklist.pop ();
if (b == ENTRY_BLOCK)
continue;
continue;
bitmap_set_bit (uses, u);
- VEC_safe_push (int, heap, worklist, u);
+ worklist.safe_push (u);
}
}
- VEC_free (int, heap, worklist);
+ worklist.release ();
bitmap_copy (phis, live_phis);
BITMAP_FREE (live_phis);
free (defs);
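prune_unused_phi_nodes drives the converted vector as a plain stack: quick_push asserts that room was reserved up front (create / reserve), safe_push grows on demand, and pop / is_empty round out the idiom. A stripped-down sketch, assuming vec.h and the bitmap API (visit_from is hypothetical):

  /* Worklist visit over node indices starting at START.  */
  static void
  visit_from (int start, bitmap seen)
  {
    vec<int> worklist;
    worklist.create (16);          /* room for 16 quick pushes  */
    worklist.quick_push (start);   /* no growth check performed  */
    while (!worklist.is_empty ())
      {
        int b = worklist.pop ();
        if (!bitmap_set_bit (seen, b))
          continue;
        /* Successors of B would be queued here with
           worklist.safe_push (succ);  */
      }
    worklist.release ();
  }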
bitmap_set_bit (blocks_with_phis_to_rewrite, idx);
n = (unsigned) last_basic_block + 1;
- if (VEC_length (gimple_vec, phis_to_rewrite) < n)
- VEC_safe_grow_cleared (gimple_vec, heap, phis_to_rewrite, n);
+ if (phis_to_rewrite.length () < n)
+ phis_to_rewrite.safe_grow_cleared (n);
- phis = VEC_index (gimple_vec, phis_to_rewrite, idx);
- if (!phis)
- phis = VEC_alloc (gimple, heap, 10);
+ phis = phis_to_rewrite[idx];
+ phis.reserve (10);
- VEC_safe_push (gimple, heap, phis, phi);
- VEC_replace (gimple_vec, phis_to_rewrite, idx, phis);
+ phis.safe_push (phi);
+ phis_to_rewrite[idx] = phis;
}
/* Insert PHI nodes for variable VAR using the iterated dominance
htab_iterator hi;
unsigned i;
var_info_p info;
- VEC(var_info_p,heap) *vars;
+ vec<var_info_p> vars;
timevar_push (TV_TREE_INSERT_PHI_NODES);
- vars = VEC_alloc (var_info_p, heap, htab_elements (var_infos));
+ vars.create (htab_elements (var_infos));
FOR_EACH_HTAB_ELEMENT (var_infos, info, var_info_p, hi)
if (info->info.need_phi_state != NEED_PHI_STATE_NO)
- VEC_quick_push (var_info_p, vars, info);
+ vars.quick_push (info);
/* Do two stages to avoid code generation differences for UID
differences but no UID ordering differences. */
- VEC_qsort (var_info_p, vars, insert_phi_nodes_compare_var_infos);
+ vars.qsort (insert_phi_nodes_compare_var_infos);
- FOR_EACH_VEC_ELT (var_info_p, vars, i, info)
+ FOR_EACH_VEC_ELT (vars, i, info)
{
bitmap idf = compute_idf (info->info.def_blocks.def_blocks, dfs);
insert_phi_nodes_for (info->var, idf, false);
BITMAP_FREE (idf);
}
- VEC_free(var_info_p, heap, vars);
+ vars.release ();
timevar_pop (TV_TREE_INSERT_PHI_NODES);
}
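insert_phi_nodes shows two more one-for-one rewrites: FOR_EACH_VEC_ELT loses its leading type argument, and VEC_qsort becomes the qsort member, which takes an ordinary qsort-style comparator over the element storage. A sketch with a hypothetical comparator on DECL_UIDs, assuming vec.h and tree.h:

  static int
  cmp_decl_uid (const void *a, const void *b)
  {
    tree ta = *(const tree *) a;
    tree tb = *(const tree *) b;
    return (int) (DECL_UID (ta) - DECL_UID (tb));
  }

  static void
  sort_decls (vec<tree> decls)
  {
    unsigned i;
    tree t;

    decls.qsort (cmp_decl_uid);      /* sorts in place  */
    FOR_EACH_VEC_ELT (decls, i, t)   /* (vector, index, element)  */
      gcc_assert (t != NULL_TREE);
  }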
in the stack so that we know which symbol is being defined by
this SSA name when we unwind the stack. */
if (currdef && !is_gimple_reg (sym))
- VEC_safe_push (tree, heap, block_defs_stack, sym);
+ block_defs_stack.safe_push (sym);
/* Push the current reaching definition into BLOCK_DEFS_STACK. This
stack is later used by the dominator tree callbacks to restore
block after a recursive visit to all its immediately dominated
blocks. If there is no current reaching definition, then just
record the underlying _DECL node. */
- VEC_safe_push (tree, heap, block_defs_stack, currdef ? currdef : sym);
+ block_defs_stack.safe_push (currdef ? currdef : sym);
/* Set the current reaching definition for SYM to be DEF. */
info->current_def = def;
fprintf (dump_file, "\n\nRenaming block #%d\n\n", bb->index);
/* Mark the unwind point for this block. */
- VEC_safe_push (tree, heap, block_defs_stack, NULL_TREE);
+ block_defs_stack.safe_push (NULL_TREE);
/* Step 1. Register new definitions for every PHI node in the block.
Conceptually, all the PHI nodes are executed in parallel and each PHI
basic_block bb ATTRIBUTE_UNUSED)
{
/* Restore CURRDEFS to its original state. */
- while (VEC_length (tree, block_defs_stack) > 0)
+ while (block_defs_stack.length () > 0)
{
- tree tmp = VEC_pop (tree, block_defs_stack);
+ tree tmp = block_defs_stack.pop ();
tree saved_def, var;
if (tmp == NULL_TREE)
saved_def = tmp;
var = SSA_NAME_VAR (saved_def);
if (!is_gimple_reg (var))
- var = VEC_pop (tree, block_defs_stack);
+ var = block_defs_stack.pop ();
}
else
{
i = 1;
fprintf (file, "Level %d (current level)\n", i);
- for (j = (int) VEC_length (tree, block_defs_stack) - 1; j >= 0; j--)
+ for (j = (int) block_defs_stack.length () - 1; j >= 0; j--)
{
tree name, var;
- name = VEC_index (tree, block_defs_stack, j);
+ name = block_defs_stack[j];
if (name == NULL_TREE)
{
i++;
if (!is_gimple_reg (var))
{
j--;
- var = VEC_index (tree, block_defs_stack, j);
+ var = block_defs_stack[j];
}
}
unsigned i;
tree var;
- if (VEC_empty (tree, symbols_to_rename))
+ if (symbols_to_rename.is_empty ())
return;
fprintf (file, "\n\nCurrent reaching definitions\n\n");
- FOR_EACH_VEC_ELT (tree, symbols_to_rename, i, var)
+ FOR_EACH_VEC_ELT (symbols_to_rename, i, var)
{
common_info_p info = get_common_info (var);
fprintf (file, "CURRDEF (");
restore the reaching definitions for all the variables
defined in the block after a recursive visit to all its
immediately dominated blocks. */
- VEC_reserve (tree, heap, block_defs_stack, 2);
- VEC_quick_push (tree, block_defs_stack, currdef);
- VEC_quick_push (tree, block_defs_stack, old_name);
+ block_defs_stack.reserve (2);
+ block_defs_stack.quick_push (currdef);
+ block_defs_stack.quick_push (old_name);
/* Set the current reaching definition for OLD_NAME to be
NEW_NAME. */
if (!bitmap_bit_p (blocks_with_phis_to_rewrite, e->dest->index))
continue;
- phis = VEC_index (gimple_vec, phis_to_rewrite, e->dest->index);
- FOR_EACH_VEC_ELT (gimple, phis, i, phi)
+ phis = phis_to_rewrite[e->dest->index];
+ FOR_EACH_VEC_ELT (phis, i, phi)
{
tree arg, lhs_sym, reaching_def = NULL;
use_operand_p arg_p;
bb->index);
/* Mark the unwind point for this block. */
- VEC_safe_push (tree, heap, block_defs_stack, NULL_TREE);
+ block_defs_stack.safe_push (NULL_TREE);
if (!bitmap_bit_p (blocks_to_update, bb->index))
return;
rewrite_update_leave_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
basic_block bb ATTRIBUTE_UNUSED)
{
- while (VEC_length (tree, block_defs_stack) > 0)
+ while (block_defs_stack.length () > 0)
{
- tree var = VEC_pop (tree, block_defs_stack);
+ tree var = block_defs_stack.pop ();
tree saved_def;
/* NULL indicates the unwind stop point for this block (see
if (var == NULL)
return;
- saved_def = VEC_pop (tree, block_defs_stack);
+ saved_def = block_defs_stack.pop ();
get_common_info (var)->current_def = saved_def;
}
}
else
gcc_unreachable ();
- block_defs_stack = VEC_alloc (tree, heap, 10);
+ block_defs_stack.create (10);
/* Initialize the dominator walker. */
init_walk_dominator_tree (&walk_data);
dump_tree_ssa_stats (dump_file);
}
- VEC_free (tree, heap, block_defs_stack);
+ block_defs_stack.release ();
timevar_pop (TV_TREE_SSA_REWRITE_BLOCKS);
}
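rewrite_blocks creates block_defs_stack on entry and releases it on exit, and elsewhere the patch tests the same file-scope vectors with exists () where the old code compared the VEC pointer against NULL. A compact sketch of the lifecycle, assuming vec.h (demo_lifecycle is hypothetical; a static vec is zero-initialized, so it starts out not existing):

  static vec<tree> scratch;

  static void
  demo_lifecycle (void)
  {
    if (!scratch.exists ())    /* replaces the old NULL test  */
      scratch.create (10);
    scratch.safe_push (NULL_TREE);
    scratch.release ();        /* frees the storage ...  */
    gcc_assert (!scratch.exists ());   /* ... and un-exists it  */
  }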
/* Allocate memory for the DEF_BLOCKS hash table. */
gcc_assert (var_infos == NULL);
- var_infos = htab_create (VEC_length (tree, cfun->local_decls),
+ var_infos = htab_create (vec_safe_length (cfun->local_decls),
var_info_hash, var_info_eq, free);
bitmap_obstack_initialize (&update_ssa_obstack);
BITMAP_FREE (symbols_to_rename_set);
symbols_to_rename_set = NULL;
- VEC_free (tree, heap, symbols_to_rename);
+ symbols_to_rename.release ();
if (names_to_release)
{
if (blocks_with_phis_to_rewrite)
EXECUTE_IF_SET_IN_BITMAP (blocks_with_phis_to_rewrite, 0, i, bi)
{
- gimple_vec phis = VEC_index (gimple_vec, phis_to_rewrite, i);
-
- VEC_free (gimple, heap, phis);
- VEC_replace (gimple_vec, phis_to_rewrite, i, NULL);
+ gimple_vec phis = phis_to_rewrite[i];
+ phis.release ();
+ phis_to_rewrite[i].create (0);
}
BITMAP_FREE (blocks_with_phis_to_rewrite);
gcc_assert (update_ssa_initialized_fn == cfun);
blocks_with_phis_to_rewrite = BITMAP_ALLOC (NULL);
- if (!phis_to_rewrite)
- phis_to_rewrite = VEC_alloc (gimple_vec, heap, last_basic_block + 1);
+ if (!phis_to_rewrite.exists ())
+ phis_to_rewrite.create (last_basic_block + 1);
blocks_to_update = BITMAP_ALLOC (NULL);
/* Ensure that the dominance information is up-to-date. */
sbitmap_free (tmp);
}
- FOR_EACH_VEC_ELT (tree, symbols_to_rename, i, sym)
+ FOR_EACH_VEC_ELT (symbols_to_rename, i, sym)
insert_updated_phi_nodes_for (sym, dfs, blocks_to_update,
update_flags);
EXECUTE_IF_SET_IN_BITMAP (old_ssa_names, 0, i, sbi)
get_ssa_name_ann (ssa_name (i))->info.current_def = NULL_TREE;
- FOR_EACH_VEC_ELT (tree, symbols_to_rename, i, sym)
+ FOR_EACH_VEC_ELT (symbols_to_rename, i, sym)
get_var_info (sym)->info.current_def = NULL_TREE;
/* Now start the renaming process at START_BB. */
/* This is a cache of STATEMENT_LIST nodes. We create and destroy them
fairly often during gimplification. */
-static GTY ((deletable (""))) VEC(tree,gc) *stmt_list_cache;
+static GTY ((deletable (""))) vec<tree, va_gc> *stmt_list_cache;
tree
alloc_stmt_list (void)
{
tree list;
- if (!VEC_empty (tree, stmt_list_cache))
+ if (!vec_safe_is_empty (stmt_list_cache))
{
- list = VEC_pop (tree, stmt_list_cache);
+ list = stmt_list_cache->pop ();
memset (list, 0, sizeof(struct tree_base));
TREE_SET_CODE (list, STATEMENT_LIST);
}
{
gcc_assert (!STATEMENT_LIST_HEAD (t));
gcc_assert (!STATEMENT_LIST_TAIL (t));
- VEC_safe_push (tree, gc, stmt_list_cache, t);
+ vec_safe_push (stmt_list_cache, t);
}
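The statement-list cache keeps the pointer form because it lives in GC memory and is GTY ((deletable)): the collector may clear it at any collection point, so every access must tolerate a NULL pointer. vec_safe_is_empty does, and the member pop () is only reached once the vector is known to be live and non-empty. The same pattern as a sketch, assuming vec.h and GGC (node_cache and get_cached_node are hypothetical):

  static GTY ((deletable)) vec<tree, va_gc> *node_cache;

  static tree
  get_cached_node (void)
  {
    /* NULL-safe test first; pop () only on a live vector.  */
    if (!vec_safe_is_empty (node_cache))
      return node_cache->pop ();
    return NULL_TREE;          /* caller falls back to allocating  */
  }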
/* A subroutine of append_to_statement_list{,_force}. T is not NULL. */
data_reference_p secondary_dr;
} *partition_t;
-DEF_VEC_P (partition_t);
-DEF_VEC_ALLOC_P (partition_t, heap);
/* Allocate and initialize a partition from BITMAP. */
if (!bitmap_bit_p (seen, v))
{
unsigned i;
- VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
+ vec<int> nodes;
+ nodes.create (3);
graphds_dfs (rdg, &v, 1, &nodes, false, NULL);
- FOR_EACH_VEC_ELT (int, nodes, i, x)
+ FOR_EACH_VEC_ELT (nodes, i, x)
{
if (!bitmap_set_bit (seen, x))
continue;
}
}
- VEC_free (int, heap, nodes);
+ nodes.release ();
}
}
bitmap loops, bitmap processed)
{
unsigned i;
- VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
+ vec<int> nodes;
+ nodes.create (3);
int x;
bitmap_set_bit (processed, v);
graphds_dfs (rdg, &v, 1, &nodes, false, remaining_stmts);
rdg_flag_vertex (rdg, v, partition, loops);
- FOR_EACH_VEC_ELT (int, nodes, i, x)
+ FOR_EACH_VEC_ELT (nodes, i, x)
if (!already_processed_vertex_p (processed, x))
rdg_flag_vertex_and_dependent (rdg, x, partition, loops, processed);
- VEC_free (int, heap, nodes);
+ nodes.release ();
}
/* Initialize CONDS with all the condition statements from the basic
blocks of LOOP. */
static void
-collect_condition_stmts (struct loop *loop, VEC (gimple, heap) **conds)
+collect_condition_stmts (struct loop *loop, vec<gimple> *conds)
{
unsigned i;
edge e;
- VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ vec<edge> exits = get_loop_exit_edges (loop);
- FOR_EACH_VEC_ELT (edge, exits, i, e)
+ FOR_EACH_VEC_ELT (exits, i, e)
{
gimple cond = last_stmt (e->src);
if (cond)
- VEC_safe_push (gimple, heap, *conds, cond);
+ conds->safe_push (cond);
}
- VEC_free (edge, heap, exits);
+ exits.release ();
}
/* Add to PARTITION all the exit condition statements for LOOPS
{
unsigned i;
bitmap_iterator bi;
- VEC (gimple, heap) *conds = VEC_alloc (gimple, heap, 3);
+ vec<gimple> conds;
+ conds.create (3);
EXECUTE_IF_SET_IN_BITMAP (loops, 0, i, bi)
collect_condition_stmts (get_loop (i), &conds);
- while (!VEC_empty (gimple, conds))
+ while (!conds.is_empty ())
{
- gimple cond = VEC_pop (gimple, conds);
+ gimple cond = conds.pop ();
int v = rdg_vertex_for_stmt (rdg, cond);
bitmap new_loops = BITMAP_ALLOC (NULL);
BITMAP_FREE (new_loops);
}
- VEC_free (gimple, heap, conds);
+ conds.release ();
}
/* Returns a bitmap in which all the statements needed for computing
bitmap loops = BITMAP_ALLOC (NULL);
bitmap processed = BITMAP_ALLOC (NULL);
- FOR_EACH_VEC_ELT (int, c->vertices, i, v)
+ FOR_EACH_VEC_ELT (c->vertices, i, v)
if (!already_processed_vertex_p (processed, v))
rdg_flag_vertex_and_dependent (rdg, v, partition, loops, processed);
/* Free memory for COMPONENTS. */
static void
-free_rdg_components (VEC (rdgc, heap) *components)
+free_rdg_components (vec<rdgc> components)
{
int i;
rdgc x;
- FOR_EACH_VEC_ELT (rdgc, components, i, x)
+ FOR_EACH_VEC_ELT (components, i, x)
{
- VEC_free (int, heap, x->vertices);
+ x->vertices.release ();
free (x);
}
- VEC_free (rdgc, heap, components);
+ components.release ();
}
/* Build the COMPONENTS vector with the strongly connected components
of RDG in which the STARTING_VERTICES occur. */
static void
-rdg_build_components (struct graph *rdg, VEC (int, heap) *starting_vertices,
- VEC (rdgc, heap) **components)
+rdg_build_components (struct graph *rdg, vec<int> starting_vertices,
+ vec<rdgc> *components)
{
int i, v;
bitmap saved_components = BITMAP_ALLOC (NULL);
int n_components = graphds_scc (rdg, NULL);
- VEC (int, heap) **all_components = XNEWVEC (VEC (int, heap) *, n_components);
+ /* ??? Macros cannot process template types with more than one
+ argument, so we need this typedef. */
+ typedef vec<int> vec_int_heap;
+ vec<int> *all_components = XNEWVEC (vec_int_heap, n_components);
for (i = 0; i < n_components; i++)
- all_components[i] = VEC_alloc (int, heap, 3);
+ all_components[i].create (3);
for (i = 0; i < rdg->n_vertices; i++)
- VEC_safe_push (int, heap, all_components[rdg->vertices[i].component], i);
+ all_components[rdg->vertices[i].component].safe_push (i);
- FOR_EACH_VEC_ELT (int, starting_vertices, i, v)
+ FOR_EACH_VEC_ELT (starting_vertices, i, v)
{
int c = rdg->vertices[v].component;
x->num = c;
x->vertices = all_components[c];
- VEC_safe_push (rdgc, heap, *components, x);
+ components->safe_push (x);
}
}
for (i = 0; i < n_components; i++)
if (!bitmap_bit_p (saved_components, i))
- VEC_free (int, heap, all_components[i]);
+ all_components[i].release ();
free (all_components);
BITMAP_FREE (saved_components);
return;
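The ??? comment above deserves spelling out: allocation macros such as XNEWVEC split their argument list on commas before the compiler ever parses the template, so a template type written with two parameters cannot be passed to them directly. A sketch of the failure and the typedef workaround, assuming vec.h and libiberty's XNEWVEC (alloc_component_vecs is hypothetical):

  static void
  alloc_component_vecs (int n)
  {
    /* XNEWVEC (vec<int, va_heap>, n) would hand the preprocessor
       three "arguments" ("vec<int", "va_heap>" and "n") for a
       two-argument macro.  Hide the comma behind a typedef:  */
    typedef vec<int, va_heap> vec_int_heap;
    vec_int_heap *all = XNEWVEC (vec_int_heap, n);
    int i;

    for (i = 0; i < n; i++)
      all[i].create (3);
    /* ... use them, then release () each element and free (all).  */
  }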
/* But exactly one store and/or load. */
- for (j = 0;
- VEC_iterate (data_reference_p, RDG_DATAREFS (rdg, i), j, dr); ++j)
+ for (j = 0; RDG_DATAREFS (rdg, i).iterate (j, &dr); ++j)
{
if (DR_IS_READ (dr))
{
return;
/* Now check that if there is a dependence this dependence is
of a suitable form for memmove. */
- VEC(loop_p, heap) *loops = NULL;
+ vec<loop_p> loops = vec<loop_p>();
ddr_p ddr;
- VEC_safe_push (loop_p, heap, loops, loop);
+ loops.safe_push (loop);
ddr = initialize_data_dependence_relation (single_load, single_store,
loops);
compute_affine_dependence (ddr, loop);
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
{
free_dependence_relation (ddr);
- VEC_free (loop_p, heap, loops);
+ loops.release ();
return;
}
if (DDR_ARE_DEPENDENT (ddr) != chrec_known)
if (DDR_NUM_DIST_VECTS (ddr) == 0)
{
free_dependence_relation (ddr);
- VEC_free (loop_p, heap, loops);
+ loops.release ();
return;
}
lambda_vector dist_v;
- FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
+ FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
{
int dist = dist_v[index_in_loop_nest (loop->num,
DDR_LOOP_NEST (ddr))];
if (dist > 0 && !DDR_REVERSED_P (ddr))
{
free_dependence_relation (ddr);
- VEC_free (loop_p, heap, loops);
+ loops.release ();
return;
}
}
}
free_dependence_relation (ddr);
- VEC_free (loop_p, heap, loops);
+ loops.release ();
partition->kind = PKIND_MEMCPY;
partition->main_dr = single_store;
partition->secondary_dr = single_load;
if (RDG_MEM_WRITE_STMT (rdg, j)
|| RDG_MEM_READS_STMT (rdg, j))
{
- FOR_EACH_VEC_ELT (data_reference_p, RDG_DATAREFS (rdg, i), k, ref1)
+ FOR_EACH_VEC_ELT (RDG_DATAREFS (rdg, i), k, ref1)
{
tree base1 = ref_base_address (ref1);
if (base1)
- FOR_EACH_VEC_ELT (data_reference_p,
- RDG_DATAREFS (rdg, j), l, ref2)
+ FOR_EACH_VEC_ELT (RDG_DATAREFS (rdg, j), l, ref2)
if (base1 == ref_base_address (ref2))
return true;
}
distributed in different loops. */
static void
-rdg_build_partitions (struct graph *rdg, VEC (rdgc, heap) *components,
- VEC (int, heap) **other_stores,
- VEC (partition_t, heap) **partitions, bitmap processed)
+rdg_build_partitions (struct graph *rdg, vec<rdgc> components,
+ vec<int> *other_stores,
+ vec<partition_t> *partitions, bitmap processed)
{
int i;
rdgc x;
partition_t partition = partition_alloc (NULL);
- FOR_EACH_VEC_ELT (rdgc, components, i, x)
+ FOR_EACH_VEC_ELT (components, i, x)
{
partition_t np;
- int v = VEC_index (int, x->vertices, 0);
+ int v = x->vertices[0];
if (bitmap_bit_p (processed, v))
continue;
dump_bitmap (dump_file, partition->stmts);
}
- VEC_safe_push (partition_t, heap, *partitions, partition);
+ partitions->safe_push (partition);
partition = partition_alloc (NULL);
}
}
for (i = 0; i < rdg->n_vertices; i++)
if (!bitmap_bit_p (processed, i)
&& rdg_defs_used_in_other_loops_p (rdg, i))
- VEC_safe_push (int, heap, *other_stores, i);
+ other_stores->safe_push (i);
/* If there are still statements left in the OTHER_STORES array,
create other components and partitions with these stores and
their dependences. */
- if (VEC_length (int, *other_stores) > 0)
+ if (other_stores->length () > 0)
{
- VEC (rdgc, heap) *comps = VEC_alloc (rdgc, heap, 3);
- VEC (int, heap) *foo = VEC_alloc (int, heap, 3);
+ vec<rdgc> comps;
+ comps.create (3);
+ vec<int> foo;
+ foo.create (3);
rdg_build_components (rdg, *other_stores, &comps);
rdg_build_partitions (rdg, comps, &foo, partitions, processed);
- VEC_free (int, heap, foo);
+ foo.release ();
free_rdg_components (comps);
}
/* If there is something left in the last partition, save it. */
if (bitmap_count_bits (partition->stmts) > 0)
- VEC_safe_push (partition_t, heap, *partitions, partition);
+ partitions->safe_push (partition);
else
partition_free (partition);
}
/* Dump to FILE the PARTITIONS. */
static void
-dump_rdg_partitions (FILE *file, VEC (partition_t, heap) *partitions)
+dump_rdg_partitions (FILE *file, vec<partition_t> partitions)
{
int i;
partition_t partition;
- FOR_EACH_VEC_ELT (partition_t, partitions, i, partition)
+ FOR_EACH_VEC_ELT (partitions, i, partition)
debug_bitmap_file (file, partition->stmts);
}
/* Debug PARTITIONS. */
-extern void debug_rdg_partitions (VEC (partition_t, heap) *);
+extern void debug_rdg_partitions (vec<partition_t>);
DEBUG_FUNCTION void
-debug_rdg_partitions (VEC (partition_t, heap) *partitions)
+debug_rdg_partitions (vec<partition_t> partitions)
{
dump_rdg_partitions (stderr, partitions);
}
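Note the parameter convention the conversion settles on, visible throughout this file: a vec<T> is one pointer wide, so read-only consumers such as dump_rdg_partitions take it by value, while functions that may grow the vector, such as rdg_build_partitions, take vec<T> *, since growth can reallocate and the caller must observe the new storage. Both sides as a sketch, assuming vec.h (sum_all and append_one are hypothetical):

  static int
  sum_all (vec<int> v)            /* by value: reads, never grows  */
  {
    int s = 0, x;
    unsigned i;

    FOR_EACH_VEC_ELT (v, i, x)
      s += x;
    return s;
  }

  static void
  append_one (vec<int> *v, int x) /* by pointer: may reallocate  */
  {
    v->safe_push (x);
  }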
write operations of RDG. */
static bool
-partition_contains_all_rw (struct graph *rdg, VEC (partition_t, heap) *partitions)
+partition_contains_all_rw (struct graph *rdg,
+ vec<partition_t> partitions)
{
int i;
partition_t partition;
int nrw = number_of_rw_in_rdg (rdg);
- FOR_EACH_VEC_ELT (partition_t, partitions, i, partition)
+ FOR_EACH_VEC_ELT (partitions, i, partition)
if (nrw == number_of_rw_in_partition (rdg, partition))
return true;
static int
ldist_gen (struct loop *loop, struct graph *rdg,
- VEC (int, heap) *starting_vertices)
+ vec<int> starting_vertices)
{
int i, nbp;
- VEC (rdgc, heap) *components = VEC_alloc (rdgc, heap, 3);
- VEC (partition_t, heap) *partitions = VEC_alloc (partition_t, heap, 3);
- VEC (int, heap) *other_stores = VEC_alloc (int, heap, 3);
+ vec<rdgc> components;
+ components.create (3);
+ vec<partition_t> partitions;
+ partitions.create (3);
+ vec<int> other_stores;
+ other_stores.create (3);
partition_t partition;
bitmap processed = BITMAP_ALLOC (NULL);
bool any_builtin;
unsigned j;
bool found = false;
- FOR_EACH_VEC_ELT (int, starting_vertices, j, v)
+ FOR_EACH_VEC_ELT (starting_vertices, j, v)
if (i == v)
{
found = true;
}
if (!found)
- VEC_safe_push (int, heap, other_stores, i);
+ other_stores.safe_push (i);
}
}
BITMAP_FREE (processed);
any_builtin = false;
- FOR_EACH_VEC_ELT (partition_t, partitions, i, partition)
+ FOR_EACH_VEC_ELT (partitions, i, partition)
{
classify_partition (loop, rdg, partition);
any_builtin |= partition_builtin_p (partition);
i = 0;
do
{
- for (; VEC_iterate (partition_t, partitions, i, into); ++i)
+ for (; partitions.iterate (i, &into); ++i)
if (!partition_builtin_p (into))
break;
- for (++i; VEC_iterate (partition_t, partitions, i, partition); ++i)
+ for (++i; partitions.iterate (i, &partition); ++i)
if (!partition_builtin_p (partition))
{
bitmap_ior_into (into->stmts, partition->stmts);
- VEC_ordered_remove (partition_t, partitions, i);
+ partitions.ordered_remove (i);
i--;
}
else
break;
}
- while ((unsigned) i < VEC_length (partition_t, partitions));
+ while ((unsigned) i < partitions.length ());
}
else
{
partition_t into;
int j;
- for (i = 0; VEC_iterate (partition_t, partitions, i, into); ++i)
+ for (i = 0; partitions.iterate (i, &into); ++i)
{
if (partition_builtin_p (into))
continue;
for (j = i + 1;
- VEC_iterate (partition_t, partitions, j, partition); ++j)
+ partitions.iterate (j, &partition); ++j)
{
if (!partition_builtin_p (partition)
/* ??? The following is horribly inefficient,
"memory accesses\n");
}
bitmap_ior_into (into->stmts, partition->stmts);
- VEC_ordered_remove (partition_t, partitions, j);
+ partitions.ordered_remove (j);
j--;
}
}
}
}
- nbp = VEC_length (partition_t, partitions);
+ nbp = partitions.length ();
if (nbp == 0
- || (nbp == 1
- && !partition_builtin_p (VEC_index (partition_t, partitions, 0)))
- || (nbp > 1
- && partition_contains_all_rw (rdg, partitions)))
+ || (nbp == 1 && !partition_builtin_p (partitions[0]))
+ || (nbp > 1 && partition_contains_all_rw (rdg, partitions)))
{
nbp = 0;
goto ldist_done;
if (dump_file && (dump_flags & TDF_DETAILS))
dump_rdg_partitions (dump_file, partitions);
- FOR_EACH_VEC_ELT (partition_t, partitions, i, partition)
+ FOR_EACH_VEC_ELT (partitions, i, partition)
generate_code_for_partition (loop, partition, i < nbp - 1);
ldist_done:
BITMAP_FREE (remaining_stmts);
BITMAP_FREE (upstream_mem_writes);
- FOR_EACH_VEC_ELT (partition_t, partitions, i, partition)
+ FOR_EACH_VEC_ELT (partitions, i, partition)
partition_free (partition);
- VEC_free (int, heap, other_stores);
- VEC_free (partition_t, heap, partitions);
+ other_stores.release ();
+ partitions.release ();
free_rdg_components (components);
return nbp;
}
Returns the number of distributed loops. */
static int
-distribute_loop (struct loop *loop, VEC (gimple, heap) *stmts)
+distribute_loop (struct loop *loop, vec<gimple> stmts)
{
int res = 0;
struct graph *rdg;
gimple s;
unsigned i;
- VEC (int, heap) *vertices;
- VEC (ddr_p, heap) *dependence_relations;
- VEC (data_reference_p, heap) *datarefs;
- VEC (loop_p, heap) *loop_nest;
-
- datarefs = VEC_alloc (data_reference_p, heap, 10);
- dependence_relations = VEC_alloc (ddr_p, heap, 100);
- loop_nest = VEC_alloc (loop_p, heap, 3);
+ vec<int> vertices;
+ vec<ddr_p> dependence_relations;
+ vec<data_reference_p> datarefs;
+ vec<loop_p> loop_nest;
+
+ datarefs.create (10);
+ dependence_relations.create (100);
+ loop_nest.create (3);
rdg = build_rdg (loop, &loop_nest, &dependence_relations, &datarefs);
if (!rdg)
free_dependence_relations (dependence_relations);
free_data_refs (datarefs);
- VEC_free (loop_p, heap, loop_nest);
+ loop_nest.release ();
return res;
}
- vertices = VEC_alloc (int, heap, 3);
+ vertices.create (3);
if (dump_file && (dump_flags & TDF_DETAILS))
dump_rdg (dump_file, rdg);
- FOR_EACH_VEC_ELT (gimple, stmts, i, s)
+ FOR_EACH_VEC_ELT (stmts, i, s)
{
int v = rdg_vertex_for_stmt (rdg, s);
if (v >= 0)
{
- VEC_safe_push (int, heap, vertices, v);
+ vertices.safe_push (v);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
}
res = ldist_gen (loop, rdg, vertices);
- VEC_free (int, heap, vertices);
+ vertices.release ();
free_rdg (rdg);
free_dependence_relations (dependence_relations);
free_data_refs (datarefs);
- VEC_free (loop_p, heap, loop_nest);
+ loop_nest.release ();
return res;
}
walking to innermost loops. */
FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST)
{
- VEC (gimple, heap) *work_list = NULL;
+ vec<gimple> work_list = vec<gimple>();
basic_block *bbs;
int num = loop->num;
int nb_generated_loops = 0;
|| is_gimple_reg (gimple_assign_lhs (stmt)))
continue;
- VEC_safe_push (gimple, heap, work_list, stmt);
+ work_list.safe_push (stmt);
}
}
free (bbs);
- if (VEC_length (gimple, work_list) > 0)
+ if (work_list.length () > 0)
nb_generated_loops = distribute_loop (loop, work_list);
if (nb_generated_loops > 0)
fprintf (dump_file, "Loop %d is the same.\n", num);
}
- VEC_free (gimple, heap, work_list);
+ work_list.release ();
}
if (changed)
delayed until program finish time. If they're still incomplete by
then, warnings are emitted. */
-static GTY (()) VEC(tree,gc) *deferred_static_decls;
+static GTY (()) vec<tree, va_gc> *deferred_static_decls;
/* A list of statements for calling __mf_register() at startup time. */
static GTY (()) tree enqueued_call_stmt_chain;
if (DECL_P (obj) && DECL_EXTERNAL (obj) && mf_artificial (obj))
return;
- VEC_safe_push (tree, gc, deferred_static_decls, obj);
+ vec_safe_push (deferred_static_decls, obj);
}
{
size_t i;
tree obj;
- FOR_EACH_VEC_ELT (tree, deferred_static_decls, i, obj)
+ FOR_EACH_VEC_ELT (*deferred_static_decls, i, obj)
{
gcc_assert (DECL_P (obj));
mf_varname_tree (obj));
}
- VEC_truncate (tree, deferred_static_decls, 0);
+ deferred_static_decls->truncate (0);
}
/* Append all the enqueued registration calls. */
/* We just processed all calls. */
if (cfun->gimple_df)
- {
- VEC_free (gimple, gc, MODIFIED_NORETURN_CALLS (cfun));
- MODIFIED_NORETURN_CALLS (cfun) = NULL;
- }
+ vec_free (MODIFIED_NORETURN_CALLS (cfun));
/* Dump a textual representation of the flowgraph. */
if (dump_file)
#include "expr.h"
-DEF_VEC_I(source_location);
-DEF_VEC_ALLOC_I(source_location,heap);
/* Used to hold all the components required to do SSA PHI elimination.
The node and pred/succ list is a simple linear list of nodes and
int size;
/* List of nodes in the elimination graph. */
- VEC(int,heap) *nodes;
+ vec<int> nodes;
/* The predecessor and successor edge list. */
- VEC(int,heap) *edge_list;
+ vec<int> edge_list;
/* Source locus on each edge */
- VEC(source_location,heap) *edge_locus;
+ vec<source_location> edge_locus;
/* Visited vector. */
sbitmap visited;
/* Stack for visited nodes. */
- VEC(int,heap) *stack;
+ vec<int> stack;
/* The variable partition map. */
var_map map;
edge e;
/* List of constant copies to emit. These are pushed on in pairs. */
- VEC(int,heap) *const_dests;
- VEC(tree,heap) *const_copies;
+ vec<int> const_dests;
+ vec<tree> const_copies;
/* Source locations for any constant copies. */
- VEC(source_location,heap) *copy_locus;
+ vec<source_location> copy_locus;
} *elim_graph;
{
elim_graph g = (elim_graph) xmalloc (sizeof (struct _elim_graph));
- g->nodes = VEC_alloc (int, heap, 30);
- g->const_dests = VEC_alloc (int, heap, 20);
- g->const_copies = VEC_alloc (tree, heap, 20);
- g->copy_locus = VEC_alloc (source_location, heap, 10);
- g->edge_list = VEC_alloc (int, heap, 20);
- g->edge_locus = VEC_alloc (source_location, heap, 10);
- g->stack = VEC_alloc (int, heap, 30);
+ g->nodes.create (30);
+ g->const_dests.create (20);
+ g->const_copies.create (20);
+ g->copy_locus.create (10);
+ g->edge_list.create (20);
+ g->edge_locus.create (10);
+ g->stack.create (30);
g->visited = sbitmap_alloc (size);
static inline void
clear_elim_graph (elim_graph g)
{
- VEC_truncate (int, g->nodes, 0);
- VEC_truncate (int, g->edge_list, 0);
- VEC_truncate (source_location, g->edge_locus, 0);
+ g->nodes.truncate (0);
+ g->edge_list.truncate (0);
+ g->edge_locus.truncate (0);
}
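clear_elim_graph uses truncate (0) rather than release () deliberately: the graph is refilled for every basic block, and truncate resets the length while keeping the allocation, so later pushes reuse the existing capacity. The distinction as a sketch, assuming vec.h (demo_reuse is hypothetical):

  static void
  demo_reuse (void)
  {
    vec<int> v;

    v.create (8);
    v.safe_push (1);
    v.truncate (0);            /* empty again, capacity retained  */
    gcc_assert (v.is_empty () && v.exists ());
    v.release ();              /* now the storage is actually freed  */
    gcc_assert (!v.exists ());
  }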
delete_elim_graph (elim_graph g)
{
sbitmap_free (g->visited);
- VEC_free (int, heap, g->stack);
- VEC_free (int, heap, g->edge_list);
- VEC_free (tree, heap, g->const_copies);
- VEC_free (int, heap, g->const_dests);
- VEC_free (int, heap, g->nodes);
- VEC_free (source_location, heap, g->copy_locus);
- VEC_free (source_location, heap, g->edge_locus);
+ g->stack.release ();
+ g->edge_list.release ();
+ g->const_copies.release ();
+ g->const_dests.release ();
+ g->nodes.release ();
+ g->copy_locus.release ();
+ g->edge_locus.release ();
free (g);
}
static inline int
elim_graph_size (elim_graph g)
{
- return VEC_length (int, g->nodes);
+ return g->nodes.length ();
}
int x;
int t;
- FOR_EACH_VEC_ELT (int, g->nodes, x, t)
+ FOR_EACH_VEC_ELT (g->nodes, x, t)
if (t == node)
return;
- VEC_safe_push (int, heap, g->nodes, node);
+ g->nodes.safe_push (node);
}
static inline void
elim_graph_add_edge (elim_graph g, int pred, int succ, source_location locus)
{
- VEC_safe_push (int, heap, g->edge_list, pred);
- VEC_safe_push (int, heap, g->edge_list, succ);
- VEC_safe_push (source_location, heap, g->edge_locus, locus);
+ g->edge_list.safe_push (pred);
+ g->edge_list.safe_push (succ);
+ g->edge_locus.safe_push (locus);
}
{
int y;
unsigned x;
- for (x = 0; x < VEC_length (int, g->edge_list); x += 2)
- if (VEC_index (int, g->edge_list, x) == node)
+ for (x = 0; x < g->edge_list.length (); x += 2)
+ if (g->edge_list[x] == node)
{
- VEC_replace (int, g->edge_list, x, -1);
- y = VEC_index (int, g->edge_list, x + 1);
- VEC_replace (int, g->edge_list, x + 1, -1);
- *locus = VEC_index (source_location, g->edge_locus, x / 2);
- VEC_replace (source_location, g->edge_locus, x / 2, UNKNOWN_LOCATION);
+ g->edge_list[x] = -1;
+ y = g->edge_list[x + 1];
+ g->edge_list[x + 1] = -1;
+ *locus = g->edge_locus[x / 2];
+ g->edge_locus[x / 2] = UNKNOWN_LOCATION;
return y;
}
*locus = UNKNOWN_LOCATION;
do { \
unsigned x_; \
int y_; \
- for (x_ = 0; x_ < VEC_length (int, (GRAPH)->edge_list); x_ += 2) \
+ for (x_ = 0; x_ < (GRAPH)->edge_list.length (); x_ += 2) \
{ \
- y_ = VEC_index (int, (GRAPH)->edge_list, x_); \
+ y_ = (GRAPH)->edge_list[x_]; \
if (y_ != (NODE)) \
continue; \
- (void) ((VAR) = VEC_index (int, (GRAPH)->edge_list, x_ + 1)); \
- (void) ((LOCUS) = VEC_index (source_location, \
- (GRAPH)->edge_locus, x_ / 2)); \
+ (void) ((VAR) = (GRAPH)->edge_list[x_ + 1]); \
+ (void) ((LOCUS) = (GRAPH)->edge_locus[x_ / 2]); \
CODE; \
} \
} while (0)
do { \
unsigned x_; \
int y_; \
- for (x_ = 0; x_ < VEC_length (int, (GRAPH)->edge_list); x_ += 2) \
+ for (x_ = 0; x_ < (GRAPH)->edge_list.length (); x_ += 2) \
{ \
- y_ = VEC_index (int, (GRAPH)->edge_list, x_ + 1); \
+ y_ = (GRAPH)->edge_list[x_ + 1]; \
if (y_ != (NODE)) \
continue; \
- (void) ((VAR) = VEC_index (int, (GRAPH)->edge_list, x_)); \
- (void) ((LOCUS) = VEC_index (source_location, \
- (GRAPH)->edge_locus, x_ / 2)); \
+ (void) ((VAR) = (GRAPH)->edge_list[x_]); \
+ (void) ((LOCUS) = (GRAPH)->edge_locus[x_ / 2]); \
CODE; \
} \
} while (0)
{
/* Save constant copies until all other copies have been emitted
on this edge. */
- VEC_safe_push (int, heap, g->const_dests, p0);
- VEC_safe_push (tree, heap, g->const_copies, Ti);
- VEC_safe_push (source_location, heap, g->copy_locus, locus);
+ g->const_dests.safe_push (p0);
+ g->const_copies.safe_push (Ti);
+ g->copy_locus.safe_push (locus);
}
else
{
if (!bitmap_bit_p (g->visited, S))
elim_forward (g, S);
});
- VEC_safe_push (int, heap, g->stack, T);
+ g->stack.safe_push (T);
}
{
int x;
- gcc_assert (VEC_length (tree, g->const_copies) == 0);
- gcc_assert (VEC_length (source_location, g->copy_locus) == 0);
+ gcc_assert (g->const_copies.length () == 0);
+ gcc_assert (g->copy_locus.length () == 0);
/* Abnormal edges already have everything coalesced. */
if (e->flags & EDGE_ABNORMAL)
int part;
bitmap_clear (g->visited);
- VEC_truncate (int, g->stack, 0);
+ g->stack.truncate (0);
- FOR_EACH_VEC_ELT (int, g->nodes, x, part)
+ FOR_EACH_VEC_ELT (g->nodes, x, part)
{
if (!bitmap_bit_p (g->visited, part))
elim_forward (g, part);
}
bitmap_clear (g->visited);
- while (VEC_length (int, g->stack) > 0)
+ while (g->stack.length () > 0)
{
- x = VEC_pop (int, g->stack);
+ x = g->stack.pop ();
if (!bitmap_bit_p (g->visited, x))
elim_create (g, x);
}
}
/* If there are any pending constant copies, issue them now. */
- while (VEC_length (tree, g->const_copies) > 0)
+ while (g->const_copies.length () > 0)
{
int dest;
tree src;
source_location locus;
- src = VEC_pop (tree, g->const_copies);
- dest = VEC_pop (int, g->const_dests);
- locus = VEC_pop (source_location, g->copy_locus);
+ src = g->const_copies.pop ();
+ dest = g->const_dests.pop ();
+ locus = g->copy_locus.pop ();
insert_value_copy_on_edge (e, dest, src, locus);
}
}
static bool
lambda_transform_legal_p (lambda_trans_matrix trans,
int nb_loops,
- VEC (ddr_p, heap) *dependence_relations)
+ vec<ddr_p> dependence_relations)
{
unsigned int i, j;
lambda_vector distres;
&& LTM_ROWSIZE (trans) == nb_loops);
/* When there are no dependences, the transformation is correct. */
- if (VEC_length (ddr_p, dependence_relations) == 0)
+ if (dependence_relations.length () == 0)
return true;
- ddr = VEC_index (ddr_p, dependence_relations, 0);
+ ddr = dependence_relations[0];
if (ddr == NULL)
return true;
distres = lambda_vector_new (nb_loops);
/* For each distance vector in the dependence graph. */
- FOR_EACH_VEC_ELT (ddr_p, dependence_relations, i, ddr)
+ FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
{
/* Don't care about relations for which we know that there is no
dependence, nor about read-read (aka. output-dependences):
static bool
loop_parallel_p (struct loop *loop, struct obstack * parloop_obstack)
{
- VEC (loop_p, heap) *loop_nest;
- VEC (ddr_p, heap) *dependence_relations;
- VEC (data_reference_p, heap) *datarefs;
+ vec<loop_p> loop_nest;
+ vec<ddr_p> dependence_relations;
+ vec<data_reference_p> datarefs;
lambda_trans_matrix trans;
bool ret = false;
/* Check for problems with dependences. If the loop can be reversed,
the iterations are independent. */
- datarefs = VEC_alloc (data_reference_p, heap, 10);
- dependence_relations = VEC_alloc (ddr_p, heap, 10 * 10);
- loop_nest = VEC_alloc (loop_p, heap, 3);
+ datarefs.create (10);
+ dependence_relations.create (10 * 10);
+ loop_nest.create (3);
if (! compute_data_dependences_for_loop (loop, true, &loop_nest, &datarefs,
&dependence_relations))
{
" FAILED: data dependencies exist across iterations\n");
end:
- VEC_free (loop_p, heap, loop_nest);
+ loop_nest.release ();
free_dependence_relations (dependence_relations);
free_data_refs (datarefs);
eliminate_local_variables (edge entry, edge exit)
{
basic_block bb;
- VEC (basic_block, heap) *body = VEC_alloc (basic_block, heap, 3);
+ vec<basic_block> body;
+ body.create (3);
unsigned i;
gimple_stmt_iterator gsi;
bool has_debug_stmt = false;
gather_blocks_in_sese_region (entry_bb, exit_bb, &body);
- FOR_EACH_VEC_ELT (basic_block, body, i, bb)
+ FOR_EACH_VEC_ELT (body, i, bb)
if (bb != entry_bb && bb != exit_bb)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
if (is_gimple_debug (gsi_stmt (gsi)))
eliminate_local_variables_stmt (entry, &gsi, decl_address);
if (has_debug_stmt)
- FOR_EACH_VEC_ELT (basic_block, body, i, bb)
+ FOR_EACH_VEC_ELT (body, i, bb)
if (bb != entry_bb && bb != exit_bb)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
if (gimple_debug_bind_p (gsi_stmt (gsi)))
eliminate_local_variables_stmt (entry, &gsi, decl_address);
htab_delete (decl_address);
- VEC_free (basic_block, heap, body);
+ body.release ();
}
/* Returns true if expression EXPR is not defined between ENTRY and
tree type, type_name, nvar;
gimple_stmt_iterator gsi;
struct clsn_data clsn_data;
- VEC (basic_block, heap) *body = VEC_alloc (basic_block, heap, 3);
+ vec<basic_block> body;
+ body.create (3);
basic_block bb;
basic_block entry_bb = bb1;
basic_block exit_bb = exit->dest;
entry = single_succ_edge (entry_bb);
gather_blocks_in_sese_region (entry_bb, exit_bb, &body);
- FOR_EACH_VEC_ELT (basic_block, body, i, bb)
+ FOR_EACH_VEC_ELT (body, i, bb)
{
if (bb != entry_bb && bb != exit_bb)
{
and discard those for which we know there's nothing we can
do. */
if (has_debug_stmt)
- FOR_EACH_VEC_ELT (basic_block, body, i, bb)
+ FOR_EACH_VEC_ELT (body, i, bb)
if (bb != entry_bb && bb != exit_bb)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
}
}
- VEC_free (basic_block, heap, body);
+ body.release ();
if (htab_elements (name_copies) == 0 && htab_elements (reduction_list) == 0)
{
the -2 on all the calculations below. */
#define NUM_BUCKETS 10
-static GTY ((deletable (""))) VEC(gimple,gc) *free_phinodes[NUM_BUCKETS - 2];
+static GTY ((deletable (""))) vec<gimple, va_gc> *free_phinodes[NUM_BUCKETS - 2];
static unsigned long free_phinode_count;
static int ideal_phi_node_len (int);
/* If our free list has an element, then use it. */
if (bucket < NUM_BUCKETS - 2
- && gimple_phi_capacity (VEC_index (gimple, free_phinodes[bucket], 0))
- >= len)
+ && gimple_phi_capacity ((*free_phinodes[bucket])[0]) >= len)
{
free_phinode_count--;
- phi = VEC_pop (gimple, free_phinodes[bucket]);
- if (VEC_empty (gimple, free_phinodes[bucket]))
- VEC_free (gimple, gc, free_phinodes[bucket]);
+ phi = free_phinodes[bucket]->pop ();
+ if (free_phinodes[bucket]->is_empty ())
+ vec_free (free_phinodes[bucket]);
if (GATHER_STATISTICS)
phi_nodes_reused++;
}
bucket = len > NUM_BUCKETS - 1 ? NUM_BUCKETS - 1 : len;
bucket -= 2;
- VEC_safe_push (gimple, gc, free_phinodes[bucket], phi);
+ vec_safe_push (free_phinodes[bucket], phi);
free_phinode_count++;
}
unsigned always_accessed : 1;
} *dref;
-DEF_VEC_P (dref);
-DEF_VEC_ALLOC_P (dref, heap);
/* Type of the chain of the references. */
struct chain *ch1, *ch2;
/* The references in the chain. */
- VEC(dref,heap) *refs;
+ vec<dref> refs;
/* The maximum distance of the reference in the chain from the root. */
unsigned length;
/* The variables used to copy the value throughout iterations. */
- VEC(tree,heap) *vars;
+ vec<tree> vars;
/* Initializers for the variables. */
- VEC(tree,heap) *inits;
+ vec<tree> inits;
/* True if there is a use of a variable with the maximal distance
that comes after the root in the loop. */
unsigned combined : 1;
} *chain_p;
-DEF_VEC_P (chain_p);
-DEF_VEC_ALLOC_P (chain_p, heap);
/* Describes the knowledge about the step of the memory references in
the component. */
struct component
{
/* The references in the component. */
- VEC(dref,heap) *refs;
+ vec<dref> refs;
/* What we know about the step of the references in the component. */
enum ref_step_type comp_step;
fprintf (file, "\n");
}
- if (chain->vars)
+ if (chain->vars.exists ())
{
fprintf (file, " vars");
- FOR_EACH_VEC_ELT (tree, chain->vars, i, var)
+ FOR_EACH_VEC_ELT (chain->vars, i, var)
{
fprintf (file, " ");
print_generic_expr (file, var, TDF_SLIM);
fprintf (file, "\n");
}
- if (chain->inits)
+ if (chain->inits.exists ())
{
fprintf (file, " inits");
- FOR_EACH_VEC_ELT (tree, chain->inits, i, var)
+ FOR_EACH_VEC_ELT (chain->inits, i, var)
{
fprintf (file, " ");
print_generic_expr (file, var, TDF_SLIM);
}
fprintf (file, " references:\n");
- FOR_EACH_VEC_ELT (dref, chain->refs, i, a)
+ FOR_EACH_VEC_ELT (chain->refs, i, a)
dump_dref (file, a);
fprintf (file, "\n");
/* Dumps CHAINS to FILE. */
-extern void dump_chains (FILE *, VEC (chain_p, heap) *);
+extern void dump_chains (FILE *, vec<chain_p>);
void
-dump_chains (FILE *file, VEC (chain_p, heap) *chains)
+dump_chains (FILE *file, vec<chain_p> chains)
{
chain_p chain;
unsigned i;
- FOR_EACH_VEC_ELT (chain_p, chains, i, chain)
+ FOR_EACH_VEC_ELT (chains, i, chain)
dump_chain (file, chain);
}
fprintf (file, "Component%s:\n",
comp->comp_step == RS_INVARIANT ? " (invariant)" : "");
- FOR_EACH_VEC_ELT (dref, comp->refs, i, a)
+ FOR_EACH_VEC_ELT (comp->refs, i, a)
dump_dref (file, a);
fprintf (file, "\n");
}
if (chain == NULL)
return;
- FOR_EACH_VEC_ELT (dref, chain->refs, i, ref)
+ FOR_EACH_VEC_ELT (chain->refs, i, ref)
free (ref);
- VEC_free (dref, heap, chain->refs);
- VEC_free (tree, heap, chain->vars);
- VEC_free (tree, heap, chain->inits);
+ chain->refs.release ();
+ chain->vars.release ();
+ chain->inits.release ();
free (chain);
}
/* Frees CHAINS. */
static void
-release_chains (VEC (chain_p, heap) *chains)
+release_chains (vec<chain_p> chains)
{
unsigned i;
chain_p chain;
- FOR_EACH_VEC_ELT (chain_p, chains, i, chain)
+ FOR_EACH_VEC_ELT (chains, i, chain)
release_chain (chain);
- VEC_free (chain_p, heap, chains);
+ chains.release ();
}
/* Frees a component COMP. */
static void
release_component (struct component *comp)
{
- VEC_free (dref, heap, comp->refs);
+ comp->refs.release ();
free (comp);
}
last_always_executed_block (struct loop *loop)
{
unsigned i;
- VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ vec<edge> exits = get_loop_exit_edges (loop);
edge ex;
basic_block last = loop->latch;
- FOR_EACH_VEC_ELT (edge, exits, i, ex)
+ FOR_EACH_VEC_ELT (exits, i, ex)
last = nearest_common_dominator (CDI_DOMINATORS, last, ex->src);
- VEC_free (edge, heap, exits);
+ exits.release ();
return last;
}
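last_always_executed_block also illustrates the ownership rule for functions that return a vec<T> by value, such as get_loop_exit_edges: the callee allocates, and the caller must release () the result exactly once. A sketch under the same assumption (count_exits is hypothetical):

  static unsigned
  count_exits (struct loop *loop)
  {
    vec<edge> exits = get_loop_exit_edges (loop);
    unsigned n = exits.length ();

    exits.release ();          /* caller owns the returned vector  */
    return n;
  }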
static struct component *
split_data_refs_to_components (struct loop *loop,
- VEC (data_reference_p, heap) *datarefs,
- VEC (ddr_p, heap) *depends)
+ vec<data_reference_p> datarefs,
+ vec<ddr_p> depends)
{
- unsigned i, n = VEC_length (data_reference_p, datarefs);
+ unsigned i, n = datarefs.length ();
unsigned ca, ia, ib, bad;
unsigned *comp_father = XNEWVEC (unsigned, n + 1);
unsigned *comp_size = XNEWVEC (unsigned, n + 1);
dref dataref;
basic_block last_always_executed = last_always_executed_block (loop);
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
{
if (!DR_REF (dr))
{
comp_father[n] = n;
comp_size[n] = 1;
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
{
enum ref_step_type dummy;
}
}
- FOR_EACH_VEC_ELT (ddr_p, depends, i, ddr)
+ FOR_EACH_VEC_ELT (depends, i, ddr)
{
double_int dummy_off;
comps = XCNEWVEC (struct component *, n);
bad = component_of (comp_father, n);
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
{
ia = (unsigned) (size_t) dr->aux;
ca = component_of (comp_father, ia);
if (!comp)
{
comp = XCNEW (struct component);
- comp->refs = VEC_alloc (dref, heap, comp_size[ca]);
+ comp->refs.create (comp_size[ca]);
comps[ca] = comp;
}
dataref->always_accessed
= dominated_by_p (CDI_DOMINATORS, last_always_executed,
gimple_bb (dataref->stmt));
- dataref->pos = VEC_length (dref, comp->refs);
- VEC_quick_push (dref, comp->refs, dataref);
+ dataref->pos = comp->refs.length ();
+ comp->refs.quick_push (dataref);
}
for (i = 0; i < n; i++)
basic_block ba, bp = loop->header;
bool ok, has_write = false;
- FOR_EACH_VEC_ELT (dref, comp->refs, i, a)
+ FOR_EACH_VEC_ELT (comp->refs, i, a)
{
ba = gimple_bb (a->stmt);
has_write = true;
}
- first = VEC_index (dref, comp->refs, 0);
+ first = comp->refs[0];
ok = suitable_reference_p (first->ref, &comp->comp_step);
gcc_assert (ok);
first->offset = double_int_zero;
- for (i = 1; VEC_iterate (dref, comp->refs, i, a); i++)
+ for (i = 1; comp->refs.iterate (i, &a); i++)
{
if (!determine_offset (first->ref, a->ref, &a->offset))
return false;
unsigned i;
*comp = act->next;
- FOR_EACH_VEC_ELT (dref, act->refs, i, ref)
+ FOR_EACH_VEC_ELT (act->refs, i, ref)
free (ref);
release_component (act);
}
static inline dref
get_chain_root (chain_p chain)
{
- return VEC_index (dref, chain->refs, 0);
+ return chain->refs[0];
}
/* Adds REF to the chain CHAIN. */
}
gcc_assert (dist.fits_uhwi ());
- VEC_safe_push (dref, heap, chain->refs, ref);
+ chain->refs.safe_push (ref);
ref->distance = dist.to_uhwi ();
chain->all_always_accessed = true;
- FOR_EACH_VEC_ELT (dref, comp->refs, i, ref)
+ FOR_EACH_VEC_ELT (comp->refs, i, ref)
{
- VEC_safe_push (dref, heap, chain->refs, ref);
+ chain->refs.safe_push (ref);
chain->all_always_accessed &= ref->always_accessed;
}
chain->type = DR_IS_READ (ref->ref) ? CT_LOAD : CT_STORE_LOAD;
- VEC_safe_push (dref, heap, chain->refs, ref);
+ chain->refs.safe_push (ref);
chain->all_always_accessed = ref->always_accessed;
ref->distance = 0;
static bool
nontrivial_chain_p (chain_p chain)
{
- return chain != NULL && VEC_length (dref, chain->refs) > 1;
+ return chain != NULL && chain->refs.length () > 1;
}
/* Returns the ssa name that contains the value of REF, or NULL_TREE if there
nw->distance = ref->distance + 1;
nw->always_accessed = 1;
- FOR_EACH_VEC_ELT (dref, chain->refs, i, aref)
+ FOR_EACH_VEC_ELT (chain->refs, i, aref)
if (aref->distance >= nw->distance)
break;
- VEC_safe_insert (dref, heap, chain->refs, i, nw);
+ chain->refs.safe_insert (i, nw);
if (nw->distance > chain->length)
{
dref ref, root = get_chain_root (chain);
gimple phi;
- FOR_EACH_VEC_ELT (dref, chain->refs, i, ref)
+ FOR_EACH_VEC_ELT (chain->refs, i, ref)
{
phi = find_looparound_phi (loop, ref, root);
if (!phi)
static void
determine_roots_comp (struct loop *loop,
struct component *comp,
- VEC (chain_p, heap) **chains)
+ vec<chain_p> *chains)
{
unsigned i;
dref a;
if (comp->comp_step == RS_INVARIANT)
{
chain = make_invariant_chain (comp);
- VEC_safe_push (chain_p, heap, *chains, chain);
+ chains->safe_push (chain);
return;
}
- VEC_qsort (dref, comp->refs, order_drefs);
+ comp->refs.qsort (order_drefs);
- FOR_EACH_VEC_ELT (dref, comp->refs, i, a)
+ FOR_EACH_VEC_ELT (comp->refs, i, a)
{
if (!chain || DR_IS_WRITE (a->ref)
|| double_int::from_uhwi (MAX_DISTANCE).ule (a->offset - last_ofs))
if (nontrivial_chain_p (chain))
{
add_looparound_copies (loop, chain);
- VEC_safe_push (chain_p, heap, *chains, chain);
+ chains->safe_push (chain);
}
else
release_chain (chain);
if (nontrivial_chain_p (chain))
{
add_looparound_copies (loop, chain);
- VEC_safe_push (chain_p, heap, *chains, chain);
+ chains->safe_push (chain);
}
else
release_chain (chain);
static void
determine_roots (struct loop *loop,
- struct component *comps, VEC (chain_p, heap) **chains)
+ struct component *comps, vec<chain_p> *chains)
{
struct component *comp;
return fold_build2 (chain->op, chain->rslt_type, e1, e2);
}
else
- return VEC_index (tree, chain->inits, index);
+ return chain->inits[index];
}
/* Returns a new temporary variable used for the I-th variable carrying
since this is a nonempty chain, reuse_first cannot be true. */
gcc_assert (n > 0 || !reuse_first);
- chain->vars = VEC_alloc (tree, heap, n + 1);
+ chain->vars.create (n + 1);
if (chain->type == CT_COMBINATION)
ref = gimple_assign_lhs (root->stmt);
for (i = 0; i < n + (reuse_first ? 0 : 1); i++)
{
var = predcom_tmp_var (ref, i, tmp_vars);
- VEC_quick_push (tree, chain->vars, var);
+ chain->vars.quick_push (var);
}
if (reuse_first)
- VEC_quick_push (tree, chain->vars, VEC_index (tree, chain->vars, 0));
+ chain->vars.quick_push (chain->vars[0]);
- FOR_EACH_VEC_ELT (tree, chain->vars, i, var)
- VEC_replace (tree, chain->vars, i, make_ssa_name (var, NULL));
+ FOR_EACH_VEC_ELT (chain->vars, i, var)
+ chain->vars[i] = make_ssa_name (var, NULL);
for (i = 0; i < n; i++)
{
- var = VEC_index (tree, chain->vars, i);
- next = VEC_index (tree, chain->vars, i + 1);
+ var = chain->vars[i];
+ next = chain->vars[i + 1];
init = get_init_expr (chain, i);
init = force_gimple_operand (init, &stmts, true, NULL_TREE);
initialize_root_vars (loop, chain, tmp_vars);
replace_ref_with (root->stmt,
- VEC_index (tree, chain->vars, chain->length),
+ chain->vars[chain->length],
true, in_lhs);
}
static void
initialize_root_vars_lm (struct loop *loop, dref root, bool written,
- VEC(tree, heap) **vars, VEC(tree, heap) *inits,
+ vec<tree> *vars, vec<tree> inits,
bitmap tmp_vars)
{
unsigned i;
/* Find the initializer for the variable, and check that it cannot
trap. */
- init = VEC_index (tree, inits, 0);
+ init = inits[0];
- *vars = VEC_alloc (tree, heap, written ? 2 : 1);
+ vars->create (written ? 2 : 1);
var = predcom_tmp_var (ref, 0, tmp_vars);
- VEC_quick_push (tree, *vars, var);
+ vars->quick_push (var);
if (written)
- VEC_quick_push (tree, *vars, VEC_index (tree, *vars, 0));
+ vars->quick_push ((*vars)[0]);
- FOR_EACH_VEC_ELT (tree, *vars, i, var)
- VEC_replace (tree, *vars, i, make_ssa_name (var, NULL));
+ FOR_EACH_VEC_ELT (*vars, i, var)
+ (*vars)[i] = make_ssa_name (var, NULL);
- var = VEC_index (tree, *vars, 0);
+ var = (*vars)[0];
init = force_gimple_operand (init, &stmts, written, NULL_TREE);
if (stmts)
if (written)
{
- next = VEC_index (tree, *vars, 1);
+ next = (*vars)[1];
phi = create_phi_node (var, loop->header);
add_phi_arg (phi, init, entry, UNKNOWN_LOCATION);
add_phi_arg (phi, next, latch, UNKNOWN_LOCATION);
static void
execute_load_motion (struct loop *loop, chain_p chain, bitmap tmp_vars)
{
- VEC (tree, heap) *vars;
+ vec<tree> vars;
dref a;
unsigned n_writes = 0, ridx, i;
tree var;
gcc_assert (chain->type == CT_INVARIANT);
gcc_assert (!chain->combined);
- FOR_EACH_VEC_ELT (dref, chain->refs, i, a)
+ FOR_EACH_VEC_ELT (chain->refs, i, a)
if (DR_IS_WRITE (a->ref))
n_writes++;
/* If there are no reads in the loop, there is nothing to do. */
- if (n_writes == VEC_length (dref, chain->refs))
+ if (n_writes == chain->refs.length ())
return;
initialize_root_vars_lm (loop, get_chain_root (chain), n_writes > 0,
&vars, chain->inits, tmp_vars);
ridx = 0;
- FOR_EACH_VEC_ELT (dref, chain->refs, i, a)
+ FOR_EACH_VEC_ELT (chain->refs, i, a)
{
bool is_read = DR_IS_READ (a->ref);
n_writes--;
if (n_writes)
{
- var = VEC_index (tree, vars, 0);
+ var = vars[0];
var = make_ssa_name (SSA_NAME_VAR (var), NULL);
- VEC_replace (tree, vars, 0, var);
+ vars[0] = var;
}
else
ridx = 1;
}
- replace_ref_with (a->stmt, VEC_index (tree, vars, ridx),
+ replace_ref_with (a->stmt, vars[ridx],
!is_read, !is_read);
}
- VEC_free (tree, heap, vars);
+ vars.release ();
}
/* Returns the single statement in which NAME is used, excepting
{
/* For combined chains, just remove the statements that are used to
compute the values of the expression (except for the root one). */
- for (i = 1; VEC_iterate (dref, chain->refs, i, a); i++)
+ for (i = 1; chain->refs.iterate (i, &a); i++)
remove_stmt (a->stmt);
}
else
and replace the uses of the original references by these
variables. */
initialize_root (loop, chain, tmp_vars);
- for (i = 1; VEC_iterate (dref, chain->refs, i, a); i++)
+ for (i = 1; chain->refs.iterate (i, &a); i++)
{
- var = VEC_index (tree, chain->vars, chain->length - a->distance);
+ var = chain->vars[chain->length - a->distance];
replace_ref_with (a->stmt, var, false, false);
}
}
optimized. */
static unsigned
-determine_unroll_factor (VEC (chain_p, heap) *chains)
+determine_unroll_factor (vec<chain_p> chains)
{
chain_p chain;
unsigned factor = 1, af, nfactor, i;
unsigned max = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
- FOR_EACH_VEC_ELT (chain_p, chains, i, chain)
+ FOR_EACH_VEC_ELT (chains, i, chain)
{
if (chain->type == CT_INVARIANT || chain->combined)
continue;
Uids of the newly created temporary variables are marked in TMP_VARS. */
static void
-execute_pred_commoning (struct loop *loop, VEC (chain_p, heap) *chains,
+execute_pred_commoning (struct loop *loop, vec<chain_p> chains,
bitmap tmp_vars)
{
chain_p chain;
unsigned i;
- FOR_EACH_VEC_ELT (chain_p, chains, i, chain)
+ FOR_EACH_VEC_ELT (chains, i, chain)
{
if (chain->type == CT_INVARIANT)
execute_load_motion (loop, chain, tmp_vars);
phi node, record the ssa name that is defined by it. */
static void
-replace_phis_by_defined_names (VEC (chain_p, heap) *chains)
+replace_phis_by_defined_names (vec<chain_p> chains)
{
chain_p chain;
dref a;
unsigned i, j;
- FOR_EACH_VEC_ELT (chain_p, chains, i, chain)
- FOR_EACH_VEC_ELT (dref, chain->refs, j, a)
+ FOR_EACH_VEC_ELT (chains, i, chain)
+ FOR_EACH_VEC_ELT (chain->refs, j, a)
{
if (gimple_code (a->stmt) == GIMPLE_PHI)
{
NULL, use it to set the stmt field. */
static void
-replace_names_by_phis (VEC (chain_p, heap) *chains)
+replace_names_by_phis (vec<chain_p> chains)
{
chain_p chain;
dref a;
unsigned i, j;
- FOR_EACH_VEC_ELT (chain_p, chains, i, chain)
- FOR_EACH_VEC_ELT (dref, chain->refs, j, a)
+ FOR_EACH_VEC_ELT (chains, i, chain)
+ FOR_EACH_VEC_ELT (chain->refs, j, a)
if (a->stmt == NULL)
{
a->stmt = SSA_NAME_DEF_STMT (a->name_defined_by_phi);
struct epcc_data
{
- VEC (chain_p, heap) *chains;
+ vec<chain_p> chains;
bitmap tmp_vars;
};
if (ch1->length != ch2->length)
return NULL;
- if (VEC_length (dref, ch1->refs) != VEC_length (dref, ch2->refs))
+ if (ch1->refs.length () != ch2->refs.length ())
return NULL;
- for (i = 0; (VEC_iterate (dref, ch1->refs, i, r1)
- && VEC_iterate (dref, ch2->refs, i, r2)); i++)
+ for (i = 0; (ch1->refs.iterate (i, &r1)
+ && ch2->refs.iterate (i, &r2)); i++)
{
if (r1->distance != r2->distance)
return NULL;
new_chain->rslt_type = rslt_type;
new_chain->length = ch1->length;
- for (i = 0; (VEC_iterate (dref, ch1->refs, i, r1)
- && VEC_iterate (dref, ch2->refs, i, r2)); i++)
+ for (i = 0; (ch1->refs.iterate (i, &r1)
+ && ch2->refs.iterate (i, &r2)); i++)
{
nw = XCNEW (struct dref_d);
nw->stmt = stmt_combining_refs (r1, r2);
nw->distance = r1->distance;
- VEC_safe_push (dref, heap, new_chain->refs, nw);
+ new_chain->refs.safe_push (nw);
}
new_chain->has_max_use_after = false;
root_stmt = get_chain_root (new_chain)->stmt;
- for (i = 1; VEC_iterate (dref, new_chain->refs, i, nw); i++)
+ for (i = 1; new_chain->refs.iterate (i, &nw); i++)
{
if (nw->distance == new_chain->length
&& !stmt_dominates_stmt_p (nw->stmt, root_stmt))
/* Try to combine the CHAINS. */
static void
-try_combine_chains (VEC (chain_p, heap) **chains)
+try_combine_chains (vec<chain_p> *chains)
{
unsigned i, j;
chain_p ch1, ch2, cch;
- VEC (chain_p, heap) *worklist = NULL;
+ vec<chain_p> worklist = vec<chain_p>();
- FOR_EACH_VEC_ELT (chain_p, *chains, i, ch1)
+ FOR_EACH_VEC_ELT (*chains, i, ch1)
if (chain_can_be_combined_p (ch1))
- VEC_safe_push (chain_p, heap, worklist, ch1);
+ worklist.safe_push (ch1);
- while (!VEC_empty (chain_p, worklist))
+ while (!worklist.is_empty ())
{
- ch1 = VEC_pop (chain_p, worklist);
+ ch1 = worklist.pop ();
if (!chain_can_be_combined_p (ch1))
continue;
- FOR_EACH_VEC_ELT (chain_p, *chains, j, ch2)
+ FOR_EACH_VEC_ELT (*chains, j, ch2)
{
if (!chain_can_be_combined_p (ch2))
continue;
cch = combine_chains (ch1, ch2);
if (cch)
{
- VEC_safe_push (chain_p, heap, worklist, cch);
- VEC_safe_push (chain_p, heap, *chains, cch);
+ worklist.safe_push (cch);
+ chains->safe_push (cch);
break;
}
}
}
- VEC_free (chain_p, heap, worklist);
+ worklist.release ();
}
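
The worklist handling in try_combine_chains shows the value semantics of the new heap vectors: the object itself is the handle, and vec<T> () denotes a valid empty vector. A sketch under those assumptions (process_pair is hypothetical):

   vec<chain_p> work = vec<chain_p> ();  /* Valid and empty; no storage yet.  */
   work.safe_push (ch1);                 /* First push allocates.  */
   while (!work.is_empty ())
     process_pair (work.pop ());
   work.release ();                      /* Frees the heap storage; the object
                                            is empty again afterwards.  */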
/* Prepare initializers for CHAIN in LOOP. Returns false if this is
/* Find the initializers for the variables, and check that they cannot
trap. */
- chain->inits = VEC_alloc (tree, heap, n);
+ chain->inits.create (n);
for (i = 0; i < n; i++)
- VEC_quick_push (tree, chain->inits, NULL_TREE);
+ chain->inits.quick_push (NULL_TREE);
/* If we have replaced some looparound phi nodes, use their initializers
instead of creating our own. */
- FOR_EACH_VEC_ELT (dref, chain->refs, i, laref)
+ FOR_EACH_VEC_ELT (chain->refs, i, laref)
{
if (gimple_code (laref->stmt) != GIMPLE_PHI)
continue;
gcc_assert (laref->distance > 0);
- VEC_replace (tree, chain->inits, n - laref->distance,
- PHI_ARG_DEF_FROM_EDGE (laref->stmt, entry));
+ chain->inits[n - laref->distance]
+ = PHI_ARG_DEF_FROM_EDGE (laref->stmt, entry);
}
for (i = 0; i < n; i++)
{
- if (VEC_index (tree, chain->inits, i) != NULL_TREE)
+ if (chain->inits[i] != NULL_TREE)
continue;
init = ref_at_iteration (loop, DR_REF (dr), (int) i - n);
if (stmts)
gsi_insert_seq_on_edge_immediate (entry, stmts);
- VEC_replace (tree, chain->inits, i, init);
+ chain->inits[i] = init;
}
return true;
be used because the initializers might trap. */
static void
-prepare_initializers (struct loop *loop, VEC (chain_p, heap) *chains)
+prepare_initializers (struct loop *loop, vec<chain_p> chains)
{
chain_p chain;
unsigned i;
- for (i = 0; i < VEC_length (chain_p, chains); )
+ for (i = 0; i < chains.length (); )
{
- chain = VEC_index (chain_p, chains, i);
+ chain = chains[i];
if (prepare_initializers_chain (loop, chain))
i++;
else
{
release_chain (chain);
- VEC_unordered_remove (chain_p, chains, i);
+ chains.unordered_remove (i);
}
}
}
static bool
tree_predictive_commoning_loop (struct loop *loop)
{
- VEC (loop_p, heap) *loop_nest;
- VEC (data_reference_p, heap) *datarefs;
- VEC (ddr_p, heap) *dependences;
+ vec<loop_p> loop_nest;
+ vec<data_reference_p> datarefs;
+ vec<ddr_p> dependences;
struct component *components;
- VEC (chain_p, heap) *chains = NULL;
+ vec<chain_p> chains = vec<chain_p>();
unsigned unroll_factor;
struct tree_niter_desc desc;
bool unroll = false;
/* Find the data references and split them into components according to their
dependence relations. */
- datarefs = VEC_alloc (data_reference_p, heap, 10);
- dependences = VEC_alloc (ddr_p, heap, 10);
- loop_nest = VEC_alloc (loop_p, heap, 3);
+ datarefs.create (10);
+ dependences.create (10);
+ loop_nest.create (3);
if (! compute_data_dependences_for_loop (loop, true, &loop_nest, &datarefs,
&dependences))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Cannot analyze data dependencies\n");
- VEC_free (loop_p, heap, loop_nest);
+ loop_nest.release ();
free_data_refs (datarefs);
free_dependence_relations (dependences);
return false;
dump_data_dependence_relations (dump_file, dependences);
components = split_data_refs_to_components (loop, datarefs, dependences);
- VEC_free (loop_p, heap, loop_nest);
+ loop_nest.release ();
free_dependence_relations (dependences);
if (!components)
{
determine_roots (loop, components, &chains);
release_components (components);
- if (!chains)
+ if (!chains.exists ())
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
newline_and_indent (buffer, spc + 2);
}
- if (VEC_length (tree, BLOCK_NONLOCALIZED_VARS (block)) > 0)
+ if (vec_safe_length (BLOCK_NONLOCALIZED_VARS (block)) > 0)
{
unsigned i;
- VEC(tree,gc) *nlv = BLOCK_NONLOCALIZED_VARS (block);
+ vec<tree, va_gc> *nlv = BLOCK_NONLOCALIZED_VARS (block);
pp_string (buffer, "NONLOCALIZED_VARS: ");
- FOR_EACH_VEC_ELT (tree, nlv, i, t)
+ FOR_EACH_VEC_ELT (*nlv, i, t)
{
dump_generic_node (buffer, t, 0, flags, false);
pp_string (buffer, " ");
dump_decl_name (buffer, val, flags);
else
dump_generic_node (buffer, val, spc, flags, false);
- if (ix != VEC_length (constructor_elt, CONSTRUCTOR_ELTS (node)) - 1)
+ if (ix != vec_safe_length (CONSTRUCTOR_ELTS (node)) - 1)
{
pp_character (buffer, ',');
pp_space (buffer);
static void
get_exit_conditions_rec (struct loop *loop,
- VEC(gimple,heap) **exit_conditions)
+ vec<gimple> *exit_conditions)
{
if (!loop)
return;
gimple loop_condition = get_loop_exit_condition (loop);
if (loop_condition)
- VEC_safe_push (gimple, heap, *exit_conditions, loop_condition);
+ exit_conditions->safe_push (loop_condition);
}
}
initializes the EXIT_CONDITIONS array. */
static void
-select_loops_exit_conditions (VEC(gimple,heap) **exit_conditions)
+select_loops_exit_conditions (vec<gimple> *exit_conditions)
{
struct loop *function_body = current_loops->tree_root;
from the EXIT_CONDITIONS array. */
static void
-number_of_iterations_for_all_loops (VEC(gimple,heap) **exit_conditions)
+number_of_iterations_for_all_loops (vec<gimple> *exit_conditions)
{
unsigned int i;
unsigned nb_chrec_dont_know_loops = 0;
unsigned nb_static_loops = 0;
gimple cond;
- FOR_EACH_VEC_ELT (gimple, *exit_conditions, i, cond)
+ FOR_EACH_VEC_ELT (*exit_conditions, i, cond)
{
tree res = number_of_latch_executions (loop_containing_stmt (cond));
if (chrec_contains_undetermined (res))
index. This allows the parallelization of the loop. */
static void
-analyze_scalar_evolution_for_all_loop_phi_nodes (VEC(gimple,heap) **exit_conditions)
+analyze_scalar_evolution_for_all_loop_phi_nodes (vec<gimple> *exit_conditions)
{
unsigned int i;
struct chrec_stats stats;
reset_chrecs_counters (&stats);
- FOR_EACH_VEC_ELT (gimple, *exit_conditions, i, cond)
+ FOR_EACH_VEC_ELT (*exit_conditions, i, cond)
{
struct loop *loop;
basic_block bb;
void
scev_analysis (void)
{
- VEC(gimple,heap) *exit_conditions;
+ vec<gimple> exit_conditions;
- exit_conditions = VEC_alloc (gimple, heap, 37);
+ exit_conditions.create (37);
select_loops_exit_conditions (&exit_conditions);
if (dump_file && (dump_flags & TDF_STATS))
analyze_scalar_evolution_for_all_loop_phi_nodes (&exit_conditions);
number_of_iterations_for_all_loops (&exit_conditions);
- VEC_free (gimple, heap, exit_conditions);
+ exit_conditions.release ();
}
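
The scev_analysis hunk above is the plain create/release lifecycle that replaces the VEC_alloc/VEC_free pair for function-local vectors; in sketch form (the size and names are illustrative only):

   vec<gimple> conds;
   conds.create (37);   /* Reserve space, as VEC_alloc (gimple, heap, 37) did.  */
   /* ... fill and consume conds ...  */
   conds.release ();    /* Free it, as VEC_free (gimple, heap, conds) did.  */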
/* Finalize the scalar evolution analysis. */
typedef struct access *access_p;
-DEF_VEC_P (access_p);
-DEF_VEC_ALLOC_P (access_p, heap);
/* Alloc pool for allocating access structures. */
static alloc_pool access_pool;
/* Alloc pool for allocating assign link structures. */
static alloc_pool link_pool;
-/* Base (tree) -> Vector (VEC(access_p,heap) *) map. */
+/* Base (tree) -> Vector (vec<access_p> *) map. */
static struct pointer_map_t *base_access_vec;
/* Set of candidates. */
/* Return a vector of pointers to accesses for the variable given in BASE or
NULL if there is none. */
-static VEC (access_p, heap) *
+static vec<access_p> *
get_base_access_vector (tree base)
{
void **slot;
if (!slot)
return NULL;
else
- return *(VEC (access_p, heap) **) slot;
+ return *(vec<access_p> **) slot;
}
/* Find an access with required OFFSET and SIZE in a subtree of accesses rooted
static struct access *
get_first_repr_for_decl (tree base)
{
- VEC (access_p, heap) *access_vec;
+ vec<access_p> *access_vec;
access_vec = get_base_access_vector (base);
if (!access_vec)
return NULL;
- return VEC_index (access_p, access_vec, 0);
+ return (*access_vec)[0];
}
/* Find an access representative for the variable BASE and given OFFSET and
sra_initialize (void)
{
candidate_bitmap = BITMAP_ALLOC (NULL);
- candidates = htab_create (VEC_length (tree, cfun->local_decls) / 2,
+ candidates = htab_create (vec_safe_length (cfun->local_decls) / 2,
uid_decl_map_hash, uid_decl_map_eq, NULL);
should_scalarize_away_bitmap = BITMAP_ALLOC (NULL);
cannot_scalarize_away_bitmap = BITMAP_ALLOC (NULL);
delete_base_accesses (const void *key ATTRIBUTE_UNUSED, void **value,
void *data ATTRIBUTE_UNUSED)
{
- VEC (access_p, heap) *access_vec;
- access_vec = (VEC (access_p, heap) *) *value;
- VEC_free (access_p, heap, access_vec);
-
+ vec<access_p> *access_vec = (vec<access_p> *) *value;
+ vec_free (access_vec);
return true;
}
static struct access *
create_access_1 (tree base, HOST_WIDE_INT offset, HOST_WIDE_INT size)
{
- VEC (access_p, heap) *vec;
+ vec<access_p> *v;
struct access *access;
void **slot;
slot = pointer_map_contains (base_access_vec, base);
if (slot)
- vec = (VEC (access_p, heap) *) *slot;
+ v = (vec<access_p> *) *slot;
else
- vec = VEC_alloc (access_p, heap, 32);
+ vec_alloc (v, 32);
- VEC_safe_push (access_p, heap, vec, access);
+ v->safe_push (access);
- *((struct VEC (access_p,heap) **)
- pointer_map_insert (base_access_vec, base)) = vec;
+ *((vec<access_p> **)
+ pointer_map_insert (base_access_vec, base)) = v;
return access;
}
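
Where a vector lives behind a pointer, as with the vec<access_p> values stored in the pointer map here, the patch switches to the vec_alloc/vec_free free functions instead of member calls on a value. A sketch, assuming vec.h (acc is a hypothetical access_p):

   vec<access_p> *v;
   vec_alloc (v, 32);         /* Replaces VEC_alloc (access_p, heap, 32).  */
   v->safe_push (acc);        /* Members are reached through the pointer.  */
   access_p first = (*v)[0];  /* operator[] needs an explicit dereference.  */
   vec_free (v);              /* Replaces VEC_free and clears V.  */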
{
int i, j, access_count;
struct access *res, **prev_acc_ptr = &res;
- VEC (access_p, heap) *access_vec;
+ vec<access_p> *access_vec;
bool first = true;
HOST_WIDE_INT low = -1, high = 0;
access_vec = get_base_access_vector (var);
if (!access_vec)
return NULL;
- access_count = VEC_length (access_p, access_vec);
+ access_count = access_vec->length ();
/* Sort by <OFFSET, SIZE>. */
- VEC_qsort (access_p, access_vec, compare_access_positions);
+ access_vec->qsort (compare_access_positions);
i = 0;
while (i < access_count)
{
- struct access *access = VEC_index (access_p, access_vec, i);
+ struct access *access = (*access_vec)[i];
bool grp_write = access->write;
bool grp_read = !access->write;
bool grp_scalar_write = access->write
j = i + 1;
while (j < access_count)
{
- struct access *ac2 = VEC_index (access_p, access_vec, j);
+ struct access *ac2 = (*access_vec)[j];
if (ac2->offset != access->offset || ac2->size != access->size)
break;
if (ac2->write)
prev_acc_ptr = &access->next_grp;
}
- gcc_assert (res == VEC_index (access_p, access_vec, 0));
+ gcc_assert (res == (*access_vec)[0]);
return res;
}
}
loc = gimple_location (*stmt);
- if (VEC_length (constructor_elt,
- CONSTRUCTOR_ELTS (gimple_assign_rhs1 (*stmt))) > 0)
+ if (vec_safe_length (CONSTRUCTOR_ELTS (gimple_assign_rhs1 (*stmt))) > 0)
{
/* I have never seen this code path trigger, but if it can happen the
following should handle it gracefully. */
parm;
parm = DECL_CHAIN (parm))
{
- VEC (access_p, heap) *access_vec;
+ vec<access_p> *access_vec;
struct access *access;
if (!bitmap_bit_p (candidate_bitmap, DECL_UID (parm)))
if (!access_vec)
continue;
- for (access = VEC_index (access_p, access_vec, 0);
+ for (access = (*access_vec)[0];
access;
access = access->next_grp)
generate_subtree_copies (access, parm, 0, 0, 0, &gsi, true, true,
current function. */
static void
-analyze_modified_params (VEC (access_p, heap) *representatives)
+analyze_modified_params (vec<access_p> representatives)
{
int i;
{
struct access *repr;
- for (repr = VEC_index (access_p, representatives, i);
+ for (repr = representatives[i];
repr;
repr = repr->next_grp)
{
static void
propagate_dereference_distances (void)
{
- VEC (basic_block, heap) *queue;
+ vec<basic_block> queue;
basic_block bb;
- queue = VEC_alloc (basic_block, heap, last_basic_block_for_function (cfun));
- VEC_quick_push (basic_block, queue, ENTRY_BLOCK_PTR);
+ queue.create (last_basic_block_for_function (cfun));
+ queue.quick_push (ENTRY_BLOCK_PTR);
FOR_EACH_BB (bb)
{
- VEC_quick_push (basic_block, queue, bb);
+ queue.quick_push (bb);
bb->aux = bb;
}
- while (!VEC_empty (basic_block, queue))
+ while (!queue.is_empty ())
{
edge_iterator ei;
edge e;
bool change = false;
int i;
- bb = VEC_pop (basic_block, queue);
+ bb = queue.pop ();
bb->aux = NULL;
if (bitmap_bit_p (final_bbs, bb->index))
continue;
e->src->aux = e->src;
- VEC_quick_push (basic_block, queue, e->src);
+ queue.quick_push (e->src);
}
}
- VEC_free (basic_block, heap, queue);
+ queue.release ();
}
/* Dump a dereferences TABLE with heading STR to file F. */
distances of each representative of a (fraction of a) parameter. */
static void
-analyze_caller_dereference_legality (VEC (access_p, heap) *representatives)
+analyze_caller_dereference_legality (vec<access_p> representatives)
{
int i;
for (i = 0; i < func_param_count; i++)
{
- struct access *repr = VEC_index (access_p, representatives, i);
+ struct access *repr = representatives[i];
int idx = ENTRY_BLOCK_PTR->index * func_param_count + i;
if (!repr || no_accesses_p (repr))
{
int i, access_count;
struct access *repr;
- VEC (access_p, heap) *access_vec;
+ vec<access_p> *access_vec;
access_vec = get_base_access_vector (parm);
gcc_assert (access_vec);
- repr = VEC_index (access_p, access_vec, 0);
+ repr = (*access_vec)[0];
if (repr->write)
return NULL;
repr->group_representative = repr;
- access_count = VEC_length (access_p, access_vec);
+ access_count = access_vec->length ();
for (i = 1; i < access_count; i++)
{
- struct access *access = VEC_index (access_p, access_vec, i);
+ struct access *access = (*access_vec)[i];
if (access->write)
return NULL;
access->group_representative = repr;
int i, j, access_count, group_count;
int agg_size, total_size = 0;
struct access *access, *res, **prev_acc_ptr = &res;
- VEC (access_p, heap) *access_vec;
+ vec<access_p> *access_vec;
access_vec = get_base_access_vector (parm);
if (!access_vec)
return &no_accesses_representant;
- access_count = VEC_length (access_p, access_vec);
+ access_count = access_vec->length ();
- VEC_qsort (access_p, access_vec, compare_access_positions);
+ access_vec->qsort (compare_access_positions);
i = 0;
total_size = 0;
{
bool modification;
tree a1_alias_type;
- access = VEC_index (access_p, access_vec, i);
+ access = (*access_vec)[i];
modification = access->write;
if (access_precludes_ipa_sra_p (access))
return NULL;
j = i + 1;
while (j < access_count)
{
- struct access *ac2 = VEC_index (access_p, access_vec, j);
+ struct access *ac2 = (*access_vec)[j];
if (ac2->offset != access->offset)
{
/* All or nothing law for parameters. */
IPA-SRA. Return result based on what representatives have been found. */
static enum ipa_splicing_result
-splice_all_param_accesses (VEC (access_p, heap) **representatives)
+splice_all_param_accesses (vec<access_p> &representatives)
{
enum ipa_splicing_result result = NO_GOOD_ACCESS;
tree parm;
struct access *repr;
- *representatives = VEC_alloc (access_p, heap, func_param_count);
+ representatives.create (func_param_count);
for (parm = DECL_ARGUMENTS (current_function_decl);
parm;
{
if (is_unused_scalar_param (parm))
{
- VEC_quick_push (access_p, *representatives,
- &no_accesses_representant);
+ representatives.quick_push (&no_accesses_representant);
if (result == NO_GOOD_ACCESS)
result = UNUSED_PARAMS;
}
&& bitmap_bit_p (candidate_bitmap, DECL_UID (parm)))
{
repr = unmodified_by_ref_scalar_representative (parm);
- VEC_quick_push (access_p, *representatives, repr);
+ representatives.quick_push (repr);
if (repr)
result = UNMODIF_BY_REF_ACCESSES;
}
{
bool ro_grp = false;
repr = splice_param_accesses (parm, &ro_grp);
- VEC_quick_push (access_p, *representatives, repr);
+ representatives.quick_push (repr);
if (repr && !no_accesses_p (repr))
{
result = UNUSED_PARAMS;
}
else
- VEC_quick_push (access_p, *representatives, NULL);
+ representatives.quick_push (NULL);
}
if (result == NO_GOOD_ACCESS)
{
- VEC_free (access_p, heap, *representatives);
- *representatives = NULL;
+ representatives.release ();
return NO_GOOD_ACCESS;
}
/* Return the index of BASE in PARMS. Abort if it is not found. */
static inline int
-get_param_index (tree base, VEC(tree, heap) *parms)
+get_param_index (tree base, vec<tree> parms)
{
int i, len;
- len = VEC_length (tree, parms);
+ len = parms.length ();
for (i = 0; i < len; i++)
- if (VEC_index (tree, parms, i) == base)
+ if (parms[i] == base)
return i;
gcc_unreachable ();
}
final number of adjustments. */
static ipa_parm_adjustment_vec
-turn_representatives_into_adjustments (VEC (access_p, heap) *representatives,
+turn_representatives_into_adjustments (vec<access_p> representatives,
int adjustments_count)
{
- VEC (tree, heap) *parms;
+ vec<tree> parms;
ipa_parm_adjustment_vec adjustments;
tree parm;
int i;
gcc_assert (adjustments_count > 0);
parms = ipa_get_vector_of_formal_parms (current_function_decl);
- adjustments = VEC_alloc (ipa_parm_adjustment_t, heap, adjustments_count);
+ adjustments.create (adjustments_count);
parm = DECL_ARGUMENTS (current_function_decl);
for (i = 0; i < func_param_count; i++, parm = DECL_CHAIN (parm))
{
- struct access *repr = VEC_index (access_p, representatives, i);
+ struct access *repr = representatives[i];
if (!repr || no_accesses_p (repr))
{
adj.copy_param = 1;
else
adj.remove_param = 1;
- VEC_quick_push (ipa_parm_adjustment_t, adjustments, adj);
+ adjustments.quick_push (adj);
}
else
{
adj.by_ref = (POINTER_TYPE_P (TREE_TYPE (repr->base))
&& (repr->grp_maybe_modified
|| repr->grp_not_necessarilly_dereferenced));
- VEC_quick_push (ipa_parm_adjustment_t, adjustments, adj);
+ adjustments.quick_push (adj);
}
}
}
- VEC_free (tree, heap, parms);
+ parms.release ();
return adjustments;
}
enum ipa_splicing_result repr_state;
bool proceed = false;
int i, adjustments_count = 0;
- VEC (access_p, heap) *representatives;
+ vec<access_p> representatives;
ipa_parm_adjustment_vec adjustments;
- repr_state = splice_all_param_accesses (&representatives);
+ repr_state = splice_all_param_accesses (representatives);
if (repr_state == NO_GOOD_ACCESS)
- return NULL;
+ return ipa_parm_adjustment_vec();
/* If there are any parameters passed by reference which are not modified
directly, we need to check whether they can be modified indirectly. */
for (i = 0; i < func_param_count; i++)
{
- struct access *repr = VEC_index (access_p, representatives, i);
+ struct access *repr = representatives[i];
if (repr && !no_accesses_p (repr))
{
adjustments_count++;
if (repr->grp_not_necessarilly_dereferenced
|| repr->grp_maybe_modified)
- VEC_replace (access_p, representatives, i, NULL);
+ representatives[i] = NULL;
else
{
proceed = true;
if (new_components == 0)
{
- VEC_replace (access_p, representatives, i, NULL);
+ representatives[i] = NULL;
adjustments_count++;
}
else
adjustments = turn_representatives_into_adjustments (representatives,
adjustments_count);
else
- adjustments = NULL;
+ adjustments = ipa_parm_adjustment_vec();
- VEC_free (access_p, heap, representatives);
+ representatives.release ();
return adjustments;
}
{
int i, len;
- len = VEC_length (ipa_parm_adjustment_t, adjustments);
+ len = adjustments.length ();
for (i = 0; i < len; i++)
{
struct ipa_parm_adjustment *adj;
- adj = &VEC_index (ipa_parm_adjustment_t, adjustments, i);
+ adj = &adjustments[i];
if (!adj->copy_param && adj->base == base)
return adj;
}
HOST_WIDE_INT offset, size, max_size;
tree base, src;
- len = VEC_length (ipa_parm_adjustment_t, adjustments);
+ len = adjustments.length ();
if (TREE_CODE (*expr) == BIT_FIELD_REF
|| TREE_CODE (*expr) == IMAGPART_EXPR
for (i = 0; i < len; i++)
{
- adj = &VEC_index (ipa_parm_adjustment_t, adjustments, i);
+ adj = &adjustments[i];
if (adj->base == base &&
(adj->offset == offset || adj->remove_param))
if (is_gimple_reg_type (TREE_TYPE (*lhs_p)))
*rhs_p = build_zero_cst (TREE_TYPE (*lhs_p));
else
- *rhs_p = build_constructor (TREE_TYPE (*lhs_p), 0);
+ *rhs_p = build_constructor (TREE_TYPE (*lhs_p),
+ NULL);
}
else
new_rhs = fold_build1_loc (gimple_location (stmt),
gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
gsip = &gsi;
}
- len = VEC_length (ipa_parm_adjustment_t, adjustments);
+ len = adjustments.length ();
for (i = 0; i < len; i++)
{
struct ipa_parm_adjustment *adj;
tree name, vexpr, copy = NULL_TREE;
use_operand_p use_p;
- adj = &VEC_index (ipa_parm_adjustment_t, adjustments, i);
+ adj = &adjustments[i];
if (adj->copy_param || !is_gimple_reg (adj->base))
continue;
name = ssa_default_def (cfun, adj->base);
convert_callers_for_node (struct cgraph_node *node,
void *data)
{
- ipa_parm_adjustment_vec adjustments = (ipa_parm_adjustment_vec)data;
+ ipa_parm_adjustment_vec *adjustments = (ipa_parm_adjustment_vec *) data;
bitmap recomputed_callers = BITMAP_ALLOC (NULL);
struct cgraph_edge *cs;
xstrdup (cgraph_node_name (cs->caller)),
xstrdup (cgraph_node_name (cs->callee)));
- ipa_modify_call_arguments (cs, cs->call_stmt, adjustments);
+ ipa_modify_call_arguments (cs, cs->call_stmt, *adjustments);
pop_cfun ();
}
basic_block this_block;
cgraph_for_node_and_aliases (node, convert_callers_for_node,
- adjustments, false);
+ &adjustments, false);
if (!encountered_recursive_call)
return;
{
struct cgraph_node *new_node;
bool cfg_changed;
- VEC (cgraph_edge_p, heap) * redirect_callers = collect_callers_of_node (node);
+ vec<cgraph_edge_p> redirect_callers = collect_callers_of_node (node);
rebuild_cgraph_edges ();
free_dominance_info (CDI_DOMINATORS);
pop_cfun ();
- new_node = cgraph_function_versioning (node, redirect_callers, NULL, NULL,
- false, NULL, NULL, "isra");
- VEC_free (cgraph_edge_p, heap, redirect_callers);
+ new_node = cgraph_function_versioning (node, redirect_callers,
+ NULL,
+ NULL, false, NULL, NULL, "isra");
+ redirect_callers.release ();
push_cfun (DECL_STRUCT_FUNCTION (new_node->symbol.decl));
ipa_modify_formal_parameters (current_function_decl, adjustments, "ISRA");
}
adjustments = analyze_all_param_acesses ();
- if (!adjustments)
+ if (!adjustments.exists ())
goto out;
if (dump_file)
ipa_dump_param_adjustments (dump_file, adjustments, current_function_decl);
ret = TODO_update_ssa | TODO_cleanup_cfg;
else
ret = TODO_update_ssa;
- VEC_free (ipa_parm_adjustment_t, heap, adjustments);
+ adjustments.release ();
statistics_counter_event (cfun, "Unused parameters deleted",
sra_stats.deleted_unused_parameters);
be filled in. */
} mem_addr_template;
-DEF_VEC_O (mem_addr_template);
-DEF_VEC_ALLOC_O (mem_addr_template, gc);
/* The templates. Each of the low five bits of the index corresponds to one
component of TARGET_MEM_REF being present, while the high bits identify
the address space. See TEMPL_IDX. */
-static GTY(()) VEC (mem_addr_template, gc) *mem_addr_template_list;
+static GTY(()) vec<mem_addr_template, va_gc> *mem_addr_template_list;
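
For garbage-collected vectors such as mem_addr_template_list, the handle is always a vec<T, va_gc> *, and the vec_safe_* wrappers used below tolerate a NULL handle, allocating on demand. A sketch, assuming vec.h and GGC:

   static GTY(()) vec<tree, va_gc> *v;   /* NULL means empty.  */
   unsigned n = vec_safe_length (v);     /* Returns 0 when V is NULL.  */
   vec_safe_grow_cleared (v, n + 1);     /* Allocates or grows in GC memory.  */
   tree t = (*v)[n];                     /* Plain element access once the
                                            handle is known to be non-NULL.  */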
#define TEMPL_IDX(AS, SYMBOL, BASE, INDEX, STEP, OFFSET) \
(((int) (AS) << 5) \
unsigned int templ_index
= TEMPL_IDX (as, addr->symbol, addr->base, addr->index, st, off);
- if (templ_index
- >= VEC_length (mem_addr_template, mem_addr_template_list))
- VEC_safe_grow_cleared (mem_addr_template, gc, mem_addr_template_list,
- templ_index + 1);
+ if (templ_index >= vec_safe_length (mem_addr_template_list))
+ vec_safe_grow_cleared (mem_addr_template_list, templ_index + 1);
/* Reuse the templates for addresses, so that we do not waste memory. */
- templ = &VEC_index (mem_addr_template, mem_addr_template_list,
- templ_index);
+ templ = &(*mem_addr_template_list)[templ_index];
if (!templ->ref)
{
sym = (addr->symbol ?
#include "params.h"
#include "vec.h"
#include "bitmap.h"
-#include "vecprim.h"
#include "pointer-set.h"
#include "alloc-pool.h"
#include "tree-ssa-alias.h"
FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
{
- clobber = build_constructor (TREE_TYPE (var), NULL);
+ clobber = build_constructor (TREE_TYPE (var),
+ NULL);
TREE_THIS_VOLATILE (clobber) = 1;
clobber_stmt = gimple_build_assign (var, clobber);
typedef struct ssa_conflicts_d
{
bitmap_obstack obstack; /* A place to allocate our bitmaps. */
- VEC(bitmap, heap)* conflicts;
+ vec<bitmap> conflicts;
} * ssa_conflicts_p;
/* Return an empty new conflict graph for SIZE elements. */
ptr = XNEW (struct ssa_conflicts_d);
bitmap_obstack_initialize (&ptr->obstack);
- ptr->conflicts = VEC_alloc (bitmap, heap, size);
- VEC_safe_grow_cleared (bitmap, heap, ptr->conflicts, size);
+ ptr->conflicts.create (size);
+ ptr->conflicts.safe_grow_cleared (size);
return ptr;
}
ssa_conflicts_delete (ssa_conflicts_p ptr)
{
bitmap_obstack_release (&ptr->obstack);
- VEC_free (bitmap, heap, ptr->conflicts);
+ ptr->conflicts.release ();
free (ptr);
}
static inline bool
ssa_conflicts_test_p (ssa_conflicts_p ptr, unsigned x, unsigned y)
{
- bitmap bx = VEC_index (bitmap, ptr->conflicts, x);
- bitmap by = VEC_index (bitmap, ptr->conflicts, y);
+ bitmap bx = ptr->conflicts[x];
+ bitmap by = ptr->conflicts[y];
gcc_checking_assert (x != y);
static inline void
ssa_conflicts_add_one (ssa_conflicts_p ptr, unsigned x, unsigned y)
{
- bitmap bx = VEC_index (bitmap, ptr->conflicts, x);
+ bitmap bx = ptr->conflicts[x];
/* If there are no conflicts yet, allocate the bitmap and set bit. */
if (! bx)
- bx = VEC_index (bitmap, ptr->conflicts, x) = BITMAP_ALLOC (&ptr->obstack);
+ bx = ptr->conflicts[x] = BITMAP_ALLOC (&ptr->obstack);
bitmap_set_bit (bx, y);
}
{
unsigned z;
bitmap_iterator bi;
- bitmap bx = VEC_index (bitmap, ptr->conflicts, x);
- bitmap by = VEC_index (bitmap, ptr->conflicts, y);
+ bitmap bx = ptr->conflicts[x];
+ bitmap by = ptr->conflicts[y];
gcc_checking_assert (x != y);
if (! by)
conflict. */
EXECUTE_IF_SET_IN_BITMAP (by, 0, z, bi)
{
- bitmap bz = VEC_index (bitmap, ptr->conflicts, z);
+ bitmap bz = ptr->conflicts[z];
if (bz)
bitmap_set_bit (bz, x);
}
/* If X has conflicts, add Y's to X. */
bitmap_ior_into (bx, by);
BITMAP_FREE (by);
- VEC_replace (bitmap, ptr->conflicts, y, NULL);
+ ptr->conflicts[y] = NULL;
}
else
{
/* If X has no conflicts, simply use Y's. */
- VEC_replace (bitmap, ptr->conflicts, x, by);
- VEC_replace (bitmap, ptr->conflicts, y, NULL);
+ ptr->conflicts[x] = by;
+ ptr->conflicts[y] = NULL;
}
}
fprintf (file, "\nConflict graph:\n");
- FOR_EACH_VEC_ELT (bitmap, ptr->conflicts, x, b)
+ FOR_EACH_VEC_ELT (ptr->conflicts, x, b)
if (b)
{
fprintf (file, "%d: ", x);
#define STMT_NECESSARY GF_PLF_1
-static VEC(gimple,heap) *worklist;
+static vec<gimple> worklist;
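
A detail worth noting in this hunk: the new vec deliberately has no constructors, so a zero-initialized static like worklist is already a valid empty vector. Sketched with a hypothetical pending list and mark() consumer:

   static vec<gimple> pending;   /* Zero-initialized: length () == 0.  */
   pending.safe_push (stmt);     /* The first push allocates storage.  */
   while (!pending.is_empty ())
     mark (pending.pop ());
   pending.release ();           /* Returns to the empty state.  */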
/* Vector indicating an SSA name has already been processed and marked
as necessary. */
gimple_set_plf (stmt, STMT_NECESSARY, true);
if (add_to_worklist)
- VEC_safe_push (gimple, heap, worklist, stmt);
+ worklist.safe_push (stmt);
if (bb_contains_live_stmts && !is_gimple_debug (stmt))
bitmap_set_bit (bb_contains_live_stmts, gimple_bb (stmt)->index);
}
gimple_set_plf (stmt, STMT_NECESSARY, true);
if (bb_contains_live_stmts)
bitmap_set_bit (bb_contains_live_stmts, gimple_bb (stmt)->index);
- VEC_safe_push (gimple, heap, worklist, stmt);
+ worklist.safe_push (stmt);
}
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\nProcessing worklist:\n");
- while (VEC_length (gimple, worklist) > 0)
+ while (worklist.length () > 0)
{
/* Take STMT from worklist. */
- stmt = VEC_pop (gimple, worklist);
+ stmt = worklist.pop ();
if (dump_file && (dump_flags & TDF_DETAILS))
{
gimple_stmt_iterator gsi, psi;
gimple stmt;
tree call;
- VEC (basic_block, heap) *h;
+ vec<basic_block> h;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\nEliminating unnecessary statements:\n");
gcc_assert (dom_info_available_p (CDI_DOMINATORS));
h = get_all_dominated_blocks (CDI_DOMINATORS, single_succ (ENTRY_BLOCK_PTR));
- while (VEC_length (basic_block, h))
+ while (h.length ())
{
- bb = VEC_pop (basic_block, h);
+ bb = h.pop ();
/* Remove dead statements. */
for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi = psi)
}
}
- VEC_free (basic_block, heap, h);
+ h.release ();
/* Since we don't track liveness of virtual PHI nodes, it is possible that we
rendered some PHI nodes unreachable while they are still in use.
{
h = get_all_dominated_blocks (CDI_DOMINATORS, bb);
- while (VEC_length (basic_block, h))
+ while (h.length ())
{
- bb = VEC_pop (basic_block, h);
+ bb = h.pop ();
prev_bb = bb->prev_bb;
/* Rearrangements to the CFG may have failed
to update the dominators tree, so that
delete_basic_block (bb);
}
- VEC_free (basic_block, heap, h);
+ h.release ();
}
}
}
processed = sbitmap_alloc (num_ssa_names + 1);
bitmap_clear (processed);
- worklist = VEC_alloc (gimple, heap, 64);
+ worklist.create (64);
cfg_altered = false;
}
sbitmap_free (processed);
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
}
/* Main routine to eliminate dead code.
tree value;
} cond_equivalence;
-DEF_VEC_O(cond_equivalence);
-DEF_VEC_ALLOC_O(cond_equivalence,heap);
/* Structure for recording edge equivalences as well as any pending
edge redirections during the dominator optimizer.
/* Traversing an edge may also indicate one or more particular conditions
are true or false. */
- VEC(cond_equivalence, heap) *cond_equivalences;
+ vec<cond_equivalence> cond_equivalences;
};
/* Hash table with expressions made available during the renaming process.
remove the expressions from the global hash table until we hit the
marker. */
typedef struct expr_hash_elt * expr_hash_elt_t;
-DEF_VEC_P(expr_hash_elt_t);
-DEF_VEC_ALLOC_P(expr_hash_elt_t,heap);
-static VEC(expr_hash_elt_t,heap) *avail_exprs_stack;
+static vec<expr_hash_elt_t> avail_exprs_stack;
/* Structure for entries in the expression hash table. */
A NULL entry is used to mark the end of pairs which need to be
restored during finalization of this block. */
-static VEC(tree,heap) *const_and_copies_stack;
+static vec<tree> const_and_copies_stack;
/* Track whether or not we have changed the control flow graph. */
static bool cfg_altered;
if (edge_info)
{
- if (edge_info->cond_equivalences)
- VEC_free (cond_equivalence, heap, edge_info->cond_equivalences);
+ edge_info->cond_equivalences.release ();
free (edge_info);
e->aux = NULL;
}
/* Create our hash tables. */
avail_exprs = htab_create (1024, real_avail_expr_hash, avail_expr_eq, free_expr_hash_elt);
- avail_exprs_stack = VEC_alloc (expr_hash_elt_t, heap, 20);
- const_and_copies_stack = VEC_alloc (tree, heap, 20);
+ avail_exprs_stack.create (20);
+ const_and_copies_stack.create (20);
need_eh_cleanup = BITMAP_ALLOC (NULL);
/* Setup callbacks for the generic dominator tree walker. */
/* Free asserted bitmaps and stacks. */
BITMAP_FREE (need_eh_cleanup);
- VEC_free (expr_hash_elt_t, heap, avail_exprs_stack);
- VEC_free (tree, heap, const_and_copies_stack);
+ avail_exprs_stack.release ();
+ const_and_copies_stack.release ();
/* Free the value-handle array. */
threadedge_finalize_values ();
- ssa_name_values = NULL;
+ ssa_name_values.release ();
return 0;
}
remove_local_expressions_from_table (void)
{
/* Remove all the expressions made available in this block. */
- while (VEC_length (expr_hash_elt_t, avail_exprs_stack) > 0)
+ while (avail_exprs_stack.length () > 0)
{
- expr_hash_elt_t victim = VEC_pop (expr_hash_elt_t, avail_exprs_stack);
+ expr_hash_elt_t victim = avail_exprs_stack.pop ();
void **slot;
if (victim == NULL)
static void
restore_vars_to_original_value (void)
{
- while (VEC_length (tree, const_and_copies_stack) > 0)
+ while (const_and_copies_stack.length () > 0)
{
tree prev_value, dest;
- dest = VEC_pop (tree, const_and_copies_stack);
+ dest = const_and_copies_stack.pop ();
if (dest == NULL)
break;
fprintf (dump_file, "\n");
}
- prev_value = VEC_pop (tree, const_and_copies_stack);
+ prev_value = const_and_copies_stack.pop ();
set_ssa_name_value (dest, prev_value);
}
}
if (lhs)
record_equality (lhs, rhs);
- for (i = 0; VEC_iterate (cond_equivalence,
- edge_info->cond_equivalences, i, eq); ++i)
+ for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
record_cond (eq);
}
}
print_expr_hash_elt (dump_file, element);
}
- VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element);
+ avail_exprs_stack.safe_push (element);
}
else
free_expr_hash_elt (element);
static void
build_and_record_new_cond (enum tree_code code,
tree op0, tree op1,
- VEC(cond_equivalence, heap) **p)
+ vec<cond_equivalence> *p)
{
cond_equivalence c;
struct hashable_expr *cond = &c.cond;
cond->ops.binary.opnd1 = op1;
c.value = boolean_true_node;
- VEC_safe_push (cond_equivalence, heap, *p, c);
+ p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
two slots. */
initialize_expr_from_cond (cond, &c.cond);
c.value = boolean_true_node;
- VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, c);
+ edge_info->cond_equivalences.safe_push (c);
/* It is possible for INVERTED to be the negation of a comparison,
and not a valid RHS or GIMPLE_COND condition. This happens because
obey the trichotomy law. */
initialize_expr_from_cond (inverted, &c.cond);
c.value = boolean_false_node;
- VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, c);
+ edge_info->cond_equivalences.safe_push (c);
}
/* A helper function for record_const_or_copy and record_equality.
fprintf (dump_file, "\n");
}
- VEC_reserve (tree, heap, const_and_copies_stack, 2);
- VEC_quick_push (tree, const_and_copies_stack, prev_x);
- VEC_quick_push (tree, const_and_copies_stack, x);
+ const_and_copies_stack.reserve (2);
+ const_and_copies_stack.quick_push (prev_x);
+ const_and_copies_stack.quick_push (x);
}
/* Return the loop depth of the basic block of the defining statement of X.
/* Push a marker on the stacks of local information so that we know how
far to unwind when we finalize this block. */
- VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack,
- (expr_hash_elt_t)NULL);
- VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
+ avail_exprs_stack.safe_push (NULL);
+ const_and_copies_stack.safe_push (NULL_TREE);
record_equivalences_from_incoming_edge (bb);
/* Create equivalences from redundant PHIs. PHIs are only truly
redundant when they exist in the same block, so push another
marker and unwind right afterwards. */
- VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack,
- (expr_hash_elt_t)NULL);
+ avail_exprs_stack.safe_push (NULL);
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
eliminate_redundant_computations (&gsi);
remove_local_expressions_from_table ();
{
/* Push a marker on the stack, which thread_across_edge expects
and will remove. */
- VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
+ const_and_copies_stack.safe_push (NULL_TREE);
dom_thread_across_edge (walk_data, single_succ_edge (bb));
}
else if ((last = last_stmt (bb))
/* Push a marker onto the available expression stack so that we
unwind any expressions related to the TRUE arm before processing
the false arm below. */
- VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack,
- (expr_hash_elt_t)NULL);
- VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
+ avail_exprs_stack.safe_push (NULL);
+ const_and_copies_stack.safe_push (NULL_TREE);
edge_info = (struct edge_info *) true_edge->aux;
/* If we have 0 = COND or 1 = COND equivalences, record them
into our expression hash tables. */
- for (i = 0; VEC_iterate (cond_equivalence,
- edge_info->cond_equivalences, i, eq); ++i)
+ for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
record_cond (eq);
}
struct edge_info *edge_info;
unsigned int i;
- VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
+ const_and_copies_stack.safe_push (NULL_TREE);
edge_info = (struct edge_info *) false_edge->aux;
/* If we have info associated with this edge, record it into
/* If we have 0 = COND or 1 = COND equivalences, record them
into our expression hash tables. */
- for (i = 0; VEC_iterate (cond_equivalence,
- edge_info->cond_equivalences, i, eq); ++i)
+ for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
record_cond (eq);
}
print_expr_hash_elt (dump_file, element2);
}
- VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element2);
+ avail_exprs_stack.safe_push (element2);
return NULL_TREE;
}
else
simplify_gimple_switch_label_vec (gimple stmt, tree index_type)
{
unsigned int branch_num = gimple_switch_num_labels (stmt);
- VEC(tree, heap) *labels = VEC_alloc (tree, heap, branch_num);
+ vec<tree> labels;
+ labels.create (branch_num);
unsigned int i, len;
/* Collect the existing case labels in a vector, and preprocess it as if
we are gimplifying a GENERIC SWITCH_EXPR. */
for (i = 1; i < branch_num; i++)
- VEC_quick_push (tree, labels, gimple_switch_label (stmt, i));
+ labels.quick_push (gimple_switch_label (stmt, i));
preprocess_case_label_vec_for_gimple (labels, index_type, NULL);
/* If any labels were removed, replace the existing case labels
Note that the type updates were done in-place on the case labels,
so we only have to replace the case labels in the GIMPLE_SWITCH
if the number of labels changed. */
- len = VEC_length (tree, labels);
+ len = labels.length ();
if (len < branch_num - 1)
{
bitmap target_blocks;
label = CASE_LABEL (gimple_switch_default_label (stmt));
elt = build_case_label (build_int_cst (index_type, 0), NULL, label);
- VEC_quick_push (tree, labels, elt);
+ labels.quick_push (elt);
len = 1;
}
- for (i = 0; i < VEC_length (tree, labels); i++)
- gimple_switch_set_label (stmt, i + 1, VEC_index (tree, labels, i));
+ for (i = 0; i < labels.length (); i++)
+ gimple_switch_set_label (stmt, i + 1, labels[i]);
for (i++ ; i < branch_num; i++)
gimple_switch_set_label (stmt, i, NULL_TREE);
gimple_switch_set_num_labels (stmt, len + 1);
BITMAP_FREE (target_blocks);
}
- VEC_free (tree, heap, labels);
+ labels.release ();
}
/* STMT is a SWITCH_EXPR for which we attempt to find equivalent forms of
sel = XALLOCAVEC (unsigned char, nelts);
orig = NULL;
maybe_ident = true;
- FOR_EACH_VEC_ELT (constructor_elt, CONSTRUCTOR_ELTS (op), i, elt)
+ FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (op), i, elt)
{
tree ref, op1;
cfun->has_local_explicit_reg_vars = false;
/* Remove unmarked local and global vars from local_decls. */
- num = VEC_length (tree, cfun->local_decls);
+ num = vec_safe_length (cfun->local_decls);
for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
{
- var = VEC_index (tree, cfun->local_decls, srcidx);
+ var = (*cfun->local_decls)[srcidx];
if (TREE_CODE (var) == VAR_DECL)
{
if (!is_used_p (var))
cfun->has_local_explicit_reg_vars = true;
if (srcidx != dstidx)
- VEC_replace (tree, cfun->local_decls, dstidx, var);
+ (*cfun->local_decls)[dstidx] = var;
dstidx++;
}
if (dstidx != num)
- VEC_truncate (tree, cfun->local_decls, dstidx);
+ cfun->local_decls->truncate (dstidx);
remove_unused_scope_block_p (DECL_INITIAL (current_function_decl));
clear_unused_block_pointer ();
#define _TREE_SSA_LIVE_H 1
#include "partition.h"
-#include "vecprim.h"
-
-
/* Used to create the variable mapping when we go out of SSA form.
gimple stmt; /* The statement in that it occurs. */
} *mem_ref_loc_p;
-DEF_VEC_P(mem_ref_loc_p);
-DEF_VEC_ALLOC_P(mem_ref_loc_p, heap);
/* The list of memory reference locations in a loop. */
typedef struct mem_ref_locs
{
- VEC (mem_ref_loc_p, heap) *locs;
+ vec<mem_ref_loc_p> locs;
} *mem_ref_locs_p;
-DEF_VEC_P(mem_ref_locs_p);
-DEF_VEC_ALLOC_P(mem_ref_locs_p, heap);
/* Description of a memory reference. */
hashval_t hash; /* Its hash value. */
bitmap stored; /* The set of loops in which this memory location
is stored to. */
- VEC (mem_ref_locs_p, heap) *accesses_in_loop;
+ vec<mem_ref_locs_p> accesses_in_loop;
/* The locations of the accesses. Vector
indexed by the loop number. */
bitmap dep_ref; /* The complement of INDEP_REF. */
} *mem_ref_p;
-DEF_VEC_P(mem_ref_p);
-DEF_VEC_ALLOC_P(mem_ref_p, heap);
-DEF_VEC_P(bitmap);
-DEF_VEC_ALLOC_P(bitmap, heap);
-DEF_VEC_P(htab_t);
-DEF_VEC_ALLOC_P(htab_t, heap);
/* Description of memory accesses in loops. */
htab_t refs;
/* The list of memory references. */
- VEC (mem_ref_p, heap) *refs_list;
+ vec<mem_ref_p> refs_list;
/* The set of memory references accessed in each loop. */
- VEC (bitmap, heap) *refs_in_loop;
+ vec<bitmap> refs_in_loop;
/* The set of memory references accessed in each loop, including
subloops. */
- VEC (bitmap, heap) *all_refs_in_loop;
+ vec<bitmap> all_refs_in_loop;
/* The set of memory references stored in each loop, including
subloops. */
- VEC (bitmap, heap) *all_refs_stored_in_loop;
+ vec<bitmap> all_refs_stored_in_loop;
/* Cache for expanding memory addresses. */
struct pointer_map_t *ttae_cache;
if (!accs)
return;
- FOR_EACH_VEC_ELT (mem_ref_loc_p, accs->locs, i, loc)
+ FOR_EACH_VEC_ELT (accs->locs, i, loc)
free (loc);
- VEC_free (mem_ref_loc_p, heap, accs->locs);
+ accs->locs.release ();
free (accs);
}
unsigned i;
mem_ref_locs_p accs;
- FOR_EACH_VEC_ELT (mem_ref_locs_p, mem->accesses_in_loop, i, accs)
+ FOR_EACH_VEC_ELT (mem->accesses_in_loop, i, accs)
free_mem_ref_locs (accs);
- VEC_free (mem_ref_locs_p, heap, mem->accesses_in_loop);
+ mem->accesses_in_loop.release ();
free (mem);
}
ref->dep_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
ref->indep_ref = BITMAP_ALLOC (&lim_bitmap_obstack);
ref->dep_ref = BITMAP_ALLOC (&lim_bitmap_obstack);
- ref->accesses_in_loop = NULL;
+ ref->accesses_in_loop.create (0);
return ref;
}
mem_ref_locs_alloc (void)
{
mem_ref_locs_p accs = XNEW (struct mem_ref_locs);
- accs->locs = NULL;
+ accs->locs.create (0);
return accs;
}
{
mem_ref_loc_p aref = XNEW (struct mem_ref_loc);
mem_ref_locs_p accs;
- bitmap ril = VEC_index (bitmap, memory_accesses.refs_in_loop, loop->num);
+ bitmap ril = memory_accesses.refs_in_loop[loop->num];
- if (VEC_length (mem_ref_locs_p, ref->accesses_in_loop)
+ if (ref->accesses_in_loop.length ()
<= (unsigned) loop->num)
- VEC_safe_grow_cleared (mem_ref_locs_p, heap, ref->accesses_in_loop,
- loop->num + 1);
- accs = VEC_index (mem_ref_locs_p, ref->accesses_in_loop, loop->num);
+ ref->accesses_in_loop.safe_grow_cleared (loop->num + 1);
+ accs = ref->accesses_in_loop[loop->num];
if (!accs)
{
accs = mem_ref_locs_alloc ();
- VEC_replace (mem_ref_locs_p, ref->accesses_in_loop, loop->num, accs);
+ ref->accesses_in_loop[loop->num] = accs;
}
aref->stmt = stmt;
aref->ref = loc;
- VEC_safe_push (mem_ref_loc_p, heap, accs->locs, aref);
+ accs->locs.safe_push (aref);
bitmap_set_bit (ril, ref->id);
}
mem = simple_mem_ref_in_stmt (stmt, &is_stored);
if (!mem)
{
- id = VEC_length (mem_ref_p, memory_accesses.refs_list);
+ id = memory_accesses.refs_list.length ();
ref = mem_ref_alloc (error_mark_node, 0, id);
- VEC_safe_push (mem_ref_p, heap, memory_accesses.refs_list, ref);
+ memory_accesses.refs_list.safe_push (ref);
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
}
else
{
- id = VEC_length (mem_ref_p, memory_accesses.refs_list);
+ id = memory_accesses.refs_list.length ();
ref = mem_ref_alloc (*mem, hash, id);
- VEC_safe_push (mem_ref_p, heap, memory_accesses.refs_list, ref);
+ memory_accesses.refs_list.safe_push (ref);
*slot = ref;
if (dump_file && (dump_flags & TDF_DETAILS))
the loop hierarchy. */
FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
{
- lrefs = VEC_index (bitmap, memory_accesses.refs_in_loop, loop->num);
- alrefs = VEC_index (bitmap, memory_accesses.all_refs_in_loop, loop->num);
+ lrefs = memory_accesses.refs_in_loop[loop->num];
+ alrefs = memory_accesses.all_refs_in_loop[loop->num];
bitmap_ior_into (alrefs, lrefs);
if (loop_outer (loop) == current_loops->tree_root)
continue;
- alrefso = VEC_index (bitmap, memory_accesses.all_refs_in_loop,
- loop_outer (loop)->num);
+ alrefso = memory_accesses.all_refs_in_loop[loop_outer (loop)->num];
bitmap_ior_into (alrefso, alrefs);
}
}
static void
create_vop_ref_mapping_loop (struct loop *loop)
{
- bitmap refs = VEC_index (bitmap, memory_accesses.refs_in_loop, loop->num);
+ bitmap refs = memory_accesses.refs_in_loop[loop->num];
struct loop *sloop;
bitmap_iterator bi;
unsigned i;
EXECUTE_IF_SET_IN_BITMAP (refs, 0, i, bi)
{
- ref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
+ ref = memory_accesses.refs_list[i];
for (sloop = loop; sloop != current_loops->tree_root;
sloop = loop_outer (sloop))
if (bitmap_bit_p (ref->stored, loop->num))
{
bitmap refs_stored
- = VEC_index (bitmap, memory_accesses.all_refs_stored_in_loop,
- sloop->num);
+ = memory_accesses.all_refs_stored_in_loop[sloop->num];
bitmap_set_bit (refs_stored, ref->id);
}
}
bitmap empty;
memory_accesses.refs = htab_create (100, memref_hash, memref_eq, NULL);
- memory_accesses.refs_list = NULL;
- memory_accesses.refs_in_loop = VEC_alloc (bitmap, heap,
- number_of_loops ());
- memory_accesses.all_refs_in_loop = VEC_alloc (bitmap, heap,
- number_of_loops ());
- memory_accesses.all_refs_stored_in_loop = VEC_alloc (bitmap, heap,
- number_of_loops ());
+ memory_accesses.refs_list.create (0);
+ memory_accesses.refs_in_loop.create (number_of_loops ());
+ memory_accesses.all_refs_in_loop.create (number_of_loops ());
+ memory_accesses.all_refs_stored_in_loop.create (number_of_loops ());
for (i = 0; i < number_of_loops (); i++)
{
empty = BITMAP_ALLOC (&lim_bitmap_obstack);
- VEC_quick_push (bitmap, memory_accesses.refs_in_loop, empty);
+ memory_accesses.refs_in_loop.quick_push (empty);
empty = BITMAP_ALLOC (&lim_bitmap_obstack);
- VEC_quick_push (bitmap, memory_accesses.all_refs_in_loop, empty);
+ memory_accesses.all_refs_in_loop.quick_push (empty);
empty = BITMAP_ALLOC (&lim_bitmap_obstack);
- VEC_quick_push (bitmap, memory_accesses.all_refs_stored_in_loop, empty);
+ memory_accesses.all_refs_stored_in_loop.quick_push (empty);
}
memory_accesses.ttae_cache = NULL;
static void
get_all_locs_in_loop (struct loop *loop, mem_ref_p ref,
- VEC (mem_ref_loc_p, heap) **locs)
+ vec<mem_ref_loc_p> *locs)
{
mem_ref_locs_p accs;
unsigned i;
mem_ref_loc_p loc;
- bitmap refs = VEC_index (bitmap, memory_accesses.all_refs_in_loop,
- loop->num);
+ bitmap refs = memory_accesses.all_refs_in_loop[loop->num];
struct loop *subloop;
if (!bitmap_bit_p (refs, ref->id))
return;
- if (VEC_length (mem_ref_locs_p, ref->accesses_in_loop)
+ if (ref->accesses_in_loop.length ()
> (unsigned) loop->num)
{
- accs = VEC_index (mem_ref_locs_p, ref->accesses_in_loop, loop->num);
+ accs = ref->accesses_in_loop[loop->num];
if (accs)
{
- FOR_EACH_VEC_ELT (mem_ref_loc_p, accs->locs, i, loc)
- VEC_safe_push (mem_ref_loc_p, heap, *locs, loc);
+ FOR_EACH_VEC_ELT (accs->locs, i, loc)
+ locs->safe_push (loc);
}
}
{
unsigned i;
mem_ref_loc_p loc;
- VEC (mem_ref_loc_p, heap) *locs = NULL;
+ vec<mem_ref_loc_p> locs = vec<mem_ref_loc_p>();
get_all_locs_in_loop (loop, ref, &locs);
- FOR_EACH_VEC_ELT (mem_ref_loc_p, locs, i, loc)
+ FOR_EACH_VEC_ELT (locs, i, loc)
rewrite_mem_ref_loc (loc, tmp_var);
- VEC_free (mem_ref_loc_p, heap, locs);
+ locs.release ();
}
/* The name and the length of the currently generated variable
unsigned i;
mem_ref_loc_p loc;
tree flag;
- VEC (mem_ref_loc_p, heap) *locs = NULL;
+ vec<mem_ref_loc_p> locs = vec<mem_ref_loc_p>();
char *str = get_lsm_tmp_name (ref->mem, ~0);
lsm_tmp_name_add ("_flag");
flag = create_tmp_reg (boolean_type_node, str);
get_all_locs_in_loop (loop, ref, &locs);
- FOR_EACH_VEC_ELT (mem_ref_loc_p, locs, i, loc)
+ FOR_EACH_VEC_ELT (locs, i, loc)
{
gimple_stmt_iterator gsi;
gimple stmt;
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
}
}
- VEC_free (mem_ref_loc_p, heap, locs);
+ locs.release ();
return flag;
}
to the reference from the temporary variable are emitted to exits. */
static void
-execute_sm (struct loop *loop, VEC (edge, heap) *exits, mem_ref_p ref)
+execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
{
tree tmp_var, store_flag;
unsigned i;
}
/* Sink the store to every exit from the loop. */
- FOR_EACH_VEC_ELT (edge, exits, i, ex)
+ FOR_EACH_VEC_ELT (exits, i, ex)
if (!multi_threaded_model_p)
{
gimple store;
static void
hoist_memory_references (struct loop *loop, bitmap mem_refs,
- VEC (edge, heap) *exits)
+ vec<edge> exits)
{
mem_ref_p ref;
unsigned i;
EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
{
- ref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
+ ref = memory_accesses.refs_list[i];
execute_sm (loop, exits, ref);
}
}
static bool
ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
{
- VEC (mem_ref_loc_p, heap) *locs = NULL;
+ vec<mem_ref_loc_p> locs = vec<mem_ref_loc_p>();
unsigned i;
mem_ref_loc_p loc;
bool ret = false;
base = TREE_OPERAND (base, 0);
get_all_locs_in_loop (loop, ref, &locs);
- FOR_EACH_VEC_ELT (mem_ref_loc_p, locs, i, loc)
+ FOR_EACH_VEC_ELT (locs, i, loc)
{
if (!get_lim_data (loc->stmt))
continue;
break;
}
}
- VEC_free (mem_ref_loc_p, heap, locs);
+ locs.release ();
return ret;
}
mem_ref_p aref;
if (stored)
- refs_to_check = VEC_index (bitmap,
- memory_accesses.all_refs_in_loop, loop->num);
+ refs_to_check = memory_accesses.all_refs_in_loop[loop->num];
else
- refs_to_check = VEC_index (bitmap,
- memory_accesses.all_refs_stored_in_loop,
- loop->num);
+ refs_to_check = memory_accesses.all_refs_stored_in_loop[loop->num];
EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
{
- aref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
+ aref = memory_accesses.refs_list[i];
if (!MEM_ANALYZABLE (aref)
|| !refs_independent_p (ref, aref))
{
static void
find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
{
- bitmap refs = VEC_index (bitmap, memory_accesses.all_refs_in_loop,
- loop->num);
+ bitmap refs = memory_accesses.all_refs_in_loop[loop->num];
unsigned i;
bitmap_iterator bi;
mem_ref_p ref;
EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
{
- ref = VEC_index (mem_ref_p, memory_accesses.refs_list, i);
+ ref = memory_accesses.refs_list[i];
if (can_sm_ref_p (loop, ref))
bitmap_set_bit (refs_to_sm, i);
}
static bool
loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
- VEC (edge, heap) *exits)
+ vec<edge> exits)
{
unsigned i;
edge ex;
- FOR_EACH_VEC_ELT (edge, exits, i, ex)
+ FOR_EACH_VEC_ELT (exits, i, ex)
if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
return false;
static void
store_motion_loop (struct loop *loop, bitmap sm_executed)
{
- VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ vec<edge> exits = get_loop_exit_edges (loop);
struct loop *subloop;
bitmap sm_in_loop = BITMAP_ALLOC (NULL);
find_refs_for_sm (loop, sm_executed, sm_in_loop);
hoist_memory_references (loop, sm_in_loop, exits);
}
- VEC_free (edge, heap, exits);
+ exits.release ();
bitmap_ior_into (sm_executed, sm_in_loop);
for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
htab_delete (memory_accesses.refs);
- FOR_EACH_VEC_ELT (mem_ref_p, memory_accesses.refs_list, i, ref)
+ FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
memref_free (ref);
- VEC_free (mem_ref_p, heap, memory_accesses.refs_list);
+ memory_accesses.refs_list.release ();
- VEC_free (bitmap, heap, memory_accesses.refs_in_loop);
- VEC_free (bitmap, heap, memory_accesses.all_refs_in_loop);
- VEC_free (bitmap, heap, memory_accesses.all_refs_stored_in_loop);
+ memory_accesses.refs_in_loop.release ();
+ memory_accesses.all_refs_in_loop.release ();
+ memory_accesses.all_refs_stored_in_loop.release ();
if (memory_accesses.ttae_cache)
free_affine_expand_cache (&memory_accesses.ttae_cache);
gimple_stmt_iterator gsi;
unsigned int i;
bool after_exit;
- VEC (basic_block, heap) *path = get_loop_hot_path (loop);
+ vec<basic_block> path = get_loop_hot_path (loop);
size->overall = 0;
size->eliminated_by_peeling = 0;
}
}
}
- while (VEC_length (basic_block, path))
+ while (path.length ())
{
- basic_block bb = VEC_pop (basic_block, path);
+ basic_block bb = path.pop ();
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
size->num_branches_on_hot_path++;
}
}
- VEC_free (basic_block, heap, path);
+ path.release ();
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
size->eliminated_by_peeling, size->last_iteration,
edge
loop_edge_to_cancel (struct loop *loop)
{
- VEC (edge, heap) *exits;
+ vec<edge> exits;
unsigned i;
edge edge_to_cancel;
gimple_stmt_iterator gsi;
exits = get_loop_exit_edges (loop);
- FOR_EACH_VEC_ELT (edge, exits, i, edge_to_cancel)
+ FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
{
/* Find the other edge than the loop exit
leaving the conditional. */
if (edge_to_cancel->dest != loop->latch)
continue;
- VEC_free (edge, heap, exits);
+ exits.release ();
/* Verify that the code in loop latch does nothing that may end program
execution without really reaching the exit. This may include
return NULL;
return edge_to_cancel;
}
- VEC_free (edge, heap, exits);
+ exits.release ();
return NULL;
}
}
/* Stores loops that will be unlooped after we process whole loop tree. */
-static VEC(loop_p, heap) *loops_to_unloop;
-static VEC(int, heap) *loops_to_unloop_nunroll;
+static vec<loop_p> loops_to_unloop;
+static vec<int> loops_to_unloop_nunroll;
/* Cancel all fully unrolled loops by putting __builtin_unreachable
on the latch edge.
unloop_loops (bitmap loop_closed_ssa_invalidated,
bool *irred_invalidated)
{
- while (VEC_length (loop_p, loops_to_unloop))
+ while (loops_to_unloop.length ())
{
- struct loop *loop = VEC_pop (loop_p, loops_to_unloop);
- int n_unroll = VEC_pop (int, loops_to_unloop_nunroll);
+ struct loop *loop = loops_to_unloop.pop ();
+ int n_unroll = loops_to_unloop_nunroll.pop ();
basic_block latch = loop->latch;
edge latch_edge = loop_latch_edge (loop);
int flags = latch_edge->flags;
gsi = gsi_start_bb (latch_edge->dest);
gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
}
- VEC_free (loop_p, heap, loops_to_unloop);
- loops_to_unloop = NULL;
- VEC_free (int, heap, loops_to_unloop_nunroll);
- loops_to_unloop_nunroll = NULL;
+ loops_to_unloop.release ();
+ loops_to_unloop_nunroll.release ();
}
/* Tries to unroll LOOP completely, i.e. NITER times.
sbitmap wont_exit;
edge e;
unsigned i;
- VEC (edge, heap) *to_remove = NULL;
+ vec<edge> to_remove = vec<edge>();
if (ul == UL_SINGLE_ITER)
return false;
return false;
}
- FOR_EACH_VEC_ELT (edge, to_remove, i, e)
+ FOR_EACH_VEC_ELT (to_remove, i, e)
{
bool ok = remove_path (e);
gcc_assert (ok);
}
- VEC_free (edge, heap, to_remove);
+ to_remove.release ();
free (wont_exit);
free_original_copy_tables ();
}
}
/* Store the loop for later unlooping and exit removal. */
- VEC_safe_push (loop_p, heap, loops_to_unloop, loop);
- VEC_safe_push (int, heap, loops_to_unloop_nunroll, n_unroll);
+ loops_to_unloop.safe_push (loop);
+ loops_to_unloop_nunroll.safe_push (n_unroll);
if (dump_file && (dump_flags & TDF_DETAILS))
{
unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
- VEC(loop_p,stack) *father_stack = VEC_alloc (loop_p, stack, 16);
+ vec<loop_p, va_stack> father_stack;
loop_iterator li;
struct loop *loop;
bool changed;
int iteration = 0;
bool irred_invalidated = false;
+ vec_stack_alloc (loop_p, father_stack, 16);
do
{
changed = false;
iteration is complete and the IR eventually cleaned up. */
if (loop_outer (loop_father) && !loop_father->aux)
{
- VEC_safe_push (loop_p, stack, father_stack, loop_father);
+ father_stack.safe_push (loop_father);
loop_father->aux = loop_father;
}
}
/* Be sure to skip unlooped loops while processing father_stack
array. */
- FOR_EACH_VEC_ELT (loop_p, loops_to_unloop, i, iter)
+ FOR_EACH_VEC_ELT (loops_to_unloop, i, iter)
(*iter)->aux = NULL;
- FOR_EACH_VEC_ELT (loop_p, father_stack, i, iter)
+ FOR_EACH_VEC_ELT (father_stack, i, iter)
if (!(*iter)->aux)
*iter = NULL;
unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
update_ssa (TODO_update_ssa);
/* Propagate the constants within the new basic blocks. */
- FOR_EACH_VEC_ELT (loop_p, father_stack, i, iter)
+ FOR_EACH_VEC_ELT (father_stack, i, iter)
if (*iter)
{
unsigned j;
free (body);
(*iter)->aux = NULL;
}
- VEC_truncate (loop_p, father_stack, 0);
+ father_stack.truncate (0);
/* This will take care of removing completely unrolled loops
from the loop structures so we can continue unrolling now
while (changed
&& ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));
- VEC_free (loop_p, stack, father_stack);
+ father_stack.release ();
if (irred_invalidated
&& loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
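
A note on the stack-allocated flavour used for father_stack above: declaring the vec allocates nothing, vec_stack_alloc carves the initial storage off the stack, and the vector is assumed to migrate to the heap if it outgrows that space (the bookkeeping lives in register_stack_vec and friends per the ChangeLog). A sketch under those assumptions:

  vec<loop_p, va_stack> sv;
  vec_stack_alloc (loop_p, sv, 16); /* initial room for 16 elements on the stack */
  sv.safe_push (loop);              /* may move the storage to the heap          */
  sv.release ();                    /* releases heap storage, if any             */
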
/* The data used by the induction variable optimizations. */
typedef struct iv_use *iv_use_p;
-DEF_VEC_P(iv_use_p);
-DEF_VEC_ALLOC_P(iv_use_p,heap);
typedef struct iv_cand *iv_cand_p;
-DEF_VEC_P(iv_cand_p);
-DEF_VEC_ALLOC_P(iv_cand_p,heap);
struct ivopts_data
{
bitmap relevant;
/* The uses of induction variables. */
- VEC(iv_use_p,heap) *iv_uses;
+ vec<iv_use_p> iv_uses;
/* The candidates. */
- VEC(iv_cand_p,heap) *iv_candidates;
+ vec<iv_cand_p> iv_candidates;
/* A bitmap of important candidates. */
bitmap important_candidates;
/* The list of trees for that the decl_rtl field must be reset is stored
here. */
-static VEC(tree,heap) *decl_rtl_to_reset;
+static vec<tree> decl_rtl_to_reset;
static comp_cost force_expr_to_var_cost (tree, bool);
static inline unsigned
n_iv_uses (struct ivopts_data *data)
{
- return VEC_length (iv_use_p, data->iv_uses);
+ return data->iv_uses.length ();
}
/* Ith use recorded in DATA. */
static inline struct iv_use *
iv_use (struct ivopts_data *data, unsigned i)
{
- return VEC_index (iv_use_p, data->iv_uses, i);
+ return data->iv_uses[i];
}
/* Number of candidates recorded in DATA. */
static inline unsigned
n_iv_cands (struct ivopts_data *data)
{
- return VEC_length (iv_cand_p, data->iv_candidates);
+ return data->iv_candidates.length ();
}
/* Ith candidate recorded in DATA. */
static inline struct iv_cand *
iv_cand (struct ivopts_data *data, unsigned i)
{
- return VEC_index (iv_cand_p, data->iv_candidates, i);
+ return data->iv_candidates[i];
}
/* The single loop exit if it dominates the latch, NULL otherwise. */
data->important_candidates = BITMAP_ALLOC (NULL);
data->max_inv_id = 0;
data->niters = NULL;
- data->iv_uses = VEC_alloc (iv_use_p, heap, 20);
- data->iv_candidates = VEC_alloc (iv_cand_p, heap, 20);
+ data->iv_uses.create (20);
+ data->iv_candidates.create (20);
data->inv_expr_tab = htab_create (10, htab_inv_expr_hash,
htab_inv_expr_eq, free);
data->inv_expr_id = 0;
- decl_rtl_to_reset = VEC_alloc (tree, heap, 20);
+ decl_rtl_to_reset.create (20);
}
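
Why this patch writes `= vec<tree>()` at other declaration sites but a plain create (20) here: vec appears to stay a constructor-free POD (so it can be embedded in GC'ed structures), so locals need explicit value-initialization, while create (n) makes the first allocation explicit. A hedged sketch (some_tree is a stand-in):

  vec<tree> to_reset = vec<tree>(); /* value-initialized; no storage yet  */
  to_reset.create (20);             /* capacity for 20 elements, length 0 */
  to_reset.safe_push (some_tree);   /* fits without reallocating          */
  to_reset.release ();
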
/* Returns a memory object to that EXPR points. In case we are able to
if (dump_file && (dump_flags & TDF_DETAILS))
dump_use (dump_file, use);
- VEC_safe_push (iv_use_p, heap, data->iv_uses, use);
+ data->iv_uses.safe_push (use);
return use;
}
}
cand->important = important;
cand->incremented_at = incremented_at;
- VEC_safe_push (iv_cand_p, heap, data->iv_candidates, cand);
+ data->iv_candidates.safe_push (cand);
if (step
&& TREE_CODE (step) != INTEGER_CST)
if (x)
{
- VEC_safe_push (tree, heap, decl_rtl_to_reset, obj);
+ decl_rtl_to_reset.safe_push (obj);
SET_DECL_RTL (obj, x);
}
validity for a memory reference accessing memory of mode MODE in
address space AS. */
-DEF_VEC_P (sbitmap);
-DEF_VEC_ALLOC_P (sbitmap, heap);
bool
multiplier_allowed_in_address_p (HOST_WIDE_INT ratio, enum machine_mode mode,
{
#define MAX_RATIO 128
unsigned int data_index = (int) as * MAX_MACHINE_MODE + (int) mode;
- static VEC (sbitmap, heap) *valid_mult_list;
+ static vec<sbitmap> valid_mult_list;
sbitmap valid_mult;
- if (data_index >= VEC_length (sbitmap, valid_mult_list))
- VEC_safe_grow_cleared (sbitmap, heap, valid_mult_list, data_index + 1);
+ if (data_index >= valid_mult_list.length ())
+ valid_mult_list.safe_grow_cleared (data_index + 1);
- valid_mult = VEC_index (sbitmap, valid_mult_list, data_index);
+ valid_mult = valid_mult_list[data_index];
if (!valid_mult)
{
enum machine_mode address_mode = targetm.addr_space.address_mode (as);
fprintf (dump_file, "\n");
}
- VEC_replace (sbitmap, valid_mult_list, data_index, valid_mult);
+ valid_mult_list[data_index] = valid_mult;
}
if (ratio > MAX_RATIO || ratio < -MAX_RATIO)
unsigned costs[2][2][2][2];
} *address_cost_data;
-DEF_VEC_P (address_cost_data);
-DEF_VEC_ALLOC_P (address_cost_data, heap);
static comp_cost
get_address_cost (bool symbol_present, bool var_present,
bool stmt_after_inc, bool *may_autoinc)
{
enum machine_mode address_mode = targetm.addr_space.address_mode (as);
- static VEC(address_cost_data, heap) *address_cost_data_list;
+ static vec<address_cost_data> address_cost_data_list;
unsigned int data_index = (int) as * MAX_MACHINE_MODE + (int) mem_mode;
address_cost_data data;
static bool has_preinc[MAX_MACHINE_MODE], has_postinc[MAX_MACHINE_MODE];
unsigned HOST_WIDE_INT mask;
unsigned bits;
- if (data_index >= VEC_length (address_cost_data, address_cost_data_list))
- VEC_safe_grow_cleared (address_cost_data, heap, address_cost_data_list,
- data_index + 1);
+ if (data_index >= address_cost_data_list.length ())
+ address_cost_data_list.safe_grow_cleared (data_index + 1);
- data = VEC_index (address_cost_data, address_cost_data_list, data_index);
+ data = address_cost_data_list[data_index];
if (!data)
{
HOST_WIDE_INT i;
fprintf (dump_file, "\n");
}
- VEC_replace (address_cost_data, address_cost_data_list,
- data_index, data);
+ address_cost_data_list[data_index] = data;
}
bits = GET_MODE_BITSIZE (address_mode);
free (use->cost_map);
free (use);
}
- VEC_truncate (iv_use_p, data->iv_uses, 0);
+ data->iv_uses.truncate (0);
for (i = 0; i < n_iv_cands (data); i++)
{
BITMAP_FREE (cand->depends_on);
free (cand);
}
- VEC_truncate (iv_cand_p, data->iv_candidates, 0);
+ data->iv_candidates.truncate (0);
if (data->version_info_size < num_ssa_names)
{
data->max_inv_id = 0;
- FOR_EACH_VEC_ELT (tree, decl_rtl_to_reset, i, obj)
+ FOR_EACH_VEC_ELT (decl_rtl_to_reset, i, obj)
SET_DECL_RTL (obj, NULL_RTX);
- VEC_truncate (tree, decl_rtl_to_reset, 0);
+ decl_rtl_to_reset.truncate (0);
htab_empty (data->inv_expr_tab);
data->inv_expr_id = 0;
BITMAP_FREE (data->relevant);
BITMAP_FREE (data->important_candidates);
- VEC_free (tree, heap, decl_rtl_to_reset);
- VEC_free (iv_use_p, heap, data->iv_uses);
- VEC_free (iv_cand_p, heap, data->iv_candidates);
+ decl_rtl_to_reset.release ();
+ data->iv_uses.release ();
+ data->iv_candidates.release ();
htab_delete (data->inv_expr_tab);
}
{
unsigned i;
bitmap_iterator bi;
- VEC (basic_block, heap) *worklist;
+ vec<basic_block> worklist;
struct loop *def_loop = def_bb->loop_father;
unsigned def_loop_depth = loop_depth (def_loop);
bitmap def_loop_exits;
/* Normally the work list size is bounded by the number of basic
blocks in the largest loop. We don't know this number, but we
can be fairly sure that it will be relatively small. */
- worklist = VEC_alloc (basic_block, heap, MAX (8, n_basic_blocks / 128));
+ worklist.create (MAX (8, n_basic_blocks / 128));
EXECUTE_IF_SET_IN_BITMAP (use_blocks, 0, i, bi)
{
if (! flow_loop_nested_p (use_loop, def_loop))
use_bb = find_sibling_superloop (use_loop, def_loop)->header;
if (bitmap_set_bit (live_exits, use_bb->index))
- VEC_safe_push (basic_block, heap, worklist, use_bb);
+ worklist.safe_push (use_bb);
}
/* Iterate until the worklist is empty. */
- while (! VEC_empty (basic_block, worklist))
+ while (! worklist.is_empty ())
{
edge e;
edge_iterator ei;
/* Pull a block off the worklist. */
- basic_block bb = VEC_pop (basic_block, worklist);
+ basic_block bb = worklist.pop ();
/* Make sure we have at least enough room in the work list
for all predecessors of this block. */
- VEC_reserve (basic_block, heap, worklist, EDGE_COUNT (bb->preds));
+ worklist.reserve (EDGE_COUNT (bb->preds));
/* For each predecessor block. */
FOR_EACH_EDGE (e, ei, bb->preds)
if (pred_visited || dominated_by_p (CDI_DOMINATORS, pred, bb))
continue;
- VEC_quick_push (basic_block, worklist, pred);
+ worklist.quick_push (pred);
}
}
- VEC_free (basic_block, heap, worklist);
+ worklist.release ();
def_loop_exits = BITMAP_ALLOC (&loop_renamer_obstack);
for (struct loop *loop = def_loop;
FOR_EACH_LOOP (li, loop, 0)
{
- VEC(edge, heap) *exit_edges = get_loop_exit_edges (loop);
+ vec<edge> exit_edges = get_loop_exit_edges (loop);
loop_exits[loop->num] = BITMAP_ALLOC (&loop_renamer_obstack);
- FOR_EACH_VEC_ELT (edge, exit_edges, j, e)
+ FOR_EACH_VEC_ELT (exit_edges, j, e)
bitmap_set_bit (loop_exits[loop->num], e->dest->index);
- VEC_free (edge, heap, exit_edges);
+ exit_edges.release ();
}
}
bool
gimple_duplicate_loop_to_header_edge (struct loop *loop, edge e,
unsigned int ndupl, sbitmap wont_exit,
- edge orig, VEC (edge, heap) **to_remove,
+ edge orig, vec<edge> *to_remove,
int flags)
{
unsigned first_new_block;
unsigned new_est_niter, i, prob;
unsigned irr = loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP;
sbitmap wont_exit;
- VEC (edge, heap) *to_remove = NULL;
+ vec<edge> to_remove = vec<edge>();
est_niter = expected_loop_iterations (loop);
determine_exit_conditions (loop, desc, factor,
free (wont_exit);
gcc_assert (ok);
- FOR_EACH_VEC_ELT (edge, to_remove, i, e)
+ FOR_EACH_VEC_ELT (to_remove, i, e)
{
ok = remove_path (e);
gcc_assert (ok);
}
- VEC_free (edge, heap, to_remove);
+ to_remove.release ();
update_ssa (TODO_update_ssa);
/* Ensure that the frequencies in the loop match the new estimated
find_loop_niter (struct loop *loop, edge *exit)
{
unsigned i;
- VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ vec<edge> exits = get_loop_exit_edges (loop);
edge ex;
tree niter = NULL_TREE, aniter;
struct tree_niter_desc desc;
*exit = NULL;
- FOR_EACH_VEC_ELT (edge, exits, i, ex)
+ FOR_EACH_VEC_ELT (exits, i, ex)
{
if (!number_of_iterations_exit (loop, ex, &desc, false))
continue;
continue;
}
}
- VEC_free (edge, heap, exits);
+ exits.release ();
return niter ? niter : chrec_dont_know;
}
find_loop_niter_by_eval (struct loop *loop, edge *exit)
{
unsigned i;
- VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ vec<edge> exits = get_loop_exit_edges (loop);
edge ex;
tree niter = NULL_TREE, aniter;
/* Loops with multiple exits are expensive to handle and less important. */
if (!flag_expensive_optimizations
- && VEC_length (edge, exits) > 1)
+ && exits.length () > 1)
{
- VEC_free (edge, heap, exits);
+ exits.release ();
return chrec_dont_know;
}
- FOR_EACH_VEC_ELT (edge, exits, i, ex)
+ FOR_EACH_VEC_ELT (exits, i, ex)
{
if (!just_once_each_iteration_p (loop, ex->src))
continue;
niter = aniter;
*exit = ex;
}
- VEC_free (edge, heap, exits);
+ exits.release ();
return niter ? niter : chrec_dont_know;
}
Lookup by binary search. */
int
-bound_index (VEC (double_int, heap) *bounds, double_int bound)
+bound_index (vec<double_int> bounds, double_int bound)
{
- unsigned int end = VEC_length (double_int, bounds);
+ unsigned int end = bounds.length ();
unsigned int begin = 0;
/* Find a matching index by means of a binary search. */
while (begin != end)
{
unsigned int middle = (begin + end) / 2;
- double_int index = VEC_index (double_int, bounds, middle);
+ double_int index = bounds[middle];
if (index == bound)
return middle;
}
/* Used to hold vector of queues of basic blocks below. */
-typedef VEC (basic_block, heap) *bb_queue;
-DEF_VEC_P(bb_queue);
-DEF_VEC_ALLOC_P(bb_queue,heap);
+typedef vec<basic_block> bb_queue;
/* We recorded loop bounds only for statements dominating loop latch (and thus
executed each loop iteration). If there are any bounds on statements not
{
pointer_map_t *bb_bounds;
struct nb_iter_bound *elt;
- VEC (double_int, heap) *bounds = NULL;
- VEC (bb_queue, heap) *queues = NULL;
- bb_queue queue = NULL;
+ vec<double_int> bounds = vec<double_int>();
+ vec<bb_queue> queues = vec<bb_queue>();
+ bb_queue queue = bb_queue();
ptrdiff_t queue_index;
ptrdiff_t latch_index = 0;
pointer_map_t *block_priority;
if (!loop->any_upper_bound
|| bound.ult (loop->nb_iterations_upper_bound))
- VEC_safe_push (double_int, heap, bounds, bound);
+ bounds.safe_push (bound);
}
/* Exit early if there is nothing to do. */
- if (!bounds)
+ if (!bounds.exists ())
return;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
/* Sort the bounds in decreasing order. */
- qsort (VEC_address (double_int, bounds), VEC_length (double_int, bounds),
+ qsort (bounds.address (), bounds.length (),
sizeof (double_int), double_int_cmp);
/* For every basic block record the lowest bound that is guaranteed to
To avoid the need for a Fibonacci heap on double ints we simply compress
double ints into indexes to BOUNDS array and then represent the queue
as arrays of queues for every index.
- Index of VEC_length (BOUNDS) means that the execution of given BB has
+ Index of BOUNDS.length () means that the execution of a given BB has
no bounds determined.
VISITED is a pointer map translating basic block into smallest index
latch_index = -1;
/* Start walk in loop header with index set to infinite bound. */
- queue_index = VEC_length (double_int, bounds);
- VEC_safe_grow_cleared (bb_queue, heap, queues, queue_index + 1);
- VEC_safe_push (basic_block, heap, queue, loop->header);
- VEC_replace (bb_queue, queues, queue_index, queue);
+ queue_index = bounds.length ();
+ queues.safe_grow_cleared (queue_index + 1);
+ queue.safe_push (loop->header);
+ queues[queue_index] = queue;
*pointer_map_insert (block_priority, loop->header) = (void *)queue_index;
for (; queue_index >= 0; queue_index--)
{
if (latch_index < queue_index)
{
- while (VEC_length (basic_block,
- VEC_index (bb_queue, queues, queue_index)))
+ while (queues[queue_index].length ())
{
basic_block bb;
ptrdiff_t bound_index = queue_index;
edge e;
edge_iterator ei;
- queue = VEC_index (bb_queue, queues, queue_index);
- bb = VEC_pop (basic_block, queue);
+ queue = queues[queue_index];
+ bb = queue.pop ();
/* OK, we later inserted the BB with lower priority, skip it. */
if ((ptrdiff_t)*pointer_map_contains (block_priority, bb) > queue_index)
if (insert)
{
- bb_queue queue2 = VEC_index (bb_queue, queues, bound_index);
- VEC_safe_push (basic_block, heap, queue2, e->dest);
- VEC_replace (bb_queue, queues, bound_index, queue2);
+ bb_queue queue2 = queues[bound_index];
+ queue2.safe_push (e->dest);
+ queues[bound_index] = queue2;
}
}
}
}
else
- VEC_free (basic_block, heap, VEC_index (bb_queue, queues, queue_index));
+ queues[queue_index].release ();
}
gcc_assert (latch_index >= 0);
- if ((unsigned)latch_index < VEC_length (double_int, bounds))
+ if ((unsigned)latch_index < bounds.length ())
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Found better loop bound ");
- dump_double_int (dump_file,
- VEC_index (double_int, bounds, latch_index), true);
+ dump_double_int (dump_file, bounds[latch_index], true);
fprintf (dump_file, "\n");
}
- record_niter_bound (loop, VEC_index (double_int, bounds, latch_index),
- false, true);
+ record_niter_bound (loop, bounds[latch_index], false, true);
}
- VEC_free (bb_queue, heap, queues);
+ queues.release ();
pointer_map_destroy (bb_bounds);
pointer_map_destroy (block_priority);
}
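
The copy-out/write-back sequence around queues above (queue2 = queues[bound_index]; push; store back) is forced by value semantics: a vl_ptr vec is a pointer-sized descriptor, so growing a copy can reallocate the storage, and the updated descriptor must be written back into the outer vector. A sketch (some_bb is a stand-in):

  vec<bb_queue> qs = vec<bb_queue>();
  qs.safe_grow_cleared (4);  /* four empty queues                         */
  bb_queue q = qs[2];        /* copies the small descriptor, not elements */
  q.safe_push (some_bb);     /* may move the underlying storage           */
  qs[2] = q;                 /* store the possibly-updated descriptor     */
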
pointer_set_t *not_executed_last_iteration = NULL;
struct nb_iter_bound *elt;
bool found_exit = false;
- VEC (basic_block, heap) *queue = NULL;
+ vec<basic_block> queue = vec<basic_block>();
bitmap visited;
/* Collect all statements with interesting (i.e. lower than
effects that may terminate the loop otherwise) without visiting
any of the statements known to have undefined effect on the last
iteration. */
- VEC_safe_push (basic_block, heap, queue, loop->header);
+ queue.safe_push (loop->header);
visited = BITMAP_ALLOC (NULL);
bitmap_set_bit (visited, loop->header->index);
found_exit = false;
do
{
- basic_block bb = VEC_pop (basic_block, queue);
+ basic_block bb = queue.pop ();
gimple_stmt_iterator gsi;
bool stmt_found = false;
break;
}
if (bitmap_set_bit (visited, e->dest->index))
- VEC_safe_push (basic_block, heap, queue, e->dest);
+ queue.safe_push (e->dest);
}
}
}
- while (VEC_length (basic_block, queue) && !found_exit);
+ while (queue.length () && !found_exit);
/* If every path through the loop reaches a bounding statement before exit,
then we know the last iteration of the loop will have undefined effect
false, true);
}
BITMAP_FREE (visited);
- VEC_free (basic_block, heap, queue);
+ queue.release ();
}
/* Records estimates on numbers of iterations of LOOP. If USE_UNDEFINED_P
void
estimate_numbers_of_iterations_loop (struct loop *loop)
{
- VEC (edge, heap) *exits;
+ vec<edge> exits;
tree niter, type;
unsigned i;
struct tree_niter_desc niter_desc;
exits = get_loop_exit_edges (loop);
likely_exit = single_likely_exit (loop);
- FOR_EACH_VEC_ELT (edge, exits, i, ex)
+ FOR_EACH_VEC_ELT (exits, i, ex)
{
if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
continue;
last_stmt (ex->src),
true, ex == likely_exit, true);
}
- VEC_free (edge, heap, exits);
+ exits.release ();
infer_loop_bounds_from_undefined (loop);
static void
emit_mfence_after_loop (struct loop *loop)
{
- VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ vec<edge> exits = get_loop_exit_edges (loop);
edge exit;
gimple call;
gimple_stmt_iterator bsi;
unsigned i;
- FOR_EACH_VEC_ELT (edge, exits, i, exit)
+ FOR_EACH_VEC_ELT (exits, i, exit)
{
call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
gsi_insert_before (&bsi, call, GSI_NEW_STMT);
}
- VEC_free (edge, heap, exits);
+ exits.release ();
update_ssa (TODO_update_ssa_only_virtuals);
}
is a suitable place for it at each of the loop exits. */
if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
{
- VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ vec<edge> exits = get_loop_exit_edges (loop);
unsigned i;
edge exit;
- FOR_EACH_VEC_ELT (edge, exits, i, exit)
+ FOR_EACH_VEC_ELT (exits, i, exit)
if ((exit->flags & EDGE_ABNORMAL)
&& exit->dest == EXIT_BLOCK_PTR)
ret = false;
- VEC_free (edge, heap, exits);
+ exits.release ();
}
return ret;
{
tree stride, access_fn;
HOST_WIDE_INT *strides, astride;
- VEC (tree, heap) *access_fns;
+ vec<tree> access_fns;
tree ref = DR_REF (dr);
unsigned i, ret = ~0u;
strides = XCNEWVEC (HOST_WIDE_INT, n);
access_fns = DR_ACCESS_FNS (dr);
- FOR_EACH_VEC_ELT (tree, access_fns, i, access_fn)
+ FOR_EACH_VEC_ELT (access_fns, i, access_fn)
{
/* Keep track of the reference corresponding to the subscript, so that we
know its stride. */
bool no_other_refs)
{
struct loop *nest, *aloop;
- VEC (data_reference_p, heap) *datarefs = NULL;
- VEC (ddr_p, heap) *dependences = NULL;
+ vec<data_reference_p> datarefs = vec<data_reference_p>();
+ vec<ddr_p> dependences = vec<ddr_p>();
struct mem_ref_group *gr;
struct mem_ref *ref, *refb;
- VEC (loop_p, heap) *vloops = NULL;
+ vec<loop_p> vloops = vec<loop_p>();
unsigned *loop_data_size;
unsigned i, j, n;
unsigned volume, dist, adist;
We use this to estimate whether the reference is evicted from the
cache before its reuse. */
find_loop_nest (nest, &vloops);
- n = VEC_length (loop_p, vloops);
+ n = vloops.length ();
loop_data_size = XNEWVEC (unsigned, n);
volume = volume_of_references (refs);
i = n;
if (volume > L2_CACHE_SIZE_BYTES)
continue;
- aloop = VEC_index (loop_p, vloops, i);
+ aloop = vloops[i];
vol = estimated_stmt_executions_int (aloop);
if (vol == -1)
vol = expected_loop_iterations (aloop);
{
ref->reuse_distance = volume;
dr->aux = ref;
- VEC_safe_push (data_reference_p, heap, datarefs, dr);
+ datarefs.safe_push (dr);
}
else
no_other_refs = false;
}
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
{
dist = self_reuse_distance (dr, loop_data_size, n, loop);
ref = (struct mem_ref *) dr->aux;
if (!compute_all_dependences (datarefs, &dependences, vloops, true))
return false;
- FOR_EACH_VEC_ELT (ddr_p, dependences, i, dep)
+ FOR_EACH_VEC_ELT (dependences, i, dep)
{
if (DDR_ARE_DEPENDENT (dep) == chrec_known)
continue;
statements in the vector. */
static bool
-maybe_record_sincos (VEC(gimple, heap) **stmts,
+maybe_record_sincos (vec<gimple> *stmts,
basic_block *top_bb, gimple use_stmt)
{
basic_block use_bb = gimple_bb (use_stmt);
if (*top_bb
&& (*top_bb == use_bb
|| dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
- VEC_safe_push (gimple, heap, *stmts, use_stmt);
+ stmts->safe_push (use_stmt);
else if (!*top_bb
|| dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
{
- VEC_safe_push (gimple, heap, *stmts, use_stmt);
+ stmts->safe_push (use_stmt);
*top_bb = use_bb;
}
else
tree fndecl, res, type;
gimple def_stmt, use_stmt, stmt;
int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
- VEC(gimple, heap) *stmts = NULL;
+ vec<gimple> stmts = vec<gimple>();
basic_block top_bb = NULL;
int i;
bool cfg_changed = false;
if (seen_cos + seen_sin + seen_cexpi <= 1)
{
- VEC_free(gimple, heap, stmts);
+ stmts.release ();
return false;
}
sincos_stats.inserted++;
/* And adjust the recorded old call sites. */
- for (i = 0; VEC_iterate(gimple, stmts, i, use_stmt); ++i)
+ for (i = 0; stmts.iterate (i, &use_stmt); ++i)
{
tree rhs = NULL;
fndecl = gimple_call_fndecl (use_stmt);
cfg_changed = true;
}
- VEC_free(gimple, heap, stmts);
+ stmts.release ();
return cfg_changed;
}
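
Note the out-parameter in stmts.iterate (i, &use_stmt) above: the member function returns the element through a pointer, where the old VEC_iterate macro wrote to the variable by name. A sketch (process () is a stand-in):

  vec<gimple> stmts = vec<gimple>();
  gimple use_stmt;
  unsigned i;
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    process (use_stmt);
  stmts.release ();
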
#define opf_not_non_addressable (1 << 4)
/* Array for building all the use operands. */
-static VEC(tree,heap) *build_uses;
+static vec<tree> build_uses;
/* The built VDEF operand. */
static tree build_vdef;
{
if (!n_initialized++)
{
- build_uses = VEC_alloc (tree, heap, 10);
+ build_uses.create (10);
build_vuse = NULL_TREE;
build_vdef = NULL_TREE;
bitmap_obstack_initialize (&operands_bitmap_obstack);
if (!--n_initialized)
{
- VEC_free (tree, heap, build_uses);
+ build_uses.release ();
build_vdef = NULL_TREE;
build_vuse = NULL_TREE;
}
/* Takes elements from build_defs and turns them into def operands of STMT.
- TODO -- Make build_defs VEC of tree *. */
+ TODO -- Make build_defs vec of tree *. */
static inline void
finalize_ssa_defs (gimple stmt)
/* Takes elements from build_uses and turns them into use operands of STMT.
- TODO -- Make build_uses VEC of tree *. */
+ TODO -- Make build_uses vec of tree *. */
static inline void
finalize_ssa_uses (gimple stmt)
if (oldvuse != (build_vuse != NULL_TREE
? build_vuse : build_vdef))
gimple_set_vuse (stmt, NULL_TREE);
- VEC_safe_insert (tree, heap, build_uses, 0, (tree)gimple_vuse_ptr (stmt));
+ build_uses.safe_insert (0, (tree)gimple_vuse_ptr (stmt));
}
new_list.next = NULL;
}
/* Now create nodes for all the new nodes. */
- for (new_i = 0; new_i < VEC_length (tree, build_uses); new_i++)
+ for (new_i = 0; new_i < build_uses.length (); new_i++)
{
- tree *op = (tree *) VEC_index (tree, build_uses, new_i);
+ tree *op = (tree *) build_uses[new_i];
last = add_use_op (stmt, op, last);
}
{
build_vdef = NULL_TREE;
build_vuse = NULL_TREE;
- VEC_truncate (tree, build_uses, 0);
+ build_uses.truncate (0);
}
static inline void
start_ssa_stmt_operands (void)
{
- gcc_assert (VEC_length (tree, build_uses) == 0);
+ gcc_assert (build_uses.length () == 0);
gcc_assert (build_vuse == NULL_TREE);
gcc_assert (build_vdef == NULL_TREE);
}
static inline void
append_use (tree *use_p)
{
- VEC_safe_push (tree, heap, build_uses, (tree) use_p);
+ build_uses.safe_push ((tree) use_p);
}
gimple_set_has_volatile_ops (stmt, true);
for (idx = 0;
- VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (expr), idx, ce);
+ vec_safe_iterate (CONSTRUCTOR_ELTS (expr), idx, &ce);
idx++)
get_expr_operands (stmt, &ce->value, uflags);
FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
{
- FOR_EACH_VEC_ELT (tree, build_uses, i, use)
+ FOR_EACH_VEC_ELT (build_uses, i, use)
{
if (use_p->use == (tree *)use)
{
- VEC_replace (tree, build_uses, i, NULL_TREE);
+ build_uses[i] = NULL_TREE;
break;
}
}
- if (i == VEC_length (tree, build_uses))
+ if (i == build_uses.length ())
{
error ("excess use operand for stmt");
debug_generic_expr (USE_FROM_PTR (use_p));
return true;
}
}
- FOR_EACH_VEC_ELT (tree, build_uses, i, use)
+ FOR_EACH_VEC_ELT (build_uses, i, use)
if (use != NULL_TREE)
{
error ("use operand missing for stmt");
split_bbs_on_noreturn_calls during cfg cleanup. */
if (is_gimple_call (stmt)
&& gimple_call_noreturn_p (stmt))
- VEC_safe_push (gimple, gc, MODIFIED_NORETURN_CALLS (cfun), stmt);
+ vec_safe_push (MODIFIED_NORETURN_CALLS (cfun), stmt);
gcc_assert (gimple_modified_p (stmt));
build_ssa_operands (stmt);
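
GC-owned vectors keep the embedded vl_embed layout and are reached through a pointer, so they use NULL-tolerant free functions like the vec_safe_push above rather than member calls. A sketch assuming the va_gc allocator from this overhaul (some_call is a stand-in):

  vec<gimple, va_gc> *calls = NULL;     /* no vector allocated yet        */
  vec_safe_push (calls, some_call);     /* allocates *calls on first push */
  unsigned n = vec_safe_length (calls); /* NULL-safe length query         */
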
{
gimple then_assign = last_and_only_stmt (then_bb);
gimple else_assign = last_and_only_stmt (else_bb);
- VEC (data_reference_p, heap) *then_datarefs, *else_datarefs;
- VEC (ddr_p, heap) *then_ddrs, *else_ddrs;
+ vec<data_reference_p> then_datarefs, else_datarefs;
+ vec<ddr_p> then_ddrs, else_ddrs;
gimple then_store, else_store;
bool found, ok = false, res;
struct data_dependence_relation *ddr;
data_reference_p then_dr, else_dr;
int i, j;
tree then_lhs, else_lhs;
- VEC (gimple, heap) *then_stores, *else_stores;
+ vec<gimple> then_stores, else_stores;
basic_block blocks[3];
if (MAX_STORES_TO_SINK == 0)
then_assign, else_assign);
/* Find data references. */
- then_datarefs = VEC_alloc (data_reference_p, heap, 1);
- else_datarefs = VEC_alloc (data_reference_p, heap, 1);
+ then_datarefs.create (1);
+ else_datarefs.create (1);
if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
== chrec_dont_know)
- || !VEC_length (data_reference_p, then_datarefs)
+ || !then_datarefs.length ()
|| (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
== chrec_dont_know)
- || !VEC_length (data_reference_p, else_datarefs))
+ || !else_datarefs.length ())
{
free_data_refs (then_datarefs);
free_data_refs (else_datarefs);
}
/* Find pairs of stores with equal LHS. */
- then_stores = VEC_alloc (gimple, heap, 1);
- else_stores = VEC_alloc (gimple, heap, 1);
- FOR_EACH_VEC_ELT (data_reference_p, then_datarefs, i, then_dr)
+ then_stores.create (1);
+ else_stores.create (1);
+ FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
{
if (DR_IS_READ (then_dr))
continue;
then_lhs = gimple_get_lhs (then_store);
found = false;
- FOR_EACH_VEC_ELT (data_reference_p, else_datarefs, j, else_dr)
+ FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
{
if (DR_IS_READ (else_dr))
continue;
if (!found)
continue;
- VEC_safe_push (gimple, heap, then_stores, then_store);
- VEC_safe_push (gimple, heap, else_stores, else_store);
+ then_stores.safe_push (then_store);
+ else_stores.safe_push (else_store);
}
/* No pairs of stores found. */
- if (!VEC_length (gimple, then_stores)
- || VEC_length (gimple, then_stores) > (unsigned) MAX_STORES_TO_SINK)
+ if (!then_stores.length ()
+ || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
{
free_data_refs (then_datarefs);
free_data_refs (else_datarefs);
- VEC_free (gimple, heap, then_stores);
- VEC_free (gimple, heap, else_stores);
+ then_stores.release ();
+ else_stores.release ();
return false;
}
/* Compute and check data dependencies in both basic blocks. */
- then_ddrs = VEC_alloc (ddr_p, heap, 1);
- else_ddrs = VEC_alloc (ddr_p, heap, 1);
- if (!compute_all_dependences (then_datarefs, &then_ddrs, NULL, false)
- || !compute_all_dependences (else_datarefs, &else_ddrs, NULL, false))
+ then_ddrs.create (1);
+ else_ddrs.create (1);
+ if (!compute_all_dependences (then_datarefs, &then_ddrs,
+ vec<loop_p>(), false)
+ || !compute_all_dependences (else_datarefs, &else_ddrs,
+ vec<loop_p>(), false))
{
free_dependence_relations (then_ddrs);
free_dependence_relations (else_ddrs);
free_data_refs (then_datarefs);
free_data_refs (else_datarefs);
- VEC_free (gimple, heap, then_stores);
- VEC_free (gimple, heap, else_stores);
+ then_stores.release ();
+ else_stores.release ();
return false;
}
blocks[0] = then_bb;
/* Check that there are no read-after-write or write-after-write dependencies
in THEN_BB. */
- FOR_EACH_VEC_ELT (ddr_p, then_ddrs, i, ddr)
+ FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
{
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
free_dependence_relations (else_ddrs);
free_data_refs (then_datarefs);
free_data_refs (else_datarefs);
- VEC_free (gimple, heap, then_stores);
- VEC_free (gimple, heap, else_stores);
+ then_stores.release ();
+ else_stores.release ();
return false;
}
}
/* Check that there are no read-after-write or write-after-write dependencies
in ELSE_BB. */
- FOR_EACH_VEC_ELT (ddr_p, else_ddrs, i, ddr)
+ FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
{
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
free_dependence_relations (else_ddrs);
free_data_refs (then_datarefs);
free_data_refs (else_datarefs);
- VEC_free (gimple, heap, then_stores);
- VEC_free (gimple, heap, else_stores);
+ then_stores.release ();
+ else_stores.release ();
return false;
}
}
/* Sink stores with same LHS. */
- FOR_EACH_VEC_ELT (gimple, then_stores, i, then_store)
+ FOR_EACH_VEC_ELT (then_stores, i, then_store)
{
- else_store = VEC_index (gimple, else_stores, i);
+ else_store = else_stores[i];
res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
then_store, else_store);
ok = ok || res;
free_dependence_relations (else_ddrs);
free_data_refs (then_datarefs);
free_data_refs (else_datarefs);
- VEC_free (gimple, heap, then_stores);
- VEC_free (gimple, heap, else_stores);
+ then_stores.release ();
+ else_stores.release ();
return ok;
}
static unsigned int
tree_ssa_phiprop (void)
{
- VEC(basic_block, heap) *bbs;
+ vec<basic_block> bbs;
struct phiprop_d *phivn;
bool did_something = false;
basic_block bb;
/* Walk the dominator tree in preorder. */
bbs = get_all_dominated_blocks (CDI_DOMINATORS,
single_succ (ENTRY_BLOCK_PTR));
- FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ FOR_EACH_VEC_ELT (bbs, i, bb)
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
did_something |= propagate_with_phi (bb, gsi_stmt (gsi), phivn, n);
if (did_something)
gsi_commit_edge_inserts ();
- VEC_free (basic_block, heap, bbs);
+ bbs.release ();
free (phivn);
return 0;
static unsigned int next_expression_id;
/* Mapping from expression to id number we can use in bitmap sets. */
-DEF_VEC_P (pre_expr);
-DEF_VEC_ALLOC_P (pre_expr, heap);
-static VEC(pre_expr, heap) *expressions;
+static vec<pre_expr> expressions;
static hash_table <pre_expr_d> expression_to_id;
-static VEC(unsigned, heap) *name_to_id;
+static vec<unsigned> name_to_id;
/* Allocate an expression id for EXPR. */
/* Make sure we won't overflow. */
gcc_assert (next_expression_id + 1 > next_expression_id);
expr->id = next_expression_id++;
- VEC_safe_push (pre_expr, heap, expressions, expr);
+ expressions.safe_push (expr);
if (expr->kind == NAME)
{
unsigned version = SSA_NAME_VERSION (PRE_EXPR_NAME (expr));
- /* VEC_safe_grow_cleared allocates no headroom. Avoid frequent
- re-allocations by using VEC_reserve upfront. There is no
- VEC_quick_grow_cleared unfortunately. */
- unsigned old_len = VEC_length (unsigned, name_to_id);
- VEC_reserve (unsigned, heap, name_to_id, num_ssa_names - old_len);
- VEC_safe_grow_cleared (unsigned, heap, name_to_id, num_ssa_names);
- gcc_assert (VEC_index (unsigned, name_to_id, version) == 0);
- VEC_replace (unsigned, name_to_id, version, expr->id);
+ /* vec::safe_grow_cleared allocates no headroom. Avoid frequent
+ re-allocations by using vec::reserve upfront. There is no
+ vec::quick_grow_cleared unfortunately. */
+ unsigned old_len = name_to_id.length ();
+ name_to_id.reserve (num_ssa_names - old_len);
+ name_to_id.safe_grow_cleared (num_ssa_names);
+ gcc_assert (name_to_id[version] == 0);
+ name_to_id[version] = expr->id;
}
else
{
if (expr->kind == NAME)
{
unsigned version = SSA_NAME_VERSION (PRE_EXPR_NAME (expr));
- if (VEC_length (unsigned, name_to_id) <= version)
+ if (name_to_id.length () <= version)
return 0;
- return VEC_index (unsigned, name_to_id, version);
+ return name_to_id[version];
}
else
{
static inline pre_expr
expression_for_id (unsigned int id)
{
- return VEC_index (pre_expr, expressions, id);
+ return expressions[id];
}
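
The headroom comment above, made concrete: reserve grows with headroom while safe_grow_cleared allocates exactly what is asked, so reserving first avoids repeated reallocation when the vector is grown again later. A sketch (num_needed is a stand-in):

  vec<unsigned> ids = vec<unsigned>();
  unsigned old_len = ids.length ();
  ids.reserve (num_needed - old_len);  /* growth policy adds headroom      */
  ids.safe_grow_cleared (num_needed);  /* exact; reuses the reserved space */
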
/* Free the expression id field in all of our expressions,
static void
clear_expression_ids (void)
{
- VEC_free (pre_expr, heap, expressions);
+ expressions.release ();
}
static alloc_pool pre_expr_pool;
EXECUTE_IF_SET_IN_BITMAP(&(set)->values, 0, (id), (bi))
/* Mapping from value id to expressions with that value_id. */
-static VEC(bitmap, heap) *value_expressions;
+static vec<bitmap> value_expressions;
/* Sets that we need to keep track of. */
typedef struct bb_bitmap_sets
gcc_checking_assert (get_expr_value_id (e) == v);
- if (v >= VEC_length (bitmap, value_expressions))
+ if (v >= value_expressions.length ())
{
- VEC_safe_grow_cleared (bitmap, heap, value_expressions, v + 1);
+ value_expressions.safe_grow_cleared (v + 1);
}
- set = VEC_index (bitmap, value_expressions, v);
+ set = value_expressions[v];
if (!set)
{
set = BITMAP_ALLOC (&grand_bitmap_obstack);
- VEC_replace (bitmap, value_expressions, v, set);
+ value_expressions[v] = set;
}
bitmap_set_bit (set, get_or_alloc_expression_id (e));
{
bitmap_iterator bi;
unsigned int i;
- bitmap exprset = VEC_index (bitmap, value_expressions, val);
+ bitmap exprset = value_expressions[val];
EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
{
pre_expr vexpr = expression_for_id (i);
/* Generate a topologically ordered array of bitmap set SET. */
-static VEC(pre_expr, heap) *
+static vec<pre_expr>
sorted_array_from_bitmap_set (bitmap_set_t set)
{
unsigned int i, j;
bitmap_iterator bi, bj;
- VEC(pre_expr, heap) *result;
+ vec<pre_expr> result;
/* Pre-allocate roughly enough space for the array. */
- result = VEC_alloc (pre_expr, heap, bitmap_count_bits (&set->values));
+ result.create (bitmap_count_bits (&set->values));
FOR_EACH_VALUE_ID_IN_SET (set, i, bi)
{
If this is somehow a significant loss for some cases, we can
choose which set to walk based on the set size. */
- bitmap exprset = VEC_index (bitmap, value_expressions, i);
+ bitmap exprset = value_expressions[i];
EXECUTE_IF_SET_IN_BITMAP (exprset, 0, j, bj)
{
if (bitmap_bit_p (&set->expressions, j))
- VEC_safe_push (pre_expr, heap, result, expression_for_id (j));
+ result.safe_push (expression_for_id (j));
}
}
5-10x faster than walking the bitmap. If this is somehow a
significant loss for some cases, we can choose which set to walk
based on the set size. */
- exprset = VEC_index (bitmap, value_expressions, lookfor);
+ exprset = value_expressions[lookfor];
EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
{
if (bitmap_clear_bit (&set->expressions, i))
vn_reference_t ref = PRE_EXPR_REFERENCE (expr);
fprintf (outfile, "{");
for (i = 0;
- VEC_iterate (vn_reference_op_s, ref->operands, i, vro);
+ ref->operands.iterate (i, &vro);
i++)
{
bool closebrace = false;
}
if (closebrace)
fprintf (outfile, ">");
- if (i != VEC_length (vn_reference_op_s, ref->operands) - 1)
+ if (i != ref->operands.length () - 1)
fprintf (outfile, ",");
}
fprintf (outfile, "}");
static void
print_value_expressions (FILE *outfile, unsigned int val)
{
- bitmap set = VEC_index (bitmap, value_expressions, val);
+ bitmap set = value_expressions[val];
if (set)
{
bitmap_set x;
{
unsigned int i;
bitmap_iterator bi;
- bitmap exprset = VEC_index (bitmap, value_expressions, v);
+ bitmap exprset = value_expressions[v];
EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
{
in case the new vuse doesn't change the value id of the OPERANDS. */
static tree
-translate_vuse_through_block (VEC (vn_reference_op_s, heap) *operands,
+translate_vuse_through_block (vec<vn_reference_op_s> operands,
alias_set_type set, tree type, tree vuse,
basic_block phiblock,
basic_block block, bool *same_valid)
and pick out an SSA_NAME. */
unsigned int i;
bitmap_iterator bi;
- bitmap exprs = VEC_index (bitmap, value_expressions, value_id);
+ bitmap exprs = value_expressions[value_id];
EXECUTE_IF_SET_IN_BITMAP (exprs, 0, i, bi)
{
pre_expr rep = expression_for_id (i);
else
{
new_val_id = get_next_value_id ();
- VEC_safe_grow_cleared (bitmap, heap,
- value_expressions,
- get_max_value_id() + 1);
+ value_expressions.safe_grow_cleared (get_max_value_id() + 1);
nary = vn_nary_op_insert_pieces (newnary->length,
newnary->opcode,
newnary->type,
case REFERENCE:
{
vn_reference_t ref = PRE_EXPR_REFERENCE (expr);
- VEC (vn_reference_op_s, heap) *operands = ref->operands;
+ vec<vn_reference_op_s> operands = ref->operands;
tree vuse = ref->vuse;
tree newvuse = vuse;
- VEC (vn_reference_op_s, heap) *newoperands = NULL;
+ vec<vn_reference_op_s> newoperands
+ = vec<vn_reference_op_s>();
bool changed = false, same_valid = true;
unsigned int i, j, n;
vn_reference_op_t operand;
vn_reference_t newref;
for (i = 0, j = 0;
- VEC_iterate (vn_reference_op_s, operands, i, operand); i++, j++)
+ operands.iterate (i, &operand); i++, j++)
{
pre_expr opresult;
pre_expr leader;
}
if (n != 3)
{
- if (newoperands)
- VEC_free (vn_reference_op_s, heap, newoperands);
+ newoperands.release ();
return NULL;
}
- if (!newoperands)
- newoperands = VEC_copy (vn_reference_op_s, heap, operands);
+ if (!newoperands.exists ())
+ newoperands = operands.copy ();
/* We may have changed from an SSA_NAME to a constant */
if (newop.opcode == SSA_NAME && TREE_CODE (op[0]) != SSA_NAME)
newop.opcode = TREE_CODE (op[0]);
if (off.fits_shwi ())
newop.off = off.low;
}
- VEC_replace (vn_reference_op_s, newoperands, j, newop);
+ newoperands[j] = newop;
/* If it transforms from an SSA_NAME to an address, fold with
a preceding indirect reference. */
if (j > 0 && op[0] && TREE_CODE (op[0]) == ADDR_EXPR
- && VEC_index (vn_reference_op_s,
- newoperands, j - 1).opcode == MEM_REF)
+ && newoperands[j - 1].opcode == MEM_REF)
vn_reference_fold_indirect (&newoperands, &j);
}
- if (i != VEC_length (vn_reference_op_s, operands))
+ if (i != operands.length ())
{
- if (newoperands)
- VEC_free (vn_reference_op_s, heap, newoperands);
+ newoperands.release ();
return NULL;
}
&same_valid);
if (newvuse == NULL_TREE)
{
- VEC_free (vn_reference_op_s, heap, newoperands);
+ newoperands.release ();
return NULL;
}
}
newoperands,
&newref, VN_WALK);
if (result)
- VEC_free (vn_reference_op_s, heap, newoperands);
+ newoperands.release ();
/* We can always insert constants, so if we have a partial
redundant constant load of another type try to translate it
else if (!result && newref
&& !useless_type_conversion_p (ref->type, newref->type))
{
- VEC_free (vn_reference_op_s, heap, newoperands);
+ newoperands.release ();
return NULL;
}
if (changed || !same_valid)
{
new_val_id = get_next_value_id ();
- VEC_safe_grow_cleared (bitmap, heap,
- value_expressions,
- get_max_value_id() + 1);
+ value_expressions.safe_grow_cleared(get_max_value_id() + 1);
}
else
new_val_id = ref->value_id;
ref->type,
newoperands,
result, new_val_id);
- newoperands = NULL;
+ newoperands.create (0);
PRE_EXPR_REFERENCE (expr) = newref;
constant = fully_constant_expression (expr);
if (constant != expr)
}
add_to_value (new_val_id, expr);
}
- VEC_free (vn_reference_op_s, heap, newoperands);
+ newoperands.release ();
return expr;
}
break;
phi_translate_set (bitmap_set_t dest, bitmap_set_t set, basic_block pred,
basic_block phiblock)
{
- VEC (pre_expr, heap) *exprs;
+ vec<pre_expr> exprs;
pre_expr expr;
int i;
}
exprs = sorted_array_from_bitmap_set (set);
- FOR_EACH_VEC_ELT (pre_expr, exprs, i, expr)
+ FOR_EACH_VEC_ELT (exprs, i, expr)
{
pre_expr translated;
translated = phi_translate (expr, set, NULL, pred, phiblock);
else
bitmap_value_insert_into_set (dest, translated);
}
- VEC_free (pre_expr, heap, exprs);
+ exprs.release ();
}
/* Find the leader for a value (i.e., the name representing that
{
unsigned int i;
bitmap_iterator bi;
- bitmap exprset = VEC_index (bitmap, value_expressions, val);
+ bitmap exprset = value_expressions[val];
EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
{
choose which set to walk based on which set is smaller. */
unsigned int i;
bitmap_iterator bi;
- bitmap exprset = VEC_index (bitmap, value_expressions, val);
+ bitmap exprset = value_expressions[val];
EXECUTE_IF_AND_IN_BITMAP (exprset, &set->expressions, 0, i, bi)
return expression_for_id (i);
vn_reference_op_t vro;
unsigned int i;
- FOR_EACH_VEC_ELT (vn_reference_op_s, ref->operands, i, vro)
+ FOR_EACH_VEC_ELT (ref->operands, i, vro)
{
if (!op_valid_in_sets (set1, set2, vro->op0)
|| !op_valid_in_sets (set1, set2, vro->op1)
static void
dependent_clean (bitmap_set_t set1, bitmap_set_t set2, basic_block block)
{
- VEC (pre_expr, heap) *exprs = sorted_array_from_bitmap_set (set1);
+ vec<pre_expr> exprs = sorted_array_from_bitmap_set (set1);
pre_expr expr;
int i;
- FOR_EACH_VEC_ELT (pre_expr, exprs, i, expr)
+ FOR_EACH_VEC_ELT (exprs, i, expr)
{
if (!valid_in_sets (set1, set2, expr, block))
bitmap_remove_from_set (set1, expr);
}
- VEC_free (pre_expr, heap, exprs);
+ exprs.release ();
}
/* Clean the set of expressions that are no longer valid in SET. This
static void
clean (bitmap_set_t set, basic_block block)
{
- VEC (pre_expr, heap) *exprs = sorted_array_from_bitmap_set (set);
+ vec<pre_expr> exprs = sorted_array_from_bitmap_set (set);
pre_expr expr;
int i;
- FOR_EACH_VEC_ELT (pre_expr, exprs, i, expr)
+ FOR_EACH_VEC_ELT (exprs, i, expr)
{
if (!valid_in_sets (set, NULL, expr, block))
bitmap_remove_from_set (set, expr);
}
- VEC_free (pre_expr, heap, exprs);
+ exprs.release ();
}
/* Clean the set of expressions that are no longer valid in SET because
phis to translate through. */
else
{
- VEC(basic_block, heap) * worklist;
+ vec<basic_block> worklist;
size_t i;
basic_block bprime, first = NULL;
- worklist = VEC_alloc (basic_block, heap, EDGE_COUNT (block->succs));
+ worklist.create (EDGE_COUNT (block->succs));
FOR_EACH_EDGE (e, ei, block->succs)
{
if (!first
&& BB_VISITED (e->dest))
first = e->dest;
else if (BB_VISITED (e->dest))
- VEC_quick_push (basic_block, worklist, e->dest);
+ worklist.quick_push (e->dest);
}
/* Of multiple successors we have to have visited one already. */
BB_VISITED (block) = 0;
BB_DEFERRED (block) = 1;
changed = true;
- VEC_free (basic_block, heap, worklist);
+ worklist.release ();
goto maybe_dump_sets;
}
else
bitmap_set_copy (ANTIC_OUT, ANTIC_IN (first));
- FOR_EACH_VEC_ELT (basic_block, worklist, i, bprime)
+ FOR_EACH_VEC_ELT (worklist, i, bprime)
{
if (!gimple_seq_empty_p (phi_nodes (bprime)))
{
else
bitmap_set_and (ANTIC_OUT, ANTIC_IN (bprime));
}
- VEC_free (basic_block, heap, worklist);
+ worklist.release ();
}
/* Prune expressions that are clobbered in block and thus become
them. */
else
{
- VEC(basic_block, heap) * worklist;
+ vec<basic_block> worklist;
size_t i;
basic_block bprime;
- worklist = VEC_alloc (basic_block, heap, EDGE_COUNT (block->succs));
+ worklist.create (EDGE_COUNT (block->succs));
FOR_EACH_EDGE (e, ei, block->succs)
{
if (e->flags & EDGE_DFS_BACK)
continue;
- VEC_quick_push (basic_block, worklist, e->dest);
+ worklist.quick_push (e->dest);
}
- if (VEC_length (basic_block, worklist) > 0)
+ if (worklist.length () > 0)
{
- FOR_EACH_VEC_ELT (basic_block, worklist, i, bprime)
+ FOR_EACH_VEC_ELT (worklist, i, bprime)
{
unsigned int i;
bitmap_iterator bi;
expression_for_id (i));
}
}
- VEC_free (basic_block, heap, worklist);
+ worklist.release ();
}
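
On quick_push versus safe_push, as used for the worklists above: quick_push assumes room was already reserved by create or reserve, while safe_push grows on demand. A sketch reusing the surrounding names (block, e, ei as in the code above):

  vec<basic_block> w = vec<basic_block>();
  w.create (EDGE_COUNT (block->succs)); /* exact capacity known up front */
  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, block->succs)
    w.quick_push (e->dest);             /* no growth check needed        */
  w.release ();
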
/* Prune expressions that are clobbered in block and thus become
create_component_ref_by_pieces_1 (basic_block block, vn_reference_t ref,
unsigned int *operand, gimple_seq *stmts)
{
- vn_reference_op_t currop = &VEC_index (vn_reference_op_s, ref->operands,
- *operand);
+ vn_reference_op_t currop = &ref->operands[*operand];
tree genop;
++*operand;
switch (currop->opcode)
fn = find_or_generate_expression (block, currop->op0, stmts);
if (currop->op1)
sc = find_or_generate_expression (block, currop->op1, stmts);
- args = XNEWVEC (tree, VEC_length (vn_reference_op_s,
- ref->operands) - 1);
- while (*operand < VEC_length (vn_reference_op_s, ref->operands))
+ args = XNEWVEC (tree, ref->operands.length () - 1);
+ while (*operand < ref->operands.length ())
{
args[nargs] = create_component_ref_by_pieces_1 (block, ref,
operand, stmts);
case TARGET_MEM_REF:
{
tree genop0 = NULL_TREE, genop1 = NULL_TREE;
- vn_reference_op_t nextop = &VEC_index (vn_reference_op_s, ref->operands,
- ++*operand);
+ vn_reference_op_t nextop = &ref->operands[++*operand];
tree baseop = create_component_ref_by_pieces_1 (block, ref, operand,
stmts);
if (currop->op0)
}
/* It must be a complex expression, so generate it recursively. */
- bitmap exprset = VEC_index (bitmap, value_expressions, lookfor);
+ bitmap exprset = value_expressions[lookfor];
bitmap_iterator bi;
unsigned int i;
EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
}
if (nary->opcode == CONSTRUCTOR)
{
- VEC(constructor_elt,gc) *elts = NULL;
+ vec<constructor_elt, va_gc> *elts = NULL;
for (i = 0; i < nary->length; ++i)
CONSTRUCTOR_APPEND_ELT (elts, NULL_TREE, genop[i]);
folded = build_constructor (nary->type, elts);
inhibit_phi_insertion (basic_block bb, pre_expr expr)
{
vn_reference_t vr = PRE_EXPR_REFERENCE (expr);
- VEC (vn_reference_op_s, heap) *ops = vr->operands;
+ vec<vn_reference_op_s> ops = vr->operands;
vn_reference_op_t op;
unsigned i;
memory reference is a simple induction variable. In other
cases the vectorizer won't do anything anyway (either it's
loop invariant or a complicated expression). */
- FOR_EACH_VEC_ELT (vn_reference_op_s, ops, i, op)
+ FOR_EACH_VEC_ELT (ops, i, op)
{
switch (op->opcode)
{
static bool
insert_into_preds_of_block (basic_block block, unsigned int exprnum,
- VEC(pre_expr, heap) *avail)
+ vec<pre_expr> avail)
{
pre_expr expr = expression_for_id (exprnum);
pre_expr newphi;
gimple_seq stmts = NULL;
tree builtexpr;
bprime = pred->src;
- eprime = VEC_index (pre_expr, avail, pred->dest_idx);
+ eprime = avail[pred->dest_idx];
if (eprime->kind != NAME && eprime->kind != CONSTANT)
{
&stmts, type);
gcc_assert (!(pred->flags & EDGE_ABNORMAL));
gsi_insert_seq_on_edge (pred, stmts);
- VEC_replace (pre_expr, avail, pred->dest_idx,
- get_or_alloc_expr_for_name (builtexpr));
+ avail[pred->dest_idx] = get_or_alloc_expr_for_name (builtexpr);
insertions = true;
}
else if (eprime->kind == CONSTANT)
}
gsi_insert_seq_on_edge (pred, stmts);
}
- VEC_replace (pre_expr, avail, pred->dest_idx,
- get_or_alloc_expr_for_name (forcedexpr));
+ avail[pred->dest_idx]
+ = get_or_alloc_expr_for_name (forcedexpr);
}
}
else
- VEC_replace (pre_expr, avail, pred->dest_idx,
- get_or_alloc_expr_for_constant (builtexpr));
+ avail[pred->dest_idx]
+ = get_or_alloc_expr_for_constant (builtexpr);
}
}
else if (eprime->kind == NAME)
}
gsi_insert_seq_on_edge (pred, stmts);
}
- VEC_replace (pre_expr, avail, pred->dest_idx,
- get_or_alloc_expr_for_name (forcedexpr));
+ avail[pred->dest_idx] = get_or_alloc_expr_for_name (forcedexpr);
}
}
}
bitmap_set_bit (inserted_exprs, SSA_NAME_VERSION (temp));
FOR_EACH_EDGE (pred, ei, block->preds)
{
- pre_expr ae = VEC_index (pre_expr, avail, pred->dest_idx);
+ pre_expr ae = avail[pred->dest_idx];
gcc_assert (get_expr_type (ae) == type
|| useless_type_conversion_p (type, get_expr_type (ae)));
if (ae->kind == CONSTANT)
do_regular_insertion (basic_block block, basic_block dom)
{
bool new_stuff = false;
- VEC (pre_expr, heap) *exprs;
+ vec<pre_expr> exprs;
pre_expr expr;
- VEC (pre_expr, heap) *avail = NULL;
+ vec<pre_expr> avail = vec<pre_expr>();
int i;
exprs = sorted_array_from_bitmap_set (ANTIC_IN (block));
- VEC_safe_grow (pre_expr, heap, avail, EDGE_COUNT (block->preds));
+ avail.safe_grow (EDGE_COUNT (block->preds));
- FOR_EACH_VEC_ELT (pre_expr, exprs, i, expr)
+ FOR_EACH_VEC_ELT (exprs, i, expr)
{
if (expr->kind != NAME)
{
rest of the results are. */
if (eprime == NULL)
{
- VEC_replace (pre_expr, avail, pred->dest_idx, NULL);
+ avail[pred->dest_idx] = NULL;
cant_insert = true;
break;
}
vprime);
if (edoubleprime == NULL)
{
- VEC_replace (pre_expr, avail, pred->dest_idx, eprime);
+ avail[pred->dest_idx] = eprime;
all_same = false;
}
else
{
- VEC_replace (pre_expr, avail, pred->dest_idx, edoubleprime);
+ avail[pred->dest_idx] = edoubleprime;
by_some = true;
/* We want to perform insertions to remove a redundancy on
a path in the CFG we want to optimize for speed. */
{
unsigned int j;
bitmap_iterator bi;
- bitmap exprset = VEC_index (bitmap, value_expressions, val);
+ bitmap exprset = value_expressions[val];
unsigned int new_val = get_expr_value_id (edoubleprime);
EXECUTE_IF_SET_IN_BITMAP (exprset, 0, j, bi)
}
}
- VEC_free (pre_expr, heap, exprs);
- VEC_free (pre_expr, heap, avail);
+ exprs.release ();
+ avail.release ();
return new_stuff;
}
do_partial_partial_insertion (basic_block block, basic_block dom)
{
bool new_stuff = false;
- VEC (pre_expr, heap) *exprs;
+ vec<pre_expr> exprs;
pre_expr expr;
- VEC (pre_expr, heap) *avail = NULL;
+ vec<pre_expr> avail = vec<pre_expr>();
int i;
exprs = sorted_array_from_bitmap_set (PA_IN (block));
- VEC_safe_grow (pre_expr, heap, avail, EDGE_COUNT (block->preds));
+ avail.safe_grow (EDGE_COUNT (block->preds));
- FOR_EACH_VEC_ELT (pre_expr, exprs, i, expr)
+ FOR_EACH_VEC_ELT (exprs, i, expr)
{
if (expr->kind != NAME)
{
rest of the results are. */
if (eprime == NULL)
{
- VEC_replace (pre_expr, avail, pred->dest_idx, NULL);
+ avail[pred->dest_idx] = NULL;
cant_insert = true;
break;
}
eprime = fully_constant_expression (eprime);
vprime = get_expr_value_id (eprime);
edoubleprime = bitmap_find_leader (AVAIL_OUT (bprime), vprime);
- VEC_replace (pre_expr, avail, pred->dest_idx, edoubleprime);
+ avail[pred->dest_idx] = edoubleprime;
if (edoubleprime == NULL)
{
by_all = false;
}
}
- VEC_free (pre_expr, heap, exprs);
- VEC_free (pre_expr, heap, avail);
+ exprs.release ();
+ avail.release ();
return new_stuff;
}
{
vn_reference_t ref;
pre_expr result = NULL;
- VEC(vn_reference_op_s, heap) *ops = NULL;
+ vec<vn_reference_op_s> ops
+ = vec<vn_reference_op_s>();
/* We can value number only calls to real functions. */
if (gimple_call_internal_p (stmt))
vn_reference_lookup_pieces (gimple_vuse (stmt), 0,
gimple_expr_type (stmt),
ops, &ref, VN_NOWALK);
- VEC_free (vn_reference_op_s, heap, ops);
+ ops.release ();
if (!ref)
continue;
/* Local state for the eliminate domwalk. */
-static VEC (gimple, heap) *el_to_remove;
-static VEC (gimple, heap) *el_to_update;
+static vec<gimple> el_to_remove;
+static vec<gimple> el_to_update;
static unsigned int el_todo;
-static VEC (tree, heap) *el_avail;
-static VEC (tree, heap) *el_avail_stack;
+static vec<tree> el_avail;
+static vec<tree> el_avail_stack;
/* Return a leader for OP that is available at the current point of the
eliminate domwalk. */
{
if (SSA_NAME_IS_DEFAULT_DEF (valnum))
return valnum;
- if (VEC_length (tree, el_avail) > SSA_NAME_VERSION (valnum))
- return VEC_index (tree, el_avail, SSA_NAME_VERSION (valnum));
+ if (el_avail.length () > SSA_NAME_VERSION (valnum))
+ return el_avail[SSA_NAME_VERSION (valnum)];
}
else if (is_gimple_min_invariant (valnum))
return valnum;
tree valnum = VN_INFO (op)->valnum;
if (TREE_CODE (valnum) == SSA_NAME)
{
- if (VEC_length (tree, el_avail) <= SSA_NAME_VERSION (valnum))
- VEC_safe_grow_cleared (tree, heap,
- el_avail, SSA_NAME_VERSION (valnum) + 1);
- VEC_replace (tree, el_avail, SSA_NAME_VERSION (valnum), op);
- VEC_safe_push (tree, heap, el_avail_stack, op);
+ if (el_avail.length () <= SSA_NAME_VERSION (valnum))
+ el_avail.safe_grow_cleared (SSA_NAME_VERSION (valnum) + 1);
+ el_avail[SSA_NAME_VERSION (valnum)] = op;
+ el_avail_stack.safe_push (op);
}
}
gimple stmt;
/* Mark new bb. */
- VEC_safe_push (tree, heap, el_avail_stack, NULL_TREE);
+ el_avail_stack.safe_push (NULL_TREE);
for (gsi = gsi_start_phis (b); !gsi_end_p (gsi);)
{
gsi2 = gsi_after_labels (b);
gsi_insert_before (&gsi2, stmt, GSI_NEW_STMT);
/* Queue the copy for eventual removal. */
- VEC_safe_push (gimple, heap, el_to_remove, stmt);
+ el_to_remove.safe_push (stmt);
/* If we inserted this PHI node ourself, it's not an elimination. */
if (inserted_exprs
&& bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (res)))
}
/* Queue stmt for removal. */
- VEC_safe_push (gimple, heap, el_to_remove, stmt);
+ el_to_remove.safe_push (stmt);
}
}
/* Visit COND_EXPRs and fold the comparison with the
}
gimple_call_set_fn (stmt, fn);
- VEC_safe_push (gimple, heap, el_to_update, stmt);
+ el_to_update.safe_push (stmt);
/* When changing a call into a noreturn call, cfg cleanup
is needed to fix up the noreturn call. */
eliminate_leave_block (dom_walk_data *, basic_block)
{
tree entry;
- while ((entry = VEC_pop (tree, el_avail_stack)) != NULL_TREE)
- VEC_replace (tree, el_avail,
- SSA_NAME_VERSION (VN_INFO (entry)->valnum), NULL_TREE);
+ while ((entry = el_avail_stack.pop ()) != NULL_TREE)
+ el_avail[SSA_NAME_VERSION (VN_INFO (entry)->valnum)] = NULL_TREE;
}
/* Eliminate fully redundant computations. */
need_eh_cleanup = BITMAP_ALLOC (NULL);
need_ab_cleanup = BITMAP_ALLOC (NULL);
- el_to_remove = NULL;
- el_to_update = NULL;
+ el_to_remove.create (0);
+ el_to_update.create (0);
el_todo = 0;
- el_avail = NULL;
- el_avail_stack = NULL;
+ el_avail.create (0);
+ el_avail_stack.create (0);
walk_data.dom_direction = CDI_DOMINATORS;
walk_data.initialize_block_local_data = NULL;
walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
fini_walk_dominator_tree (&walk_data);
- VEC_free (tree, heap, el_avail);
- VEC_free (tree, heap, el_avail_stack);
+ el_avail.release ();
+ el_avail_stack.release ();
/* We cannot remove stmts during BB walk, especially not release SSA
names there as this confuses the VN machinery. The stmts ending
up in el_to_remove are either stores or simple copies. */
- FOR_EACH_VEC_ELT (gimple, el_to_remove, i, stmt)
+ FOR_EACH_VEC_ELT (el_to_remove, i, stmt)
{
tree lhs = gimple_assign_lhs (stmt);
tree rhs = gimple_assign_rhs1 (stmt);
release_defs (stmt);
}
}
- VEC_free (gimple, heap, el_to_remove);
+ el_to_remove.release ();
/* We cannot update call statements with virtual operands during
SSA walk. This might remove them which in turn makes our
VN lattice invalid. */
- FOR_EACH_VEC_ELT (gimple, el_to_update, i, stmt)
+ FOR_EACH_VEC_ELT (el_to_update, i, stmt)
update_stmt (stmt);
- VEC_free (gimple, heap, el_to_update);
+ el_to_update.release ();
return el_todo;
}
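The eliminate () state above illustrates the file-scope case: because these vectors are now objects rather than pointers, the old "el_avail = NULL;" initialization becomes an explicit create (0) and teardown becomes release (). A sketch of that idiom (invented names; this relies on the new API treating zeroed storage as a valid empty vector):

    static vec<int> worklist;   /* static zero-initialization yields an empty vec */

    static void
    init ()
    {
      worklist.create (0);      /* replaces "worklist = NULL" */
    }

    static void
    fini ()
    {
      worklist.release ();      /* replaces VEC_free; leaves an empty vec behind */
    }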
basic_block bb;
next_expression_id = 1;
- expressions = NULL;
- VEC_safe_push (pre_expr, heap, expressions, NULL);
- value_expressions = VEC_alloc (bitmap, heap, get_max_value_id () + 1);
- VEC_safe_grow_cleared (bitmap, heap, value_expressions,
- get_max_value_id() + 1);
- name_to_id = NULL;
+ expressions.create (0);
+ expressions.safe_push (NULL);
+ value_expressions.create (get_max_value_id () + 1);
+ value_expressions.safe_grow_cleared (get_max_value_id() + 1);
+ name_to_id.create (0);
inserted_exprs = BITMAP_ALLOC (NULL);
fini_pre ()
{
free (postorder);
- VEC_free (bitmap, heap, value_expressions);
+ value_expressions.release ();
BITMAP_FREE (inserted_exprs);
bitmap_obstack_release (&grand_bitmap_obstack);
free_alloc_pool (bitmap_set_pool);
free_alloc_pool (pre_expr_pool);
phi_translate_table.dispose ();
expression_to_id.dispose ();
- VEC_free (unsigned, heap, name_to_id);
+ name_to_id.release ();
free_aux_for_blocks ();
static sbitmap executable_blocks;
/* Array of control flow edges on the worklist. */
-static VEC(basic_block,heap) *cfg_blocks;
+static vec<basic_block> cfg_blocks;
static unsigned int cfg_blocks_num = 0;
static int cfg_blocks_tail;
definition has changed. SSA edges are def-use edges in the SSA
web. For each D-U edge, we store the target statement or PHI node
U. */
-static GTY(()) VEC(gimple,gc) *interesting_ssa_edges;
+static GTY(()) vec<gimple, va_gc> *interesting_ssa_edges;
/* Identical to INTERESTING_SSA_EDGES. For performance reasons, the
list of SSA edges is split into two. One contains all SSA edges
don't use a separate worklist for VARYING edges, we end up with
situations where lattice values move from
UNDEFINED->INTERESTING->VARYING instead of UNDEFINED->VARYING. */
-static GTY(()) VEC(gimple,gc) *varying_ssa_edges;
+static GTY(()) vec<gimple, va_gc> *varying_ssa_edges;
/* Return true if the block worklist is empty. */
else
{
cfg_blocks_num++;
- if (cfg_blocks_num > VEC_length (basic_block, cfg_blocks))
+ if (cfg_blocks_num > cfg_blocks.length ())
{
/* We have to grow the array now. Adjust the queue to occupy
the full space of the original array. We do not need to
initialize the newly allocated portion of the array
because we keep track of CFG_BLOCKS_HEAD and
CFG_BLOCKS_TAIL. */
- cfg_blocks_tail = VEC_length (basic_block, cfg_blocks);
+ cfg_blocks_tail = cfg_blocks.length ();
cfg_blocks_head = 0;
- VEC_safe_grow (basic_block, heap, cfg_blocks, 2 * cfg_blocks_tail);
+ cfg_blocks.safe_grow (2 * cfg_blocks_tail);
}
/* Minor optimization: we prefer to see blocks with more
predecessors later, because there is more of a chance that
the incoming edges will be executable. */
else if (EDGE_COUNT (bb->preds)
- >= EDGE_COUNT (VEC_index (basic_block, cfg_blocks,
- cfg_blocks_head)->preds))
- cfg_blocks_tail = ((cfg_blocks_tail + 1)
- % VEC_length (basic_block, cfg_blocks));
+ >= EDGE_COUNT (cfg_blocks[cfg_blocks_head]->preds))
+ cfg_blocks_tail = ((cfg_blocks_tail + 1) % cfg_blocks.length ());
else
{
if (cfg_blocks_head == 0)
- cfg_blocks_head = VEC_length (basic_block, cfg_blocks);
+ cfg_blocks_head = cfg_blocks.length ();
--cfg_blocks_head;
head = true;
}
}
- VEC_replace (basic_block, cfg_blocks,
- head ? cfg_blocks_head : cfg_blocks_tail,
- bb);
+ cfg_blocks[head ? cfg_blocks_head : cfg_blocks_tail] = bb;
bitmap_set_bit (bb_in_list, bb->index);
}
{
basic_block bb;
- bb = VEC_index (basic_block, cfg_blocks, cfg_blocks_head);
+ bb = cfg_blocks[cfg_blocks_head];
gcc_assert (!cfg_blocks_empty_p ());
gcc_assert (bb);
- cfg_blocks_head = ((cfg_blocks_head + 1)
- % VEC_length (basic_block, cfg_blocks));
+ cfg_blocks_head = ((cfg_blocks_head + 1) % cfg_blocks.length ());
--cfg_blocks_num;
bitmap_clear_bit (bb_in_list, bb->index);
{
gimple_set_plf (use_stmt, STMT_IN_SSA_EDGE_WORKLIST, true);
if (is_varying)
- VEC_safe_push (gimple, gc, varying_ssa_edges, use_stmt);
+ vec_safe_push (varying_ssa_edges, use_stmt);
else
- VEC_safe_push (gimple, gc, interesting_ssa_edges, use_stmt);
+ vec_safe_push (interesting_ssa_edges, use_stmt);
}
}
}
SSA edge is added to it in simulate_stmt. */
static void
-process_ssa_edge_worklist (VEC(gimple,gc) **worklist)
+process_ssa_edge_worklist (vec<gimple, va_gc> **worklist)
{
/* Drain the entire worklist. */
- while (VEC_length (gimple, *worklist) > 0)
+ while ((*worklist)->length () > 0)
{
basic_block bb;
/* Pull the statement to simulate off the worklist. */
- gimple stmt = VEC_pop (gimple, *worklist);
+ gimple stmt = (*worklist)->pop ();
/* If this statement was already visited by simulate_block, then
we don't need to visit it again here. */
basic_block bb;
/* Worklists of SSA edges. */
- interesting_ssa_edges = VEC_alloc (gimple, gc, 20);
- varying_ssa_edges = VEC_alloc (gimple, gc, 20);
+ vec_alloc (interesting_ssa_edges, 20);
+ vec_alloc (varying_ssa_edges, 20);
executable_blocks = sbitmap_alloc (last_basic_block);
bitmap_clear (executable_blocks);
if (dump_file && (dump_flags & TDF_DETAILS))
dump_immediate_uses (dump_file);
- cfg_blocks = VEC_alloc (basic_block, heap, 20);
- VEC_safe_grow (basic_block, heap, cfg_blocks, 20);
+ cfg_blocks.create (20);
+ cfg_blocks.safe_grow_cleared (20);
/* Initially assume that every edge in the CFG is not executable.
(including the edges coming out of ENTRY_BLOCK_PTR). */
static void
ssa_prop_fini (void)
{
- VEC_free (gimple, gc, interesting_ssa_edges);
- VEC_free (gimple, gc, varying_ssa_edges);
- VEC_free (basic_block, heap, cfg_blocks);
- cfg_blocks = NULL;
+ vec_free (interesting_ssa_edges);
+ vec_free (varying_ssa_edges);
+ cfg_blocks.release ();
sbitmap_free (bb_in_list);
sbitmap_free (executable_blocks);
}
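GC-allocated vectors, in contrast, keep pointer form: vec<gimple, va_gc> * replaces VEC(gimple,gc) *, and the vec_* free functions wrap the member calls so that a NULL pointer is handled. A sketch of the pattern used by ssa_prop_init/ssa_prop_fini above (illustrative fragment; worklist is an invented name):

    static GTY(()) vec<tree, va_gc> *worklist;

    vec_alloc (worklist, 20);                    /* was VEC_alloc (tree, gc, 20) */
    vec_safe_push (worklist, integer_zero_node); /* was VEC_safe_push (tree, gc, ...) */
    tree t = worklist->pop ();                   /* members reached through the pointer */
    vec_free (worklist);                         /* was VEC_free (tree, gc, worklist) */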
tree fn = CALL_EXPR_FN (expr);
unsigned i;
unsigned nargs = call_expr_nargs (expr);
- VEC(tree, heap) *args = NULL;
+ vec<tree> args = vec<tree>();
gimple new_stmt;
if (nargs > 0)
{
- args = VEC_alloc (tree, heap, nargs);
- VEC_safe_grow (tree, heap, args, nargs);
+ args.create (nargs);
+ args.safe_grow_cleared (nargs);
for (i = 0; i < nargs; i++)
- VEC_replace (tree, args, i, CALL_EXPR_ARG (expr, i));
+ args[i] = CALL_EXPR_ARG (expr, i);
}
new_stmt = gimple_build_call_vec (fn, args);
finish_update_gimple_call (si_p, new_stmt, stmt);
- VEC_free (tree, heap, args);
+ args.release ();
return true;
}
/* Iterate until the worklists are empty. */
while (!cfg_blocks_empty_p ()
- || VEC_length (gimple, interesting_ssa_edges) > 0
- || VEC_length (gimple, varying_ssa_edges) > 0)
+ || interesting_ssa_edges->length () > 0
+ || varying_ssa_edges->length () > 0)
{
if (!cfg_blocks_empty_p ())
{
return 0;
}
-DEF_VEC_P(operand_entry_t);
-DEF_VEC_ALLOC_P(operand_entry_t, heap);
/* We want integer ones to end up last no matter what, since they are
the ones we can do the most with. */
/* Add an operand entry to *OPS for the tree operand OP. */
static void
-add_to_ops_vec (VEC(operand_entry_t, heap) **ops, tree op)
+add_to_ops_vec (vec<operand_entry_t> *ops, tree op)
{
operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);
oe->rank = get_rank (op);
oe->id = next_operand_entry_id++;
oe->count = 1;
- VEC_safe_push (operand_entry_t, heap, *ops, oe);
+ ops->safe_push (oe);
}
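Out-parameters lose one level of indirection in the same way: a function that took VEC (T, heap) ** so it could reallocate the vector now takes vec<T> *, because the vec object itself is the reallocatable control block. Sketch (invented names):

    static void
    collect (vec<int> *out)
    {
      out->safe_push (1);       /* was VEC_safe_push (int, heap, *out, 1) */
    }

    void
    caller ()
    {
      vec<int> v = vec<int>();
      collect (&v);             /* v's internal storage may move */
      v.release ();
    }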
/* Add an operand entry to *OPS for the tree operand OP with repeat
count REPEAT. */
static void
-add_repeat_to_ops_vec (VEC(operand_entry_t, heap) **ops, tree op,
+add_repeat_to_ops_vec (vec<operand_entry_t> *ops, tree op,
HOST_WIDE_INT repeat)
{
operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);
oe->rank = get_rank (op);
oe->id = next_operand_entry_id++;
oe->count = repeat;
- VEC_safe_push (operand_entry_t, heap, *ops, oe);
+ ops->safe_push (oe);
reassociate_stats.pows_encountered++;
}
static bool
eliminate_duplicate_pair (enum tree_code opcode,
- VEC (operand_entry_t, heap) **ops,
+ vec<operand_entry_t> *ops,
bool *all_done,
unsigned int i,
operand_entry_t curr,
print_generic_stmt (dump_file, last->op, 0);
}
- VEC_ordered_remove (operand_entry_t, *ops, i);
+ ops->ordered_remove (i);
reassociate_stats.ops_eliminated ++;
return true;
reassociate_stats.ops_eliminated += 2;
- if (VEC_length (operand_entry_t, *ops) == 2)
+ if (ops->length () == 2)
{
- VEC_free (operand_entry_t, heap, *ops);
- *ops = NULL;
+ ops->truncate (0);
add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (last->op)));
*all_done = true;
}
else
{
- VEC_ordered_remove (operand_entry_t, *ops, i-1);
- VEC_ordered_remove (operand_entry_t, *ops, i-1);
+ ops->ordered_remove (i-1);
+ ops->ordered_remove (i-1);
}
return true;
return false;
}
-static VEC(tree, heap) *plus_negates;
+static vec<tree> plus_negates;
/* If OPCODE is PLUS_EXPR, CURR->OP is a negate expression or a bitwise not
expression, look in OPS for a corresponding positive operation to cancel
static bool
eliminate_plus_minus_pair (enum tree_code opcode,
- VEC (operand_entry_t, heap) **ops,
+ vec<operand_entry_t> *ops,
unsigned int currindex,
operand_entry_t curr)
{
one, we can stop. */
for (i = currindex + 1;
- VEC_iterate (operand_entry_t, *ops, i, oe)
+ ops->iterate (i, &oe)
&& oe->rank >= curr->rank - 1 ;
i++)
{
fprintf (dump_file, " -> 0\n");
}
- VEC_ordered_remove (operand_entry_t, *ops, i);
+ ops->ordered_remove (i);
add_to_ops_vec (ops, build_zero_cst (TREE_TYPE (oe->op)));
- VEC_ordered_remove (operand_entry_t, *ops, currindex);
+ ops->ordered_remove (currindex);
reassociate_stats.ops_eliminated ++;
return true;
fprintf (dump_file, " -> -1\n");
}
- VEC_ordered_remove (operand_entry_t, *ops, i);
+ ops->ordered_remove (i);
add_to_ops_vec (ops, build_int_cst_type (op_type, -1));
- VEC_ordered_remove (operand_entry_t, *ops, currindex);
+ ops->ordered_remove (currindex);
reassociate_stats.ops_eliminated ++;
return true;
/* CURR->OP is a negate expr in a plus expr: save it for later
inspection in repropagate_negates(). */
if (negateop != NULL_TREE)
- VEC_safe_push (tree, heap, plus_negates, curr->op);
+ plus_negates.safe_push (curr->op);
return false;
}
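The loops above also show iterate () replacing VEC_iterate: the element is written through the second argument, which is why each converted call gains an & on the iteration variable. A self-contained sketch (invented names):

    vec<int> v = vec<int>();
    v.safe_push (10);
    v.safe_push (20);

    int elt, sum = 0;
    for (unsigned i = 0; v.iterate (i, &elt); i++)
      sum += elt;               /* iterate returns false past the last element */
    v.release ();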
static bool
eliminate_not_pairs (enum tree_code opcode,
- VEC (operand_entry_t, heap) **ops,
+ vec<operand_entry_t> *ops,
unsigned int currindex,
operand_entry_t curr)
{
one, we can stop. */
for (i = currindex + 1;
- VEC_iterate (operand_entry_t, *ops, i, oe)
+ ops->iterate (i, &oe)
&& oe->rank >= curr->rank - 1;
i++)
{
oe->op = build_low_bits_mask (TREE_TYPE (oe->op),
TYPE_PRECISION (TREE_TYPE (oe->op)));
- reassociate_stats.ops_eliminated
- += VEC_length (operand_entry_t, *ops) - 1;
- VEC_free (operand_entry_t, heap, *ops);
- *ops = NULL;
- VEC_safe_push (operand_entry_t, heap, *ops, oe);
+ reassociate_stats.ops_eliminated += ops->length () - 1;
+ ops->truncate (0);
+ ops->quick_push (oe);
return true;
}
}
static void
eliminate_using_constants (enum tree_code opcode,
- VEC(operand_entry_t, heap) **ops)
+ vec<operand_entry_t> *ops)
{
- operand_entry_t oelast = VEC_last (operand_entry_t, *ops);
+ operand_entry_t oelast = ops->last ();
tree type = TREE_TYPE (oelast->op);
if (oelast->rank == 0
case BIT_AND_EXPR:
if (integer_zerop (oelast->op))
{
- if (VEC_length (operand_entry_t, *ops) != 1)
+ if (ops->length () != 1)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Found & 0, removing all other ops\n");
- reassociate_stats.ops_eliminated
- += VEC_length (operand_entry_t, *ops) - 1;
+ reassociate_stats.ops_eliminated += ops->length () - 1;
- VEC_free (operand_entry_t, heap, *ops);
- *ops = NULL;
- VEC_safe_push (operand_entry_t, heap, *ops, oelast);
+ ops->truncate (0);
+ ops->quick_push (oelast);
return;
}
}
else if (integer_all_onesp (oelast->op))
{
- if (VEC_length (operand_entry_t, *ops) != 1)
+ if (ops->length () != 1)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Found & -1, removing\n");
- VEC_pop (operand_entry_t, *ops);
+ ops->pop ();
reassociate_stats.ops_eliminated++;
}
}
case BIT_IOR_EXPR:
if (integer_all_onesp (oelast->op))
{
- if (VEC_length (operand_entry_t, *ops) != 1)
+ if (ops->length () != 1)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Found | -1, removing all other ops\n");
- reassociate_stats.ops_eliminated
- += VEC_length (operand_entry_t, *ops) - 1;
+ reassociate_stats.ops_eliminated += ops->length () - 1;
- VEC_free (operand_entry_t, heap, *ops);
- *ops = NULL;
- VEC_safe_push (operand_entry_t, heap, *ops, oelast);
+ ops->truncate (0);
+ ops->quick_push (oelast);
return;
}
}
else if (integer_zerop (oelast->op))
{
- if (VEC_length (operand_entry_t, *ops) != 1)
+ if (ops->length () != 1)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Found | 0, removing\n");
- VEC_pop (operand_entry_t, *ops);
+ ops->pop ();
reassociate_stats.ops_eliminated++;
}
}
&& !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
&& real_zerop (oelast->op)))
{
- if (VEC_length (operand_entry_t, *ops) != 1)
+ if (ops->length () != 1)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Found * 0, removing all other ops\n");
- reassociate_stats.ops_eliminated
- += VEC_length (operand_entry_t, *ops) - 1;
- VEC_free (operand_entry_t, heap, *ops);
- *ops = NULL;
- VEC_safe_push (operand_entry_t, heap, *ops, oelast);
+ reassociate_stats.ops_eliminated += ops->length () - 1;
+ ops->truncate (0);
+ ops->quick_push (oelast);
return;
}
}
&& !HONOR_SNANS (TYPE_MODE (type))
&& real_onep (oelast->op)))
{
- if (VEC_length (operand_entry_t, *ops) != 1)
+ if (ops->length () != 1)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Found * 1, removing\n");
- VEC_pop (operand_entry_t, *ops);
+ ops->pop ();
reassociate_stats.ops_eliminated++;
return;
}
&& fold_real_zero_addition_p (type, oelast->op,
opcode == MINUS_EXPR)))
{
- if (VEC_length (operand_entry_t, *ops) != 1)
+ if (ops->length () != 1)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Found [|^+] 0, removing\n");
- VEC_pop (operand_entry_t, *ops);
+ ops->pop ();
reassociate_stats.ops_eliminated++;
return;
}
}
-static void linearize_expr_tree (VEC(operand_entry_t, heap) **, gimple,
+static void linearize_expr_tree (vec<operand_entry_t> *, gimple,
bool, bool);
/* Structure for tracking and counting operands. */
tree op;
} oecount;
-DEF_VEC_O(oecount);
-DEF_VEC_ALLOC_O(oecount,heap);
/* The heap for the oecount hashtable and the sorted list of operands. */
-static VEC (oecount, heap) *cvec;
+static vec<oecount> cvec;
/* Hash function for oecount. */
static hashval_t
oecount_hash (const void *p)
{
- const oecount *c = &VEC_index (oecount, cvec, (size_t)p - 42);
+ const oecount *c = &cvec[(size_t)p - 42];
return htab_hash_pointer (c->op) ^ (hashval_t)c->oecode;
}
static int
oecount_eq (const void *p1, const void *p2)
{
- const oecount *c1 = &VEC_index (oecount, cvec, (size_t)p1 - 42);
- const oecount *c2 = &VEC_index (oecount, cvec, (size_t)p2 - 42);
+ const oecount *c1 = &cvec[(size_t)p1 - 42];
+ const oecount *c2 = &cvec[(size_t)p2 - 42];
return (c1->oecode == c2->oecode
&& c1->op == c2->op);
}
static bool
undistribute_ops_list (enum tree_code opcode,
- VEC (operand_entry_t, heap) **ops, struct loop *loop)
+ vec<operand_entry_t> *ops, struct loop *loop)
{
- unsigned int length = VEC_length (operand_entry_t, *ops);
+ unsigned int length = ops->length ();
operand_entry_t oe1;
unsigned i, j;
sbitmap candidates, candidates2;
unsigned nr_candidates, nr_candidates2;
sbitmap_iterator sbi0;
- VEC (operand_entry_t, heap) **subops;
+ vec<operand_entry_t> *subops;
htab_t ctable;
bool changed = false;
int next_oecount_id = 0;
candidates = sbitmap_alloc (length);
bitmap_clear (candidates);
nr_candidates = 0;
- FOR_EACH_VEC_ELT (operand_entry_t, *ops, i, oe1)
+ FOR_EACH_VEC_ELT (*ops, i, oe1)
{
enum tree_code dcode;
gimple oe1def;
{
fprintf (dump_file, "searching for un-distribute opportunities ");
print_generic_expr (dump_file,
- VEC_index (operand_entry_t, *ops,
- bitmap_first_set_bit (candidates))->op, 0);
+ (*ops)[bitmap_first_set_bit (candidates)]->op, 0);
fprintf (dump_file, " %d\n", nr_candidates);
}
/* Build linearized sub-operand lists and the counting table. */
- cvec = NULL;
+ cvec.create (0);
ctable = htab_create (15, oecount_hash, oecount_eq, NULL);
- subops = XCNEWVEC (VEC (operand_entry_t, heap) *,
- VEC_length (operand_entry_t, *ops));
+ /* ??? Macro arguments cannot have multi-argument template types in
+ them. This typedef is needed to work around that limitation. */
+ typedef vec<operand_entry_t> vec_operand_entry_t_heap;
+ subops = XCNEWVEC (vec_operand_entry_t_heap, ops->length ());
EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
{
gimple oedef;
enum tree_code oecode;
unsigned j;
- oedef = SSA_NAME_DEF_STMT (VEC_index (operand_entry_t, *ops, i)->op);
+ oedef = SSA_NAME_DEF_STMT ((*ops)[i]->op);
oecode = gimple_assign_rhs_code (oedef);
linearize_expr_tree (&subops[i], oedef,
associative_tree_code (oecode), false);
- FOR_EACH_VEC_ELT (operand_entry_t, subops[i], j, oe1)
+ FOR_EACH_VEC_ELT (subops[i], j, oe1)
{
oecount c;
void **slot;
c.cnt = 1;
c.id = next_oecount_id++;
c.op = oe1->op;
- VEC_safe_push (oecount, heap, cvec, c);
- idx = VEC_length (oecount, cvec) + 41;
+ cvec.safe_push (c);
+ idx = cvec.length () + 41;
slot = htab_find_slot (ctable, (void *)idx, INSERT);
if (!*slot)
{
}
else
{
- VEC_pop (oecount, cvec);
- VEC_index (oecount, cvec, (size_t)*slot - 42).cnt++;
+ cvec.pop ();
+ cvec[(size_t)*slot - 42].cnt++;
}
}
}
htab_delete (ctable);
/* Sort the counting table. */
- VEC_qsort (oecount, cvec, oecount_cmp);
+ cvec.qsort (oecount_cmp);
if (dump_file && (dump_flags & TDF_DETAILS))
{
oecount *c;
fprintf (dump_file, "Candidates:\n");
- FOR_EACH_VEC_ELT (oecount, cvec, j, c)
+ FOR_EACH_VEC_ELT (cvec, j, c)
{
fprintf (dump_file, " %u %s: ", c->cnt,
c->oecode == MULT_EXPR
/* Process the (operand, code) pairs in order of most occurrence. */
candidates2 = sbitmap_alloc (length);
- while (!VEC_empty (oecount, cvec))
+ while (!cvec.is_empty ())
{
- oecount *c = &VEC_last (oecount, cvec);
+ oecount *c = &cvec.last ();
if (c->cnt < 2)
break;
gimple oedef;
enum tree_code oecode;
unsigned j;
- tree op = VEC_index (operand_entry_t, *ops, i)->op;
+ tree op = (*ops)[i]->op;
/* If we undistributed in this chain already this may be
a constant. */
if (oecode != c->oecode)
continue;
- FOR_EACH_VEC_ELT (operand_entry_t, subops[i], j, oe1)
+ FOR_EACH_VEC_ELT (subops[i], j, oe1)
{
if (oe1->op == c->op)
{
int first = bitmap_first_set_bit (candidates2);
/* Build the new addition chain. */
- oe1 = VEC_index (operand_entry_t, *ops, first);
+ oe1 = (*ops)[first];
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Building (");
EXECUTE_IF_SET_IN_BITMAP (candidates2, first+1, i, sbi0)
{
gimple sum;
- oe2 = VEC_index (operand_entry_t, *ops, i);
+ oe2 = (*ops)[i];
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " + ");
undistribution with this op. */
oe1->op = gimple_assign_lhs (prod);
oe1->rank = get_rank (oe1->op);
- VEC_free (operand_entry_t, heap, subops[first]);
+ subops[first].release ();
changed = true;
}
- VEC_pop (oecount, cvec);
+ cvec.pop ();
}
- for (i = 0; i < VEC_length (operand_entry_t, *ops); ++i)
- VEC_free (operand_entry_t, heap, subops[i]);
+ for (i = 0; i < ops->length (); ++i)
+ subops[i].release ();
free (subops);
- VEC_free (oecount, heap, cvec);
+ cvec.release ();
sbitmap_free (candidates);
sbitmap_free (candidates2);
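The typedef introduced above (vec_operand_entry_t_heap) exists because the preprocessor splits macro arguments on every top-level comma, including the one inside a template-id. A minimal illustration (invented names; vec<int, va_heap> stands in for any multi-argument instantiation):

    /* XCNEWVEC (vec<int, va_heap>, n) would hand the macro three
       "arguments", split at the comma inside the angle brackets.  */
    typedef vec<int, va_heap> int_heap_vec;
    int_heap_vec *slots = XCNEWVEC (int_heap_vec, 8);  /* zeroed = 8 empty vecs */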
static bool
eliminate_redundant_comparison (enum tree_code opcode,
- VEC (operand_entry_t, heap) **ops,
+ vec<operand_entry_t> *ops,
unsigned int currindex,
operand_entry_t curr)
{
op2 = gimple_assign_rhs2 (def1);
/* Now look for a similar comparison in the remaining OPS. */
- for (i = currindex + 1;
- VEC_iterate (operand_entry_t, *ops, i, oe);
- i++)
+ for (i = currindex + 1; ops->iterate (i, &oe); i++)
{
tree t;
/* Now we can delete oe, as it has been subsumed by the new combined
expression t. */
- VEC_ordered_remove (operand_entry_t, *ops, i);
+ ops->ordered_remove (i);
reassociate_stats.ops_eliminated ++;
/* If t is the same as curr->op, we're done. Otherwise we must
the current entry. */
if (TREE_CODE (t) == INTEGER_CST)
{
- VEC_ordered_remove (operand_entry_t, *ops, currindex);
+ ops->ordered_remove (currindex);
add_to_ops_vec (ops, t);
}
else if (!operand_equal_p (t, curr->op, 0))
static void
optimize_ops_list (enum tree_code opcode,
- VEC (operand_entry_t, heap) **ops)
+ vec<operand_entry_t> *ops)
{
- unsigned int length = VEC_length (operand_entry_t, *ops);
+ unsigned int length = ops->length ();
unsigned int i;
operand_entry_t oe;
operand_entry_t oelast = NULL;
if (length == 1)
return;
- oelast = VEC_last (operand_entry_t, *ops);
+ oelast = ops->last ();
/* If the last two are constants, pop the constants off, merge them
and try the next two. */
if (oelast->rank == 0 && is_gimple_min_invariant (oelast->op))
{
- operand_entry_t oelm1 = VEC_index (operand_entry_t, *ops, length - 2);
+ operand_entry_t oelm1 = (*ops)[length - 2];
if (oelm1->rank == 0
&& is_gimple_min_invariant (oelm1->op)
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Merging constants\n");
- VEC_pop (operand_entry_t, *ops);
- VEC_pop (operand_entry_t, *ops);
+ ops->pop ();
+ ops->pop ();
add_to_ops_vec (ops, folded);
reassociate_stats.constants_eliminated++;
eliminate_using_constants (opcode, ops);
oelast = NULL;
- for (i = 0; VEC_iterate (operand_entry_t, *ops, i, oe);)
+ for (i = 0; ops->iterate (i, &oe);)
{
bool done = false;
i++;
}
- length = VEC_length (operand_entry_t, *ops);
- oelast = VEC_last (operand_entry_t, *ops);
+ length = ops->length ();
+ oelast = ops->last ();
if (iterate)
optimize_ops_list (opcode, ops);
static bool
update_range_test (struct range_entry *range, struct range_entry *otherrange,
unsigned int count, enum tree_code opcode,
- VEC (operand_entry_t, heap) **ops, tree exp, bool in_p,
+ vec<operand_entry_t> *ops, tree exp, bool in_p,
tree low, tree high, bool strict_overflow_p)
{
- operand_entry_t oe = VEC_index (operand_entry_t, *ops, range->idx);
+ operand_entry_t oe = (*ops)[range->idx];
tree op = oe->op;
gimple stmt = op ? SSA_NAME_DEF_STMT (op) : last_stmt (BASIC_BLOCK (oe->id));
location_t loc = gimple_location (stmt);
for (range = otherrange; range < otherrange + count; range++)
{
- oe = VEC_index (operand_entry_t, *ops, range->idx);
+ oe = (*ops)[range->idx];
/* Now change all the other range test immediate uses, so that
those tests will be optimized away. */
if (opcode == ERROR_MARK)
static void
optimize_range_tests (enum tree_code opcode,
- VEC (operand_entry_t, heap) **ops)
+ vec<operand_entry_t> *ops)
{
- unsigned int length = VEC_length (operand_entry_t, *ops), i, j, first;
+ unsigned int length = ops->length (), i, j, first;
operand_entry_t oe;
struct range_entry *ranges;
bool any_changes = false;
ranges = XNEWVEC (struct range_entry, length);
for (i = 0; i < length; i++)
{
- oe = VEC_index (operand_entry_t, *ops, i);
+ oe = (*ops)[i];
ranges[i].idx = i;
init_range_entry (ranges + i, oe->op,
oe->op ? NULL : last_stmt (BASIC_BLOCK (oe->id)));
if (any_changes && opcode != ERROR_MARK)
{
j = 0;
- FOR_EACH_VEC_ELT (operand_entry_t, *ops, i, oe)
+ FOR_EACH_VEC_ELT (*ops, i, oe)
{
if (oe->op == error_mark_node)
continue;
else if (i != j)
- VEC_replace (operand_entry_t, *ops, j, oe);
+ (*ops)[j] = oe;
j++;
}
- VEC_truncate (operand_entry_t, *ops, j);
+ ops->truncate (j);
}
XDELETEVEC (ranges);
return true and fill in *OPS recursively. */
static bool
-get_ops (tree var, enum tree_code code, VEC(operand_entry_t, heap) **ops,
+get_ops (tree var, enum tree_code code, vec<operand_entry_t> *ops,
struct loop *loop)
{
gimple stmt = SSA_NAME_DEF_STMT (var);
oe->rank = code;
oe->id = 0;
oe->count = 1;
- VEC_safe_push (operand_entry_t, heap, *ops, oe);
+ ops->safe_push (oe);
}
return true;
}
basic_block bb;
edge_iterator ei;
edge e;
- VEC(operand_entry_t, heap) *ops = NULL;
+ vec<operand_entry_t> ops = vec<operand_entry_t>();
/* Consider only basic blocks that end with GIMPLE_COND or
a cast statement satisfying final_range_test_p. All
oe->rank = code;
oe->id = 0;
oe->count = 1;
- VEC_safe_push (operand_entry_t, heap, ops, oe);
+ ops.safe_push (oe);
}
continue;
}
is. */
oe->id = bb->index;
oe->count = 1;
- VEC_safe_push (operand_entry_t, heap, ops, oe);
+ ops.safe_push (oe);
}
if (bb == first_bb)
break;
}
- if (VEC_length (operand_entry_t, ops) > 1)
+ if (ops.length () > 1)
optimize_range_tests (ERROR_MARK, &ops);
- VEC_free (operand_entry_t, heap, ops);
+ ops.release ();
}
/* Return true if OPERAND is defined by a PHI node which uses the LHS
cases, but it is unlikely to be worth it. */
static void
-swap_ops_for_binary_stmt (VEC(operand_entry_t, heap) * ops,
+swap_ops_for_binary_stmt (vec<operand_entry_t> ops,
unsigned int opindex, gimple stmt)
{
operand_entry_t oe1, oe2, oe3;
- oe1 = VEC_index (operand_entry_t, ops, opindex);
- oe2 = VEC_index (operand_entry_t, ops, opindex + 1);
- oe3 = VEC_index (operand_entry_t, ops, opindex + 2);
+ oe1 = ops[opindex];
+ oe2 = ops[opindex + 1];
+ oe3 = ops[opindex + 2];
if ((oe1->rank == oe2->rank
&& oe2->rank != oe3->rank)
static void
rewrite_expr_tree (gimple stmt, unsigned int opindex,
- VEC(operand_entry_t, heap) * ops, bool moved)
+ vec<operand_entry_t> ops, bool moved)
{
tree rhs1 = gimple_assign_rhs1 (stmt);
tree rhs2 = gimple_assign_rhs2 (stmt);
/* If we have three operands left, then we want to make sure the ones
that get the double binary op are chosen wisely. */
- if (opindex + 3 == VEC_length (operand_entry_t, ops))
+ if (opindex + 3 == ops.length ())
swap_ops_for_binary_stmt (ops, opindex, stmt);
/* The final recursion case for this function is that you have
If we had exactly one op in the entire list to start with, we
would have never called this function, and the tail recursion
rewrites them one at a time. */
- if (opindex + 2 == VEC_length (operand_entry_t, ops))
+ if (opindex + 2 == ops.length ())
{
operand_entry_t oe1, oe2;
- oe1 = VEC_index (operand_entry_t, ops, opindex);
- oe2 = VEC_index (operand_entry_t, ops, opindex + 1);
+ oe1 = ops[opindex];
+ oe2 = ops[opindex + 1];
if (rhs1 != oe1->op || rhs2 != oe2->op)
{
}
/* If we hit here, we should have 3 or more ops left. */
- gcc_assert (opindex + 2 < VEC_length (operand_entry_t, ops));
+ gcc_assert (opindex + 2 < ops.length ());
/* Rewrite the next operator. */
- oe = VEC_index (operand_entry_t, ops, opindex);
+ oe = ops[opindex];
if (oe->op != rhs2)
{
unsigned int count;
gsinow = gsi_for_stmt (stmt);
- count = VEC_length (operand_entry_t, ops) - opindex - 2;
+ count = ops.length () - opindex - 2;
while (count-- != 0)
{
stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt1));
static void
rewrite_expr_tree_parallel (gimple stmt, int width,
- VEC(operand_entry_t, heap) * ops)
+ vec<operand_entry_t> ops)
{
enum tree_code opcode = gimple_assign_rhs_code (stmt);
- int op_num = VEC_length (operand_entry_t, ops);
+ int op_num = ops.length ();
int stmt_num = op_num - 1;
gimple *stmts = XALLOCAVEC (gimple, stmt_num);
int op_index = op_num - 1;
if (ready_stmts_end > stmt_index)
op2 = gimple_assign_lhs (stmts[stmt_index++]);
else if (op_index >= 0)
- op2 = VEC_index (operand_entry_t, ops, op_index--)->op;
+ op2 = ops[op_index--]->op;
else
{
gcc_assert (stmt_index < i);
{
if (op_index > 1)
swap_ops_for_binary_stmt (ops, op_index - 2, NULL);
- op2 = VEC_index (operand_entry_t, ops, op_index--)->op;
- op1 = VEC_index (operand_entry_t, ops, op_index--)->op;
+ op2 = ops[op_index--]->op;
+ op1 = ops[op_index--]->op;
}
/* If we emit the last statement then we should put
Place the operands of the expression tree in the vector named OPS. */
static void
-linearize_expr_tree (VEC(operand_entry_t, heap) **ops, gimple stmt,
+linearize_expr_tree (vec<operand_entry_t> *ops, gimple stmt,
bool is_associative, bool set_visited)
{
tree binlhs = gimple_assign_rhs1 (stmt);
unsigned int i = 0;
tree negate;
- FOR_EACH_VEC_ELT (tree, plus_negates, i, negate)
+ FOR_EACH_VEC_ELT (plus_negates, i, negate)
{
gimple user = get_single_immediate_use (negate);
gimple_assign_set_rhs_with_ops (&gsi2, NEGATE_EXPR, negate, NULL);
update_stmt (gsi_stmt (gsi2));
gsi_move_before (&gsi, &gsi2);
- VEC_safe_push (tree, heap, plus_negates,
- gimple_assign_lhs (gsi_stmt (gsi2)));
+ plus_negates.safe_push (gimple_assign_lhs (gsi_stmt (gsi2)));
}
else
{
}
else if (gimple_assign_rhs_code (stmt) == NEGATE_EXPR
&& can_reassociate_p (gimple_assign_rhs1 (stmt)))
- VEC_safe_push (tree, heap, plus_negates, gimple_assign_lhs (stmt));
+ plus_negates.safe_push (gimple_assign_lhs (stmt));
}
for (son = first_dom_son (CDI_DOMINATORS, bb);
son;
typedef struct repeat_factor_d repeat_factor, *repeat_factor_t;
typedef const struct repeat_factor_d *const_repeat_factor_t;
-DEF_VEC_O (repeat_factor);
-DEF_VEC_ALLOC_O (repeat_factor, heap);
-static VEC (repeat_factor, heap) *repeat_factor_vec;
+static vec<repeat_factor> repeat_factor_vec;
/* Used for sorting the repeat factor vector. Sort primarily by
ascending occurrence count, secondarily by descending rank. */
SSA name representing the value of the replacement sequence. */
static tree
-attempt_builtin_powi (gimple stmt, VEC(operand_entry_t, heap) **ops)
+attempt_builtin_powi (gimple stmt, vec<operand_entry_t> *ops)
{
unsigned i, j, vec_len;
int ii;
return NULL_TREE;
/* Allocate the repeated factor vector. */
- repeat_factor_vec = VEC_alloc (repeat_factor, heap, 10);
+ repeat_factor_vec.create (10);
/* Scan the OPS vector for all SSA names in the product and build
up a vector of occurrence counts for each factor. */
- FOR_EACH_VEC_ELT (operand_entry_t, *ops, i, oe)
+ FOR_EACH_VEC_ELT (*ops, i, oe)
{
if (TREE_CODE (oe->op) == SSA_NAME)
{
- FOR_EACH_VEC_ELT (repeat_factor, repeat_factor_vec, j, rf1)
+ FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
{
if (rf1->factor == oe->op)
{
}
}
- if (j >= VEC_length (repeat_factor, repeat_factor_vec))
+ if (j >= repeat_factor_vec.length ())
{
rfnew.factor = oe->op;
rfnew.rank = oe->rank;
rfnew.count = oe->count;
rfnew.repr = NULL_TREE;
- VEC_safe_push (repeat_factor, heap, repeat_factor_vec, rfnew);
+ repeat_factor_vec.safe_push (rfnew);
}
}
}
/* Sort the repeated factor vector by (a) increasing occurrence count,
and (b) decreasing rank. */
- VEC_qsort (repeat_factor, repeat_factor_vec, compare_repeat_factors);
+ repeat_factor_vec.qsort (compare_repeat_factors);
/* It is generally best to combine as many base factors as possible
into a product before applying __builtin_powi to the result.
t5 = t3 * t4
result = t5 * y */
- vec_len = VEC_length (repeat_factor, repeat_factor_vec);
+ vec_len = repeat_factor_vec.length ();
/* Repeatedly look for opportunities to create a builtin_powi call. */
while (true)
it if the minimum occurrence count for its factors is at
least 2, or just use this cached product as our next
multiplicand if the minimum occurrence count is 1. */
- FOR_EACH_VEC_ELT (repeat_factor, repeat_factor_vec, j, rf1)
+ FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
{
if (rf1->repr && rf1->count > 0)
break;
fputs ("Multiplying by cached product ", dump_file);
for (elt = j; elt < vec_len; elt++)
{
- rf = &VEC_index (repeat_factor, repeat_factor_vec, elt);
+ rf = &repeat_factor_vec[elt];
print_generic_expr (dump_file, rf->factor, 0);
if (elt < vec_len - 1)
fputs (" * ", dump_file);
dump_file);
for (elt = j; elt < vec_len; elt++)
{
- rf = &VEC_index (repeat_factor, repeat_factor_vec, elt);
+ rf = &repeat_factor_vec[elt];
print_generic_expr (dump_file, rf->factor, 0);
if (elt < vec_len - 1)
fputs (" * ", dump_file);
vector whose occurrence count is at least 2. If no such
factor exists, there are no builtin_powi opportunities
remaining. */
- FOR_EACH_VEC_ELT (repeat_factor, repeat_factor_vec, j, rf1)
+ FOR_EACH_VEC_ELT (repeat_factor_vec, j, rf1)
{
if (rf1->count >= 2)
break;
fputs ("Building __builtin_pow call for (", dump_file);
for (elt = j; elt < vec_len; elt++)
{
- rf = &VEC_index (repeat_factor, repeat_factor_vec, elt);
+ rf = &repeat_factor_vec[elt];
print_generic_expr (dump_file, rf->factor, 0);
if (elt < vec_len - 1)
fputs (" * ", dump_file);
{
tree op1, op2;
- rf1 = &VEC_index (repeat_factor, repeat_factor_vec, ii);
- rf2 = &VEC_index (repeat_factor, repeat_factor_vec, ii + 1);
+ rf1 = &repeat_factor_vec[ii];
+ rf2 = &repeat_factor_vec[ii + 1];
/* Init the last factor's representative to be itself. */
if (!rf2->repr)
/* Form a call to __builtin_powi for the maximum product
just formed, raised to the power obtained earlier. */
- rf1 = &VEC_index (repeat_factor, repeat_factor_vec, j);
+ rf1 = &repeat_factor_vec[j];
iter_result = make_temp_ssa_name (type, NULL, "reassocpow");
pow_stmt = gimple_build_call (powi_fndecl, 2, rf1->repr,
build_int_cst (integer_type_node,
unsigned k = power;
unsigned n;
- rf1 = &VEC_index (repeat_factor, repeat_factor_vec, i);
+ rf1 = &repeat_factor_vec[i];
rf1->count -= power;
- FOR_EACH_VEC_ELT_REVERSE (operand_entry_t, *ops, n, oe)
+ FOR_EACH_VEC_ELT_REVERSE (*ops, n, oe)
{
if (oe->op == rf1->factor)
{
if (oe->count <= k)
{
- VEC_ordered_remove (operand_entry_t, *ops, n);
+ ops->ordered_remove (n);
k -= oe->count;
if (k == 0)
remaining occurrence count of 0 or 1, and those with a count of 1
don't have cached representatives. Re-sort the ops vector and
clean up. */
- VEC_qsort (operand_entry_t, *ops, sort_by_operand_rank);
- VEC_free (repeat_factor, heap, repeat_factor_vec);
+ ops->qsort (sort_by_operand_rank);
+ repeat_factor_vec.release ();
/* Return the final product computed herein. Note that there may
still be some elements with single occurrence count left in OPS;
if (associative_tree_code (rhs_code))
{
- VEC(operand_entry_t, heap) *ops = NULL;
+ vec<operand_entry_t> ops = vec<operand_entry_t>();
tree powi_result = NULL_TREE;
/* There may be no immediate uses left by the time we
gimple_set_visited (stmt, true);
linearize_expr_tree (&ops, stmt, true, true);
- VEC_qsort (operand_entry_t, ops, sort_by_operand_rank);
+ ops.qsort (sort_by_operand_rank);
optimize_ops_list (rhs_code, &ops);
if (undistribute_ops_list (rhs_code, &ops,
loop_containing_stmt (stmt)))
{
- VEC_qsort (operand_entry_t, ops, sort_by_operand_rank);
+ ops.qsort (sort_by_operand_rank);
optimize_ops_list (rhs_code, &ops);
}
/* If the operand vector is now empty, all operands were
consumed by the __builtin_powi optimization. */
- if (VEC_length (operand_entry_t, ops) == 0)
+ if (ops.length () == 0)
transform_stmt_to_copy (&gsi, stmt, powi_result);
- else if (VEC_length (operand_entry_t, ops) == 1)
+ else if (ops.length () == 1)
{
- tree last_op = VEC_last (operand_entry_t, ops)->op;
+ tree last_op = ops.last ()->op;
if (powi_result)
transform_stmt_to_multiply (&gsi, stmt, last_op,
else
{
enum machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- int ops_num = VEC_length (operand_entry_t, ops);
+ int ops_num = ops.length ();
int width = get_reassociation_width (ops_num, rhs_code, mode);
if (dump_file && (dump_flags & TDF_DETAILS))
"Width = %d was chosen for reassociation\n", width);
if (width > 1
- && VEC_length (operand_entry_t, ops) > 3)
+ && ops.length () > 3)
rewrite_expr_tree_parallel (stmt, width, ops);
else
rewrite_expr_tree (stmt, 0, ops, false);
}
}
- VEC_free (operand_entry_t, heap, ops);
+ ops.release ();
}
}
}
reassociate_bb (son);
}
-void dump_ops_vector (FILE *file, VEC (operand_entry_t, heap) *ops);
-void debug_ops_vector (VEC (operand_entry_t, heap) *ops);
+void dump_ops_vector (FILE *file, vec<operand_entry_t> ops);
+void debug_ops_vector (vec<operand_entry_t> ops);
/* Dump the operand entry vector OPS to FILE. */
void
-dump_ops_vector (FILE *file, VEC (operand_entry_t, heap) *ops)
+dump_ops_vector (FILE *file, vec<operand_entry_t> ops)
{
operand_entry_t oe;
unsigned int i;
- FOR_EACH_VEC_ELT (operand_entry_t, ops, i, oe)
+ FOR_EACH_VEC_ELT (ops, i, oe)
{
fprintf (file, "Op %d -> rank: %d, tree: ", i, oe->rank);
print_generic_expr (file, oe->op, 0);
/* Dump the operand entry vector OPS to STDERR. */
DEBUG_FUNCTION void
-debug_ops_vector (VEC (operand_entry_t, heap) *ops)
+debug_ops_vector (vec<operand_entry_t> ops)
{
dump_ops_vector (stderr, ops);
}
free (bbs);
calculate_dominance_info (CDI_POST_DOMINATORS);
- plus_negates = NULL;
+ plus_negates = vec<tree>();
}
/* Cleanup after the reassociation pass, and print stats if
pointer_map_destroy (operand_rank);
free_alloc_pool (operand_entry_pool);
free (bb_rank);
- VEC_free (tree, heap, plus_negates);
+ plus_negates.release ();
free_dominance_info (CDI_POST_DOMINATORS);
loop_optimizer_finalize ();
}
detection. */
static unsigned int next_dfs_num;
-static VEC (tree, heap) *sccstack;
+static vec<tree> sccstack;
-DEF_VEC_P(vn_ssa_aux_t);
-DEF_VEC_ALLOC_P(vn_ssa_aux_t, heap);
/* Table of vn_ssa_aux_t's, one per ssa_name. The vn_ssa_aux_t objects
are allocated on an obstack for locality reasons, and to free them
- without looping over the VEC. */
+ without looping over the vec. */
-static VEC (vn_ssa_aux_t, heap) *vn_ssa_aux_table;
+static vec<vn_ssa_aux_t> vn_ssa_aux_table;
static struct obstack vn_ssa_aux_obstack;
/* Return the value numbering information for a given SSA name. */
vn_ssa_aux_t
VN_INFO (tree name)
{
- vn_ssa_aux_t res = VEC_index (vn_ssa_aux_t, vn_ssa_aux_table,
- SSA_NAME_VERSION (name));
+ vn_ssa_aux_t res = vn_ssa_aux_table[SSA_NAME_VERSION (name)];
gcc_checking_assert (res);
return res;
}
static inline void
VN_INFO_SET (tree name, vn_ssa_aux_t value)
{
- VEC_replace (vn_ssa_aux_t, vn_ssa_aux_table,
- SSA_NAME_VERSION (name), value);
+ vn_ssa_aux_table[SSA_NAME_VERSION (name)] = value;
}
/* Initialize the value numbering info for a given SSA name.
newinfo = XOBNEW (&vn_ssa_aux_obstack, struct vn_ssa_aux);
memset (newinfo, 0, sizeof (struct vn_ssa_aux));
- if (SSA_NAME_VERSION (name) >= VEC_length (vn_ssa_aux_t, vn_ssa_aux_table))
- VEC_safe_grow (vn_ssa_aux_t, heap, vn_ssa_aux_table,
- SSA_NAME_VERSION (name) + 1);
- VEC_replace (vn_ssa_aux_t, vn_ssa_aux_table,
- SSA_NAME_VERSION (name), newinfo);
+ if (SSA_NAME_VERSION (name) >= vn_ssa_aux_table.length ())
+ vn_ssa_aux_table.safe_grow (SSA_NAME_VERSION (name) + 1);
+ vn_ssa_aux_table[SSA_NAME_VERSION (name)] = newinfo;
return newinfo;
}
free_phi (void *vp)
{
vn_phi_t phi = (vn_phi_t) vp;
- VEC_free (tree, heap, phi->phiargs);
+ phi->phiargs.release ();
}
/* Free a reference operation structure VP. */
free_reference (void *vp)
{
vn_reference_t vr = (vn_reference_t) vp;
- VEC_free (vn_reference_op_s, heap, vr->operands);
+ vr->operands.release ();
}
/* Hash table equality function for vn_constant_t. */
HOST_WIDE_INT off = -1;
bool deref = false;
- FOR_EACH_VEC_ELT (vn_reference_op_s, vr1->operands, i, vro)
+ FOR_EACH_VEC_ELT (vr1->operands, i, vro)
{
if (vro->opcode == MEM_REF)
deref = true;
vn_reference_op_t vro1, vro2;
vn_reference_op_s tem1, tem2;
bool deref1 = false, deref2 = false;
- for (; VEC_iterate (vn_reference_op_s, vr1->operands, i, vro1); i++)
+ for (; vr1->operands.iterate (i, &vro1); i++)
{
if (vro1->opcode == MEM_REF)
deref1 = true;
break;
off1 += vro1->off;
}
- for (; VEC_iterate (vn_reference_op_s, vr2->operands, j, vro2); j++)
+ for (; vr2->operands.iterate (j, &vro2); j++)
{
if (vro2->opcode == MEM_REF)
deref2 = true;
++j;
++i;
}
- while (VEC_length (vn_reference_op_s, vr1->operands) != i
- || VEC_length (vn_reference_op_s, vr2->operands) != j);
+ while (vr1->operands.length () != i
+ || vr2->operands.length () != j);
return true;
}
vn_reference_op_s's. */
void
-copy_reference_ops_from_ref (tree ref, VEC(vn_reference_op_s, heap) **result)
+copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result)
{
if (TREE_CODE (ref) == TARGET_MEM_REF)
{
temp.op1 = TMR_STEP (ref);
temp.op2 = TMR_OFFSET (ref);
temp.off = -1;
- VEC_safe_push (vn_reference_op_s, heap, *result, temp);
+ result->safe_push (temp);
memset (&temp, 0, sizeof (temp));
temp.type = NULL_TREE;
temp.opcode = ERROR_MARK;
temp.op0 = TMR_INDEX2 (ref);
temp.off = -1;
- VEC_safe_push (vn_reference_op_s, heap, *result, temp);
+ result->safe_push (temp);
memset (&temp, 0, sizeof (temp));
temp.type = NULL_TREE;
temp.opcode = TREE_CODE (TMR_BASE (ref));
temp.op0 = TMR_BASE (ref);
temp.off = -1;
- VEC_safe_push (vn_reference_op_s, heap, *result, temp);
+ result->safe_push (temp);
return;
}
temp.opcode = MEM_REF;
temp.op0 = build_int_cst (build_pointer_type (TREE_TYPE (ref)), 0);
temp.off = 0;
- VEC_safe_push (vn_reference_op_s, heap, *result, temp);
+ result->safe_push (temp);
temp.opcode = ADDR_EXPR;
temp.op0 = build_fold_addr_expr (ref);
temp.type = TREE_TYPE (temp.op0);
default:
gcc_unreachable ();
}
- VEC_safe_push (vn_reference_op_s, heap, *result, temp);
+ result->safe_push (temp);
if (REFERENCE_CLASS_P (ref)
|| TREE_CODE (ref) == MODIFY_EXPR
bool
ao_ref_init_from_vn_reference (ao_ref *ref,
alias_set_type set, tree type,
- VEC (vn_reference_op_s, heap) *ops)
+ vec<vn_reference_op_s> ops)
{
vn_reference_op_t op;
unsigned i;
alias_set_type base_alias_set = -1;
/* First get the final access size from just the outermost expression. */
- op = &VEC_index (vn_reference_op_s, ops, 0);
+ op = &ops[0];
if (op->opcode == COMPONENT_REF)
size_tree = DECL_SIZE (op->op0);
else if (op->opcode == BIT_FIELD_REF)
/* Compute cumulative bit-offset for nested component-refs and array-refs,
and find the ultimate containing object. */
- FOR_EACH_VEC_ELT (vn_reference_op_s, ops, i, op)
+ FOR_EACH_VEC_ELT (ops, i, op)
{
switch (op->opcode)
{
&& op->op0
&& DECL_P (TREE_OPERAND (op->op0, 0)))
{
- vn_reference_op_t pop = &VEC_index (vn_reference_op_s, ops, i-1);
+ vn_reference_op_t pop = &ops[i-1];
base = TREE_OPERAND (op->op0, 0);
if (pop->off == -1)
{
void
copy_reference_ops_from_call (gimple call,
- VEC(vn_reference_op_s, heap) **result)
+ vec<vn_reference_op_s> *result)
{
vn_reference_op_s temp;
unsigned i;
temp.type = TREE_TYPE (lhs);
temp.op0 = lhs;
temp.off = -1;
- VEC_safe_push (vn_reference_op_s, heap, *result, temp);
+ result->safe_push (temp);
}
/* Copy the type, opcode, function being called and static chain. */
temp.op0 = gimple_call_fn (call);
temp.op1 = gimple_call_chain (call);
temp.off = -1;
- VEC_safe_push (vn_reference_op_s, heap, *result, temp);
+ result->safe_push (temp);
/* Copy the call arguments. As they can be references as well,
just chain them together. */
/* Create a vector of vn_reference_op_s structures from REF, a
REFERENCE_CLASS_P tree. The vector is not shared. */
-static VEC(vn_reference_op_s, heap) *
+static vec<vn_reference_op_s>
create_reference_ops_from_ref (tree ref)
{
- VEC (vn_reference_op_s, heap) *result = NULL;
+ vec<vn_reference_op_s> result = vec<vn_reference_op_s>();
copy_reference_ops_from_ref (ref, &result);
return result;
/* Create a vector of vn_reference_op_s structures from CALL, a
call statement. The vector is not shared. */
-static VEC(vn_reference_op_s, heap) *
+static vec<vn_reference_op_s>
create_reference_ops_from_call (gimple call)
{
- VEC (vn_reference_op_s, heap) *result = NULL;
+ vec<vn_reference_op_s> result = vec<vn_reference_op_s>();
copy_reference_ops_from_call (call, &result);
return result;
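Both create_reference_ops_from_* helpers now return the vec by value: since vec<T> is a pointer-sized control block, returning it is a shallow copy that hands ownership of the heap storage to the caller. Sketch (invented names):

    static vec<int>
    make_ops ()
    {
      vec<int> result = vec<int>();
      result.safe_push (42);
      return result;            /* shallow copy; storage is not duplicated */
    }

    void
    caller ()
    {
      vec<int> ops = make_ops ();
      /* ... use ops ... */
      ops.release ();           /* the caller owns and frees the storage */
    }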
/* Fold *& at position *I_P in a vn_reference_op_s vector *OPS. Updates
*I_P to point to the last element of the replacement. */
void
-vn_reference_fold_indirect (VEC (vn_reference_op_s, heap) **ops,
+vn_reference_fold_indirect (vec<vn_reference_op_s> *ops,
unsigned int *i_p)
{
unsigned int i = *i_p;
- vn_reference_op_t op = &VEC_index (vn_reference_op_s, *ops, i);
- vn_reference_op_t mem_op = &VEC_index (vn_reference_op_s, *ops, i - 1);
+ vn_reference_op_t op = &(*ops)[i];
+ vn_reference_op_t mem_op = &(*ops)[i - 1];
tree addr_base;
HOST_WIDE_INT addr_offset = 0;
/* Fold *& at position *I_P in a vn_reference_op_s vector *OPS. Updates
*I_P to point to the last element of the replacement. */
static void
-vn_reference_maybe_forwprop_address (VEC (vn_reference_op_s, heap) **ops,
+vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
unsigned int *i_p)
{
unsigned int i = *i_p;
- vn_reference_op_t op = &VEC_index (vn_reference_op_s, *ops, i);
- vn_reference_op_t mem_op = &VEC_index (vn_reference_op_s, *ops, i - 1);
+ vn_reference_op_t op = &(*ops)[i];
+ vn_reference_op_t mem_op = &(*ops)[i - 1];
gimple def_stmt;
enum tree_code code;
double_int off;
tree
fully_constant_vn_reference_p (vn_reference_t ref)
{
- VEC (vn_reference_op_s, heap) *operands = ref->operands;
+ vec<vn_reference_op_s> operands = ref->operands;
vn_reference_op_t op;
/* Try to simplify the translated expression if it is
a call to a builtin function with at most two arguments. */
- op = &VEC_index (vn_reference_op_s, operands, 0);
+ op = &operands[0];
if (op->opcode == CALL_EXPR
&& TREE_CODE (op->op0) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (op->op0, 0)) == FUNCTION_DECL
&& DECL_BUILT_IN (TREE_OPERAND (op->op0, 0))
- && VEC_length (vn_reference_op_s, operands) >= 2
- && VEC_length (vn_reference_op_s, operands) <= 3)
+ && operands.length () >= 2
+ && operands.length () <= 3)
{
vn_reference_op_t arg0, arg1 = NULL;
bool anyconst = false;
- arg0 = &VEC_index (vn_reference_op_s, operands, 1);
- if (VEC_length (vn_reference_op_s, operands) > 2)
- arg1 = &VEC_index (vn_reference_op_s, operands, 2);
+ arg0 = &operands[1];
+ if (operands.length () > 2)
+ arg1 = &operands[2];
if (TREE_CODE_CLASS (arg0->opcode) == tcc_constant
|| (arg0->opcode == ADDR_EXPR
&& is_gimple_min_invariant (arg0->op0)))
else if (op->opcode == ARRAY_REF
&& TREE_CODE (op->op0) == INTEGER_CST
&& integer_zerop (op->op1)
- && VEC_length (vn_reference_op_s, operands) == 2)
+ && operands.length () == 2)
{
vn_reference_op_t arg0;
- arg0 = &VEC_index (vn_reference_op_s, operands, 1);
+ arg0 = &operands[1];
if (arg0->opcode == STRING_CST
&& (TYPE_MODE (op->type)
== TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0->op0))))
the vector passed in is returned. *VALUEIZED_ANYTHING will specify
whether any operands were valueized. */
-static VEC (vn_reference_op_s, heap) *
-valueize_refs_1 (VEC (vn_reference_op_s, heap) *orig, bool *valueized_anything)
+static vec<vn_reference_op_s>
+valueize_refs_1 (vec<vn_reference_op_s> orig, bool *valueized_anything)
{
vn_reference_op_t vro;
unsigned int i;
*valueized_anything = false;
- FOR_EACH_VEC_ELT (vn_reference_op_s, orig, i, vro)
+ FOR_EACH_VEC_ELT (orig, i, vro)
{
if (vro->opcode == SSA_NAME
|| (vro->op0 && TREE_CODE (vro->op0) == SSA_NAME))
if (i > 0
&& vro->op0
&& TREE_CODE (vro->op0) == ADDR_EXPR
- && VEC_index (vn_reference_op_s,
- orig, i - 1).opcode == MEM_REF)
+ && orig[i - 1].opcode == MEM_REF)
vn_reference_fold_indirect (&orig, &i);
else if (i > 0
&& vro->opcode == SSA_NAME
- && VEC_index (vn_reference_op_s,
- orig, i - 1).opcode == MEM_REF)
+ && orig[i - 1].opcode == MEM_REF)
vn_reference_maybe_forwprop_address (&orig, &i);
/* If it transforms a non-constant ARRAY_REF into a constant
one, adjust the constant offset. */
return orig;
}
-static VEC (vn_reference_op_s, heap) *
-valueize_refs (VEC (vn_reference_op_s, heap) *orig)
+static vec<vn_reference_op_s>
+valueize_refs (vec<vn_reference_op_s> orig)
{
bool tem;
return valueize_refs_1 (orig, &tem);
}
-static VEC(vn_reference_op_s, heap) *shared_lookup_references;
+static vec<vn_reference_op_s> shared_lookup_references;
/* Create a vector of vn_reference_op_s structures from REF, a
REFERENCE_CLASS_P tree. The vector is shared among all callers of
this function. *VALUEIZED_ANYTHING will specify whether any
operands were valueized. */
-static VEC(vn_reference_op_s, heap) *
+static vec<vn_reference_op_s>
valueize_shared_reference_ops_from_ref (tree ref, bool *valueized_anything)
{
if (!ref)
- return NULL;
- VEC_truncate (vn_reference_op_s, shared_lookup_references, 0);
+ return vec<vn_reference_op_s>();
+ shared_lookup_references.truncate (0);
copy_reference_ops_from_ref (ref, &shared_lookup_references);
shared_lookup_references = valueize_refs_1 (shared_lookup_references,
valueized_anything);
call statement. The vector is shared among all callers of
this function. */
-static VEC(vn_reference_op_s, heap) *
+static vec<vn_reference_op_s>
valueize_shared_reference_ops_from_call (gimple call)
{
if (!call)
- return NULL;
- VEC_truncate (vn_reference_op_s, shared_lookup_references, 0);
+ return vec<vn_reference_op_s>();
+ shared_lookup_references.truncate (0);
copy_reference_ops_from_call (call, &shared_lookup_references);
shared_lookup_references = valueize_refs (shared_lookup_references);
return shared_lookup_references;
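shared_lookup_references shows the reuse idiom behind both shared-ops helpers: truncate (0) empties the contents while keeping the allocation, so repeated lookups avoid a malloc/free pair each time. Sketch (invented names):

    static vec<int> scratch;

    static vec<int>
    fill_scratch (int a, int b)
    {
      scratch.truncate (0);     /* drop old contents, keep the capacity */
      scratch.safe_push (a);
      scratch.safe_push (b);
      return scratch;           /* callers share the storage; they must not release it */
    }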
vn_reference_lookup_or_insert_for_pieces (tree vuse,
alias_set_type set,
tree type,
- VEC (vn_reference_op_s,
- heap) *operands,
+ vec<vn_reference_op_s,
+ va_heap> operands,
tree value)
{
struct vn_reference_s vr1;
else
value_id = get_or_alloc_constant_value_id (value);
return vn_reference_insert_pieces (vuse, set, type,
- VEC_copy (vn_reference_op_s, heap,
- operands), value, value_id);
+ operands.copy (), value, value_id);
}
/* Callback for walk_non_aliased_vuses. Tries to perform a lookup
gimple def_stmt = SSA_NAME_DEF_STMT (vuse);
tree base;
HOST_WIDE_INT offset, maxsize;
- static VEC (vn_reference_op_s, heap) *lhs_ops = NULL;
+ static vec<vn_reference_op_s> lhs_ops = vec<vn_reference_op_s>();
ao_ref lhs_ref;
bool lhs_ref_ok = false;
/* First try to disambiguate after value-replacing in the definitions LHS. */
if (is_gimple_assign (def_stmt))
{
- VEC (vn_reference_op_s, heap) *tem;
+ vec<vn_reference_op_s> tem;
tree lhs = gimple_assign_lhs (def_stmt);
bool valueized_anything = false;
/* Avoid re-allocation overhead. */
- VEC_truncate (vn_reference_op_s, lhs_ops, 0);
+ lhs_ops.truncate (0);
copy_reference_ops_from_ref (lhs, &lhs_ops);
tem = lhs_ops;
lhs_ops = valueize_refs_1 (lhs_ops, &valueized_anything);
tree base2;
HOST_WIDE_INT offset2, size2, maxsize2;
int i, j;
- VEC (vn_reference_op_s, heap) *rhs = NULL;
+ vec<vn_reference_op_s> rhs = vec<vn_reference_op_s>();
vn_reference_op_t vro;
ao_ref r;
/* Find the common base of ref and the lhs. lhs_ops already
contains valueized operands for the lhs. */
- i = VEC_length (vn_reference_op_s, vr->operands) - 1;
- j = VEC_length (vn_reference_op_s, lhs_ops) - 1;
+ i = vr->operands.length () - 1;
+ j = lhs_ops.length () - 1;
while (j >= 0 && i >= 0
- && vn_reference_op_eq (&VEC_index (vn_reference_op_s,
- vr->operands, i),
- &VEC_index (vn_reference_op_s, lhs_ops, j)))
+ && vn_reference_op_eq (&vr->operands[i], &lhs_ops[j]))
{
i--;
j--;
don't care here - further lookups with the rewritten operands
will simply fail if we messed up types too badly. */
if (j == 0 && i >= 0
- && VEC_index (vn_reference_op_s, lhs_ops, 0).opcode == MEM_REF
- && VEC_index (vn_reference_op_s, lhs_ops, 0).off != -1
- && (VEC_index (vn_reference_op_s, lhs_ops, 0).off
- == VEC_index (vn_reference_op_s, vr->operands, i).off))
+ && lhs_ops[0].opcode == MEM_REF
+ && lhs_ops[0].off != -1
+ && (lhs_ops[0].off == vr->operands[i].off))
i--, j--;
/* i now points to the first additional op.
/* Now re-write REF to be based on the rhs of the assignment. */
copy_reference_ops_from_ref (gimple_assign_rhs1 (def_stmt), &rhs);
/* We need to pre-pend vr->operands[0..i] to rhs. */
- if (i + 1 + VEC_length (vn_reference_op_s, rhs)
- > VEC_length (vn_reference_op_s, vr->operands))
+ if (i + 1 + rhs.length () > vr->operands.length ())
{
- VEC (vn_reference_op_s, heap) *old = vr->operands;
- VEC_safe_grow (vn_reference_op_s, heap, vr->operands,
- i + 1 + VEC_length (vn_reference_op_s, rhs));
+ vec<vn_reference_op_s> old = vr->operands;
+ vr->operands.safe_grow (i + 1 + rhs.length ());
if (old == shared_lookup_references
&& vr->operands != old)
- shared_lookup_references = NULL;
+ shared_lookup_references = vec<vn_reference_op_s>();
}
else
- VEC_truncate (vn_reference_op_s, vr->operands,
- i + 1 + VEC_length (vn_reference_op_s, rhs));
- FOR_EACH_VEC_ELT (vn_reference_op_s, rhs, j, vro)
- VEC_replace (vn_reference_op_s, vr->operands, i + 1 + j, *vro);
- VEC_free (vn_reference_op_s, heap, rhs);
+ vr->operands.truncate (i + 1 + rhs.length ());
+ FOR_EACH_VEC_ELT (rhs, j, vro)
+ vr->operands[i + 1 + j] = *vro;
+ rhs.release ();
vr->operands = valueize_refs (vr->operands);
vr->hashcode = vn_reference_compute_hash (vr);
return (void *)-1;
/* Make room for 2 operands in the new reference. */
- if (VEC_length (vn_reference_op_s, vr->operands) < 2)
+ if (vr->operands.length () < 2)
{
- VEC (vn_reference_op_s, heap) *old = vr->operands;
- VEC_safe_grow (vn_reference_op_s, heap, vr->operands, 2);
+ vec<vn_reference_op_s> old = vr->operands;
+ vr->operands.safe_grow_cleared (2);
if (old == shared_lookup_references
&& vr->operands != old)
- shared_lookup_references = NULL;
+ shared_lookup_references.create (0);
}
else
- VEC_truncate (vn_reference_op_s, vr->operands, 2);
+ vr->operands.truncate (2);
/* The looked-through reference is a simple MEM_REF. */
memset (&op, 0, sizeof (op));
op.opcode = MEM_REF;
op.op0 = build_int_cst (ptr_type_node, at - rhs_offset);
op.off = at - lhs_offset + rhs_offset;
- VEC_replace (vn_reference_op_s, vr->operands, 0, op);
+ vr->operands[0] = op;
op.type = TREE_TYPE (rhs);
op.opcode = TREE_CODE (rhs);
op.op0 = rhs;
op.off = -1;
- VEC_replace (vn_reference_op_s, vr->operands, 1, op);
+ vr->operands[1] = op;
vr->hashcode = vn_reference_compute_hash (vr);
/* Adjust *ref from the new operands. */
tree
vn_reference_lookup_pieces (tree vuse, alias_set_type set, tree type,
- VEC (vn_reference_op_s, heap) *operands,
+ vec<vn_reference_op_s> operands,
vn_reference_t *vnresult, vn_lookup_kind kind)
{
struct vn_reference_s vr1;
*vnresult = NULL;
vr1.vuse = vuse ? SSA_VAL (vuse) : NULL_TREE;
- VEC_truncate (vn_reference_op_s, shared_lookup_references, 0);
- VEC_safe_grow (vn_reference_op_s, heap, shared_lookup_references,
- VEC_length (vn_reference_op_s, operands));
- memcpy (VEC_address (vn_reference_op_s, shared_lookup_references),
- VEC_address (vn_reference_op_s, operands),
+ shared_lookup_references.truncate (0);
+ shared_lookup_references.safe_grow (operands.length ());
+ memcpy (shared_lookup_references.address (),
+ operands.address (),
sizeof (vn_reference_op_s)
- * VEC_length (vn_reference_op_s, operands));
+ * operands.length ());
vr1.operands = operands = shared_lookup_references
= valueize_refs (shared_lookup_references);
vr1.type = type;
vn_reference_lookup_2,
vn_reference_lookup_3, &vr1);
if (vr1.operands != operands)
- VEC_free (vn_reference_op_s, heap, vr1.operands);
+ vr1.operands.release ();
}
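vn_reference_lookup_pieces reloads the file-scope scratch vector rather than allocating one per lookup; with the new API the idiom is truncate-grow-memcpy, with `address ()` exposing the raw buffer. A self-contained sketch of that reuse pattern (the `op_s` element type and `load_scratch` are illustrative):

  #include <string.h>
  #include "vec.h"   /* GCC's post-overhaul vector API.  */

  struct op_s { int opcode; long off; };   /* stand-in POD element */
  static vec<op_s> scratch;

  static void
  load_scratch (vec<op_s> src)
  {
    scratch.truncate (0);                /* drop contents, keep capacity */
    scratch.safe_grow (src.length ());   /* set length, growing if needed */
    /* Raw copy is fine for POD elements.  */
    memcpy (scratch.address (), src.address (),
            sizeof (op_s) * src.length ());
  }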
if (*vnresult)
vn_reference_lookup (tree op, tree vuse, vn_lookup_kind kind,
vn_reference_t *vnresult)
{
- VEC (vn_reference_op_s, heap) *operands;
+ vec<vn_reference_op_s> operands;
struct vn_reference_s vr1;
tree cst;
bool valuezied_anything;
vn_reference_lookup_2,
vn_reference_lookup_3, &vr1);
if (vr1.operands != operands)
- VEC_free (vn_reference_op_s, heap, vr1.operands);
+ vr1.operands.release ();
if (wvnresult)
{
if (vnresult)
vn_reference_t
vn_reference_insert_pieces (tree vuse, alias_set_type set, tree type,
- VEC (vn_reference_op_s, heap) *operands,
+ vec<vn_reference_op_s> operands,
tree result, unsigned int value_id)
{
/* If all PHI arguments are constants we need to distinguish
the PHI node via its type. */
- type = TREE_TYPE (VEC_index (tree, vp1->phiargs, 0));
+ type = TREE_TYPE (vp1->phiargs[0]);
result += (INTEGRAL_TYPE_P (type)
+ (INTEGRAL_TYPE_P (type)
? TYPE_PRECISION (type) + TYPE_UNSIGNED (type) : 0));
- FOR_EACH_VEC_ELT (tree, vp1->phiargs, i, phi1op)
+ FOR_EACH_VEC_ELT (vp1->phiargs, i, phi1op)
{
if (phi1op == VN_TOP)
continue;
/* If the PHI nodes do not have compatible types
they are not the same. */
- if (!types_compatible_p (TREE_TYPE (VEC_index (tree, vp1->phiargs, 0)),
- TREE_TYPE (VEC_index (tree, vp2->phiargs, 0))))
+ if (!types_compatible_p (TREE_TYPE (vp1->phiargs[0]),
+ TREE_TYPE (vp2->phiargs[0])))
return false;
/* Any phi in the same block will have its arguments in the
same edge order, because of how we store phi nodes. */
- FOR_EACH_VEC_ELT (tree, vp1->phiargs, i, phi1op)
+ FOR_EACH_VEC_ELT (vp1->phiargs, i, phi1op)
{
- tree phi2op = VEC_index (tree, vp2->phiargs, i);
+ tree phi2op = vp2->phiargs[i];
if (phi1op == VN_TOP || phi2op == VN_TOP)
continue;
if (!expressions_equal_p (phi1op, phi2op))
return false;
}
-static VEC(tree, heap) *shared_lookup_phiargs;
+static vec<tree> shared_lookup_phiargs;
/* Lookup PHI in the current hash table, and return the resulting
value number if it exists in the hash table. Return NULL_TREE if
struct vn_phi_s vp1;
unsigned i;
- VEC_truncate (tree, shared_lookup_phiargs, 0);
+ shared_lookup_phiargs.truncate (0);
/* Canonicalize the SSA_NAME's to their value number. */
for (i = 0; i < gimple_phi_num_args (phi); i++)
{
tree def = PHI_ARG_DEF (phi, i);
def = TREE_CODE (def) == SSA_NAME ? SSA_VAL (def) : def;
- VEC_safe_push (tree, heap, shared_lookup_phiargs, def);
+ shared_lookup_phiargs.safe_push (def);
}
vp1.phiargs = shared_lookup_phiargs;
vp1.block = gimple_bb (phi);
void **slot;
vn_phi_t vp1 = (vn_phi_t) pool_alloc (current_info->phis_pool);
unsigned i;
- VEC (tree, heap) *args = NULL;
+ vec<tree> args = vec<tree>();
/* Canonicalize the SSA_NAME's to their value number. */
for (i = 0; i < gimple_phi_num_args (phi); i++)
{
tree def = PHI_ARG_DEF (phi, i);
def = TREE_CODE (def) == SSA_NAME ? SSA_VAL (def) : def;
- VEC_safe_push (tree, heap, args, def);
+ args.safe_push (def);
}
vp1->value_id = VN_INFO (result)->value_id;
vp1->phiargs = args;
/* Print set of components in strongly connected component SCC to OUT. */
static void
-print_scc (FILE *out, VEC (tree, heap) *scc)
+print_scc (FILE *out, vec<tree> scc)
{
tree var;
unsigned int i;
fprintf (out, "SCC consists of:");
- FOR_EACH_VEC_ELT (tree, scc, i, var)
+ FOR_EACH_VEC_ELT (scc, i, var)
{
fprintf (out, " ");
print_generic_expr (out, var, 0);
array will give you the members in RPO order. */
static void
-sort_scc (VEC (tree, heap) *scc)
+sort_scc (vec<tree> scc)
{
- VEC_qsort (tree, scc, compare_ops);
+ scc.qsort (compare_ops);
}
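`VEC_qsort` likewise becomes a member; the comparator keeps the C `qsort` signature. Because `vec<T>` is a thin handle over one heap buffer, passing it by value (as sort_scc does) still sorts the caller's elements. A sketch with an illustrative comparator:

  static int
  compare_ints (const void *pa, const void *pb)
  {
    int a = *(const int *) pa;
    int b = *(const int *) pb;
    return a < b ? -1 : a > b ? 1 : 0;
  }

  static void
  sort_ids (vec<int> ids)
  {
    ids.qsort (compare_ints);   /* sorts the shared underlying buffer */
  }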
/* Insert the no longer used nary ONARY to the hash INFO. */
vn_phi_t phi = (vn_phi_t) pool_alloc (info->phis_pool);
void **slot;
memcpy (phi, ophi, sizeof (*phi));
- ophi->phiargs = NULL;
+ ophi->phiargs.create (0);
slot = htab_find_slot_with_hash (info->phis, phi, phi->hashcode, INSERT);
gcc_assert (!*slot);
*slot = phi;
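Note the ownership hand-off above: after the `memcpy` the new table entry aliases the same `phiargs` buffer, so the old holder is reset with `create (0)` rather than `release ()` - a release would free the storage the copy now points at. Reduced to a sketch (the `node` struct is illustrative):

  struct node { vec<tree> args; };

  static void
  move_into (node *dst, node *src)
  {
    *dst = *src;            /* dst->args now aliases src's buffer */
    src->args.create (0);   /* disown it; src->args is empty again */
    /* Calling src->args.release () here instead would free the
       storage dst->args still points at.  */
  }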
void **slot;
ref = (vn_reference_t) pool_alloc (info->references_pool);
memcpy (ref, oref, sizeof (*ref));
- oref->operands = NULL;
+ oref->operands.create (0);
slot = htab_find_slot_with_hash (info->references, ref, ref->hashcode,
INSERT);
if (*slot)
/* Process a strongly connected component in the SSA graph. */
static void
-process_scc (VEC (tree, heap) *scc)
+process_scc (vec<tree> scc)
{
tree var;
unsigned int i;
vn_reference_t ref;
/* If the SCC has a single member, just visit it. */
- if (VEC_length (tree, scc) == 1)
+ if (scc.length () == 1)
{
- tree use = VEC_index (tree, scc, 0);
+ tree use = scc[0];
if (VN_INFO (use)->use_processed)
return;
/* We need to make sure it doesn't form a cycle itself, which can
gcc_obstack_init (&optimistic_info->nary_obstack);
empty_alloc_pool (optimistic_info->phis_pool);
empty_alloc_pool (optimistic_info->references_pool);
- FOR_EACH_VEC_ELT (tree, scc, i, var)
+ FOR_EACH_VEC_ELT (scc, i, var)
VN_INFO (var)->expr = NULL_TREE;
- FOR_EACH_VEC_ELT (tree, scc, i, var)
+ FOR_EACH_VEC_ELT (scc, i, var)
changed |= visit_use (var);
}
current_info = valid_info;
}
-DEF_VEC_O(ssa_op_iter);
-DEF_VEC_ALLOC_O(ssa_op_iter,heap);
/* Pop the components of the found SCC for NAME off the SCC stack
and process them. Returns true if all went well, false if
we run into resource limits. */
static bool
extract_and_process_scc_for_name (tree name)
{
- VEC (tree, heap) *scc = NULL;
+ vec<tree> scc = vec<tree>();
tree x;
/* Found an SCC, pop the components off the SCC stack and
process them. */
do
{
- x = VEC_pop (tree, sccstack);
+ x = sccstack.pop ();
VN_INFO (x)->on_sccstack = false;
- VEC_safe_push (tree, heap, scc, x);
+ scc.safe_push (x);
} while (x != name);
/* Bail out of SCCVN in case a SCC turns out to be incredibly large. */
- if (VEC_length (tree, scc)
+ if (scc.length ()
> (unsigned)PARAM_VALUE (PARAM_SCCVN_MAX_SCC_SIZE))
{
if (dump_file)
fprintf (dump_file, "WARNING: Giving up with SCCVN due to "
- "SCC size %u exceeding %u\n", VEC_length (tree, scc),
+ "SCC size %u exceeding %u\n", scc.length (),
(unsigned)PARAM_VALUE (PARAM_SCCVN_MAX_SCC_SIZE));
- VEC_free (tree, heap, scc);
+ scc.release ();
return false;
}
- if (VEC_length (tree, scc) > 1)
+ if (scc.length () > 1)
sort_scc (scc);
if (dump_file && (dump_flags & TDF_DETAILS))
process_scc (scc);
- VEC_free (tree, heap, scc);
+ scc.release ();
return true;
}
static bool
DFS (tree name)
{
- VEC(ssa_op_iter, heap) *itervec = NULL;
- VEC(tree, heap) *namevec = NULL;
+ vec<ssa_op_iter> itervec = vec<ssa_op_iter>();
+ vec<tree> namevec = vec<tree>();
use_operand_p usep = NULL;
gimple defstmt;
tree use;
VN_INFO (name)->visited = true;
VN_INFO (name)->low = VN_INFO (name)->dfsnum;
- VEC_safe_push (tree, heap, sccstack, name);
+ sccstack.safe_push (name);
VN_INFO (name)->on_sccstack = true;
defstmt = SSA_NAME_DEF_STMT (name);
if (VN_INFO (name)->low == VN_INFO (name)->dfsnum)
if (!extract_and_process_scc_for_name (name))
{
- VEC_free (tree, heap, namevec);
- VEC_free (ssa_op_iter, heap, itervec);
+ namevec.release ();
+ itervec.release ();
return false;
}
/* Check if we are done. */
- if (VEC_empty (tree, namevec))
+ if (namevec.is_empty ())
{
- VEC_free (tree, heap, namevec);
- VEC_free (ssa_op_iter, heap, itervec);
+ namevec.release ();
+ itervec.release ();
return true;
}
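The iterative DFS keeps its explicit stacks; under the new API a manual worklist is just `safe_push`, `pop` and `is_empty` on a heap vector. The shape of the loop, condensed (the `visit` callback is illustrative):

  static void
  walk (tree root, void (*visit) (tree))
  {
    vec<tree> worklist = vec<tree>();
    worklist.safe_push (root);
    while (!worklist.is_empty ())
      {
        tree t = worklist.pop ();
        visit (t);
        /* ...push not-yet-visited successors of t here...  */
      }
    worklist.release ();
  }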
/* Restore the last use walker and continue walking there. */
use = name;
- name = VEC_pop (tree, namevec);
- memcpy (&iter, &VEC_last (ssa_op_iter, itervec),
+ name = namevec.pop ();
+ memcpy (&iter, &itervec.last (),
sizeof (ssa_op_iter));
- VEC_pop (ssa_op_iter, itervec);
+ itervec.pop ();
goto continue_walking;
}
{
/* Recurse by pushing the current use walking state on
the stack and starting over. */
- VEC_safe_push(ssa_op_iter, heap, itervec, iter);
- VEC_safe_push(tree, heap, namevec, name);
+ itervec.safe_push (iter);
+ namevec.safe_push (name);
name = use;
goto start_over;
int *rpo_numbers_temp;
calculate_dominance_info (CDI_DOMINATORS);
- sccstack = NULL;
+ sccstack.create (0);
constant_to_value_id = htab_create (23, vn_constant_hash, vn_constant_eq,
free);
next_dfs_num = 1;
next_value_id = 1;
- vn_ssa_aux_table = VEC_alloc (vn_ssa_aux_t, heap, num_ssa_names + 1);
+ vn_ssa_aux_table.create (num_ssa_names + 1);
- /* VEC_alloc doesn't actually grow it to the right size, it just
- preallocates the space to do so. */
+ /* vec::create doesn't actually grow it to the right size, it just
+ preallocates the space to do so. */
- VEC_safe_grow_cleared (vn_ssa_aux_t, heap, vn_ssa_aux_table,
- num_ssa_names + 1);
+ vn_ssa_aux_table.safe_grow_cleared (num_ssa_names + 1);
gcc_obstack_init (&vn_ssa_aux_obstack);
- shared_lookup_phiargs = NULL;
- shared_lookup_references = NULL;
+ shared_lookup_phiargs.create (0);
+ shared_lookup_references.create (0);
rpo_numbers = XNEWVEC (int, last_basic_block);
rpo_numbers_temp = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
pre_and_rev_post_order_compute (NULL, rpo_numbers_temp, false);
htab_delete (constant_to_value_id);
BITMAP_FREE (constant_value_ids);
- VEC_free (tree, heap, shared_lookup_phiargs);
- VEC_free (vn_reference_op_s, heap, shared_lookup_references);
+ shared_lookup_phiargs.release ();
+ shared_lookup_references.release ();
XDELETEVEC (rpo_numbers);
for (i = 0; i < num_ssa_names; i++)
release_ssa_name (name);
}
obstack_free (&vn_ssa_aux_obstack, NULL);
- VEC_free (vn_ssa_aux_t, heap, vn_ssa_aux_table);
+ vn_ssa_aux_table.release ();
- VEC_free (tree, heap, sccstack);
+ sccstack.release ();
free_vn_table (valid_info);
XDELETE (valid_info);
free_vn_table (optimistic_info);
/* Unique identifier that all expressions with the same value have. */
unsigned int value_id;
hashval_t hashcode;
- VEC (tree, heap) *phiargs;
+ vec<tree> phiargs;
basic_block block;
tree result;
} *vn_phi_t;
typedef vn_reference_op_s *vn_reference_op_t;
typedef const vn_reference_op_s *const_vn_reference_op_t;
-DEF_VEC_O(vn_reference_op_s);
-DEF_VEC_ALLOC_O(vn_reference_op_s, heap);
/* A reference operation in the hashtable is represented as
the vuse, representing the memory state at the time of
tree vuse;
alias_set_type set;
tree type;
- VEC (vn_reference_op_s, heap) *operands;
+ vec<vn_reference_op_s> operands;
tree result;
tree result_vdef;
} *vn_reference_t;
vn_nary_op_t vn_nary_op_insert_stmt (gimple, tree);
vn_nary_op_t vn_nary_op_insert_pieces (unsigned int, enum tree_code,
tree, tree *, tree, unsigned int);
-void vn_reference_fold_indirect (VEC (vn_reference_op_s, heap) **,
+void vn_reference_fold_indirect (vec<vn_reference_op_s> *,
unsigned int *);
-void copy_reference_ops_from_ref (tree, VEC(vn_reference_op_s, heap) **);
-void copy_reference_ops_from_call (gimple, VEC(vn_reference_op_s, heap) **);
+void copy_reference_ops_from_ref (tree, vec<vn_reference_op_s> *);
+void copy_reference_ops_from_call (gimple, vec<vn_reference_op_s> *);
bool ao_ref_init_from_vn_reference (ao_ref *, alias_set_type, tree,
- VEC (vn_reference_op_s, heap) *);
+ vec<vn_reference_op_s>);
tree vn_reference_lookup_pieces (tree, alias_set_type, tree,
- VEC (vn_reference_op_s, heap) *,
+ vec<vn_reference_op_s>,
vn_reference_t *, vn_lookup_kind);
tree vn_reference_lookup (tree, tree, vn_lookup_kind, vn_reference_t *);
vn_reference_t vn_reference_insert (tree, tree, tree, tree);
vn_reference_t vn_reference_insert_pieces (tree, alias_set_type, tree,
- VEC (vn_reference_op_s, heap) *,
+ vec<vn_reference_op_s>,
tree, unsigned int);
hashval_t vn_nary_op_compute_hash (const vn_nary_op_t);
/* A vector indexed by SSA_NAME_VERSION. 0 means unknown, positive value
is an index into strinfo vector, negative value stands for
string length of a string literal (~strlen). */
-static VEC (int, heap) *ssa_ver_to_stridx;
+static vec<int> ssa_ver_to_stridx;
/* Number of currently active string indexes plus one. */
static int max_stridx;
be invalidated. Always cleared by maybe_invalidate. */
bool dont_invalidate;
} *strinfo;
-DEF_VEC_P(strinfo);
-DEF_VEC_ALLOC_P(strinfo,heap);
/* Pool for allocating strinfo_struct entries. */
static alloc_pool strinfo_pool;
a basic block pointer to the owner basic_block if shared.
If some other bb wants to modify the vector, the vector needs
to be unshared first, and only the owner bb is supposed to free it. */
-static VEC(strinfo, heap) *stridx_to_strinfo;
+static vec<strinfo, va_heap, vl_embed> *stridx_to_strinfo;
/* One OFFSET->IDX mapping. */
struct stridxlist
tree s, o;
if (TREE_CODE (exp) == SSA_NAME)
- return VEC_index (int, ssa_ver_to_stridx, SSA_NAME_VERSION (exp));
+ return ssa_ver_to_stridx[SSA_NAME_VERSION (exp)];
if (TREE_CODE (exp) == ADDR_EXPR)
{
static inline bool
strinfo_shared (void)
{
- return VEC_length (strinfo, stridx_to_strinfo)
- && VEC_index (strinfo, stridx_to_strinfo, 0) != NULL;
+ return vec_safe_length (stridx_to_strinfo)
+ && (*stridx_to_strinfo)[0] != NULL;
}
/* Unshare strinfo vector that is shared with the immediate dominator. */
unsigned int i = 0;
gcc_assert (strinfo_shared ());
- stridx_to_strinfo = VEC_copy (strinfo, heap, stridx_to_strinfo);
- for (i = 1; VEC_iterate (strinfo, stridx_to_strinfo, i, si); ++i)
+ stridx_to_strinfo = vec_safe_copy (stridx_to_strinfo);
+ for (i = 1; vec_safe_iterate (stridx_to_strinfo, i, &si); ++i)
if (si != NULL)
si->refcount++;
- VEC_replace (strinfo, stridx_to_strinfo, 0, NULL);
+ (*stridx_to_strinfo)[0] = NULL;
}
/* Attempt to create a string index for exp, ADDR_EXPR's operand. */
if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp))
return 0;
idx = max_stridx++;
- VEC_replace (int, ssa_ver_to_stridx, SSA_NAME_VERSION (exp), idx);
+ ssa_ver_to_stridx[SSA_NAME_VERSION (exp)] = idx;
return idx;
}
if (TREE_CODE (exp) == ADDR_EXPR)
static inline strinfo
get_strinfo (int idx)
{
- if (VEC_length (strinfo, stridx_to_strinfo) <= (unsigned int) idx)
+ if (vec_safe_length (stridx_to_strinfo) <= (unsigned int) idx)
return NULL;
- return VEC_index (strinfo, stridx_to_strinfo, idx);
+ return (*stridx_to_strinfo)[idx];
}
/* Set strinfo in the vector entry IDX to SI. */
static inline void
set_strinfo (int idx, strinfo si)
{
- if (VEC_length (strinfo, stridx_to_strinfo) && VEC_index (strinfo, stridx_to_strinfo, 0))
+ if (vec_safe_length (stridx_to_strinfo) && (*stridx_to_strinfo)[0])
unshare_strinfo_vec ();
- if (VEC_length (strinfo, stridx_to_strinfo) <= (unsigned int) idx)
- VEC_safe_grow_cleared (strinfo, heap, stridx_to_strinfo, idx + 1);
- VEC_replace (strinfo, stridx_to_strinfo, idx, si);
+ if (vec_safe_length (stridx_to_strinfo) <= (unsigned int) idx)
+ vec_safe_grow_cleared (stridx_to_strinfo, idx + 1);
+ (*stridx_to_strinfo)[idx] = si;
}
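get_strinfo/set_strinfo show the other half of the new API: `stridx_to_strinfo` is a pointer to an embedded-layout vector (`vec<strinfo, va_heap, vl_embed> *`), where a null pointer stands for the empty vector, and the free-standing `vec_safe_*` helpers tolerate that null. The access pattern, distilled (the `table`/`lookup`/`store` names are illustrative):

  static vec<strinfo, va_heap, vl_embed> *table;

  static strinfo
  lookup (int idx)
  {
    if (vec_safe_length (table) <= (unsigned) idx)
      return NULL;                          /* covers table == NULL too */
    return (*table)[idx];
  }

  static void
  store (int idx, strinfo si)
  {
    if (vec_safe_length (table) <= (unsigned) idx)
      vec_safe_grow_cleared (table, idx + 1);   /* allocates if null */
    (*table)[idx] = si;
  }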
/* Return string length, or NULL if it can't be computed. */
unsigned int i;
bool nonempty = false;
- for (i = 1; VEC_iterate (strinfo, stridx_to_strinfo, i, si); ++i)
+ for (i = 1; vec_safe_iterate (stridx_to_strinfo, i, &si); ++i)
if (si != NULL)
{
if (!si->dont_invalidate)
chainsi = unshare_strinfo (chainsi);
chainsi->next = 0;
}
- VEC_replace (int, ssa_ver_to_stridx, SSA_NAME_VERSION (ptr),
- chainsi->idx);
+ ssa_ver_to_stridx[SSA_NAME_VERSION (ptr)] = chainsi->idx;
return chainsi;
}
}
/* We might find an endptr created in this pass. Grow the
vector in that case. */
- if (VEC_length (int, ssa_ver_to_stridx) <= SSA_NAME_VERSION (ptr))
- VEC_safe_grow_cleared (int, heap, ssa_ver_to_stridx, num_ssa_names);
+ if (ssa_ver_to_stridx.length () <= SSA_NAME_VERSION (ptr))
+ ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names);
- if (VEC_index (int, ssa_ver_to_stridx, SSA_NAME_VERSION (ptr)) != 0)
+ if (ssa_ver_to_stridx[SSA_NAME_VERSION (ptr)] != 0)
return;
- VEC_replace (int, ssa_ver_to_stridx, SSA_NAME_VERSION (ptr), idx);
+ ssa_ver_to_stridx[SSA_NAME_VERSION (ptr)] = idx;
}
}
case BUILT_IN_STRCPY:
fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
if (lhs)
- VEC_replace (int, ssa_ver_to_stridx, SSA_NAME_VERSION (lhs), didx);
+ ssa_ver_to_stridx[SSA_NAME_VERSION (lhs)] = didx;
break;
case BUILT_IN_STRCPY_CHK:
fn = builtin_decl_explicit (BUILT_IN_MEMCPY_CHK);
if (lhs)
- VEC_replace (int, ssa_ver_to_stridx, SSA_NAME_VERSION (lhs), didx);
+ ssa_ver_to_stridx[SSA_NAME_VERSION (lhs)] = didx;
break;
case BUILT_IN_STPCPY:
/* This would need adjustment of the lhs (subtract one),
laststmt.len = dsi->length;
laststmt.stridx = dsi->idx;
if (lhs)
- VEC_replace (int, ssa_ver_to_stridx, SSA_NAME_VERSION (lhs), didx);
+ ssa_ver_to_stridx[SSA_NAME_VERSION (lhs)] = didx;
break;
case BUILT_IN_MEMPCPY:
case BUILT_IN_MEMPCPY_CHK:
if (host_integerp (off, 1)
&& (unsigned HOST_WIDE_INT) tree_low_cst (off, 1)
<= (unsigned HOST_WIDE_INT) ~idx)
- VEC_replace (int, ssa_ver_to_stridx, SSA_NAME_VERSION (lhs),
- ~(~idx - (int) tree_low_cst (off, 1)));
+ ssa_ver_to_stridx[SSA_NAME_VERSION (lhs)]
+ = ~(~idx - (int) tree_low_cst (off, 1));
return;
}
&& POINTER_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))))
{
int idx = get_stridx (gimple_assign_rhs1 (stmt));
- VEC_replace (int, ssa_ver_to_stridx, SSA_NAME_VERSION (lhs),
- idx);
+ ssa_ver_to_stridx[SSA_NAME_VERSION (lhs)] = idx;
}
else if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR)
handle_pointer_plus (gsi);
stridx_to_strinfo = NULL;
else
{
- stridx_to_strinfo = (VEC(strinfo, heap) *) dombb->aux;
+ stridx_to_strinfo = ((vec<strinfo, va_heap, vl_embed> *) dombb->aux);
if (stridx_to_strinfo)
{
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
if (idx != get_stridx (gimple_phi_arg_def (phi, i)))
break;
if (i == n)
- VEC_replace (int, ssa_ver_to_stridx,
- SSA_NAME_VERSION (result), idx);
+ ssa_ver_to_stridx[SSA_NAME_VERSION (result)] = idx;
}
}
}
gsi_next (&gsi);
bb->aux = stridx_to_strinfo;
- if (VEC_length (strinfo, stridx_to_strinfo) && !strinfo_shared ())
- VEC_replace (strinfo, stridx_to_strinfo, 0, (strinfo) bb);
+ if (vec_safe_length (stridx_to_strinfo) && !strinfo_shared ())
+ (*stridx_to_strinfo)[0] = (strinfo) bb;
}
/* Callback for walk_dominator_tree. Free strinfo vector if it is
{
if (bb->aux)
{
- stridx_to_strinfo = (VEC(strinfo, heap) *) bb->aux;
- if (VEC_length (strinfo, stridx_to_strinfo)
- && VEC_index (strinfo, stridx_to_strinfo, 0) == (strinfo) bb)
+ stridx_to_strinfo = ((vec<strinfo, va_heap, vl_embed> *) bb->aux);
+ if (vec_safe_length (stridx_to_strinfo)
+ && (*stridx_to_strinfo)[0] == (strinfo) bb)
{
unsigned int i;
strinfo si;
- for (i = 1; VEC_iterate (strinfo, stridx_to_strinfo, i, si); ++i)
+ for (i = 1; vec_safe_iterate (stridx_to_strinfo, i, &si); ++i)
free_strinfo (si);
- VEC_free (strinfo, heap, stridx_to_strinfo);
+ vec_free (stridx_to_strinfo);
}
bb->aux = NULL;
}
{
struct dom_walk_data walk_data;
- VEC_safe_grow_cleared (int, heap, ssa_ver_to_stridx, num_ssa_names);
+ ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names);
max_stridx = 1;
strinfo_pool = create_alloc_pool ("strinfo_struct pool",
sizeof (struct strinfo_struct), 64);
/* Finalize the dominator walker. */
fini_walk_dominator_tree (&walk_data);
- VEC_free (int, heap, ssa_ver_to_stridx);
+ ssa_ver_to_stridx.release ();
free_alloc_pool (strinfo_pool);
if (decl_to_stridxlist_htab)
{
struct constraint;
typedef struct constraint *constraint_t;
-DEF_VEC_P(constraint_t);
-DEF_VEC_ALLOC_P(constraint_t,heap);
#define EXECUTE_IF_IN_NONNULL_BITMAP(a, b, c, d) \
if (a) \
/* Pool of variable info structures. */
static alloc_pool variable_info_pool;
-DEF_VEC_P(varinfo_t);
-DEF_VEC_ALLOC_P(varinfo_t, heap);
/* Table of variable info structures for constraint variables.
Indexed directly by variable info id. */
-static VEC(varinfo_t,heap) *varmap;
+static vec<varinfo_t> varmap;
/* Return the varmap element N */
static inline varinfo_t
get_varinfo (unsigned int n)
{
- return VEC_index (varinfo_t, varmap, n);
+ return varmap[n];
}
/* Static IDs for the special variables. */
static varinfo_t
new_var_info (tree t, const char *name)
{
- unsigned index = VEC_length (varinfo_t, varmap);
+ unsigned index = varmap.length ();
varinfo_t ret = (varinfo_t) pool_alloc (variable_info_pool);
ret->id = index;
stats.total_vars++;
- VEC_safe_push (varinfo_t, heap, varmap, ret);
+ varmap.safe_push (ret);
return ret;
}
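new_var_info relies on the common length-as-next-id idiom: the length read before the push is exactly the index the pushed element receives. Sketch (the `map`/`add` names are illustrative):

  static vec<varinfo_t> map;

  static unsigned
  add (varinfo_t vi)
  {
    unsigned id = map.length ();   /* index the new element will get */
    map.safe_push (vi);
    gcc_assert (map[id] == vi);
    return id;
  }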
#define UNKNOWN_OFFSET ((HOST_WIDE_INT)-1 << (HOST_BITS_PER_WIDE_INT-1))
typedef struct constraint_expr ce_s;
-DEF_VEC_O(ce_s);
-DEF_VEC_ALLOC_O(ce_s, heap);
-static void get_constraint_for_1 (tree, VEC(ce_s, heap) **, bool, bool);
-static void get_constraint_for (tree, VEC(ce_s, heap) **);
-static void get_constraint_for_rhs (tree, VEC(ce_s, heap) **);
-static void do_deref (VEC (ce_s, heap) **);
+static void get_constraint_for_1 (tree, vec<ce_s> *, bool, bool);
+static void get_constraint_for (tree, vec<ce_s> *);
+static void get_constraint_for_rhs (tree, vec<ce_s> *);
+static void do_deref (vec<ce_s> *);
/* Our set constraints are made up of two constraint expressions, one
LHS, and one RHS.
/* List of constraints that we use to build the constraint graph from. */
-static VEC(constraint_t,heap) *constraints;
+static vec<constraint_t> constraints;
static alloc_pool constraint_pool;
/* The constraint graph is represented as an array of bitmaps
/* Vector of complex constraints for each graph node. Complex
constraints are those involving dereferences or offsets that are
not 0. */
- VEC(constraint_t,heap) **complex;
+ vec<constraint_t> *complex;
};
static constraint_graph_t graph;
cycle finding, we create nodes to represent dereferences and
address taken constraints. These represent where these start and
end. */
-#define FIRST_REF_NODE (VEC_length (varinfo_t, varmap))
+#define FIRST_REF_NODE (varmap).length ()
#define LAST_REF_NODE (FIRST_REF_NODE + (FIRST_REF_NODE - 1))
/* Return the representative node for NODE, if NODE has been unioned
{
int i;
constraint_t c;
- for (i = from; VEC_iterate (constraint_t, constraints, i, c); i++)
+ for (i = from; constraints.iterate (i, &c); i++)
if (c)
{
dump_constraint (file, c);
fprintf (file, "\"%s\"", get_varinfo (i)->name);
else
fprintf (file, "\"*%s\"", get_varinfo (i - FIRST_REF_NODE)->name);
- if (graph->complex[i])
+ if (graph->complex[i].exists ())
{
unsigned j;
constraint_t c;
fprintf (file, " [label=\"\\N\\n");
- for (j = 0; VEC_iterate (constraint_t, graph->complex[i], j, c); ++j)
+ for (j = 0; graph->complex[i].iterate (j, &c); ++j)
{
dump_constraint (file, c);
fprintf (file, "\\l");
/* Find a constraint LOOKFOR in the sorted constraint vector VEC. */
static constraint_t
-constraint_vec_find (VEC(constraint_t,heap) *vec,
+constraint_vec_find (vec<constraint_t> vec,
struct constraint lookfor)
{
unsigned int place;
constraint_t found;
- if (vec == NULL)
+ if (!vec.exists ())
return NULL;
- place = VEC_lower_bound (constraint_t, vec, &lookfor, constraint_less);
- if (place >= VEC_length (constraint_t, vec))
+ place = vec.lower_bound (&lookfor, constraint_less);
+ if (place >= vec.length ())
return NULL;
- found = VEC_index (constraint_t, vec, place);
+ found = vec[place];
if (!constraint_equal (*found, lookfor))
return NULL;
return found;
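`lower_bound` is the member replacement for `VEC_lower_bound`: a binary search over a vector kept sorted under the given less-than predicate, returning the insertion point. Combined with `safe_insert` it gives the sorted-set idiom used here. Sketch with an illustrative element type:

  static bool
  int_less (const int &a, const int &b)
  {
    return a < b;
  }

  static void
  insert_unique (vec<int> *v, int x)
  {
    unsigned place = v->lower_bound (x, int_less);
    if (place >= v->length () || (*v)[place] != x)
      v->safe_insert (place, x);   /* keeps the vector sorted */
  }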
/* Union two constraint vectors, TO and FROM. Put the result in TO. */
static void
-constraint_set_union (VEC(constraint_t,heap) **to,
- VEC(constraint_t,heap) **from)
+constraint_set_union (vec<constraint_t> *to,
+ vec<constraint_t> *from)
{
int i;
constraint_t c;
- FOR_EACH_VEC_ELT (constraint_t, *from, i, c)
+ FOR_EACH_VEC_ELT (*from, i, c)
{
if (constraint_vec_find (*to, *c) == NULL)
{
- unsigned int place = VEC_lower_bound (constraint_t, *to, c,
- constraint_less);
- VEC_safe_insert (constraint_t, heap, *to, place, c);
+ unsigned int place = to->lower_bound (c, constraint_less);
+ to->safe_insert (place, c);
}
}
}
insert_into_complex (constraint_graph_t graph,
unsigned int var, constraint_t c)
{
- VEC (constraint_t, heap) *complex = graph->complex[var];
- unsigned int place = VEC_lower_bound (constraint_t, complex, c,
- constraint_less);
+ vec<constraint_t> complex = graph->complex[var];
+ unsigned int place = complex.lower_bound (c, constraint_less);
/* Only insert constraints that do not already exist. */
- if (place >= VEC_length (constraint_t, complex)
- || !constraint_equal (*c, *VEC_index (constraint_t, complex, place)))
- VEC_safe_insert (constraint_t, heap, graph->complex[var], place, c);
+ if (place >= complex.length ()
+ || !constraint_equal (*c, *complex[place]))
+ graph->complex[var].safe_insert (place, c);
}
gcc_assert (find (from) == to);
/* Move all complex constraints from node FROM into node TO. */
- FOR_EACH_VEC_ELT (constraint_t, graph->complex[from], i, c)
+ FOR_EACH_VEC_ELT (graph->complex[from], i, c)
{
/* In complex constraints for node src, we may have either
a = *src, and *src = a, or an offseted constraint which are
c->rhs.var = to;
}
constraint_set_union (&graph->complex[to], &graph->complex[from]);
- VEC_free (constraint_t, heap, graph->complex[from]);
- graph->complex[from] = NULL;
+ graph->complex[from].release ();
}
graph->succs = XCNEWVEC (bitmap, graph->size);
graph->indirect_cycles = XNEWVEC (int, graph->size);
graph->rep = XNEWVEC (unsigned int, graph->size);
- graph->complex = XCNEWVEC (VEC(constraint_t, heap) *, size);
+ /* ??? Macros do not support template types with multiple arguments,
+ so we use a typedef to work around it. */
+ typedef vec<constraint_t> vec_constraint_t_heap;
+ graph->complex = XCNEWVEC (vec_constraint_t_heap, size);
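The `???` comment refers to a preprocessor limitation: macro arguments split at every top-level comma, so a template-id such as `vec<constraint_t, va_heap>` cannot be passed to an allocation macro directly. A minimal illustration of the pitfall and the typedef fix (the `alloc_columns` helper is illustrative):

  static vec<int, va_heap> *
  alloc_columns (unsigned n)
  {
    /* XCNEWVEC (vec<int, va_heap>, n) would not preprocess: the comma
       in the template-id is taken as a macro-argument separator.  */
    typedef vec<int, va_heap> vec_int_heap;   /* hide the comma */
    return XCNEWVEC (vec_int_heap, n);        /* one macro argument now */
  }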
graph->pe = XCNEWVEC (unsigned int, graph->size);
graph->pe_rep = XNEWVEC (int, graph->size);
for (j = 0; j < graph->size; j++)
graph->eq_rep[j] = -1;
- for (j = 0; j < VEC_length (varinfo_t, varmap); j++)
+ for (j = 0; j < varmap.length (); j++)
graph->indirect_cycles[j] = -1;
- FOR_EACH_VEC_ELT (constraint_t, constraints, i, c)
+ FOR_EACH_VEC_ELT (constraints, i, c)
{
struct constraint_expr lhs = c->lhs;
struct constraint_expr rhs = c->rhs;
unsigned i, t;
constraint_t c;
- FOR_EACH_VEC_ELT (constraint_t, constraints, i, c)
+ FOR_EACH_VEC_ELT (constraints, i, c)
{
struct constraint_expr lhs;
struct constraint_expr rhs;
unsigned int *dfs;
unsigned int *node_mapping;
int current_index;
- VEC(unsigned,heap) *scc_stack;
+ vec<unsigned> scc_stack;
};
/* See if any components have been identified. */
if (si->dfs[n] == my_dfs)
{
- if (VEC_length (unsigned, si->scc_stack) > 0
- && si->dfs[VEC_last (unsigned, si->scc_stack)] >= my_dfs)
+ if (si->scc_stack.length () > 0
+ && si->dfs[si->scc_stack.last ()] >= my_dfs)
{
bitmap scc = BITMAP_ALLOC (NULL);
unsigned int lowest_node;
bitmap_set_bit (scc, n);
- while (VEC_length (unsigned, si->scc_stack) != 0
- && si->dfs[VEC_last (unsigned, si->scc_stack)] >= my_dfs)
+ while (si->scc_stack.length () != 0
+ && si->dfs[si->scc_stack.last ()] >= my_dfs)
{
- unsigned int w = VEC_pop (unsigned, si->scc_stack);
+ unsigned int w = si->scc_stack.pop ();
bitmap_set_bit (scc, w);
}
bitmap_set_bit (si->deleted, n);
}
else
- VEC_safe_push (unsigned, heap, si->scc_stack, n);
+ si->scc_stack.safe_push (n);
}
/* Unify node FROM into node TO, updating the changed count if
sbitmap visited;
/* Array that stores the topological order of the graph, *in
reverse*. */
- VEC(unsigned,heap) *topo_order;
+ vec<unsigned> topo_order;
};
struct topo_info *ti = XNEW (struct topo_info);
ti->visited = sbitmap_alloc (size);
bitmap_clear (ti->visited);
- ti->topo_order = VEC_alloc (unsigned, heap, 1);
+ ti->topo_order.create (1);
return ti;
}
free_topo_info (struct topo_info *ti)
{
sbitmap_free (ti->visited);
- VEC_free (unsigned, heap, ti->topo_order);
+ ti->topo_order.release ();
free (ti);
}
topo_visit (graph, ti, j);
}
- VEC_safe_push (unsigned, heap, ti->topo_order, n);
+ ti->topo_order.safe_push (n);
}
/* Process a constraint C that represents x = *(y + off), using DELTA
as the starting solution for y. */
for (i = 0; i < size; i++)
si->node_mapping[i] = i;
- si->scc_stack = VEC_alloc (unsigned, heap, 1);
+ si->scc_stack.create (1);
return si;
}
sbitmap_free (si->deleted);
free (si->node_mapping);
free (si->dfs);
- VEC_free (unsigned, heap, si->scc_stack);
+ si->scc_stack.release ();
free (si);
}
/* See if any components have been identified. */
if (si->dfs[n] == my_dfs)
{
- while (VEC_length (unsigned, si->scc_stack) != 0
- && si->dfs[VEC_last (unsigned, si->scc_stack)] >= my_dfs)
+ while (si->scc_stack.length () != 0
+ && si->dfs[si->scc_stack.last ()] >= my_dfs)
{
- unsigned int w = VEC_pop (unsigned, si->scc_stack);
+ unsigned int w = si->scc_stack.pop ();
si->node_mapping[w] = n;
if (!bitmap_bit_p (graph->direct_nodes, w))
bitmap_set_bit (si->deleted, n);
}
else
- VEC_safe_push (unsigned, heap, si->scc_stack, n);
+ si->scc_stack.safe_push (n);
}
/* Label pointer equivalences. */
int i;
constraint_t c;
- FOR_EACH_VEC_ELT (constraint_t, constraints, i, c)
+ FOR_EACH_VEC_ELT (constraints, i, c)
{
if (c)
{
for (j = 0; j < graph->size; j++)
gcc_assert (find (j) == j);
- FOR_EACH_VEC_ELT (constraint_t, constraints, i, c)
+ FOR_EACH_VEC_ELT (constraints, i, c)
{
struct constraint_expr lhs = c->lhs;
struct constraint_expr rhs = c->rhs;
dump_constraint (dump_file, c);
fprintf (dump_file, "\n");
}
- VEC_replace (constraint_t, constraints, i, NULL);
+ constraints[i] = NULL;
continue;
}
dump_constraint (dump_file, c);
fprintf (dump_file, "\n");
}
- VEC_replace (constraint_t, constraints, i, NULL);
+ constraints[i] = NULL;
continue;
}
&& !bitmap_empty_p (get_varinfo (node)->solution))
{
unsigned int i;
- VEC(unsigned,heap) *queue = NULL;
+ vec<unsigned> queue = vec<unsigned>();
int queuepos;
unsigned int to = find (graph->indirect_cycles[node]);
bitmap_iterator bi;
if (find (i) == i && i != to)
{
if (unite (to, i))
- VEC_safe_push (unsigned, heap, queue, i);
+ queue.safe_push (i);
}
}
for (queuepos = 0;
- VEC_iterate (unsigned, queue, queuepos, i);
+ queue.iterate (queuepos, &i);
queuepos++)
{
unify_nodes (graph, to, i, true);
}
- VEC_free (unsigned, heap, queue);
+ queue.release ();
return true;
}
return false;
varinfo_t ivi = get_varinfo (i);
if (find (i) == i && !bitmap_empty_p (ivi->solution)
&& ((graph->succs[i] && !bitmap_empty_p (graph->succs[i]))
- || VEC_length (constraint_t, graph->complex[i]) > 0))
+ || graph->complex[i].length () > 0))
bitmap_set_bit (changed, i);
}
compute_topo_order (graph, ti);
- while (VEC_length (unsigned, ti->topo_order) != 0)
+ while (ti->topo_order.length () != 0)
{
- i = VEC_pop (unsigned, ti->topo_order);
+ i = ti->topo_order.pop ();
/* If this variable is not a representative, skip it. */
if (find (i) != i)
unsigned int j;
constraint_t c;
bitmap solution;
- VEC(constraint_t,heap) *complex = graph->complex[i];
+ vec<constraint_t> complex = graph->complex[i];
varinfo_t vi = get_varinfo (i);
bool solution_empty;
solution_empty = bitmap_empty_p (solution);
/* Process the complex constraints */
- FOR_EACH_VEC_ELT (constraint_t, complex, j, c)
+ FOR_EACH_VEC_ELT (complex, j, c)
{
/* XXX: This is going to unsort the constraints in
some cases, which will occasionally add duplicate
If address_p is true, the result will have its address taken. */
static void
-get_constraint_for_ssa_var (tree t, VEC(ce_s, heap) **results, bool address_p)
+get_constraint_for_ssa_var (tree t, vec<ce_s> *results, bool address_p)
{
struct constraint_expr cexpr;
varinfo_t vi;
for (; vi; vi = vi->next)
{
cexpr.var = vi->id;
- VEC_safe_push (ce_s, heap, *results, cexpr);
+ results->safe_push (cexpr);
}
return;
}
- VEC_safe_push (ce_s, heap, *results, cexpr);
+ results->safe_push (cexpr);
}
/* Process constraint T, performing various simplifications and then
struct constraint_expr rhs = t->rhs;
struct constraint_expr lhs = t->lhs;
- gcc_assert (rhs.var < VEC_length (varinfo_t, varmap));
- gcc_assert (lhs.var < VEC_length (varinfo_t, varmap));
+ gcc_assert (rhs.var < varmap.length ());
+ gcc_assert (lhs.var < varmap.length ());
/* If we didn't get any useful constraint from the lhs we get
&ANYTHING as fallback from get_constraint_for. Deal with
else
{
gcc_assert (rhs.type != ADDRESSOF || rhs.offset == 0);
- VEC_safe_push (constraint_t, heap, constraints, t);
+ constraints.safe_push (t);
}
}
static void
get_constraint_for_ptr_offset (tree ptr, tree offset,
- VEC (ce_s, heap) **results)
+ vec<ce_s> *results)
{
struct constraint_expr c;
unsigned int j, n;
return;
/* As we are eventually appending to the solution do not use
- VEC_iterate here. */
- n = VEC_length (ce_s, *results);
+ vec::iterate here. */
+ n = results->length ();
for (j = 0; j < n; j++)
{
varinfo_t curr;
- c = VEC_index (ce_s, *results, j);
+ c = (*results)[j];
curr = get_varinfo (c.var);
if (c.type == ADDRESSOF
c2.type = ADDRESSOF;
c2.offset = 0;
if (c2.var != c.var)
- VEC_safe_push (ce_s, heap, *results, c2);
+ results->safe_push (c2);
temp = temp->next;
}
while (temp);
c2.var = temp->next->id;
c2.type = ADDRESSOF;
c2.offset = 0;
- VEC_safe_push (ce_s, heap, *results, c2);
+ results->safe_push (c2);
}
c.var = temp->id;
c.offset = 0;
else
c.offset = rhsoffset;
- VEC_replace (ce_s, *results, j, c);
+ (*results)[j] = c;
}
}
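The rewritten comment above ("do not use vec::iterate here") is about appending to the vector being walked: `safe_push` may reallocate the buffer, so element pointers taken before a push go stale, and the length must be snapshotted so only the original elements are visited. Hence the by-value `c = (*results)[j]` with `j < n`. The discipline, reduced to a sketch:

  static void
  expand (vec<int> *results)
  {
    unsigned n = results->length ();   /* bound: original elements only */
    for (unsigned j = 0; j < n; j++)
      {
        int c = (*results)[j];   /* copy; a pointer here could dangle
                                    after the safe_push below */
        if (c > 0)
          results->safe_push (-c);   /* may reallocate the buffer */
      }
  }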
as the lhs. */
static void
-get_constraint_for_component_ref (tree t, VEC(ce_s, heap) **results,
+get_constraint_for_component_ref (tree t, vec<ce_s> *results,
bool address_p, bool lhs_p)
{
tree orig_t = t;
HOST_WIDE_INT bitmaxsize = -1;
HOST_WIDE_INT bitpos;
tree forzero;
- struct constraint_expr *result;
/* Some people like to do cute things like take the address of
&0->a.b */
temp.offset = 0;
temp.var = integer_id;
temp.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, temp);
+ results->safe_push (temp);
return;
}
temp.offset = 0;
temp.var = anything_id;
temp.type = ADDRESSOF;
- VEC_safe_push (ce_s, heap, *results, temp);
+ results->safe_push (temp);
return;
}
}
/* Pretend to take the address of the base, we'll take care of
adding the required subset of sub-fields below. */
get_constraint_for_1 (t, results, true, lhs_p);
- gcc_assert (VEC_length (ce_s, *results) == 1);
- result = &VEC_last (ce_s, *results);
+ gcc_assert (results->length () == 1);
+ struct constraint_expr &result = results->last ();
- if (result->type == SCALAR
- && get_varinfo (result->var)->is_full_var)
+ if (result.type == SCALAR
+ && get_varinfo (result.var)->is_full_var)
/* For single-field vars do not bother about the offset. */
- result->offset = 0;
- else if (result->type == SCALAR)
+ result.offset = 0;
+ else if (result.type == SCALAR)
{
/* In languages like C, you can access one past the end of an
array. You aren't allowed to dereference it, so we can
ignore this constraint. When we handle pointer subtraction,
we may have to do something cute here. */
- if ((unsigned HOST_WIDE_INT)bitpos < get_varinfo (result->var)->fullsize
+ if ((unsigned HOST_WIDE_INT)bitpos < get_varinfo (result.var)->fullsize
&& bitmaxsize != 0)
{
/* It's also not true that the constraint will actually start at the
right offset, it may start in some padding. We only care about
setting the constraint to the first actual field it touches, so
walk to find it. */
- struct constraint_expr cexpr = *result;
+ struct constraint_expr cexpr = result;
varinfo_t curr;
- VEC_pop (ce_s, *results);
+ results->pop ();
cexpr.offset = 0;
for (curr = get_varinfo (cexpr.var); curr; curr = curr->next)
{
bitpos, bitmaxsize))
{
cexpr.var = curr->id;
- VEC_safe_push (ce_s, heap, *results, cexpr);
+ results->safe_push (cexpr);
if (address_p)
break;
}
/* If we are going to take the address of this field then
to be able to compute reachability correctly add at least
the last field of the variable. */
- if (address_p
- && VEC_length (ce_s, *results) == 0)
+ if (address_p && results->length () == 0)
{
curr = get_varinfo (cexpr.var);
while (curr->next != NULL)
curr = curr->next;
cexpr.var = curr->id;
- VEC_safe_push (ce_s, heap, *results, cexpr);
+ results->safe_push (cexpr);
}
- else if (VEC_length (ce_s, *results) == 0)
+ else if (results->length () == 0)
/* Assert that we found *some* field there. The user couldn't be
accessing *only* padding. */
/* Still the user could access one past the end of an array
embedded in a struct resulting in accessing *only* padding. */
cexpr.type = SCALAR;
cexpr.var = anything_id;
cexpr.offset = 0;
- VEC_safe_push (ce_s, heap, *results, cexpr);
+ results->safe_push (cexpr);
}
}
else if (bitmaxsize == 0)
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Access to past the end of variable, ignoring\n");
}
- else if (result->type == DEREF)
+ else if (result.type == DEREF)
{
/* If we do not know exactly where the access goes say so. Note
that only for non-structure accesses we know that we access
if (bitpos == -1
|| bitsize != bitmaxsize
|| AGGREGATE_TYPE_P (TREE_TYPE (orig_t))
- || result->offset == UNKNOWN_OFFSET)
- result->offset = UNKNOWN_OFFSET;
+ || result.offset == UNKNOWN_OFFSET)
+ result.offset = UNKNOWN_OFFSET;
else
- result->offset += bitpos;
+ result.offset += bitpos;
}
- else if (result->type == ADDRESSOF)
+ else if (result.type == ADDRESSOF)
{
/* We can end up here for component references on a
VIEW_CONVERT_EXPR <>(&foobar). */
- result->type = SCALAR;
- result->var = anything_id;
- result->offset = 0;
+ result.type = SCALAR;
+ result.var = anything_id;
+ result.offset = 0;
}
else
gcc_unreachable ();
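The conversion above binds a C++ reference to `results->last ()`; like a pointer, such a reference is only valid until the vector reallocates or shrinks, which is why the element is copied into `cexpr` before the `pop ()`/`safe_push` sequence. A sketch of the copy-before-mutate rule:

  static void
  split_last (vec<int> *results)
  {
    int &last = results->last ();   /* reference into the buffer */
    int saved = last;               /* copy before mutating the vec */
    results->pop ();                /* `last' must not be used past here */
    results->safe_push (saved / 2);
    results->safe_push (saved - saved / 2);
  }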
This is needed so that we can handle dereferencing DEREF constraints. */
static void
-do_deref (VEC (ce_s, heap) **constraints)
+do_deref (vec<ce_s> *constraints)
{
struct constraint_expr *c;
unsigned int i = 0;
- FOR_EACH_VEC_ELT (ce_s, *constraints, i, c)
+ FOR_EACH_VEC_ELT (*constraints, i, c)
{
if (c->type == SCALAR)
c->type = DEREF;
address of it. */
static void
-get_constraint_for_address_of (tree t, VEC (ce_s, heap) **results)
+get_constraint_for_address_of (tree t, vec<ce_s> *results)
{
struct constraint_expr *c;
unsigned int i;
get_constraint_for_1 (t, results, true, true);
- FOR_EACH_VEC_ELT (ce_s, *results, i, c)
+ FOR_EACH_VEC_ELT (*results, i, c)
{
if (c->type == DEREF)
c->type = SCALAR;
/* Given a tree T, return the constraint expression for it. */
static void
-get_constraint_for_1 (tree t, VEC (ce_s, heap) **results, bool address_p,
+get_constraint_for_1 (tree t, vec<ce_s> *results, bool address_p,
bool lhs_p)
{
struct constraint_expr temp;
temp.var = nonlocal_id;
temp.type = ADDRESSOF;
temp.offset = 0;
- VEC_safe_push (ce_s, heap, *results, temp);
+ results->safe_push (temp);
return;
}
temp.var = readonly_id;
temp.type = SCALAR;
temp.offset = 0;
- VEC_safe_push (ce_s, heap, *results, temp);
+ results->safe_push (temp);
return;
}
if (address_p)
return;
- cs = VEC_last (ce_s, *results);
+ cs = results->last ();
if (cs.type == DEREF
&& type_can_have_subvars (TREE_TYPE (t)))
{
/* For dereferences this means we have to defer it
to solving time. */
- VEC_last (ce_s, *results).offset = UNKNOWN_OFFSET;
+ results->last ().offset = UNKNOWN_OFFSET;
return;
}
if (cs.type != SCALAR)
if (curr->offset - vi->offset < size)
{
cs.var = curr->id;
- VEC_safe_push (ce_s, heap, *results, cs);
+ results->safe_push (cs);
}
else
break;
{
unsigned int i;
tree val;
- VEC (ce_s, heap) *tmp = NULL;
+ vec<ce_s> tmp = vec<ce_s>();
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (t), i, val)
{
struct constraint_expr *rhsp;
unsigned j;
get_constraint_for_1 (val, &tmp, address_p, lhs_p);
- FOR_EACH_VEC_ELT (ce_s, tmp, j, rhsp)
- VEC_safe_push (ce_s, heap, *results, *rhsp);
- VEC_truncate (ce_s, tmp, 0);
+ FOR_EACH_VEC_ELT (tmp, j, rhsp)
+ results->safe_push (*rhsp);
+ tmp.truncate (0);
}
- VEC_free (ce_s, heap, tmp);
+ tmp.release ();
/* We do not know whether the constructor was complete,
so technically we have to add &NOTHING or &ANYTHING
like we do for an empty constructor as well. */
temp.type = ADDRESSOF;
temp.var = nonlocal_id;
temp.offset = 0;
- VEC_safe_push (ce_s, heap, *results, temp);
+ results->safe_push (temp);
return;
}
default:;
temp.type = ADDRESSOF;
temp.var = anything_id;
temp.offset = 0;
- VEC_safe_push (ce_s, heap, *results, temp);
+ results->safe_push (temp);
}
/* Given a gimple tree T, return the constraint expression vector for it. */
static void
-get_constraint_for (tree t, VEC (ce_s, heap) **results)
+get_constraint_for (tree t, vec<ce_s> *results)
{
- gcc_assert (VEC_length (ce_s, *results) == 0);
+ gcc_assert (results->length () == 0);
get_constraint_for_1 (t, results, false, true);
}
to be used as the rhs of a constraint. */
static void
-get_constraint_for_rhs (tree t, VEC (ce_s, heap) **results)
+get_constraint_for_rhs (tree t, vec<ce_s> *results)
{
- gcc_assert (VEC_length (ce_s, *results) == 0);
+ gcc_assert (results->length () == 0);
get_constraint_for_1 (t, results, false, false);
}
entries in *LHSC. */
static void
-process_all_all_constraints (VEC (ce_s, heap) *lhsc, VEC (ce_s, heap) *rhsc)
+process_all_all_constraints (vec<ce_s> lhsc,
+ vec<ce_s> rhsc)
{
struct constraint_expr *lhsp, *rhsp;
unsigned i, j;
- if (VEC_length (ce_s, lhsc) <= 1
- || VEC_length (ce_s, rhsc) <= 1)
+ if (lhsc.length () <= 1 || rhsc.length () <= 1)
{
- FOR_EACH_VEC_ELT (ce_s, lhsc, i, lhsp)
- FOR_EACH_VEC_ELT (ce_s, rhsc, j, rhsp)
+ FOR_EACH_VEC_ELT (lhsc, i, lhsp)
+ FOR_EACH_VEC_ELT (rhsc, j, rhsp)
process_constraint (new_constraint (*lhsp, *rhsp));
}
else
{
struct constraint_expr tmp;
tmp = new_scalar_tmp_constraint_exp ("allalltmp");
- FOR_EACH_VEC_ELT (ce_s, rhsc, i, rhsp)
+ FOR_EACH_VEC_ELT (rhsc, i, rhsp)
process_constraint (new_constraint (tmp, *rhsp));
- FOR_EACH_VEC_ELT (ce_s, lhsc, i, lhsp)
+ FOR_EACH_VEC_ELT (lhsc, i, lhsp)
process_constraint (new_constraint (*lhsp, tmp));
}
}
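process_all_all_constraints also fixes the parameter convention this patch settles on: `vec<T>` is a small handle around one heap buffer, so read-only consumers take it by value (a cheap copy that still sees the caller's elements), while anything that may grow the vector takes `vec<T> *`, since a reallocation inside the callee must be visible to the caller. The two signatures, sketched:

  static int
  sum (vec<int> v)            /* by value: reads the shared buffer */
  {
    int s = 0, x;
    unsigned i;
    FOR_EACH_VEC_ELT (v, i, x)
      s += x;
    return s;
  }

  static void
  append_zero (vec<int> *v)   /* by pointer: may reallocate */
  {
    v->safe_push (0);         /* growth is visible to the caller */
  }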
do_structure_copy (tree lhsop, tree rhsop)
{
struct constraint_expr *lhsp, *rhsp;
- VEC (ce_s, heap) *lhsc = NULL, *rhsc = NULL;
+ vec<ce_s> lhsc = vec<ce_s>();
+ vec<ce_s> rhsc = vec<ce_s>();
unsigned j;
get_constraint_for (lhsop, &lhsc);
get_constraint_for_rhs (rhsop, &rhsc);
- lhsp = &VEC_index (ce_s, lhsc, 0);
- rhsp = &VEC_index (ce_s, rhsc, 0);
+ lhsp = &lhsc[0];
+ rhsp = &rhsc[0];
if (lhsp->type == DEREF
|| (lhsp->type == ADDRESSOF && lhsp->var == anything_id)
|| rhsp->type == DEREF)
{
if (lhsp->type == DEREF)
{
- gcc_assert (VEC_length (ce_s, lhsc) == 1);
+ gcc_assert (lhsc.length () == 1);
lhsp->offset = UNKNOWN_OFFSET;
}
if (rhsp->type == DEREF)
{
- gcc_assert (VEC_length (ce_s, rhsc) == 1);
+ gcc_assert (rhsc.length () == 1);
rhsp->offset = UNKNOWN_OFFSET;
}
process_all_all_constraints (lhsc, rhsc);
unsigned k = 0;
get_ref_base_and_extent (lhsop, &lhsoffset, &lhssize, &lhsmaxsize);
get_ref_base_and_extent (rhsop, &rhsoffset, &rhssize, &rhsmaxsize);
- for (j = 0; VEC_iterate (ce_s, lhsc, j, lhsp);)
+ for (j = 0; lhsc.iterate (j, &lhsp);)
{
varinfo_t lhsv, rhsv;
- rhsp = &VEC_index (ce_s, rhsc, k);
+ rhsp = &rhsc[k];
lhsv = get_varinfo (lhsp->var);
rhsv = get_varinfo (rhsp->var);
if (lhsv->may_have_pointers
> rhsv->offset + lhsoffset + rhsv->size)))
{
++k;
- if (k >= VEC_length (ce_s, rhsc))
+ if (k >= rhsc.length ())
break;
}
else
else
gcc_unreachable ();
- VEC_free (ce_s, heap, lhsc);
- VEC_free (ce_s, heap, rhsc);
+ lhsc.release ();
+ rhsc.release ();
}
/* Create constraints ID = { rhsc }. */
static void
-make_constraints_to (unsigned id, VEC(ce_s, heap) *rhsc)
+make_constraints_to (unsigned id, vec<ce_s> rhsc)
{
struct constraint_expr *c;
struct constraint_expr includes;
includes.offset = 0;
includes.type = SCALAR;
- FOR_EACH_VEC_ELT (ce_s, rhsc, j, c)
+ FOR_EACH_VEC_ELT (rhsc, j, c)
process_constraint (new_constraint (includes, *c));
}
static void
make_constraint_to (unsigned id, tree op)
{
- VEC(ce_s, heap) *rhsc = NULL;
+ vec<ce_s> rhsc = vec<ce_s>();
get_constraint_for_rhs (op, &rhsc);
make_constraints_to (id, rhsc);
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
}
/* Create a constraint ID = &FROM. */
RHS. */
static void
-handle_rhs_call (gimple stmt, VEC(ce_s, heap) **results)
+handle_rhs_call (gimple stmt, vec<ce_s> *results)
{
struct constraint_expr rhsc;
unsigned i;
rhsc.var = get_call_use_vi (stmt)->id;
rhsc.offset = 0;
rhsc.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, rhsc);
+ results->safe_push (rhsc);
}
/* The static chain escapes as well. */
&& gimple_call_lhs (stmt) != NULL_TREE
&& TREE_ADDRESSABLE (TREE_TYPE (gimple_call_lhs (stmt))))
{
- VEC(ce_s, heap) *tmpc = NULL;
+ vec<ce_s> tmpc = vec<ce_s>();
struct constraint_expr lhsc, *c;
get_constraint_for_address_of (gimple_call_lhs (stmt), &tmpc);
lhsc.var = escaped_id;
lhsc.offset = 0;
lhsc.type = SCALAR;
- FOR_EACH_VEC_ELT (ce_s, tmpc, i, c)
+ FOR_EACH_VEC_ELT (tmpc, i, c)
process_constraint (new_constraint (lhsc, *c));
- VEC_free(ce_s, heap, tmpc);
+ tmpc.release ();
}
/* Regular functions return nonlocal memory. */
rhsc.var = nonlocal_id;
rhsc.offset = 0;
rhsc.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, rhsc);
+ results->safe_push (rhsc);
}
/* For non-IPA mode, generate constraints necessary for a call
that returns a pointer and assigns it to LHS. This simply makes
the LHS point to global and escaped variables. */
static void
-handle_lhs_call (gimple stmt, tree lhs, int flags, VEC(ce_s, heap) *rhsc,
+handle_lhs_call (gimple stmt, tree lhs, int flags, vec<ce_s> rhsc,
tree fndecl)
{
- VEC(ce_s, heap) *lhsc = NULL;
+ vec<ce_s> lhsc = vec<ce_s>();
get_constraint_for (lhs, &lhsc);
/* If the store is to a global decl make sure to
tmpc.var = escaped_id;
tmpc.offset = 0;
tmpc.type = SCALAR;
- VEC_safe_push (ce_s, heap, lhsc, tmpc);
+ lhsc.safe_push (tmpc);
}
/* If the call returns an argument unmodified, override the rhs
constraints. */
&& (flags & ERF_RETURN_ARG_MASK) < gimple_call_num_args (stmt))
{
tree arg;
- rhsc = NULL;
+ rhsc.create (0);
arg = gimple_call_arg (stmt, flags & ERF_RETURN_ARG_MASK);
get_constraint_for (arg, &rhsc);
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
}
else if (flags & ERF_NOALIAS)
{
varinfo_t vi;
struct constraint_expr tmpc;
- rhsc = NULL;
+ rhsc.create (0);
vi = make_heapvar ("HEAP");
/* We delay marking allocated storage global until we know if
it escapes. */
tmpc.var = vi->id;
tmpc.offset = 0;
tmpc.type = ADDRESSOF;
- VEC_safe_push (ce_s, heap, rhsc, tmpc);
+ rhsc.safe_push (tmpc);
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
}
else
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, lhsc);
+ lhsc.release ();
}
/* For non-IPA mode, generate constraints necessary for a call of a
const function that returns a pointer in the statement STMT. */
static void
-handle_const_call (gimple stmt, VEC(ce_s, heap) **results)
+handle_const_call (gimple stmt, vec<ce_s> *results)
{
struct constraint_expr rhsc;
unsigned int k;
rhsc.var = uses->id;
rhsc.offset = 0;
rhsc.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, rhsc);
+ results->safe_push (rhsc);
}
/* May return arguments. */
for (k = 0; k < gimple_call_num_args (stmt); ++k)
{
tree arg = gimple_call_arg (stmt, k);
- VEC(ce_s, heap) *argc = NULL;
+ vec<ce_s> argc = vec<ce_s>();
unsigned i;
struct constraint_expr *argp;
get_constraint_for_rhs (arg, &argc);
- FOR_EACH_VEC_ELT (ce_s, argc, i, argp)
- VEC_safe_push (ce_s, heap, *results, *argp);
- VEC_free(ce_s, heap, argc);
+ FOR_EACH_VEC_ELT (argc, i, argp)
+ results->safe_push (*argp);
+ argc.release ();
}
/* May return addresses of globals. */
rhsc.var = nonlocal_id;
rhsc.offset = 0;
rhsc.type = ADDRESSOF;
- VEC_safe_push (ce_s, heap, *results, rhsc);
+ results->safe_push (rhsc);
}
/* For non-IPA mode, generate constraints necessary for a call to a
pure function in statement STMT. */
static void
-handle_pure_call (gimple stmt, VEC(ce_s, heap) **results)
+handle_pure_call (gimple stmt, vec<ce_s> *results)
{
struct constraint_expr rhsc;
unsigned i;
rhsc.var = uses->id;
rhsc.offset = 0;
rhsc.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, rhsc);
+ results->safe_push (rhsc);
}
rhsc.var = nonlocal_id;
rhsc.offset = 0;
rhsc.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, rhsc);
+ results->safe_push (rhsc);
}
find_func_aliases_for_builtin_call (gimple t)
{
tree fndecl = gimple_call_fndecl (t);
- VEC(ce_s, heap) *lhsc = NULL;
- VEC(ce_s, heap) *rhsc = NULL;
+ vec<ce_s> lhsc = vec<ce_s>();
+ vec<ce_s> rhsc = vec<ce_s>();
varinfo_t fi;
if (fndecl != NULL_TREE
else
get_constraint_for (dest, &rhsc);
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, lhsc);
- VEC_free (ce_s, heap, rhsc);
+ lhsc.release ();
+ rhsc.release ();
}
get_constraint_for_ptr_offset (dest, NULL_TREE, &lhsc);
get_constraint_for_ptr_offset (src, NULL_TREE, &rhsc);
do_deref (&lhsc);
do_deref (&rhsc);
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, lhsc);
- VEC_free (ce_s, heap, rhsc);
+ lhsc.release ();
+ rhsc.release ();
return true;
}
case BUILT_IN_MEMSET:
get_constraint_for (res, &lhsc);
get_constraint_for (dest, &rhsc);
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, lhsc);
- VEC_free (ce_s, heap, rhsc);
+ lhsc.release ();
+ rhsc.release ();
}
get_constraint_for_ptr_offset (dest, NULL_TREE, &lhsc);
do_deref (&lhsc);
ac.var = integer_id;
}
ac.offset = 0;
- FOR_EACH_VEC_ELT (ce_s, lhsc, i, lhsp)
+ FOR_EACH_VEC_ELT (lhsc, i, lhsp)
process_constraint (new_constraint (*lhsp, ac));
- VEC_free (ce_s, heap, lhsc);
+ lhsc.release ();
return true;
}
case BUILT_IN_ASSUME_ALIGNED:
get_constraint_for (res, &lhsc);
get_constraint_for (dest, &rhsc);
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, lhsc);
- VEC_free (ce_s, heap, rhsc);
+ lhsc.release ();
+ rhsc.release ();
}
return true;
}
if (gimple_call_lhs (t))
{
handle_lhs_call (t, gimple_call_lhs (t), gimple_call_flags (t),
- NULL, fndecl);
+ vec<ce_s>(), fndecl);
get_constraint_for_ptr_offset (gimple_call_lhs (t),
NULL_TREE, &lhsc);
get_constraint_for_ptr_offset (gimple_call_arg (t, 0),
do_deref (&lhsc);
do_deref (&rhsc);
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, lhsc);
- VEC_free (ce_s, heap, rhsc);
+ lhsc.release ();
+ rhsc.release ();
return true;
}
break;
{
lhs = get_function_part_constraint (nfi, fi_static_chain);
get_constraint_for (frame, &rhsc);
- FOR_EACH_VEC_ELT (ce_s, rhsc, i, rhsp)
+ FOR_EACH_VEC_ELT (rhsc, i, rhsp)
process_constraint (new_constraint (lhs, *rhsp));
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
/* Make the frame point to the function for
the trampoline adjustment call. */
do_deref (&lhsc);
get_constraint_for (nfunc, &rhsc);
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, rhsc);
- VEC_free (ce_s, heap, lhsc);
+ rhsc.release ();
+ lhsc.release ();
return true;
}
get_constraint_for (tramp, &rhsc);
do_deref (&rhsc);
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, rhsc);
- VEC_free (ce_s, heap, lhsc);
+ rhsc.release ();
+ lhsc.release ();
}
return true;
}
do_deref (&lhsc);
get_constraint_for (src, &rhsc);
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, lhsc);
- VEC_free (ce_s, heap, rhsc);
+ lhsc.release ();
+ rhsc.release ();
return true;
}
CASE_BUILT_IN_TM_LOAD (1):
get_constraint_for (addr, &rhsc);
do_deref (&rhsc);
process_all_all_constraints (lhsc, rhsc);
- VEC_free (ce_s, heap, lhsc);
- VEC_free (ce_s, heap, rhsc);
+ lhsc.release ();
+ rhsc.release ();
return true;
}
/* Variadic argument handling needs to be handled in IPA mode as well. */
rhs.type = ADDRESSOF;
rhs.offset = 0;
}
- FOR_EACH_VEC_ELT (ce_s, lhsc, i, lhsp)
+ FOR_EACH_VEC_ELT (lhsc, i, lhsp)
process_constraint (new_constraint (*lhsp, rhs));
- VEC_free (ce_s, heap, lhsc);
+ lhsc.release ();
/* va_list is clobbered. */
make_constraint_to (get_call_clobber_vi (t)->id, valist);
return true;
find_func_aliases_for_call (gimple t)
{
tree fndecl = gimple_call_fndecl (t);
- VEC(ce_s, heap) *lhsc = NULL;
- VEC(ce_s, heap) *rhsc = NULL;
+ vec<ce_s> lhsc = vec<ce_s>();
+ vec<ce_s> rhsc = vec<ce_s>();
varinfo_t fi;
if (fndecl != NULL_TREE
if (!in_ipa_mode
|| (fndecl && !fi->is_fn_info))
{
- VEC(ce_s, heap) *rhsc = NULL;
+ vec<ce_s> rhsc = vec<ce_s>();
int flags = gimple_call_flags (t);
/* Const functions can return their arguments and addresses
handle_rhs_call (t, &rhsc);
if (gimple_call_lhs (t))
handle_lhs_call (t, gimple_call_lhs (t), flags, rhsc, fndecl);
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
}
else
{
get_constraint_for_rhs (arg, &rhsc);
lhs = get_function_part_constraint (fi, fi_parm_base + j);
- while (VEC_length (ce_s, rhsc) != 0)
+ while (rhsc.length () != 0)
{
- rhsp = &VEC_last (ce_s, rhsc);
+ rhsp = &rhsc.last ();
process_constraint (new_constraint (lhs, *rhsp));
- VEC_pop (ce_s, rhsc);
+ rhsc.pop ();
}
}
&& DECL_RESULT (fndecl)
&& DECL_BY_REFERENCE (DECL_RESULT (fndecl)))
{
- VEC(ce_s, heap) *tem = NULL;
- VEC_safe_push (ce_s, heap, tem, rhs);
+ vec<ce_s> tem = vec<ce_s>();
+ tem.safe_push (rhs);
do_deref (&tem);
- rhs = VEC_index (ce_s, tem, 0);
- VEC_free(ce_s, heap, tem);
+ rhs = tem[0];
+ tem.release ();
}
- FOR_EACH_VEC_ELT (ce_s, lhsc, j, lhsp)
+ FOR_EACH_VEC_ELT (lhsc, j, lhsp)
process_constraint (new_constraint (*lhsp, rhs));
}
get_constraint_for_address_of (lhsop, &rhsc);
lhs = get_function_part_constraint (fi, fi_result);
- FOR_EACH_VEC_ELT (ce_s, rhsc, j, rhsp)
+ FOR_EACH_VEC_ELT (rhsc, j, rhsp)
process_constraint (new_constraint (lhs, *rhsp));
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
}
/* If we use a static chain, pass it along. */
get_constraint_for (gimple_call_chain (t), &rhsc);
lhs = get_function_part_constraint (fi, fi_static_chain);
- FOR_EACH_VEC_ELT (ce_s, rhsc, j, rhsp)
+ FOR_EACH_VEC_ELT (rhsc, j, rhsp)
process_constraint (new_constraint (lhs, *rhsp));
}
}
find_func_aliases (gimple origt)
{
gimple t = origt;
- VEC(ce_s, heap) *lhsc = NULL;
- VEC(ce_s, heap) *rhsc = NULL;
+ vec<ce_s> lhsc = vec<ce_s>();
+ vec<ce_s> rhsc = vec<ce_s>();
struct constraint_expr *c;
varinfo_t fi;
STRIP_NOPS (strippedrhs);
get_constraint_for_rhs (gimple_phi_arg_def (t, i), &rhsc);
- FOR_EACH_VEC_ELT (ce_s, lhsc, j, c)
+ FOR_EACH_VEC_ELT (lhsc, j, c)
{
struct constraint_expr *c2;
- while (VEC_length (ce_s, rhsc) > 0)
+ while (rhsc.length () > 0)
{
- c2 = &VEC_last (ce_s, rhsc);
+ c2 = &rhsc.last ();
process_constraint (new_constraint (*c, *c2));
- VEC_pop (ce_s, rhsc);
+ rhsc.pop ();
}
}
}
else if (code == COND_EXPR)
{
/* The result is a merge of both COND_EXPR arms. */
- VEC (ce_s, heap) *tmp = NULL;
+ vec<ce_s> tmp = vec<ce_s>();
struct constraint_expr *rhsp;
unsigned i;
get_constraint_for_rhs (gimple_assign_rhs2 (t), &rhsc);
get_constraint_for_rhs (gimple_assign_rhs3 (t), &tmp);
- FOR_EACH_VEC_ELT (ce_s, tmp, i, rhsp)
- VEC_safe_push (ce_s, heap, rhsc, *rhsp);
- VEC_free (ce_s, heap, tmp);
+ FOR_EACH_VEC_ELT (tmp, i, rhsp)
+ rhsc.safe_push (*rhsp);
+ tmp.release ();
}
else if (truth_value_p (code))
/* Truth value results are not pointer (parts). Or at least
else
{
/* All other operations are merges. */
- VEC (ce_s, heap) *tmp = NULL;
+ vec<ce_s> tmp = vec<ce_s>();
struct constraint_expr *rhsp;
unsigned i, j;
get_constraint_for_rhs (gimple_assign_rhs1 (t), &rhsc);
for (i = 2; i < gimple_num_ops (t); ++i)
{
get_constraint_for_rhs (gimple_op (t, i), &tmp);
- FOR_EACH_VEC_ELT (ce_s, tmp, j, rhsp)
- VEC_safe_push (ce_s, heap, rhsc, *rhsp);
- VEC_truncate (ce_s, tmp, 0);
+ FOR_EACH_VEC_ELT (tmp, j, rhsp)
+ rhsc.safe_push (*rhsp);
+ tmp.truncate (0);
}
- VEC_free (ce_s, heap, tmp);
+ tmp.release ();
}
process_all_all_constraints (lhsc, rhsc);
}
lhs = get_function_part_constraint (fi, fi_result);
get_constraint_for_rhs (gimple_return_retval (t), &rhsc);
- FOR_EACH_VEC_ELT (ce_s, rhsc, i, rhsp)
+ FOR_EACH_VEC_ELT (rhsc, i, rhsp)
process_constraint (new_constraint (lhs, *rhsp));
}
}
any global memory. */
if (op)
{
- VEC(ce_s, heap) *lhsc = NULL;
+ vec<ce_s> lhsc = vec<ce_s>();
struct constraint_expr rhsc, *lhsp;
unsigned j;
get_constraint_for (op, &lhsc);
rhsc.var = nonlocal_id;
rhsc.offset = 0;
rhsc.type = SCALAR;
- FOR_EACH_VEC_ELT (ce_s, lhsc, j, lhsp)
+ FOR_EACH_VEC_ELT (lhsc, j, lhsp)
process_constraint (new_constraint (*lhsp, rhsc));
- VEC_free (ce_s, heap, lhsc);
+ lhsc.release ();
}
}
for (i = 0; i < gimple_asm_ninputs (t); ++i)
}
}
- VEC_free (ce_s, heap, rhsc);
- VEC_free (ce_s, heap, lhsc);
+ rhsc.release ();
+ lhsc.release ();
}
static void
process_ipa_clobber (varinfo_t fi, tree ptr)
{
- VEC(ce_s, heap) *ptrc = NULL;
+ vec<ce_s> ptrc = vec<ce_s>();
struct constraint_expr *c, lhs;
unsigned i;
get_constraint_for_rhs (ptr, &ptrc);
lhs = get_function_part_constraint (fi, fi_clobbers);
- FOR_EACH_VEC_ELT (ce_s, ptrc, i, c)
+ FOR_EACH_VEC_ELT (ptrc, i, c)
process_constraint (new_constraint (lhs, *c));
- VEC_free (ce_s, heap, ptrc);
+ ptrc.release ();
}
/* Walk statement T setting up clobber and use constraints according to the
find_func_clobbers (gimple origt)
{
gimple t = origt;
- VEC(ce_s, heap) *lhsc = NULL;
- VEC(ce_s, heap) *rhsc = NULL;
+ vec<ce_s> lhsc = vec<ce_s>();
+ vec<ce_s> rhsc = vec<ce_s>();
varinfo_t fi;
/* Add constraints for clobbered/used in IPA mode.
unsigned i;
lhsc = get_function_part_constraint (fi, fi_clobbers);
get_constraint_for_address_of (lhs, &rhsc);
- FOR_EACH_VEC_ELT (ce_s, rhsc, i, rhsp)
+ FOR_EACH_VEC_ELT (rhsc, i, rhsp)
process_constraint (new_constraint (lhsc, *rhsp));
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
}
}
unsigned i;
lhs = get_function_part_constraint (fi, fi_uses);
get_constraint_for_address_of (rhs, &rhsc);
- FOR_EACH_VEC_ELT (ce_s, rhsc, i, rhsp)
+ FOR_EACH_VEC_ELT (rhsc, i, rhsp)
process_constraint (new_constraint (lhs, *rhsp));
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
}
}
struct constraint_expr *rhsp, *lhsp;
get_constraint_for_ptr_offset (dest, NULL_TREE, &lhsc);
lhs = get_function_part_constraint (fi, fi_clobbers);
- FOR_EACH_VEC_ELT (ce_s, lhsc, i, lhsp)
+ FOR_EACH_VEC_ELT (lhsc, i, lhsp)
process_constraint (new_constraint (lhs, *lhsp));
- VEC_free (ce_s, heap, lhsc);
+ lhsc.release ();
get_constraint_for_ptr_offset (src, NULL_TREE, &rhsc);
lhs = get_function_part_constraint (fi, fi_uses);
- FOR_EACH_VEC_ELT (ce_s, rhsc, i, rhsp)
+ FOR_EACH_VEC_ELT (rhsc, i, rhsp)
process_constraint (new_constraint (lhs, *rhsp));
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
return;
}
/* The following function clobbers memory pointed to by
ce_s *lhsp;
get_constraint_for_ptr_offset (dest, NULL_TREE, &lhsc);
lhs = get_function_part_constraint (fi, fi_clobbers);
- FOR_EACH_VEC_ELT (ce_s, lhsc, i, lhsp)
+ FOR_EACH_VEC_ELT (lhsc, i, lhsp)
process_constraint (new_constraint (lhs, *lhsp));
- VEC_free (ce_s, heap, lhsc);
+ lhsc.release ();
return;
}
/* The following functions clobber their second and third
continue;
get_constraint_for_address_of (arg, &rhsc);
- FOR_EACH_VEC_ELT (ce_s, rhsc, j, rhsp)
+ FOR_EACH_VEC_ELT (rhsc, j, rhsp)
process_constraint (new_constraint (lhs, *rhsp));
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
}
/* Build constraints for propagating clobbers/uses along the
anything_id);
}
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
}
};
typedef struct fieldoff fieldoff_s;
-DEF_VEC_O(fieldoff_s);
-DEF_VEC_ALLOC_O(fieldoff_s,heap);
/* qsort comparison function for two fieldoff's PA and PB */
/* Sort a fieldstack according to the field offset and sizes. */
static void
-sort_fieldstack (VEC(fieldoff_s,heap) *fieldstack)
+sort_fieldstack (vec<fieldoff_s> fieldstack)
{
- VEC_qsort (fieldoff_s, fieldstack, fieldoff_compare);
+ fieldstack.qsort (fieldoff_compare);
}
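VEC_qsort becomes a qsort member taking the same C-style comparator. A sketch with a simplified, hypothetical comparator (the real fieldoff_compare also keys on size):

  static int
  offset_cmp (const void *pa, const void *pb)
  {
    const fieldoff_s *a = (const fieldoff_s *) pa;
    const fieldoff_s *b = (const fieldoff_s *) pb;
    return a->offset < b->offset ? -1 : a->offset > b->offset;
  }

  fieldstack.qsort (offset_cmp);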
/* Return true if T is a type that can have subvars. */
recursed for. */
static bool
-push_fields_onto_fieldstack (tree type, VEC(fieldoff_s,heap) **fieldstack,
+push_fields_onto_fieldstack (tree type, vec<fieldoff_s> *fieldstack,
HOST_WIDE_INT offset)
{
tree field;
return false;
/* If the vector of fields is growing too big, bail out early.
- Callers check for VEC_length <= MAX_FIELDS_FOR_FIELD_SENSITIVE, make
+ Callers check for vec::length <= MAX_FIELDS_FOR_FIELD_SENSITIVE, make
sure this fails. */
- if (VEC_length (fieldoff_s, *fieldstack) > MAX_FIELDS_FOR_FIELD_SENSITIVE)
+ if (fieldstack->length () > MAX_FIELDS_FOR_FIELD_SENSITIVE)
return false;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
bool has_unknown_size = false;
bool must_have_pointers_p;
- if (!VEC_empty (fieldoff_s, *fieldstack))
- pair = &VEC_last (fieldoff_s, *fieldstack);
+ if (!fieldstack->is_empty ())
+ pair = &fieldstack->last ();
/* If there isn't anything at offset zero, create something. */
if (!pair
&& offset + foff != 0)
{
fieldoff_s e = {0, offset + foff, false, false, false, false};
- pair = VEC_safe_push (fieldoff_s, heap, *fieldstack, e);
+ pair = fieldstack->safe_push (e);
}
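The hunk above relies on an API change worth calling out: safe_push now returns the address of the element it just pushed, so there is no need to re-fetch the slot with VEC_last afterwards. Sketch:

  fieldoff_s e = {0, offset + foff, false, false, false, false};
  fieldoff_s *pair = fieldstack->safe_push (e);  /* address of the new last element */
  pair->offset = 0;                              /* which may be updated in place */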
if (!DECL_SIZE (field)
= (!has_unknown_size
&& POINTER_TYPE_P (TREE_TYPE (field))
&& TYPE_RESTRICT (TREE_TYPE (field)));
- VEC_safe_push (fieldoff_s, heap, *fieldstack, e);
+ fieldstack->safe_push (e);
}
}
FIELDSTACK is assumed to be sorted by offset. */
static bool
-check_for_overlaps (VEC (fieldoff_s,heap) *fieldstack)
+check_for_overlaps (vec<fieldoff_s> fieldstack)
{
fieldoff_s *fo = NULL;
unsigned int i;
HOST_WIDE_INT lastoffset = -1;
- FOR_EACH_VEC_ELT (fieldoff_s, fieldstack, i, fo)
+ FOR_EACH_VEC_ELT (fieldstack, i, fo)
{
if (fo->offset == lastoffset)
return true;
varinfo_t vi, newvi;
tree decl_type = TREE_TYPE (decl);
tree declsize = DECL_P (decl) ? DECL_SIZE (decl) : TYPE_SIZE (decl_type);
- VEC (fieldoff_s,heap) *fieldstack = NULL;
+ vec<fieldoff_s> fieldstack = vec<fieldoff_s>();
fieldoff_s *fo;
unsigned int i;
push_fields_onto_fieldstack (decl_type, &fieldstack, 0);
- for (i = 0; !notokay && VEC_iterate (fieldoff_s, fieldstack, i, fo); i++)
+ for (i = 0; !notokay && fieldstack.iterate (i, &fo); i++)
if (fo->has_unknown_size
|| fo->offset < 0)
{
}
if (notokay)
- VEC_free (fieldoff_s, heap, fieldstack);
+ fieldstack.release ();
}
/* If we didn't end up collecting sub-variables create a full
variable for the decl. */
- if (VEC_length (fieldoff_s, fieldstack) <= 1
- || VEC_length (fieldoff_s, fieldstack) > MAX_FIELDS_FOR_FIELD_SENSITIVE)
+ if (fieldstack.length () <= 1
+ || fieldstack.length () > MAX_FIELDS_FOR_FIELD_SENSITIVE)
{
vi = new_var_info (decl, name);
vi->offset = 0;
vi->fullsize = TREE_INT_CST_LOW (declsize);
vi->size = vi->fullsize;
vi->is_full_var = true;
- VEC_free (fieldoff_s, heap, fieldstack);
+ fieldstack.release ();
return vi;
}
vi = new_var_info (decl, name);
vi->fullsize = TREE_INT_CST_LOW (declsize);
for (i = 0, newvi = vi;
- VEC_iterate (fieldoff_s, fieldstack, i, fo);
+ fieldstack.iterate (i, &fo);
++i, newvi = newvi->next)
{
const char *newname = "NULL";
newvi->fullsize = vi->fullsize;
newvi->may_have_pointers = fo->may_have_pointers;
newvi->only_restrict_pointers = fo->only_restrict_pointers;
- if (i + 1 < VEC_length (fieldoff_s, fieldstack))
+ if (i + 1 < fieldstack.length ())
newvi->next = new_var_info (decl, name);
}
- VEC_free (fieldoff_s, heap, fieldstack);
+ fieldstack.release ();
return vi;
}
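The iterate member used in the loops above replaces VEC_iterate: it takes an index and the address of the cursor and returns false once the index runs past the end, so it composes directly into a for-loop condition. A sketch (the loop body is illustrative):

  fieldoff_s *fo;
  unsigned ix;
  for (ix = 0; fieldstack.iterate (ix, &fo); ++ix)
    if (fo->offset < 0)       /* fo points at fieldstack[ix] */
      break;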
if (DECL_INITIAL (decl)
&& vnode->analyzed)
{
- VEC (ce_s, heap) *rhsc = NULL;
+ vec<ce_s> rhsc = vec<ce_s>();
struct constraint_expr lhs, *rhsp;
unsigned i;
get_constraint_for_rhs (DECL_INITIAL (decl), &rhsc);
lhs.var = vi->id;
lhs.offset = 0;
lhs.type = SCALAR;
- FOR_EACH_VEC_ELT (ce_s, rhsc, i, rhsp)
+ FOR_EACH_VEC_ELT (rhsc, i, rhsp)
process_constraint (new_constraint (lhs, *rhsp));
/* If this is a variable that escapes from the unit
the initializer escapes as well. */
lhs.var = escaped_id;
lhs.offset = 0;
lhs.type = SCALAR;
- FOR_EACH_VEC_ELT (ce_s, rhsc, i, rhsp)
+ FOR_EACH_VEC_ELT (rhsc, i, rhsp)
process_constraint (new_constraint (lhs, *rhsp));
}
- VEC_free (ce_s, heap, rhsc);
+ rhsc.release ();
}
}
}
stats.num_implicit_edges);
}
- for (i = 0; i < VEC_length (varinfo_t, varmap); i++)
+ for (i = 0; i < varmap.length (); i++)
{
varinfo_t vi = get_varinfo (i);
if (!vi->may_have_pointers)
/* This specifically does not use process_constraint because
process_constraint ignores all anything = anything constraints, since all
but this one are redundant. */
- VEC_safe_push (constraint_t, heap, constraints, new_constraint (lhs, rhs));
+ constraints.safe_push (new_constraint (lhs, rhs));
/* Create the READONLY variable, used to represent that a variable
points to readonly memory. */
sizeof (struct constraint), 30);
variable_info_pool = create_alloc_pool ("Variable info pool",
sizeof (struct variable_info), 30);
- constraints = VEC_alloc (constraint_t, heap, 8);
- varmap = VEC_alloc (varinfo_t, heap, 8);
+ constraints.create (8);
+ varmap.create (8);
vi_for_tree = pointer_map_create ();
call_stmt_vars = pointer_map_create ();
/* Now reallocate the size of the successor list as, and blow away
the predecessor bitmaps. */
- graph->size = VEC_length (varinfo_t, varmap);
+ graph->size = varmap.length ();
graph->succs = XRESIZEVEC (bitmap, graph->succs, graph->size);
free (graph->implicit_preds);
"\nCollapsing static cycles and doing variable "
"substitution\n");
- init_graph (VEC_length (varinfo_t, varmap) * 2);
+ init_graph (varmap.length () * 2);
if (dump_file)
fprintf (dump_file, "Building predecessor graph\n");
cfun->gimple_df->escaped.escaped = 0;
/* Mark escaped HEAP variables as global. */
- FOR_EACH_VEC_ELT (varinfo_t, varmap, i, vi)
+ FOR_EACH_VEC_ELT (varmap, i, vi)
if (vi->is_heap_var
&& !vi->is_global_var)
DECL_EXTERNAL (vi->decl) = vi->is_global_var
pointer_map_destroy (vi_for_tree);
pointer_map_destroy (call_stmt_vars);
bitmap_obstack_release (&pta_obstack);
- VEC_free (constraint_t, heap, constraints);
+ constraints.release ();
for (i = 0; i < graph->size; i++)
- VEC_free (constraint_t, heap, graph->complex[i]);
+ graph->complex[i].release ();
free (graph->complex);
free (graph->rep);
free (graph->indirect_cycles);
free (graph);
- VEC_free (varinfo_t, heap, varmap);
+ varmap.release ();
free_alloc_pool (variable_info_pool);
free_alloc_pool (constraint_pool);
dump_constraints (dump_file, 0);
fprintf (dump_file, "\n");
}
- from = VEC_length (constraint_t, constraints);
+ from = constraints.length ();
FOR_EACH_DEFINED_FUNCTION (node)
{
dump_constraints (dump_file, from);
fprintf (dump_file, "\n");
}
- from = VEC_length (constraint_t, constraints);
+ from = constraints.length ();
}
/* From the constraints compute the points-to sets. */
fn = DECL_STRUCT_FUNCTION (node->symbol.decl);
/* Compute the points-to sets for pointer SSA_NAMEs. */
- FOR_EACH_VEC_ELT (tree, fn->gimple_df->ssa_names, i, ptr)
+ FOR_EACH_VEC_ELT (*fn->gimple_df->ssa_names, i, ptr)
{
if (ptr
&& POINTER_TYPE_P (TREE_TYPE (ptr)))
bb. */
bitmap inverse;
/* The edge flags for each of the successor bbs. */
- VEC (int, heap) *succ_flags;
+ vec<int> succ_flags;
/* Indicates whether the struct is currently in the worklist. */
bool in_worklist;
/* The hash value of the struct. */
bitmap_print (file, e->succs, "succs:", "\n");
bitmap_print (file, e->inverse, "inverse:", "\n");
fprintf (file, "flags:");
- for (i = 0; i < VEC_length (int, e->succ_flags); ++i)
- fprintf (file, " %x", VEC_index (int, e->succ_flags, i));
+ for (i = 0; i < e->succ_flags.length (); ++i)
+ fprintf (file, " %x", e->succ_flags[i]);
fprintf (file, "\n");
}
hashval = iterative_hash_hashval_t (size, hashval);
BB_SIZE (bb) = size;
- for (i = 0; i < VEC_length (int, e->succ_flags); ++i)
+ for (i = 0; i < e->succ_flags.length (); ++i)
{
- flags = VEC_index (int, e->succ_flags, i);
+ flags = e->succ_flags[i];
flags = flags & ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
hashval = iterative_hash_hashval_t (flags, hashval);
}
int f1a, f1b, f2a, f2b;
int mask = ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
- if (VEC_length (int, e1->succ_flags) != 2)
+ if (e1->succ_flags.length () != 2)
return false;
- f1a = VEC_index (int, e1->succ_flags, 0);
- f1b = VEC_index (int, e1->succ_flags, 1);
- f2a = VEC_index (int, e2->succ_flags, 0);
- f2b = VEC_index (int, e2->succ_flags, 1);
+ f1a = e1->succ_flags[0];
+ f1b = e1->succ_flags[1];
+ f2a = e2->succ_flags[0];
+ f2b = e2->succ_flags[1];
if (f1a == f2a && f1b == f2b)
return false;
if (e1->hashval != e2->hashval)
return 0;
- if (VEC_length (int, e1->succ_flags) != VEC_length (int, e2->succ_flags))
+ if (e1->succ_flags.length () != e2->succ_flags.length ())
return 0;
if (!bitmap_equal_p (e1->succs, e2->succs))
if (!inverse_flags (e1, e2))
{
- for (i = 0; i < VEC_length (int, e1->succ_flags); ++i)
- if (VEC_index (int, e1->succ_flags, i)
- != VEC_index (int, e1->succ_flags, i))
+ for (i = 0; i < e1->succ_flags.length (); ++i)
+ if (e1->succ_flags[i] != e2->succ_flags[i])
return 0;
}
same->bbs = BITMAP_ALLOC (NULL);
same->succs = BITMAP_ALLOC (NULL);
same->inverse = BITMAP_ALLOC (NULL);
- same->succ_flags = VEC_alloc (int, heap, 10);
+ same->succ_flags.create (10);
same->in_worklist = false;
return same;
BITMAP_FREE (e->bbs);
BITMAP_FREE (e->succs);
BITMAP_FREE (e->inverse);
- VEC_free (int, heap, e->succ_flags);
+ e->succ_flags.release ();
XDELETE (e);
}
bitmap_clear (same->bbs);
bitmap_clear (same->succs);
bitmap_clear (same->inverse);
- VEC_truncate (int, same->succ_flags, 0);
+ same->succ_flags.truncate (0);
}
static hash_table <same_succ_def> same_succ_htab;
same_succ_htab.traverse <FILE *, ssa_same_succ_print_traverse> (stderr);
}
-DEF_VEC_P (same_succ);
-DEF_VEC_ALLOC_P (same_succ, heap);
/* Vector of bbs to process. */
-static VEC (same_succ, heap) *worklist;
+static vec<same_succ> worklist;
/* Prints worklist to FILE. */
print_worklist (FILE *file)
{
unsigned int i;
- for (i = 0; i < VEC_length (same_succ, worklist); ++i)
- same_succ_print (file, VEC_index (same_succ, worklist, i));
+ for (i = 0; i < worklist.length (); ++i)
+ same_succ_print (file, worklist[i]);
}
/* Adds SAME to worklist. */
return;
same->in_worklist = true;
- VEC_safe_push (same_succ, heap, worklist, same);
+ worklist.safe_push (same);
}
/* Add BB to same_succ_htab. */
same_succ_edge_flags[index] = e->flags;
}
EXECUTE_IF_SET_IN_BITMAP (same->succs, 0, j, bj)
- VEC_safe_push (int, heap, same->succ_flags, same_succ_edge_flags[j]);
+ same->succ_flags.safe_push (same_succ_edge_flags[j]);
same->hashval = same_succ_hash (same);
same_succ_edge_flags = XCNEWVEC (int, last_basic_block);
deleted_bbs = BITMAP_ALLOC (NULL);
deleted_bb_preds = BITMAP_ALLOC (NULL);
- worklist = VEC_alloc (same_succ, heap, n_basic_blocks);
+ worklist.create (n_basic_blocks);
find_same_succ ();
if (dump_file && (dump_flags & TDF_DETAILS))
same_succ_edge_flags = NULL;
BITMAP_FREE (deleted_bbs);
BITMAP_FREE (deleted_bb_preds);
- VEC_free (same_succ, heap, worklist);
+ worklist.release ();
}
/* Mark BB as deleted, and mark its predecessors. */
XDELETE (c);
}
-DEF_VEC_P (bb_cluster);
-DEF_VEC_ALLOC_P (bb_cluster, heap);
/* Array that contains all clusters. */
-static VEC (bb_cluster, heap) *all_clusters;
+static vec<bb_cluster> all_clusters;
/* Allocate all cluster vectors. */
static void
alloc_cluster_vectors (void)
{
- all_clusters = VEC_alloc (bb_cluster, heap, n_basic_blocks);
+ all_clusters.create (n_basic_blocks);
}
/* Reset all cluster vectors. */
{
unsigned int i;
basic_block bb;
- for (i = 0; i < VEC_length (bb_cluster, all_clusters); ++i)
- delete_cluster (VEC_index (bb_cluster, all_clusters, i));
- VEC_truncate (bb_cluster, all_clusters, 0);
+ for (i = 0; i < all_clusters.length (); ++i)
+ delete_cluster (all_clusters[i]);
+ all_clusters.truncate (0);
FOR_EACH_BB (bb)
BB_CLUSTER (bb) = NULL;
}
delete_cluster_vectors (void)
{
unsigned int i;
- for (i = 0; i < VEC_length (bb_cluster, all_clusters); ++i)
- delete_cluster (VEC_index (bb_cluster, all_clusters, i));
- VEC_free (bb_cluster, heap, all_clusters);
+ for (i = 0; i < all_clusters.length (); ++i)
+ delete_cluster (all_clusters[i]);
+ all_clusters.release ();
}
/* Merge cluster C2 into C1. */
add_bb_to_cluster (c, bb2);
BB_CLUSTER (bb1) = c;
BB_CLUSTER (bb2) = c;
- c->index = VEC_length (bb_cluster, all_clusters);
- VEC_safe_push (bb_cluster, heap, all_clusters, c);
+ c->index = all_clusters.length ();
+ all_clusters.safe_push (c);
}
else if (BB_CLUSTER (bb1) == NULL || BB_CLUSTER (bb2) == NULL)
{
merge_clusters (merge, old);
EXECUTE_IF_SET_IN_BITMAP (old->bbs, 0, i, bi)
BB_CLUSTER (BASIC_BLOCK (i)) = merge;
- VEC_replace (bb_cluster, all_clusters, old->index, NULL);
+ all_clusters[old->index] = NULL;
update_rep_bb (merge, old->rep_bb);
delete_cluster (old);
}
{
same_succ same;
- while (!VEC_empty (same_succ, worklist))
+ while (!worklist.is_empty ())
{
- same = VEC_pop (same_succ, worklist);
+ same = worklist.pop ();
same->in_worklist = false;
if (dump_file && (dump_flags & TDF_DETAILS))
{
bitmap_iterator bj;
int nr_bbs_removed = 0;
- for (i = 0; i < VEC_length (bb_cluster, all_clusters); ++i)
+ for (i = 0; i < all_clusters.length (); ++i)
{
- c = VEC_index (bb_cluster, all_clusters, i);
+ c = all_clusters[i];
if (c == NULL)
continue;
}
init_worklist ();
- while (!VEC_empty (same_succ, worklist))
+ while (!worklist.is_empty ())
{
if (!loop_entered)
{
fprintf (dump_file, "worklist iteration #%d\n", iteration_nr);
find_clusters ();
- gcc_assert (VEC_empty (same_succ, worklist));
- if (VEC_empty (bb_cluster, all_clusters))
+ gcc_assert (worklist.is_empty ());
+ if (all_clusters.is_empty ())
break;
nr_bbs_removed = apply_clusters ();
static int stmt_count;
/* Array to record value-handles per SSA_NAME. */
-VEC(tree,heap) *ssa_name_values;
+vec<tree> ssa_name_values;
/* Set the value for the SSA name NAME to VALUE. */
void
set_ssa_name_value (tree name, tree value)
{
- if (SSA_NAME_VERSION (name) >= VEC_length (tree, ssa_name_values))
- VEC_safe_grow_cleared (tree, heap, ssa_name_values,
- SSA_NAME_VERSION (name) + 1);
- VEC_replace (tree, ssa_name_values, SSA_NAME_VERSION (name), value);
+ if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
+ ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
+ ssa_name_values[SSA_NAME_VERSION (name)] = value;
}
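safe_grow_cleared grows the vector to the requested length and zero-initializes the new slots, which is what lets an SSA version number be used as a direct index immediately afterwards. Sketch of the guarantee:

  vec<tree> v = vec<tree>();
  v.safe_grow_cleared (100);          /* length is now 100 ... */
  gcc_assert (v[99] == NULL_TREE);    /* ... and the new slots are cleared */
  v.release ();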
/* Initialize the per SSA_NAME value-handles array. Returns it. */
void
threadedge_initialize_values (void)
{
- gcc_assert (ssa_name_values == NULL);
- ssa_name_values = VEC_alloc(tree, heap, num_ssa_names);
+ gcc_assert (!ssa_name_values.exists ());
+ ssa_name_values.create (num_ssa_names);
}
/* Free the per SSA_NAME value-handle array. */
void
threadedge_finalize_values (void)
{
- VEC_free(tree, heap, ssa_name_values);
+ ssa_name_values.release ();
}
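exists () replaces NULL tests on the old VEC pointer; together with create () and release () it gives the allocate/use/free protocol these two functions implement. Sketch:

  gcc_assert (!ssa_name_values.exists ());   /* never created, or released */
  ssa_name_values.create (num_ssa_names);    /* pre-reserve; length stays 0 */
  /* ... use the vector ... */
  ssa_name_values.release ();                /* back to the null state */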
/* Return TRUE if we may be able to thread an incoming edge into
structures. */
static void
-remove_temporary_equivalences (VEC(tree, heap) **stack)
+remove_temporary_equivalences (vec<tree> *stack)
{
- while (VEC_length (tree, *stack) > 0)
+ while (stack->length () > 0)
{
tree prev_value, dest;
- dest = VEC_pop (tree, *stack);
+ dest = stack->pop ();
/* A NULL value indicates we should stop unwinding, otherwise
pop off the next entry as they're recorded in pairs. */
if (dest == NULL)
break;
- prev_value = VEC_pop (tree, *stack);
+ prev_value = stack->pop ();
set_ssa_name_value (dest, prev_value);
}
}
done processing the current edge. */
static void
-record_temporary_equivalence (tree x, tree y, VEC(tree, heap) **stack)
+record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
tree prev_x = SSA_NAME_VALUE (x);
}
set_ssa_name_value (x, y);
- VEC_reserve (tree, heap, *stack, 2);
- VEC_quick_push (tree, *stack, prev_x);
- VEC_quick_push (tree, *stack, x);
+ stack->reserve (2);
+ stack->quick_push (prev_x);
+ stack->quick_push (x);
}
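Two details in this hunk deserve a note: equivalences are recorded as (previous value, name) pairs, popped in reverse by remove_temporary_equivalences above, and reserve (2) guarantees room so that the two quick_push calls, which skip the overflow check, are safe. Sketch:

  stack->reserve (2);            /* make room for exactly one pair */
  stack->quick_push (prev_x);    /* saved previous value */
  stack->quick_push (x);         /* the name; popped first when unwinding */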
/* Record temporary equivalences created by PHIs at the target of the
indicating we should not thread this edge, else return TRUE. */
static bool
-record_temporary_equivalences_from_phis (edge e, VEC(tree, heap) **stack)
+record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
gimple_stmt_iterator gsi;
static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
- VEC(tree, heap) **stack,
+ vec<tree> *stack,
tree (*simplify) (gimple,
gimple))
{
return false;
}
-DEF_VEC_O(tree);
-DEF_VEC_ALLOC_O_STACK(tree);
-#define VEC_tree_stack_alloc(alloc) VEC_stack_alloc (tree, alloc)
-
/* Copy debug stmts from DEST's chain of single predecessors up to
SRC, so that we don't lose the bindings as PHI nodes are introduced
when DEST gains new predecessors. */
i++;
}
- VEC(tree, stack) *fewvars = NULL;
+ vec<tree, va_stack> fewvars = vec<tree, va_stack>();
pointer_set_t *vars = NULL;
/* If we're already starting with 3/4 of alloc_count, go for a
if (i * 4 > alloc_count * 3)
vars = pointer_set_create ();
else if (alloc_count)
- fewvars = VEC_alloc (tree, stack, alloc_count);
+ vec_stack_alloc (tree, fewvars, alloc_count);
/* Now go through the initial debug stmts in DEST again, this time
actually inserting in VARS or FEWVARS. Don't bother checking for
if (vars)
pointer_set_insert (vars, var);
else
- VEC_quick_push (tree, fewvars, var);
+ fewvars.quick_push (var);
}
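fewvars above shows the new spelling of a stack-allocated vector: the second template argument selects the va_stack strategy, and the vec_stack_alloc macro performs the actual allocation and registration; release () is still required. A minimal sketch (the capacity is illustrative):

  vec<tree, va_stack> scratch = vec<tree, va_stack>();
  vec_stack_alloc (tree, scratch, 16);   /* stack allocation + registration */
  scratch.quick_push (NULL_TREE);        /* safe within the reserved 16 */
  scratch.release ();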
basic_block bb = dest;
continue;
else if (!vars)
{
- int i = VEC_length (tree, fewvars);
+ int i = fewvars.length ();
while (i--)
- if (VEC_index (tree, fewvars, i) == var)
+ if (fewvars[i] == var)
break;
if (i >= 0)
continue;
- if (VEC_length (tree, fewvars) < alloc_count)
- VEC_quick_push (tree, fewvars, var);
+ if (fewvars.length () < alloc_count)
+ fewvars.quick_push (var);
else
{
vars = pointer_set_create ();
for (i = 0; i < alloc_count; i++)
- pointer_set_insert (vars, VEC_index (tree, fewvars, i));
- VEC_free (tree, stack, fewvars);
+ pointer_set_insert (vars, fewvars[i]);
+ fewvars.release ();
pointer_set_insert (vars, var);
}
}
if (vars)
pointer_set_destroy (vars);
- else if (fewvars)
- VEC_free (tree, stack, fewvars);
+ else if (fewvars.exists ())
+ fewvars.release ();
}
/* TAKEN_EDGE represents an edge taken as a result of jump threading.
thread_across_edge (gimple dummy_cond,
edge e,
bool handle_dominating_asserts,
- VEC(tree, heap) **stack,
+ vec<tree> *stack,
tree (*simplify) (gimple, gimple))
{
gimple stmt;
opportunities as they are discovered. We keep the registered
jump threading opportunities in this vector as edge pairs
(original_edge, target_edge). */
-static VEC(edge,heap) *threaded_edges;
+static vec<edge> threaded_edges;
/* When we start updating the CFG for threading, data necessary for jump
threading is attached to the AUX field for the incoming edge. Use these
edge e;
edge_iterator ei;
- for (i = 0; i < VEC_length (edge, threaded_edges); i += 3)
+ for (i = 0; i < threaded_edges.length (); i += 3)
{
- edge e = VEC_index (edge, threaded_edges, i);
+ edge e = threaded_edges[i];
edge *x = XNEWVEC (edge, 2);
e->aux = x;
- THREAD_TARGET (e) = VEC_index (edge, threaded_edges, i + 1);
- THREAD_TARGET2 (e) = VEC_index (edge, threaded_edges, i + 2);
+ THREAD_TARGET (e) = threaded_edges[i + 1];
+ THREAD_TARGET2 (e) = threaded_edges[i + 2];
bitmap_set_bit (tmp, e->dest->index);
}
/* We must know about loops in order to preserve them. */
gcc_assert (current_loops != NULL);
- if (threaded_edges == NULL)
+ if (!threaded_edges.exists ())
return false;
threaded_blocks = BITMAP_ALLOC (NULL);
BITMAP_FREE (threaded_blocks);
threaded_blocks = NULL;
- VEC_free (edge, heap, threaded_edges);
- threaded_edges = NULL;
+ threaded_edges.release ();
if (retval)
loops_state_set (LOOPS_NEED_FIXUP);
if (e2 == NULL)
return;
- if (threaded_edges == NULL)
- threaded_edges = VEC_alloc (edge, heap, 15);
+ if (!threaded_edges.exists ())
+ threaded_edges.create (15);
if (dump_file && (dump_flags & TDF_DETAILS)
&& e->dest != e2->src)
fprintf (dump_file,
" Registering jump thread around one or more intermediate blocks\n");
- VEC_safe_push (edge, heap, threaded_edges, e);
- VEC_safe_push (edge, heap, threaded_edges, e2);
- VEC_safe_push (edge, heap, threaded_edges, e3);
+ threaded_edges.safe_push (e);
+ threaded_edges.safe_push (e2);
+ threaded_edges.safe_push (e3);
}
leading to this block. If no such edge equivalency exists, then we
record NULL. These equivalences are live until we leave the dominator
subtree rooted at the block where we record the equivalency. */
-static VEC(tree,heap) *equiv_stack;
+static vec<tree> equiv_stack;
/* Global hash table implementing a mapping from invariant values
to a list of SSA_NAMEs which have the same value. We might be
tree value;
/* List of SSA_NAMEs which have the same value/key. */
- VEC(tree,heap) *equivalences;
+ vec<tree> equivalences;
};
static void uncprop_enter_block (struct dom_walk_data *, basic_block);
equiv_free (void *p)
{
struct equiv_hash_elt *elt = (struct equiv_hash_elt *) p;
- VEC_free (tree, heap, elt->equivalences);
+ elt->equivalences.release ();
free (elt);
}
void **slot;
equiv_hash_elt.value = value;
- equiv_hash_elt.equivalences = NULL;
+ equiv_hash_elt.equivalences.create (0);
slot = htab_find_slot (equiv, &equiv_hash_elt, NO_INSERT);
equiv_hash_elt_p = (struct equiv_hash_elt *) *slot;
- VEC_pop (tree, equiv_hash_elt_p->equivalences);
+ equiv_hash_elt_p->equivalences.pop ();
}
/* Record EQUIVALENCE = VALUE into our hash table. */
equiv_hash_elt = XNEW (struct equiv_hash_elt);
equiv_hash_elt->value = value;
- equiv_hash_elt->equivalences = NULL;
+ equiv_hash_elt->equivalences.create (0);
slot = htab_find_slot (equiv, equiv_hash_elt, INSERT);
equiv_hash_elt = (struct equiv_hash_elt *) *slot;
- VEC_safe_push (tree, heap, equiv_hash_elt->equivalences, equivalence);
+ equiv_hash_elt->equivalences.safe_push (equivalence);
}
/* Main driver for un-cprop. */
/* Create our global data structures. */
equiv = htab_create (1024, equiv_hash, equiv_eq, equiv_free);
- equiv_stack = VEC_alloc (tree, heap, 2);
+ equiv_stack.create (2);
/* We're going to do a dominator walk, so ensure that we have
dominance information. */
need to empty elements out of the hash table, free EQUIV_STACK,
and cleanup the AUX field on the edges. */
htab_delete (equiv);
- VEC_free (tree, heap, equiv_stack);
+ equiv_stack.release ();
FOR_EACH_BB (bb)
{
edge e;
basic_block bb ATTRIBUTE_UNUSED)
{
/* Pop the topmost value off the equiv stack. */
- tree value = VEC_pop (tree, equiv_stack);
+ tree value = equiv_stack.pop ();
/* If that value was non-null, then pop the topmost equivalency off
its equivalency stack. */
/* Lookup this argument's value in the hash table. */
equiv_hash_elt.value = arg;
- equiv_hash_elt.equivalences = NULL;
+ equiv_hash_elt.equivalences.create (0);
slot = htab_find_slot (equiv, &equiv_hash_elt, NO_INSERT);
if (slot)
then replace the value in the argument with its equivalent
SSA_NAME. Use the most recent equivalence as hopefully
that results in shortest lifetimes. */
- for (j = VEC_length (tree, elt->equivalences) - 1; j >= 0; j--)
+ for (j = elt->equivalences.length () - 1; j >= 0; j--)
{
- tree equiv = VEC_index (tree, elt->equivalences, j);
+ tree equiv = elt->equivalences[j];
if (SSA_NAME_VAR (equiv) == SSA_NAME_VAR (res)
&& TREE_TYPE (equiv) == TREE_TYPE (res))
struct edge_equivalency *equiv = (struct edge_equivalency *) e->aux;
record_equiv (equiv->rhs, equiv->lhs);
- VEC_safe_push (tree, heap, equiv_stack, equiv->rhs);
+ equiv_stack.safe_push (equiv->rhs);
recorded = true;
}
}
if (!recorded)
- VEC_safe_push (tree, heap, equiv_stack, NULL_TREE);
+ equiv_stack.safe_push (NULL_TREE);
uncprop_into_successor_phis (bb);
}
static bool
compute_control_dep_chain (basic_block bb, basic_block dep_bb,
- VEC(edge, heap) **cd_chains,
+ vec<edge> *cd_chains,
size_t *num_chains,
- VEC(edge, heap) **cur_cd_chain)
+ vec<edge> *cur_cd_chain)
{
edge_iterator ei;
edge e;
return false;
/* Could use a set instead. */
- cur_chain_len = VEC_length (edge, *cur_cd_chain);
+ cur_chain_len = cur_cd_chain->length ();
if (cur_chain_len > MAX_CHAIN_LEN)
return false;
for (i = 0; i < cur_chain_len; i++)
{
- edge e = VEC_index (edge, *cur_cd_chain, i);
+ edge e = (*cur_cd_chain)[i];
/* cycle detected. */
if (e->src == bb)
return false;
continue;
cd_bb = e->dest;
- VEC_safe_push (edge, heap, *cur_cd_chain, e);
+ cur_cd_chain->safe_push (e);
while (!is_non_loop_exit_postdominating (cd_bb, bb))
{
if (cd_bb == dep_bb)
/* Found a direct control dependence. */
if (*num_chains < MAX_NUM_CHAINS)
{
- cd_chains[*num_chains]
- = VEC_copy (edge, heap, *cur_cd_chain);
+ cd_chains[*num_chains] = cur_cd_chain->copy ();
(*num_chains)++;
}
found_cd_chain = true;
if (cd_bb == EXIT_BLOCK_PTR)
break;
}
- VEC_pop (edge, *cur_cd_chain);
- gcc_assert (VEC_length (edge, *cur_cd_chain) == cur_chain_len);
+ cur_cd_chain->pop ();
+ gcc_assert (cur_cd_chain->length () == cur_chain_len);
}
- gcc_assert (VEC_length (edge, *cur_cd_chain) == cur_chain_len);
+ gcc_assert (cur_cd_chain->length () == cur_chain_len);
return found_cd_chain;
}
bool invert;
} *use_pred_info_t;
-DEF_VEC_P(use_pred_info_t);
-DEF_VEC_ALLOC_P(use_pred_info_t, heap);
/* Converts the chains of control dependence edges into a set of
*NUM_PREDS is the number of composite predicates. */
static bool
-convert_control_dep_chain_into_preds (VEC(edge, heap) **dep_chains,
+convert_control_dep_chain_into_preds (vec<edge> *dep_chains,
size_t num_chains,
- VEC(use_pred_info_t, heap) ***preds,
+ vec<use_pred_info_t> **preds,
size_t *num_preds)
{
bool has_valid_pred = false;
/* Now convert the control dep chain into a set
of predicates. */
- *preds = XCNEWVEC (VEC(use_pred_info_t, heap) *,
- num_chains);
+ typedef vec<use_pred_info_t> vec_use_pred_info_t_heap;
+ *preds = XCNEWVEC (vec_use_pred_info_t_heap, num_chains);
*num_preds = num_chains;
for (i = 0; i < num_chains; i++)
{
- VEC(edge, heap) *one_cd_chain = dep_chains[i];
+ vec<edge> one_cd_chain = dep_chains[i];
has_valid_pred = false;
- for (j = 0; j < VEC_length (edge, one_cd_chain); j++)
+ for (j = 0; j < one_cd_chain.length (); j++)
{
gimple cond_stmt;
gimple_stmt_iterator gsi;
use_pred_info_t one_pred;
edge e;
- e = VEC_index (edge, one_cd_chain, j);
+ e = one_cd_chain[j];
guard_bb = e->src;
gsi = gsi_last_bb (guard_bb);
if (gsi_end_p (gsi))
one_pred = XNEW (struct use_pred_info);
one_pred->cond = cond_stmt;
one_pred->invert = !!(e->flags & EDGE_FALSE_VALUE);
- VEC_safe_push (use_pred_info_t, heap, (*preds)[i], one_pred);
+ (*preds)[i].safe_push (one_pred);
has_valid_pred = true;
}
the phi whose result is used in USE_BB. */
static bool
-find_predicates (VEC(use_pred_info_t, heap) ***preds,
+find_predicates (vec<use_pred_info_t> **preds,
size_t *num_preds,
basic_block phi_bb,
basic_block use_bb)
{
size_t num_chains = 0, i;
- VEC(edge, heap) **dep_chains = 0;
- VEC(edge, heap) *cur_chain = 0;
+ vec<edge> *dep_chains = 0;
+ vec<edge> cur_chain = vec<edge>();
bool has_valid_pred = false;
basic_block cd_root = 0;
- dep_chains = XCNEWVEC (VEC(edge, heap) *, MAX_NUM_CHAINS);
+ typedef vec<edge> vec_edge_heap;
+ dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);
/* First find the closest bb that is control equivalent to PHI_BB
that also dominates USE_BB. */
preds,
num_preds);
/* Free individual chain */
- VEC_free (edge, heap, cur_chain);
+ cur_chain.release ();
for (i = 0; i < num_chains; i++)
- VEC_free (edge, heap, dep_chains[i]);
+ dep_chains[i].release ();
free (dep_chains);
return has_valid_pred;
}
static void
collect_phi_def_edges (gimple phi, basic_block cd_root,
- VEC(edge, heap) **edges,
+ vec<edge> *edges,
struct pointer_set_t *visited_phis)
{
size_t i, n;
fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
print_gimple_stmt (dump_file, phi, 0, 0);
}
- VEC_safe_push (edge, heap, *edges, opnd_edge);
+ edges->safe_push (opnd_edge);
}
else
{
fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
print_gimple_stmt (dump_file, phi, 0, 0);
}
- VEC_safe_push (edge, heap, *edges, opnd_edge);
+ edges->safe_push (opnd_edge);
}
}
}
composite predicates pointed to by PREDS. */
static bool
-find_def_preds (VEC(use_pred_info_t, heap) ***preds,
+find_def_preds (vec<use_pred_info_t> **preds,
size_t *num_preds, gimple phi)
{
size_t num_chains = 0, i, n;
- VEC(edge, heap) **dep_chains = 0;
- VEC(edge, heap) *cur_chain = 0;
- VEC(edge, heap) *def_edges = 0;
+ vec<edge> *dep_chains = 0;
+ vec<edge> cur_chain = vec<edge>();
+ vec<edge> def_edges = vec<edge>();
bool has_valid_pred = false;
basic_block phi_bb, cd_root = 0;
struct pointer_set_t *visited_phis;
- dep_chains = XCNEWVEC (VEC(edge, heap) *, MAX_NUM_CHAINS);
+ typedef vec<edge> vec_edge_heap;
+ dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);
phi_bb = gimple_bb (phi);
/* First find the closest dominating bb to be
collect_phi_def_edges (phi, cd_root, &def_edges, visited_phis);
pointer_set_destroy (visited_phis);
- n = VEC_length (edge, def_edges);
+ n = def_edges.length ();
if (n == 0)
return false;
size_t prev_nc, j;
edge opnd_edge;
- opnd_edge = VEC_index (edge, def_edges, i);
+ opnd_edge = def_edges[i];
prev_nc = num_chains;
compute_control_dep_chain (cd_root, opnd_edge->src,
dep_chains, &num_chains,
&cur_chain);
/* Free individual chain */
- VEC_free (edge, heap, cur_chain);
- cur_chain = 0;
+ cur_chain.release ();
/* Now update the newly added chains with
the phi operand edge: */
num_chains++;
for (j = prev_nc; j < num_chains; j++)
{
- VEC_safe_push (edge, heap, dep_chains[j], opnd_edge);
+ dep_chains[j].safe_push (opnd_edge);
}
}
}
preds,
num_preds);
for (i = 0; i < num_chains; i++)
- VEC_free (edge, heap, dep_chains[i]);
+ dep_chains[i].release ();
free (dep_chains);
return has_valid_pred;
}
static void
dump_predicates (gimple usestmt, size_t num_preds,
- VEC(use_pred_info_t, heap) **preds,
+ vec<use_pred_info_t> *preds,
const char* msg)
{
size_t i, j;
- VEC(use_pred_info_t, heap) *one_pred_chain;
+ vec<use_pred_info_t> one_pred_chain;
fprintf (dump_file, msg);
print_gimple_stmt (dump_file, usestmt, 0, 0);
fprintf (dump_file, "is guarded by :\n");
size_t np;
one_pred_chain = preds[i];
- np = VEC_length (use_pred_info_t, one_pred_chain);
+ np = one_pred_chain.length ();
for (j = 0; j < np; j++)
{
use_pred_info_t one_pred
- = VEC_index (use_pred_info_t, one_pred_chain, j);
+ = one_pred_chain[j];
if (one_pred->invert)
fprintf (dump_file, " (.NOT.) ");
print_gimple_stmt (dump_file, one_pred->cond, 0, 0);
static void
destroy_predicate_vecs (size_t n,
- VEC(use_pred_info_t, heap) ** preds)
+ vec<use_pred_info_t> * preds)
{
size_t i, j;
for (i = 0; i < n; i++)
{
- for (j = 0; j < VEC_length (use_pred_info_t, preds[i]); j++)
- free (VEC_index (use_pred_info_t, preds[i], j));
- VEC_free (use_pred_info_t, heap, preds[i]);
+ for (j = 0; j < preds[i].length (); j++)
+ free (preds[i][j]);
+ preds[i].release ();
}
free (preds);
}
static bool
find_matching_predicate_in_rest_chains (use_pred_info_t pred,
- VEC(use_pred_info_t, heap) **preds,
+ vec<use_pred_info_t> *preds,
size_t num_pred_chains)
{
size_t i, j, n;
for (i = 1; i < num_pred_chains; i++)
{
bool found = false;
- VEC(use_pred_info_t, heap) *one_chain = preds[i];
- n = VEC_length (use_pred_info_t, one_chain);
+ vec<use_pred_info_t> one_chain = preds[i];
+ n = one_chain.length ();
for (j = 0; j < n; j++)
{
use_pred_info_t pred2
- = VEC_index (use_pred_info_t, one_chain, j);
+ = one_chain[j];
/* can relax the condition comparison to not
use address comparison. However, the most common
case is that multiple control dependent paths share
static bool
use_pred_not_overlap_with_undef_path_pred (
size_t num_preds,
- VEC(use_pred_info_t, heap) **preds,
+ vec<use_pred_info_t> *preds,
gimple phi, unsigned uninit_opnds,
struct pointer_set_t *visited_phis)
{
enum tree_code cmp_code;
bool swap_cond = false;
bool invert = false;
- VEC(use_pred_info_t, heap) *the_pred_chain;
+ vec<use_pred_info_t> the_pred_chain;
bitmap visited_flag_phis = NULL;
bool all_pruned = false;
a predicate that is a comparison of a flag variable against
a constant. */
the_pred_chain = preds[0];
- n = VEC_length (use_pred_info_t, the_pred_chain);
+ n = the_pred_chain.length ();
for (i = 0; i < n; i++)
{
gimple cond;
tree cond_lhs, cond_rhs, flag = 0;
use_pred_info_t the_pred
- = VEC_index (use_pred_info_t, the_pred_chain, i);
+ = the_pred_chain[i];
cond = the_pred->cond;
invert = the_pred->invert;
typedef struct norm_cond
{
- VEC(gimple, heap) *conds;
+ vec<gimple> conds;
enum tree_code cond_code;
bool invert;
} *norm_cond_t;
gc = gimple_code (cond);
if (gc != GIMPLE_ASSIGN)
{
- VEC_safe_push (gimple, heap, norm_cond->conds, cond);
+ norm_cond->conds.safe_push (cond);
return;
}
SSA_NAME_DEF_STMT (rhs2),
norm_cond, cond_code);
else
- VEC_safe_push (gimple, heap, norm_cond->conds, cond);
+ norm_cond->conds.safe_push (cond);
return;
}
norm_cond->cond_code = cur_cond_code;
}
else
- VEC_safe_push (gimple, heap, norm_cond->conds, cond);
+ norm_cond->conds.safe_push (cond);
}
/* See normalize_cond_1 for details. INVERT is a flag to indicate
norm_cond->cond_code = ERROR_MARK;
norm_cond->invert = false;
- norm_cond->conds = NULL;
+ norm_cond->conds.create (0);
gcc_assert (gimple_code (cond) == GIMPLE_COND);
cond_code = gimple_cond_code (cond);
if (invert)
norm_cond, ERROR_MARK);
else
{
- VEC_safe_push (gimple, heap, norm_cond->conds, cond);
+ norm_cond->conds.safe_push (cond);
norm_cond->invert = invert;
}
}
else
{
- VEC_safe_push (gimple, heap, norm_cond->conds, cond);
+ norm_cond->conds.safe_push (cond);
norm_cond->invert = invert;
}
- gcc_assert (VEC_length (gimple, norm_cond->conds) == 1
+ gcc_assert (norm_cond->conds.length () == 1
|| is_and_or_or (norm_cond->cond_code, NULL));
}
norm_cond_t norm_cond, bool reverse)
{
size_t i;
- size_t len = VEC_length (gimple, norm_cond->conds);
+ size_t len = norm_cond->conds.length ();
for (i = 0; i < len; i++)
{
if (is_gcond_subset_of (cond, invert,
- VEC_index (gimple, norm_cond->conds, i),
+ norm_cond->conds[i],
false, reverse))
return true;
}
norm_cond_t norm_cond2)
{
size_t i;
- size_t len = VEC_length (gimple, norm_cond1->conds);
+ size_t len = norm_cond1->conds.length ();
for (i = 0; i < len; i++)
{
- if (!is_subset_of_any (VEC_index (gimple, norm_cond1->conds, i),
+ if (!is_subset_of_any (norm_cond1->conds[i],
false, norm_cond2, false))
return false;
}
norm_cond_t norm_cond2)
{
size_t i;
- size_t len = VEC_length (gimple, norm_cond2->conds);
+ size_t len = norm_cond2->conds.length ();
for (i = 0; i < len; i++)
{
- if (!is_subset_of_any (VEC_index (gimple, norm_cond2->conds, i),
+ if (!is_subset_of_any (norm_cond2->conds[i],
false, norm_cond1, true))
return false;
}
else if (code2 == BIT_IOR_EXPR)
{
size_t len1;
- len1 = VEC_length (gimple, norm_cond1->conds);
+ len1 = norm_cond1->conds.length ();
for (i = 0; i < len1; i++)
{
- gimple cond1 = VEC_index (gimple, norm_cond1->conds, i);
+ gimple cond1 = norm_cond1->conds[i];
if (is_subset_of_any (cond1, false, norm_cond2, false))
return true;
}
else
{
gcc_assert (code2 == ERROR_MARK);
- gcc_assert (VEC_length (gimple, norm_cond2->conds) == 1);
- return is_subset_of_any (VEC_index (gimple, norm_cond2->conds, 0),
+ gcc_assert (norm_cond2->conds.length () == 1);
+ return is_subset_of_any (norm_cond2->conds[0],
norm_cond2->invert, norm_cond1, true);
}
}
else
{
gcc_assert (code1 == ERROR_MARK);
- gcc_assert (VEC_length (gimple, norm_cond1->conds) == 1);
+ gcc_assert (norm_cond1->conds.length () == 1);
/* Conservatively returns false if NORM_COND1 is non-decomposable
and NORM_COND2 is an AND expression. */
if (code2 == BIT_AND_EXPR)
return false;
if (code2 == BIT_IOR_EXPR)
- return is_subset_of_any (VEC_index (gimple, norm_cond1->conds, 0),
+ return is_subset_of_any (norm_cond1->conds[0],
norm_cond1->invert, norm_cond2, false);
gcc_assert (code2 == ERROR_MARK);
- gcc_assert (VEC_length (gimple, norm_cond2->conds) == 1);
- return is_gcond_subset_of (VEC_index (gimple, norm_cond1->conds, 0),
+ gcc_assert (norm_cond2->conds.length () == 1);
+ return is_gcond_subset_of (norm_cond1->conds[0],
norm_cond1->invert,
- VEC_index (gimple, norm_cond2->conds, 0),
+ norm_cond2->conds[0],
norm_cond2->invert, false);
}
}
is_subset = is_norm_cond_subset_of (&norm_cond1, &norm_cond2);
/* Free memory */
- VEC_free (gimple, heap, norm_cond1.conds);
- VEC_free (gimple, heap, norm_cond2.conds);
+ norm_cond1.conds.release ();
+ norm_cond2.conds.release ();
return is_subset;
}
of that of PRED2. Returns false if it can not be proved so. */
static bool
-is_pred_chain_subset_of (VEC(use_pred_info_t, heap) *pred1,
- VEC(use_pred_info_t, heap) *pred2)
+is_pred_chain_subset_of (vec<use_pred_info_t> pred1,
+ vec<use_pred_info_t> pred2)
{
size_t np1, np2, i1, i2;
- np1 = VEC_length (use_pred_info_t, pred1);
- np2 = VEC_length (use_pred_info_t, pred2);
+ np1 = pred1.length ();
+ np2 = pred2.length ();
for (i2 = 0; i2 < np2; i2++)
{
bool found = false;
use_pred_info_t info2
- = VEC_index (use_pred_info_t, pred2, i2);
+ = pred2[i2];
for (i1 = 0; i1 < np1; i1++)
{
use_pred_info_t info1
- = VEC_index (use_pred_info_t, pred1, i1);
+ = pred1[i1];
if (is_pred_expr_subset_of (info1, info2))
{
found = true;
In other words, the result is conservative. */
static bool
-is_included_in (VEC(use_pred_info_t, heap) *one_pred,
- VEC(use_pred_info_t, heap) **preds,
+is_included_in (vec<use_pred_info_t> one_pred,
+ vec<use_pred_info_t> *preds,
size_t n)
{
size_t i;
emitted. */
static bool
-is_superset_of (VEC(use_pred_info_t, heap) **preds1,
+is_superset_of (vec<use_pred_info_t> *preds1,
size_t n1,
- VEC(use_pred_info_t, heap) **preds2,
+ vec<use_pred_info_t> *preds2,
size_t n2)
{
size_t i;
- VEC(use_pred_info_t, heap) *one_pred_chain;
+ vec<use_pred_info_t> one_pred_chain;
for (i = 0; i < n2; i++)
{
pred_chain_length_cmp (const void *p1, const void *p2)
{
use_pred_info_t i1, i2;
- VEC(use_pred_info_t, heap) * const *chain1
- = (VEC(use_pred_info_t, heap) * const *)p1;
- VEC(use_pred_info_t, heap) * const *chain2
- = (VEC(use_pred_info_t, heap) * const *)p2;
+ vec<use_pred_info_t> const *chain1
+ = (vec<use_pred_info_t> const *)p1;
+ vec<use_pred_info_t> const *chain2
+ = (vec<use_pred_info_t> const *)p2;
- if (VEC_length (use_pred_info_t, *chain1)
- != VEC_length (use_pred_info_t, *chain2))
- return (VEC_length (use_pred_info_t, *chain1)
- - VEC_length (use_pred_info_t, *chain2));
+ if (chain1->length () != chain2->length ())
+ return (chain1->length () - chain2->length ());
- i1 = VEC_index (use_pred_info_t, *chain1, 0);
- i2 = VEC_index (use_pred_info_t, *chain2, 0);
+ i1 = (*chain1)[0];
+ i2 = (*chain2)[0];
/* Allow predicates with similar prefixes to come together. */
if (!i1->invert && i2->invert)
the number of chains. Returns true if normalization happens. */
static bool
-normalize_preds (VEC(use_pred_info_t, heap) **preds, size_t *n)
+normalize_preds (vec<use_pred_info_t> *preds, size_t *n)
{
size_t i, j, ll;
- VEC(use_pred_info_t, heap) *pred_chain;
- VEC(use_pred_info_t, heap) *x = 0;
+ vec<use_pred_info_t> pred_chain;
+ vec<use_pred_info_t> x = vec<use_pred_info_t>();
use_pred_info_t xj = 0, nxj = 0;
if (*n < 2)
/* First sort the chains in ascending order of lengths. */
qsort (preds, *n, sizeof (void *), pred_chain_length_cmp);
pred_chain = preds[0];
- ll = VEC_length (use_pred_info_t, pred_chain);
+ ll = pred_chain.length ();
if (ll != 1)
{
if (ll == 2)
{
use_pred_info_t xx, yy, xx2, nyy;
- VEC(use_pred_info_t, heap) *pred_chain2 = preds[1];
- if (VEC_length (use_pred_info_t, pred_chain2) != 2)
+ vec<use_pred_info_t> pred_chain2 = preds[1];
+ if (pred_chain2.length () != 2)
return false;
/* See if simplification x AND y OR x AND !y is possible. */
- xx = VEC_index (use_pred_info_t, pred_chain, 0);
- yy = VEC_index (use_pred_info_t, pred_chain, 1);
- xx2 = VEC_index (use_pred_info_t, pred_chain2, 0);
- nyy = VEC_index (use_pred_info_t, pred_chain2, 1);
+ xx = pred_chain[0];
+ yy = pred_chain[1];
+ xx2 = pred_chain2[0];
+ nyy = pred_chain2[1];
if (gimple_cond_lhs (xx->cond) != gimple_cond_lhs (xx2->cond)
|| gimple_cond_rhs (xx->cond) != gimple_cond_rhs (xx2->cond)
|| gimple_cond_code (xx->cond) != gimple_cond_code (xx2->cond)
free (yy);
free (nyy);
free (xx2);
- VEC_free (use_pred_info_t, heap, pred_chain);
- VEC_free (use_pred_info_t, heap, pred_chain2);
- pred_chain = 0;
- VEC_safe_push (use_pred_info_t, heap, pred_chain, xx);
+ pred_chain.release ();
+ pred_chain2.release ();
+ pred_chain.safe_push (xx);
preds[0] = pred_chain;
for (i = 1; i < *n - 1; i++)
preds[i] = preds[i + 1];
- preds[*n - 1] = 0;
+ preds[*n - 1].create (0);
*n = *n - 1;
}
else
return false;
}
- VEC_safe_push (use_pred_info_t, heap, x,
- VEC_index (use_pred_info_t, pred_chain, 0));
+ x.safe_push (pred_chain[0]);
/* The loop extracts x1, x2, x3, etc from chains
x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3) OR ... */
for (i = 1; i < *n; i++)
{
pred_chain = preds[i];
- if (VEC_length (use_pred_info_t, pred_chain) != i + 1)
+ if (pred_chain.length () != i + 1)
return false;
for (j = 0; j < i; j++)
{
- xj = VEC_index (use_pred_info_t, x, j);
- nxj = VEC_index (use_pred_info_t, pred_chain, j);
+ xj = x[j];
+ nxj = pred_chain[j];
/* Check if nxj is !xj */
if (gimple_cond_lhs (xj->cond) != gimple_cond_lhs (nxj->cond)
return false;
}
- VEC_safe_push (use_pred_info_t, heap, x,
- VEC_index (use_pred_info_t, pred_chain, i));
+ x.safe_push (pred_chain[i]);
}
/* Now normalize the pred chains using the extracted x1, x2, x3 etc. */
for (j = 0; j < *n; j++)
{
use_pred_info_t t;
- xj = VEC_index (use_pred_info_t, x, j);
+ xj = x[j];
t = XNEW (struct use_pred_info);
*t = *xj;
- VEC_replace (use_pred_info_t, x, j, t);
+ x[j] = t;
}
for (i = 0; i < *n; i++)
{
pred_chain = preds[i];
- for (j = 0; j < VEC_length (use_pred_info_t, pred_chain); j++)
- free (VEC_index (use_pred_info_t, pred_chain, j));
- VEC_free (use_pred_info_t, heap, pred_chain);
- pred_chain = 0;
+ for (j = 0; j < pred_chain.length (); j++)
+ free (pred_chain[j]);
+ pred_chain.release ();
/* A new chain. */
- VEC_safe_push (use_pred_info_t, heap, pred_chain,
- VEC_index (use_pred_info_t, x, i));
+ pred_chain.safe_push (x[i]);
preds[i] = pred_chain;
}
return true;
struct pointer_set_t *visited_phis)
{
basic_block phi_bb;
- VEC(use_pred_info_t, heap) **preds = 0;
- VEC(use_pred_info_t, heap) **def_preds = 0;
+ vec<use_pred_info_t> *preds = 0;
+ vec<use_pred_info_t> *def_preds = 0;
size_t num_preds = 0, num_def_preds = 0;
bool has_valid_preds = false;
bool is_properly_guarded = false;
static gimple
find_uninit_use (gimple phi, unsigned uninit_opnds,
- VEC(gimple, heap) **worklist,
+ vec<gimple> *worklist,
struct pointer_set_t *added_to_worklist)
{
tree phi_result;
print_gimple_stmt (dump_file, use_stmt, 0, 0);
}
- VEC_safe_push (gimple, heap, *worklist, use_stmt);
- pointer_set_insert (possibly_undefined_names,
- phi_result);
+ worklist->safe_push (use_stmt);
+ pointer_set_insert (possibly_undefined_names, phi_result);
}
}
a pointer set tracking if the new phi is added to the worklist or not. */
static void
-warn_uninitialized_phi (gimple phi, VEC(gimple, heap) **worklist,
+warn_uninitialized_phi (gimple phi, vec<gimple> *worklist,
struct pointer_set_t *added_to_worklist)
{
unsigned uninit_opnds;
{
basic_block bb;
gimple_stmt_iterator gsi;
- VEC(gimple, heap) *worklist = 0;
+ vec<gimple> worklist = vec<gimple>();
struct pointer_set_t *added_to_worklist;
calculate_dominance_info (CDI_DOMINATORS);
if (TREE_CODE (op) == SSA_NAME
&& ssa_undefined_value_p (op))
{
- VEC_safe_push (gimple, heap, worklist, phi);
+ worklist.safe_push (phi);
pointer_set_insert (added_to_worklist, phi);
if (dump_file && (dump_flags & TDF_DETAILS))
{
}
}
- while (VEC_length (gimple, worklist) != 0)
+ while (worklist.length () != 0)
{
gimple cur_phi = 0;
- cur_phi = VEC_pop (gimple, worklist);
+ cur_phi = worklist.pop ();
warn_uninitialized_phi (cur_phi, &worklist, added_to_worklist);
}
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
pointer_set_destroy (added_to_worklist);
pointer_set_destroy (possibly_undefined_names);
possibly_undefined_names = NULL;
redirect_edge_var_map_add (edge e, tree result, tree def, source_location locus)
{
void **slot;
- edge_var_map_vector old_head, head;
+ edge_var_map_vector *head;
edge_var_map new_node;
if (edge_var_maps == NULL)
edge_var_maps = pointer_map_create ();
slot = pointer_map_insert (edge_var_maps, e);
- old_head = head = (edge_var_map_vector) *slot;
+ head = (edge_var_map_vector *) *slot;
if (!head)
{
- head = VEC_alloc (edge_var_map, heap, 5);
+ head = new edge_var_map_vector;
+ head->create (5);
*slot = head;
}
new_node.def = def;
new_node.result = result;
new_node.locus = locus;
- VEC_safe_push (edge_var_map, heap, head, new_node);
- if (old_head != head)
- {
- /* The push did some reallocation. Update the pointer map. */
- *slot = head;
- }
+ head->safe_push (new_node);
}
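The deleted "push did some reallocation" fix-up is the point of this hunk: the pointer map now stores a heap-allocated vector object, and safe_push may move the element storage but never the vec object itself, so the cached slot stays valid. Sketch:

  edge_var_map_vector *head = new edge_var_map_vector;
  head->create (5);
  *slot = head;                  /* this address is stable from here on */
  head->safe_push (new_node);    /* may grow storage; never moves *head */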
redirect_edge_var_map_clear (edge e)
{
void **slot;
- edge_var_map_vector head;
+ edge_var_map_vector *head;
if (!edge_var_maps)
return;
if (slot)
{
- head = (edge_var_map_vector) *slot;
- VEC_free (edge_var_map, heap, head);
+ head = (edge_var_map_vector *) *slot;
+ delete head;
*slot = NULL;
}
}
redirect_edge_var_map_dup (edge newe, edge olde)
{
void **new_slot, **old_slot;
- edge_var_map_vector head;
+ edge_var_map_vector *head;
if (!edge_var_maps)
return;
old_slot = pointer_map_contains (edge_var_maps, olde);
if (!old_slot)
return;
- head = (edge_var_map_vector) *old_slot;
+ head = (edge_var_map_vector *) *old_slot;
+ edge_var_map_vector *new_head = new edge_var_map_vector;
if (head)
- *new_slot = VEC_copy (edge_var_map, heap, head);
+ *new_head = head->copy ();
else
- *new_slot = VEC_alloc (edge_var_map, heap, 5);
+ new_head->create (5);
+ *new_slot = new_head;
}
/* Return the variable mappings for a given edge. If there is none, return
NULL. */
-edge_var_map_vector
+edge_var_map_vector *
redirect_edge_var_map_vector (edge e)
{
void **slot;
if (!slot)
return NULL;
- return (edge_var_map_vector) *slot;
+ return (edge_var_map_vector *) *slot;
}
/* Used by redirect_edge_var_map_destroy to free all memory. */
void **value,
void *data ATTRIBUTE_UNUSED)
{
- edge_var_map_vector head = (edge_var_map_vector) *value;
- VEC_free (edge_var_map, heap, head);
+ edge_var_map_vector *head = (edge_var_map_vector *) *value;
+ delete head;
return true;
}
flush_pending_stmts (edge e)
{
gimple phi;
- edge_var_map_vector v;
+ edge_var_map_vector *v;
edge_var_map *vm;
int i;
gimple_stmt_iterator gsi;
return;
for (gsi = gsi_start_phis (e->dest), i = 0;
- !gsi_end_p (gsi) && VEC_iterate (edge_var_map, v, i, vm);
+ !gsi_end_p (gsi) && v->iterate (i, &vm);
gsi_next (&gsi), i++)
{
tree def;
maybe_optimize_var (var, addresses_taken, not_reg_needs,
suitable_for_renaming);
- FOR_EACH_VEC_ELT (tree, cfun->local_decls, i, var)
+ FOR_EACH_VEC_SAFE_ELT (cfun->local_decls, i, var)
maybe_optimize_var (var, addresses_taken, not_reg_needs,
suitable_for_renaming);
if (size < 50)
size = 50;
- SSANAMES (fn) = VEC_alloc (tree, gc, size);
+ vec_alloc (SSANAMES (fn), size);
/* Version 0 is special, so reserve the first slot in the table. Though
currently unused, we may use version 0 in alias analysis as part of
the heuristics used to group aliases when the alias sets are too
large.
- We use VEC_quick_push here because we know that SSA_NAMES has at
+ We use vec::quick_push here because we know that SSA_NAMES has at
least 50 elements reserved in it. */
- VEC_quick_push (tree, SSANAMES (fn), NULL_TREE);
+ SSANAMES (fn)->quick_push (NULL_TREE);
FREE_SSANAMES (fn) = NULL;
fn->gimple_df->ssa_renaming_needed = 0;
void
fini_ssanames (void)
{
- VEC_free (tree, gc, SSANAMES (cfun));
- VEC_free (tree, gc, FREE_SSANAMES (cfun));
+ vec_free (SSANAMES (cfun));
+ vec_free (FREE_SSANAMES (cfun));
}
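SSANAMES and FREE_SSANAMES are now of type vec<tree, va_gc> * and are manipulated through the vec_* free functions, all of which tolerate a NULL vector; quick_push is only used right after vec_alloc has reserved enough space. Sketch:

  vec<tree, va_gc> *names = NULL;
  vec_alloc (names, 50);               /* allocate with room for 50 */
  names->quick_push (NULL_TREE);       /* safe: capacity was just reserved */
  vec_safe_push (names, NULL_TREE);    /* reallocates if needed; accepts NULL vec */
  vec_free (names);                    /* frees and clears the pointer */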
/* Dump some simple statistics regarding the re-use of SSA_NAME nodes. */
|| (TYPE_P (var) && is_gimple_reg_type (var)));
/* If our free list has an element, then use it. */
- if (!VEC_empty (tree, FREE_SSANAMES (fn)))
+ if (!vec_safe_is_empty (FREE_SSANAMES (fn)))
{
- t = VEC_pop (tree, FREE_SSANAMES (fn));
+ t = FREE_SSANAMES (fn)->pop ();
if (GATHER_STATISTICS)
ssa_name_nodes_reused++;
/* The node was cleared out when we put it on the free list, so
there is no need to do so again here. */
gcc_assert (ssa_name (SSA_NAME_VERSION (t)) == NULL);
- VEC_replace (tree, SSANAMES (fn), SSA_NAME_VERSION (t), t);
+ (*SSANAMES (fn))[SSA_NAME_VERSION (t)] = t;
}
else
{
t = make_node (SSA_NAME);
- SSA_NAME_VERSION (t) = VEC_length (tree, SSANAMES (fn));
- VEC_safe_push (tree, gc, SSANAMES (fn), t);
+ SSA_NAME_VERSION (t) = SSANAMES (fn)->length ();
+ vec_safe_push (SSANAMES (fn), t);
if (GATHER_STATISTICS)
ssa_name_nodes_created++;
}
while (imm->next != imm)
delink_imm_use (imm->next);
- VEC_replace (tree, SSANAMES (cfun),
- SSA_NAME_VERSION (var), NULL_TREE);
+ (*SSANAMES (cfun))[SSA_NAME_VERSION (var)] = NULL_TREE;
memset (var, 0, tree_size (var));
imm->prev = imm;
SSA_NAME_IN_FREE_LIST (var) = 1;
/* And finally put it on the free list. */
- VEC_safe_push (tree, gc, FREE_SSANAMES (cfun), var);
+ vec_safe_push (FREE_SSANAMES (cfun), var);
}
}
release_dead_ssa_names (void)
{
unsigned i, j;
- int n = VEC_length (tree, FREE_SSANAMES (cfun));
+ int n = vec_safe_length (FREE_SSANAMES (cfun));
/* Now release the freelist. */
- VEC_free (tree, gc, FREE_SSANAMES (cfun));
- FREE_SSANAMES (cfun) = NULL;
+ vec_free (FREE_SSANAMES (cfun));
/* And compact the SSA number space. We make sure to not change the
relative order of SSA versions. */
- for (i = 1, j = 1; i < VEC_length (tree, cfun->gimple_df->ssa_names); ++i)
+ for (i = 1, j = 1; i < cfun->gimple_df->ssa_names->length (); ++i)
{
tree name = ssa_name (i);
if (name)
if (i != j)
{
SSA_NAME_VERSION (name) = j;
- VEC_replace (tree, cfun->gimple_df->ssa_names, j, name);
+ (*cfun->gimple_df->ssa_names)[j] = name;
}
j++;
}
}
- VEC_truncate (tree, cfun->gimple_df->ssa_names, j);
+ cfun->gimple_df->ssa_names->truncate (j);
statistics_counter_event (cfun, "SSA names released", n);
statistics_counter_event (cfun, "SSA name holes removed", i - j);
static bool
reachable_at_most_once (basic_block va_arg_bb, basic_block va_start_bb)
{
- VEC (edge, heap) *stack = NULL;
+ vec<edge> stack = vec<edge>();
edge e;
edge_iterator ei;
sbitmap visited;
ret = true;
FOR_EACH_EDGE (e, ei, va_arg_bb->preds)
- VEC_safe_push (edge, heap, stack, e);
+ stack.safe_push (e);
- while (! VEC_empty (edge, stack))
+ while (! stack.is_empty ())
{
basic_block src;
- e = VEC_pop (edge, stack);
+ e = stack.pop ();
src = e->src;
if (e->flags & EDGE_COMPLEX)
{
bitmap_set_bit (visited, src->index);
FOR_EACH_EDGE (e, ei, src->preds)
- VEC_safe_push (edge, heap, stack, e);
+ stack.safe_push (e);
}
}
- VEC_free (edge, heap, stack);
+ stack.release ();
sbitmap_free (visited);
return ret;
}
struct bitpack_d *bp, tree expr)
{
TRANSLATION_UNIT_LANGUAGE (expr) = xstrdup (bp_unpack_string (data_in, bp));
- VEC_safe_push (tree, gc, all_translation_units, expr);
+ vec_safe_push (all_translation_units, expr);
}
/* Unpack a TS_TARGET_OPTION tree from BP into EXPR. */
{
unsigned HOST_WIDE_INT length = bp_unpack_var_len_unsigned (bp);
if (length > 0)
- VEC_safe_grow (tree, gc, BINFO_BASE_ACCESSES (expr), length);
+ vec_safe_grow (BINFO_BASE_ACCESSES (expr), length);
}
if (CODE_CONTAINS_STRUCT (code, TS_CONSTRUCTOR))
{
unsigned HOST_WIDE_INT length = bp_unpack_var_len_unsigned (bp);
if (length > 0)
- VEC_safe_grow (constructor_elt, gc, CONSTRUCTOR_ELTS (expr), length);
+ vec_safe_grow (CONSTRUCTOR_ELTS (expr), length);
}
}
/* Note that the number of slots in EXPR was read in
streamer_alloc_tree when instantiating EXPR. However, the
- vector is empty so we cannot rely on VEC_length to know how many
+ vector is empty so we cannot rely on vec::length to know how many
elements to read. So, this list is emitted as a 0-terminated
list on the writer side. */
do
{
t = stream_read_tree (ib, data_in);
if (t)
- VEC_quick_push (tree, BINFO_BASE_BINFOS (expr), t);
+ BINFO_BASE_BINFOS (expr)->quick_push (t);
}
while (t);
/* The vector of BINFO_BASE_ACCESSES is pre-allocated during
unpacking the bitfield section. */
- for (i = 0; i < VEC_length (tree, BINFO_BASE_ACCESSES (expr)); i++)
+ for (i = 0; i < vec_safe_length (BINFO_BASE_ACCESSES (expr)); i++)
{
tree a = stream_read_tree (ib, data_in);
- VEC_replace (tree, BINFO_BASE_ACCESSES (expr), i, a);
+ (*BINFO_BASE_ACCESSES (expr))[i] = a;
}
BINFO_INHERITANCE_CHAIN (expr) = stream_read_tree (ib, data_in);
constructor_elt e;
e.index = stream_read_tree (ib, data_in);
e.value = stream_read_tree (ib, data_in);
- VEC_replace (constructor_elt, CONSTRUCTOR_ELTS (expr), i, e);
+ (*CONSTRUCTOR_ELTS (expr))[i] = e;
}
}
pack_ts_optimization (bp, expr);
if (CODE_CONTAINS_STRUCT (code, TS_BINFO))
- bp_pack_var_len_unsigned (bp, VEC_length (tree, BINFO_BASE_ACCESSES (expr)));
+ bp_pack_var_len_unsigned (bp, vec_safe_length (BINFO_BASE_ACCESSES (expr)));
if (CODE_CONTAINS_STRUCT (code, TS_CONSTRUCTOR))
bp_pack_var_len_unsigned (bp, CONSTRUCTOR_NELTS (expr));
/* Note that the number of BINFO slots has already been emitted in
EXPR's header (see streamer_write_tree_header) because this length
is needed to build the empty BINFO node on the reader side. */
- FOR_EACH_VEC_ELT (tree, BINFO_BASE_BINFOS (expr), i, t)
+ FOR_EACH_VEC_ELT (*BINFO_BASE_BINFOS (expr), i, t)
stream_write_tree (ob, t, ref_p);
stream_write_tree (ob, NULL_TREE, false);
/* The number of BINFO_BASE_ACCESSES has already been emitted in
EXPR's bitfield section. */
- FOR_EACH_VEC_ELT (tree, BINFO_BASE_ACCESSES (expr), i, t)
+ FOR_EACH_VEC_SAFE_ELT (BINFO_BASE_ACCESSES (expr), i, t)
stream_write_tree (ob, t, ref_p);
stream_write_tree (ob, BINFO_INHERITANCE_CHAIN (expr), ref_p);
{
/* Make sure we're either replacing an old element or
appending consecutively. */
- gcc_assert (ix <= VEC_length (tree, cache->nodes));
+ gcc_assert (ix <= cache->nodes.length ());
- if (ix == VEC_length (tree, cache->nodes))
- VEC_safe_push (tree, heap, cache->nodes, t);
+ if (ix == cache->nodes.length ())
+ cache->nodes.safe_push (t);
else
- VEC_replace (tree, cache->nodes, ix, t);
+ cache->nodes[ix] = t;
}
{
/* Determine the next slot to use in the cache. */
if (insert_at_next_slot_p)
- ix = VEC_length (tree, cache->nodes);
+ ix = cache->nodes.length ();
else
ix = *ix_p;
*slot = (void *)(size_t) (ix + 1);
void
streamer_tree_cache_append (struct streamer_tree_cache_d *cache, tree t)
{
- unsigned ix = VEC_length (tree, cache->nodes);
+ unsigned ix = cache->nodes.length ();
streamer_tree_cache_insert_1 (cache, t, &ix, false);
}
return;
pointer_map_destroy (c->node_map);
- VEC_free (tree, heap, c->nodes);
+ c->nodes.release ();
free (c);
}
struct pointer_map_t *node_map;
/* The nodes pickled so far. */
- VEC(tree,heap) *nodes;
+ vec<tree> nodes;
};
/* Return true if tree node EXPR should be streamed as a builtin. For
static inline tree
streamer_tree_cache_get (struct streamer_tree_cache_d *cache, unsigned ix)
{
- return VEC_index (tree, cache->nodes, ix);
+ return cache->nodes[ix];
}
edge default_edge;
bool update_dom = dom_info_available_p (CDI_DOMINATORS);
- VEC (basic_block, heap) *bbs_to_fix_dom = NULL;
+ vec<basic_block> bbs_to_fix_dom = vec<basic_block>();
tree index_type = TREE_TYPE (index_expr);
tree unsigned_index_type = unsigned_type_for (index_type);
if (update_dom)
{
- bbs_to_fix_dom = VEC_alloc (basic_block, heap, 10);
- VEC_quick_push (basic_block, bbs_to_fix_dom, switch_bb);
- VEC_quick_push (basic_block, bbs_to_fix_dom, default_bb);
- VEC_quick_push (basic_block, bbs_to_fix_dom, new_default_bb);
+ bbs_to_fix_dom.create (10);
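+ /* create (10) reserves the space up front, so the quick_push calls below
+ assert available capacity instead of growing the vector. */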
+ bbs_to_fix_dom.quick_push (switch_bb);
+ bbs_to_fix_dom.quick_push (default_bb);
+ bbs_to_fix_dom.quick_push (new_default_bb);
}
/* Now build the test-and-branch code. */
tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_edge, update_dom);
if (update_dom)
- VEC_quick_push (basic_block, bbs_to_fix_dom, new_bb);
+ bbs_to_fix_dom.quick_push (new_bb);
gcc_assert (gimple_bb (swtch) == new_bb);
gsi = gsi_last_bb (new_bb);
of NEW_BB, are still immediately dominated by SWITCH_BB. Make it so. */
if (update_dom)
{
- VEC (basic_block, heap) *dom_bbs;
+ vec<basic_block> dom_bbs;
basic_block dom_son;
dom_bbs = get_dominated_by (CDI_DOMINATORS, new_bb);
- FOR_EACH_VEC_ELT (basic_block, dom_bbs, i, dom_son)
+ FOR_EACH_VEC_ELT (dom_bbs, i, dom_son)
{
edge e = find_edge (new_bb, dom_son);
if (e && single_pred_p (e->dest))
continue;
set_immediate_dominator (CDI_DOMINATORS, dom_son, switch_bb);
- VEC_safe_push (basic_block, heap, bbs_to_fix_dom, dom_son);
+ bbs_to_fix_dom.safe_push (dom_son);
}
- VEC_free (basic_block, heap, dom_bbs);
+ dom_bbs.release ();
}
/* csui = (1 << (word_mode) idx) */
new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, test[k].target_edge,
update_dom);
if (update_dom)
- VEC_safe_push (basic_block, heap, bbs_to_fix_dom, new_bb);
+ bbs_to_fix_dom.safe_push (new_bb);
gcc_assert (gimple_bb (swtch) == new_bb);
gsi = gsi_last_bb (new_bb);
}
{
/* Fix up the dominator tree. */
iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
- VEC_free (basic_block, heap, bbs_to_fix_dom);
+ bbs_to_fix_dom.release ();
}
}
\f
tree *default_values;
/* Constructors of new static arrays. */
- VEC (constructor_elt, gc) **constructors;
+ vec<constructor_elt, va_gc> **constructors;
/* Array of ssa names that are initialized with a value from a new static
array. */
int i;
info->default_values = XCNEWVEC (tree, info->phi_count * 3);
- info->constructors = XCNEWVEC (VEC (constructor_elt, gc) *, info->phi_count);
+ /* ??? Macros do not support multi-argument templates in their
+ argument list. We create a typedef to work around that problem. */
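+ /* E.g., XCNEWVEC (vec<constructor_elt, va_gc> *, n) would split at the
+ comma into three macro arguments. */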
+ typedef vec<constructor_elt, va_gc> *vec_constructor_elt_gc;
+ info->constructors = XCNEWVEC (vec_constructor_elt_gc, info->phi_count);
info->target_inbound_names = info->default_values + info->phi_count;
info->target_outbound_names = info->target_inbound_names + info->phi_count;
for (i = 0; i < info->phi_count; i++)
- info->constructors[i]
- = VEC_alloc (constructor_elt, gc, tree_low_cst (info->range_size, 1) + 1);
+ vec_alloc (info->constructors[i], tree_low_cst (info->range_size, 1) + 1);
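+ /* vec_alloc is the out-of-line allocator for GC-managed (va_gc) vectors:
+ they keep the embedded vl_embed layout, are manipulated through a pointer,
+ and so cannot use the member create (). */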
}
/* Free the arrays created by create_temp_arrays(). The vectors that are
elt.index = int_const_binop (MINUS_EXPR, pos, info->range_min);
elt.value = info->default_values[k];
- VEC_quick_push (constructor_elt, info->constructors[k], elt);
+ info->constructors[k]->quick_push (elt);
}
pos = int_const_binop (PLUS_EXPR, pos, integer_one_node);
elt.index = int_const_binop (MINUS_EXPR, pos, info->range_min);
elt.value = val;
- VEC_quick_push (constructor_elt, info->constructors[j], elt);
+ info->constructors[j]->quick_push (elt);
pos = int_const_binop (PLUS_EXPR, pos, integer_one_node);
} while (!tree_int_cst_lt (high, pos)
vectors. */
static tree
-constructor_contains_same_values_p (VEC (constructor_elt, gc) *vec)
+constructor_contains_same_values_p (vec<constructor_elt, va_gc> *vec)
{
unsigned int i;
tree prev = NULL_TREE;
constructor_elt *elt;
- FOR_EACH_VEC_ELT (constructor_elt, vec, i, elt)
+ FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
{
if (!prev)
prev = elt->value;
array_value_type (gimple swtch, tree type, int num,
struct switch_conv_info *info)
{
- unsigned int i, len = VEC_length (constructor_elt, info->constructors[num]);
+ unsigned int i, len = vec_safe_length (info->constructors[num]);
constructor_elt *elt;
enum machine_mode mode;
int sign = 0;
if (len < (optimize_bb_for_size_p (gimple_bb (swtch)) ? 2 : 32))
return type;
- FOR_EACH_VEC_ELT (constructor_elt, info->constructors[num], i, elt)
+ FOR_EACH_VEC_SAFE_ELT (info->constructors[num], i, elt)
{
double_int cst;
unsigned int i;
constructor_elt *elt;
- FOR_EACH_VEC_ELT (constructor_elt, info->constructors[num], i, elt)
+ FOR_EACH_VEC_SAFE_ELT (info->constructors[num], i, elt)
elt->value = fold_convert (value_type, elt->value);
}
ctor = build_constructor (array_type, info->constructors[num]);
/* Fix the dominator tree, if it is available. */
if (dom_info_available_p (CDI_DOMINATORS))
{
- VEC (basic_block, heap) *bbs_to_fix_dom;
+ vec<basic_block> bbs_to_fix_dom;
set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
/* If bbD was the immediate dominator ... */
set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);
- bbs_to_fix_dom = VEC_alloc (basic_block, heap, 4);
- VEC_quick_push (basic_block, bbs_to_fix_dom, bb0);
- VEC_quick_push (basic_block, bbs_to_fix_dom, bb1);
- VEC_quick_push (basic_block, bbs_to_fix_dom, bb2);
- VEC_quick_push (basic_block, bbs_to_fix_dom, bbf);
+ bbs_to_fix_dom.create (4);
+ bbs_to_fix_dom.quick_push (bb0);
+ bbs_to_fix_dom.quick_push (bb1);
+ bbs_to_fix_dom.quick_push (bb2);
+ bbs_to_fix_dom.quick_push (bbf);
iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
- VEC_free (basic_block, heap, bbs_to_fix_dom);
+ bbs_to_fix_dom.release ();
}
}
return false;
}
- VEC_safe_push (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), ddr);
+ LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
return true;
}
}
loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
- FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
+ FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
{
int dist = dist_v[loop_depth];
bb_vec_info bb_vinfo, int *max_vf)
{
unsigned int i;
- VEC (ddr_p, heap) *ddrs = NULL;
+ vec<ddr_p> ddrs = vec<ddr_p>();
struct data_dependence_relation *ddr;
if (dump_enabled_p ())
else
ddrs = BB_VINFO_DDRS (bb_vinfo);
- FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
+ FOR_EACH_VEC_ELT (ddrs, i, ddr)
if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
return false;
vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
- VEC (data_reference_p, heap) *datarefs;
+ vec<data_reference_p> datarefs;
struct data_reference *dr;
unsigned int i;
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
&& !vect_compute_data_ref_alignment (dr))
{
struct data_reference *dr_peel, int npeel)
{
unsigned int i;
- VEC(dr_p,heap) *same_align_drs;
+ vec<dr_p> same_align_drs;
struct data_reference *current_dr;
int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
are aligned in the vector loop. */
same_align_drs
= STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
- FOR_EACH_VEC_ELT (dr_p, same_align_drs, i, current_dr)
+ FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
{
if (current_dr != dr)
continue;
bool
vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
- VEC (data_reference_p, heap) *datarefs;
+ vec<data_reference_p> datarefs;
struct data_reference *dr;
enum dr_alignment_support supportable_dr_alignment;
unsigned int i;
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
{
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
gimple stmt = DR_STMT (elem->dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
+ vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
struct data_reference *dr;
stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
int single_iter_cost;
- prologue_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 2);
- body_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 2);
- epilogue_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 2);
+ prologue_cost_vec.create (2);
+ body_cost_vec.create (2);
+ epilogue_cost_vec.create (2);
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
These costs depend only on the scalar iteration cost, the
number of peeling iterations finally chosen, and the number of
misaligned statements. So discard the information found here. */
- VEC_free (stmt_info_for_cost, heap, prologue_cost_vec);
- VEC_free (stmt_info_for_cost, heap, epilogue_cost_vec);
+ prologue_cost_vec.release ();
+ epilogue_cost_vec.release ();
if (inside_cost < min->inside_cost
|| (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
{
min->inside_cost = inside_cost;
min->outside_cost = outside_cost;
- VEC_free (stmt_info_for_cost, heap, min->body_cost_vec);
+ min->body_cost_vec.release ();
min->body_cost_vec = body_cost_vec;
min->peel_info.dr = elem->dr;
min->peel_info.npeel = elem->npeel;
}
else
- VEC_free (stmt_info_for_cost, heap, body_cost_vec);
+ body_cost_vec.release ();
return 1;
}
struct _vect_peel_extended_info res;
res.peel_info.dr = NULL;
- res.body_cost_vec = NULL;
+ res.body_cost_vec = stmt_vector_for_cost();
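+ /* A value-initialized stmt_vector_for_cost is empty and owns no storage;
+ exists () distinguishes it from a created vector. */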
if (flag_vect_cost_model)
{
bool
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
- VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
+ vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum dr_alignment_support supportable_dr_alignment;
struct data_reference *dr0 = NULL, *first_store = NULL;
unsigned possible_npeel_number = 1;
tree vectype;
unsigned int nelements, mis, same_align_drs_max = 0;
- stmt_vector_for_cost body_cost_vec = NULL;
+ stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost();
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
- The cost of peeling (the extra runtime checks, the increase
in code size). */
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
stores over load. */
if (all_misalignments_unknown)
{
- if (same_align_drs_max < VEC_length (dr_p,
- STMT_VINFO_SAME_ALIGN_REFS (stmt_info))
+ if (same_align_drs_max
+ < STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ()
|| !dr0)
{
- same_align_drs_max = VEC_length (dr_p,
- STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
+ same_align_drs_max
+ = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
dr0 = dr;
}
unsigned int store_inside_cost = 0, store_outside_cost = 0;
unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
- stmt_vector_for_cost dummy = VEC_alloc (stmt_info_for_cost, heap, 2);
+ stmt_vector_for_cost dummy;
+ dummy.create (2);
vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
&dummy);
vect_get_data_access_cost (first_store, &store_inside_cost,
&store_outside_cost, &dummy);
- VEC_free (stmt_info_for_cost, heap, dummy);
+ dummy.release ();
/* Calculate the penalty for leaving FIRST_STORE unaligned (by
aligning the load DR0). */
load_inside_penalty = store_inside_cost;
load_outside_penalty = store_outside_cost;
- for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
- (vinfo_for_stmt (DR_STMT (first_store))),
- i, dr);
+ for (i = 0;
+ STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
+ DR_STMT (first_store))).iterate (i, &dr);
i++)
if (DR_IS_READ (dr))
{
aligning the FIRST_STORE). */
store_inside_penalty = load_inside_cost;
store_outside_penalty = load_outside_cost;
- for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
- (vinfo_for_stmt (DR_STMT (dr0))),
- i, dr);
+ for (i = 0;
+ STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
+ DR_STMT (dr0))).iterate (i, &dr);
i++)
if (DR_IS_READ (dr))
{
/* In case there are only loads with different unknown misalignments, use
peeling only if it may help to align other accesses in the loop. */
- if (!first_store && !VEC_length (dr_p, STMT_VINFO_SAME_ALIGN_REFS
- (vinfo_for_stmt (DR_STMT (dr0))))
+ if (!first_store
+ && !STMT_VINFO_SAME_ALIGN_REFS (
+ vinfo_for_stmt (DR_STMT (dr0))).length ()
&& vect_supportable_dr_alignment (dr0, false)
!= dr_unaligned_supported)
do_peeling = false;
}
/* Ensure that all data refs can be vectorized after the peel. */
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
{
int save_misalignment;
do_peeling = false;
else
{
- VEC_free (stmt_info_for_cost, heap, body_cost_vec);
+ body_cost_vec.release ();
return stat;
}
}
by the peeling factor times the element size of DR_i (MOD the
vectorization factor times the size). Otherwise, the
misalignment of DR_i must be set to unknown. */
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
if (dr != dr0)
vect_update_misalignment_for_peel (dr, dr0, npeel);
/* We've delayed passing the inside-loop peeling costs to the
target cost model until we were sure peeling would happen.
Do so now. */
- if (body_cost_vec)
+ if (body_cost_vec.exists ())
{
- FOR_EACH_VEC_ELT (stmt_info_for_cost, body_cost_vec, i, si)
+ FOR_EACH_VEC_ELT (body_cost_vec, i, si)
{
struct _stmt_vec_info *stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
(void) add_stmt_cost (data, si->count, si->kind, stmt_info,
si->misalign, vect_body);
}
- VEC_free (stmt_info_for_cost, heap, body_cost_vec);
+ body_cost_vec.release ();
}
stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
}
}
- VEC_free (stmt_info_for_cost, heap, body_cost_vec);
+ body_cost_vec.release ();
/* (2) Versioning to force alignment. */
if (do_versioning)
{
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
tree vectype;
if (known_alignment_for_access_p (dr)
- || VEC_length (gimple,
- LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
+ || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
>= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
{
do_versioning = false;
gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
|| LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
- VEC_safe_push (gimple, heap,
- LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo),
- DR_STMT (dr));
+ LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
+ DR_STMT (dr));
}
}
if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
do_versioning = false;
else if (!do_versioning)
- VEC_truncate (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo), 0);
+ LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
}
if (do_versioning)
{
- VEC(gimple,heap) *may_misalign_stmts
+ vec<gimple> may_misalign_stmts
= LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
gimple stmt;
/* It can now be assumed that the data references in the statements
in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
of the loop being vectorized. */
- FOR_EACH_VEC_ELT (gimple, may_misalign_stmts, i, stmt)
+ FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
dr = STMT_VINFO_DATA_REF (stmt_info);
return;
loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
- FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
+ FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
{
int dist = dist_v[loop_depth];
|| (dist % vectorization_factor == 0 && dra_size == drb_size))
{
/* Two references with distance zero have the same alignment. */
- VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a), drb);
- VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b), dra);
+ STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
+ STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
data dependence information. */
if (loop_vinfo)
{
- VEC (ddr_p, heap) *ddrs = LOOP_VINFO_DDRS (loop_vinfo);
+ vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
struct data_dependence_relation *ddr;
unsigned int i;
- FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
+ FOR_EACH_VEC_ELT (ddrs, i, ddr)
vect_find_same_alignment_drs (ddr, loop_vinfo);
}
if (DR_IS_WRITE (dr) && !slp_impossible)
{
if (loop_vinfo)
- VEC_safe_push (gimple, heap, LOOP_VINFO_GROUPED_STORES (loop_vinfo),
- stmt);
+ LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
if (bb_vinfo)
- VEC_safe_push (gimple, heap, BB_VINFO_GROUPED_STORES (bb_vinfo),
- stmt);
+ BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
}
/* There is a gap in the end of the group. */
vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
unsigned int i;
- VEC (data_reference_p, heap) *datarefs;
+ vec<data_reference_p> datarefs;
struct data_reference *dr;
if (dump_enabled_p ())
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
&& !vect_analyze_data_ref_access (dr))
{
bool
vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
{
- VEC (ddr_p, heap) * ddrs =
+ vec<ddr_p> ddrs =
LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
unsigned i, j;
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_prune_runtime_alias_test_list ===");
- for (i = 0; i < VEC_length (ddr_p, ddrs); )
+ for (i = 0; i < ddrs.length (); )
{
bool found;
ddr_p ddr_i;
- ddr_i = VEC_index (ddr_p, ddrs, i);
+ ddr_i = ddrs[i];
found = false;
for (j = 0; j < i; j++)
{
- ddr_p ddr_j = VEC_index (ddr_p, ddrs, j);
+ ddr_p ddr_j = ddrs[j];
if (vect_vfa_range_equal (ddr_i, ddr_j))
{
if (found)
{
- VEC_ordered_remove (ddr_p, ddrs, i);
+ ddrs.ordered_remove (i);
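+ /* ordered_remove shifts the remaining elements down, preserving their
+ order (unordered_remove would swap in the last element instead). */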
continue;
}
i++;
}
- if (VEC_length (ddr_p, ddrs) >
+ if (ddrs.length () >
(unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
{
if (dump_enabled_p ())
"generated checks exceeded.");
}
- VEC_truncate (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), 0);
+ LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).truncate (0);
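+ /* truncate (0) resets the length to zero but keeps the allocated storage,
+ unlike release (). */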
return false;
}
struct loop *loop = NULL;
basic_block bb = NULL;
unsigned int i;
- VEC (data_reference_p, heap) *datarefs;
+ vec<data_reference_p> datarefs;
struct data_reference *dr;
tree scalar_type;
bool res, stop_bb_analysis = false;
}
}
if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
- &BB_VINFO_DDRS (bb_vinfo), NULL, true))
+ &BB_VINFO_DDRS (bb_vinfo),
+ vec<loop_p>(), true))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
/* Go through the data-refs, check that the analysis succeeded. Update
pointer from stmt_vec_info struct to DR and vectype. */
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
{
gimple stmt;
stmt_vec_info stmt_info;
{
unsigned int j, k, n;
struct data_reference *olddr
- = VEC_index (data_reference_p, datarefs, i);
- VEC (ddr_p, heap) *ddrs = LOOP_VINFO_DDRS (loop_vinfo);
+ = datarefs[i];
+ vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
struct data_dependence_relation *ddr, *newddr;
bool bad = false;
tree off;
- VEC (loop_p, heap) *nest = LOOP_VINFO_LOOP_NEST (loop_vinfo);
+ vec<loop_p> nest = LOOP_VINFO_LOOP_NEST (loop_vinfo);
gather = 0 != vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
if (gather
return false;
}
- n = VEC_length (data_reference_p, datarefs) - 1;
+ n = datarefs.length () - 1;
for (j = 0, k = i - 1; j < i; j++)
{
- ddr = VEC_index (ddr_p, ddrs, k);
+ ddr = ddrs[k];
gcc_assert (DDR_B (ddr) == olddr);
newddr = initialize_data_dependence_relation (DDR_A (ddr), dr,
nest);
- VEC_replace (ddr_p, ddrs, k, newddr);
+ ddrs[k] = newddr;
free_dependence_relation (ddr);
if (!bad
&& DR_IS_WRITE (DDR_A (newddr))
}
k++;
- n = k + VEC_length (data_reference_p, datarefs) - i - 1;
+ n = k + datarefs.length () - i - 1;
for (; k < n; k++)
{
- ddr = VEC_index (ddr_p, ddrs, k);
+ ddr = ddrs[k];
gcc_assert (DDR_A (ddr) == olddr);
newddr = initialize_data_dependence_relation (dr, DDR_B (ddr),
nest);
- VEC_replace (ddr_p, ddrs, k, newddr);
+ ddrs[k] = newddr;
free_dependence_relation (ddr);
if (!bad
&& DR_IS_WRITE (DDR_B (newddr))
bad = true;
}
- k = VEC_length (ddr_p, ddrs)
- - VEC_length (data_reference_p, datarefs) + i;
- ddr = VEC_index (ddr_p, ddrs, k);
+ k = ddrs.length () - datarefs.length () + i;
+ ddr = ddrs[k];
gcc_assert (DDR_A (ddr) == olddr && DDR_B (ddr) == olddr);
newddr = initialize_data_dependence_relation (dr, dr, nest);
- VEC_replace (ddr_p, ddrs, k, newddr);
+ ddrs[k] = newddr;
free_dependence_relation (ddr);
- VEC_replace (data_reference_p, datarefs, i, dr);
+ datarefs[i] = dr;
if (bad)
{
I4: 6 14 22 30 7 15 23 31. */
void
-vect_permute_store_chain (VEC(tree,heap) *dr_chain,
+vect_permute_store_chain (vec<tree> dr_chain,
unsigned int length,
gimple stmt,
gimple_stmt_iterator *gsi,
- VEC(tree,heap) **result_chain)
+ vec<tree> *result_chain)
{
tree vect1, vect2, high, low;
gimple perm_stmt;
unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
- *result_chain = VEC_copy (tree, heap, dr_chain);
+ *result_chain = dr_chain.copy ();
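+ /* copy () heap-allocates a new vector with the same contents, so
+ *result_chain is independently owned and must be released by the
+ caller. */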
for (i = 0, n = nelt / 2; i < n; i++)
{
{
for (j = 0; j < length/2; j++)
{
- vect1 = VEC_index (tree, dr_chain, j);
- vect2 = VEC_index (tree, dr_chain, j+length/2);
+ vect1 = dr_chain[j];
+ vect2 = dr_chain[j+length/2];
/* Create interleaving stmt:
high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1, ...}> */
= gimple_build_assign_with_ops (VEC_PERM_EXPR, high,
vect1, vect2, perm_mask_high);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
- VEC_replace (tree, *result_chain, 2*j, high);
+ (*result_chain)[2*j] = high;
/* Create interleaving stmt:
low = VEC_PERM_EXPR <vect1, vect2, {nelt/2, nelt*3/2, nelt/2+1,
= gimple_build_assign_with_ops (VEC_PERM_EXPR, low,
vect1, vect2, perm_mask_low);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
- VEC_replace (tree, *result_chain, 2*j+1, low);
+ (*result_chain)[2*j+1] = low;
}
- dr_chain = VEC_copy (tree, heap, *result_chain);
+ dr_chain = result_chain->copy ();
}
}
4th vec (E4): 3 7 11 15 19 23 27 31. */
static void
-vect_permute_load_chain (VEC(tree,heap) *dr_chain,
+vect_permute_load_chain (vec<tree> dr_chain,
unsigned int length,
gimple stmt,
gimple_stmt_iterator *gsi,
- VEC(tree,heap) **result_chain)
+ vec<tree> *result_chain)
{
tree data_ref, first_vect, second_vect;
tree perm_mask_even, perm_mask_odd;
unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
- *result_chain = VEC_copy (tree, heap, dr_chain);
+ *result_chain = dr_chain.copy ();
for (i = 0; i < nelt; ++i)
sel[i] = i * 2;
{
for (j = 0; j < length; j += 2)
{
- first_vect = VEC_index (tree, dr_chain, j);
- second_vect = VEC_index (tree, dr_chain, j+1);
+ first_vect = dr_chain[j];
+ second_vect = dr_chain[j+1];
/* data_ref = permute_even (first_data_ref, second_data_ref); */
data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
first_vect, second_vect,
perm_mask_even);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
- VEC_replace (tree, *result_chain, j/2, data_ref);
+ (*result_chain)[j/2] = data_ref;
/* data_ref = permute_odd (first_data_ref, second_data_ref); */
data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
first_vect, second_vect,
perm_mask_odd);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
- VEC_replace (tree, *result_chain, j/2+length/2, data_ref);
+ (*result_chain)[j/2+length/2] = data_ref;
}
- dr_chain = VEC_copy (tree, heap, *result_chain);
+ dr_chain = result_chain->copy ();
}
}
*/
void
-vect_transform_grouped_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
+vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size,
gimple_stmt_iterator *gsi)
{
- VEC(tree,heap) *result_chain = NULL;
+ vec<tree> result_chain = vec<tree>();
/* DR_CHAIN contains input data-refs that are a part of the interleaving.
RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
vectors, that are ready for vector computation. */
- result_chain = VEC_alloc (tree, heap, size);
+ result_chain.create (size);
vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
vect_record_grouped_load_vectors (stmt, result_chain);
- VEC_free (tree, heap, result_chain);
+ result_chain.release ();
}
/* RESULT_CHAIN contains the output of a group of grouped loads that were
for each vector to the associated scalar statement. */
void
-vect_record_grouped_load_vectors (gimple stmt, VEC(tree,heap) *result_chain)
+vect_record_grouped_load_vectors (gimple stmt, vec<tree> result_chain)
{
gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
gimple next_stmt, new_stmt;
corresponds the order of data-refs in RESULT_CHAIN. */
next_stmt = first_stmt;
gap_count = 1;
- FOR_EACH_VEC_ELT (tree, result_chain, i, tmp_data_ref)
+ FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
{
if (!next_stmt)
break;
tree type, tree inner_type,
tree a, tree b, enum tree_code code)
{
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
tree part_width = TYPE_SIZE (inner_type);
tree index = bitsize_int (0);
int nunits = TYPE_VECTOR_SUBPARTS (type);
warning_at (loc, OPT_Wvector_operation_performance,
"vector operation will be expanded in parallel");
- v = VEC_alloc(constructor_elt, gc, (nunits + delta - 1) / delta);
+ vec_alloc (v, (nunits + delta - 1) / delta);
for (i = 0; i < nunits;
i += delta, index = int_const_binop (PLUS_EXPR, index, part_width))
{
tree result = f (gsi, inner_type, a, b, index, part_width, code);
constructor_elt ce = {NULL_TREE, result};
- VEC_quick_push (constructor_elt, v, ce);
+ v->quick_push (ce);
}
return build_constructor (type, v);
bool a_is_comparison = false;
tree b = gimple_assign_rhs2 (stmt);
tree c = gimple_assign_rhs3 (stmt);
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
tree constr;
tree inner_type = TREE_TYPE (type);
tree cond_type = TREE_TYPE (TREE_TYPE (a));
warning_at (loc, OPT_Wvector_operation_performance,
"vector condition will be expanded piecewise");
- v = VEC_alloc(constructor_elt, gc, nunits);
+ vec_alloc (v, nunits);
for (i = 0; i < nunits;
i++, index = int_const_binop (PLUS_EXPR, index, width))
{
aa = tree_vec_extract (gsi, cond_type, a, width, index);
result = gimplify_build3 (gsi, COND_EXPR, inner_type, aa, bb, cc);
constructor_elt ce = {NULL_TREE, result};
- VEC_quick_push (constructor_elt, v, ce);
+ v->quick_push (ce);
}
constr = build_constructor (type, v);
tree vect_elt_type = TREE_TYPE (vect_type);
tree mask_elt_type = TREE_TYPE (mask_type);
unsigned int elements = TYPE_VECTOR_SUBPARTS (vect_type);
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
tree constr, t, si, i_val;
tree vec0tmp = NULL_TREE, vec1tmp = NULL_TREE, masktmp = NULL_TREE;
bool two_operand_p = !operand_equal_p (vec0, vec1, 0);
warning_at (loc, OPT_Wvector_operation_performance,
"vector shuffling operation will be expanded piecewise");
- v = VEC_alloc (constructor_elt, gc, elements);
+ vec_alloc (v, elements);
for (i = 0; i < elements; i++)
{
si = size_int (i);
basic_block bb;
} adjust_info;
-DEF_VEC_O(adjust_info);
-DEF_VEC_ALLOC_O_STACK(adjust_info);
-#define VEC_adjust_info_stack_alloc(alloc) VEC_stack_alloc (adjust_info, alloc)
-
/* A stack of values to be adjusted in debug stmts. We have to
process them LIFO, so that the closest substitution applies. If we
processed them FIFO, without the stack, we might substitute uses
with a PHI DEF that would soon become non-dominant, and when we got
to the suitable one, it wouldn't have anything to substitute any
more. */
-static VEC(adjust_info, stack) *adjust_vec;
+static vec<adjust_info, va_stack> adjust_vec;
/* Adjust any debug stmts that referenced AI->from values to use the
loop-closed AI->to, if the references are dominated by AI->bb and
if (!MAY_HAVE_DEBUG_STMTS)
return;
- gcc_assert (adjust_vec);
+ gcc_assert (adjust_vec.exists ());
- while (!VEC_empty (adjust_info, adjust_vec))
+ while (!adjust_vec.is_empty ())
{
- adjust_debug_stmts_now (&VEC_last (adjust_info, adjust_vec));
- VEC_pop (adjust_info, adjust_vec);
+ adjust_debug_stmts_now (&adjust_vec.last ());
+ adjust_vec.pop ();
}
- VEC_free (adjust_info, stack, adjust_vec);
+ adjust_vec.release ();
}
/* Adjust any debug stmts that referenced FROM values to use the
ai.to = to;
ai.bb = bb;
- if (adjust_vec)
- VEC_safe_push (adjust_info, stack, adjust_vec, ai);
+ if (adjust_vec.exists ())
+ adjust_vec.safe_push (ai);
else
adjust_debug_stmts_now (&ai);
}
if (MAY_HAVE_DEBUG_STMTS)
{
- gcc_assert (!adjust_vec);
- adjust_vec = VEC_alloc (adjust_info, stack, 32);
+ gcc_assert (!adjust_vec.exists ());
+ vec_stack_alloc (adjust_info, adjust_vec, 32);
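+ /* vec_stack_alloc gives the va_stack vector its initial storage from the
+ stack frame; growth past that reservation falls back to heap storage. */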
}
if (e == exit_e)
vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
{
unsigned int i;
- VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
+ vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
struct data_reference *dr;
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"=== vect_update_inits_of_dr ===");
- FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
+ FOR_EACH_VEC_ELT (datarefs, i, dr)
vect_update_init_of_dr (dr, niters);
}
gimple_seq *cond_expr_stmt_list)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- VEC(gimple,heap) *may_misalign_stmts
+ vec<gimple> may_misalign_stmts
= LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
gimple ref_stmt;
int mask = LOOP_VINFO_PTR_MASK (loop_vinfo);
/* Create expression (mask & (dr_1 || ... || dr_n)) where dr_i is the address
of the first vector of the i'th data reference. */
- FOR_EACH_VEC_ELT (gimple, may_misalign_stmts, i, ref_stmt)
+ FOR_EACH_VEC_ELT (may_misalign_stmts, i, ref_stmt)
{
gimple_seq new_stmt_list = NULL;
tree addr_base;
gimple_seq * cond_expr_stmt_list)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- VEC (ddr_p, heap) * may_alias_ddrs =
+ vec<ddr_p> may_alias_ddrs =
LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
((store_ptr_n + store_segment_length_n) <= load_ptr_n)
|| (load_ptr_n + load_segment_length_n) <= store_ptr_n)) */
- if (VEC_empty (ddr_p, may_alias_ddrs))
+ if (may_alias_ddrs.is_empty ())
return;
- FOR_EACH_VEC_ELT (ddr_p, may_alias_ddrs, i, ddr)
+ FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
{
struct data_reference *dr_a, *dr_b;
gimple dr_group_first_a, dr_group_first_b;
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"created %u versioning for alias checks.\n",
- VEC_length (ddr_p, may_alias_ddrs));
+ may_alias_ddrs.length ());
}
{
basic_block bb = loop->header;
tree dumy;
- VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
+ vec<gimple> worklist;
+ worklist.create (64);
gimple_stmt_iterator gsi;
bool double_reduc;
if (!access_fn
|| !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
{
- VEC_safe_push (gimple, heap, worklist, phi);
+ worklist.safe_push (phi);
continue;
}
/* Second - identify all reductions and nested cycles. */
- while (VEC_length (gimple, worklist) > 0)
+ while (worklist.length () > 0)
{
- gimple phi = VEC_pop (gimple, worklist);
+ gimple phi = worklist.pop ();
tree def = PHI_RESULT (phi);
stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
gimple reduc_stmt;
vect_reduction_def;
/* Store the reduction cycles for possible vectorization in
loop-aware SLP. */
- VEC_safe_push (gimple, heap,
- LOOP_VINFO_REDUCTIONS (loop_vinfo),
- reduc_stmt);
+ LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
}
}
}
"Unknown def-use cycle pattern.");
}
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
}
LOOP_VINFO_VECTORIZABLE_P (res) = 0;
LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
LOOP_VINFO_VECT_FACTOR (res) = 0;
- LOOP_VINFO_LOOP_NEST (res) = VEC_alloc (loop_p, heap, 3);
- LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
- LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
+ LOOP_VINFO_LOOP_NEST (res).create (3);
+ LOOP_VINFO_DATAREFS (res).create (10);
+ LOOP_VINFO_DDRS (res).create (10 * 10);
LOOP_VINFO_UNALIGNED_DR (res) = NULL;
- LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
- VEC_alloc (gimple, heap,
- PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
- LOOP_VINFO_MAY_ALIAS_DDRS (res) =
- VEC_alloc (ddr_p, heap,
- PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
- LOOP_VINFO_GROUPED_STORES (res) = VEC_alloc (gimple, heap, 10);
- LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
- LOOP_VINFO_REDUCTION_CHAINS (res) = VEC_alloc (gimple, heap, 10);
- LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
+ LOOP_VINFO_MAY_MISALIGN_STMTS (res).create (
+ PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
+ LOOP_VINFO_MAY_ALIAS_DDRS (res).create (
+ PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
+ LOOP_VINFO_GROUPED_STORES (res).create (10);
+ LOOP_VINFO_REDUCTIONS (res).create (10);
+ LOOP_VINFO_REDUCTION_CHAINS (res).create (10);
+ LOOP_VINFO_SLP_INSTANCES (res).create (10);
LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
LOOP_VINFO_PEELING_HTAB (res) = NULL;
LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
int nbbs;
gimple_stmt_iterator si;
int j;
- VEC (slp_instance, heap) *slp_instances;
+ vec<slp_instance> slp_instances;
slp_instance instance;
bool swapped;
free (LOOP_VINFO_BBS (loop_vinfo));
free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
- VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
- VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
- VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
+ LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
+ LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
+ LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
free (loop_vinfo);
loop->aux = NULL;
free (LOOP_VINFO_BBS (loop_vinfo));
free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
- VEC_free (loop_p, heap, LOOP_VINFO_LOOP_NEST (loop_vinfo));
- VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
- VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
+ LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
+ LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
+ LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
- FOR_EACH_VEC_ELT (slp_instance, slp_instances, j, instance)
+ FOR_EACH_VEC_ELT (slp_instances, j, instance)
vect_free_slp_instance (instance);
- VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
- VEC_free (gimple, heap, LOOP_VINFO_GROUPED_STORES (loop_vinfo));
- VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));
- VEC_free (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo));
+ LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
+ LOOP_VINFO_GROUPED_STORES (loop_vinfo).release ();
+ LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
+ LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();
if (LOOP_VINFO_PEELING_HTAB (loop_vinfo))
htab_delete (LOOP_VINFO_PEELING_HTAB (loop_vinfo));
/* Save the chain for further analysis in SLP detection. */
first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
- VEC_safe_push (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_info), first);
+ LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
GROUP_SIZE (vinfo_for_stmt (first)) = size;
return true;
if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
{
/* FIXME: Make cost depend on complexity of individual check. */
- unsigned len = VEC_length (gimple,
- LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
+ unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
(void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
vect_prologue);
dump_printf (MSG_NOTE,
if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
{
/* FIXME: Make cost depend on complexity of individual check. */
- unsigned len = VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
+ unsigned len = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).length ();
(void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
vect_prologue);
dump_printf (MSG_NOTE,
int j;
void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
- prologue_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 2);
- epilogue_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 2);
+ prologue_cost_vec.create (2);
+ epilogue_cost_vec.create (2);
peel_iters_prologue = npeel;
(void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
&prologue_cost_vec,
&epilogue_cost_vec);
- FOR_EACH_VEC_ELT (stmt_info_for_cost, prologue_cost_vec, j, si)
+ FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
{
struct _stmt_vec_info *stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
si->misalign, vect_prologue);
}
- FOR_EACH_VEC_ELT (stmt_info_for_cost, epilogue_cost_vec, j, si)
+ FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
{
struct _stmt_vec_info *stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
si->misalign, vect_epilogue);
}
- VEC_free (stmt_info_for_cost, heap, prologue_cost_vec);
- VEC_free (stmt_info_for_cost, heap, epilogue_cost_vec);
+ prologue_cost_vec.release ();
+ epilogue_cost_vec.release ();
}
/* FORNOW: The scalar outside cost is incremented in one of the
edge pe = loop_preheader_edge (loop);
struct loop *iv_loop;
basic_block new_bb;
- tree vec, vec_init, vec_step, t;
+ tree new_vec, vec_init, vec_step, t;
tree access_fn;
tree new_var;
tree new_name;
}
else
{
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
/* iv_loop is the loop to be vectorized. Create:
vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
gcc_assert (!new_bb);
}
- v = VEC_alloc (constructor_elt, gc, nunits);
+ vec_alloc (v, nunits);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
for (i = 1; i < nunits; i++)
{
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
}
/* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
- vec = build_constructor (vectype, v);
- vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
+ new_vec = build_constructor (vectype, v);
+ vec_init = vect_init_vector (iv_phi, new_vec, vectype, NULL);
}
gcc_assert (CONSTANT_CLASS_P (new_name));
stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
gcc_assert (stepvectype);
- vec = build_vector_from_val (stepvectype, t);
- vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
+ new_vec = build_vector_from_val (stepvectype, t);
+ vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
/* Create the following def-use cycle:
expr, step_expr);
t = unshare_expr (new_name);
gcc_assert (CONSTANT_CLASS_P (new_name));
- vec = build_vector_from_val (stepvectype, t);
- vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);
+ new_vec = build_vector_from_val (stepvectype, t);
+ vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
vec_def = induc_def;
prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
init_def = build_vector (vectype, elts);
else
{
- VEC(constructor_elt,gc) *v;
- v = VEC_alloc (constructor_elt, gc, nunits);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, nunits);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
for (i = 1; i < nunits; ++i)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
*/
static void
-vect_create_epilog_for_reduction (VEC (tree, heap) *vect_defs, gimple stmt,
+vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
int ncopies, enum tree_code reduc_code,
- VEC (gimple, heap) *reduction_phis,
+ vec<gimple> reduction_phis,
int reduc_index, bool double_reduc,
slp_tree slp_node)
{
bool extract_scalar_result = false;
gimple use_stmt, orig_stmt, reduction_phi = NULL;
bool nested_in_vect_loop = false;
- VEC (gimple, heap) *new_phis = NULL;
- VEC (gimple, heap) *inner_phis = NULL;
+ vec<gimple> new_phis = vec<gimple>();
+ vec<gimple> inner_phis = vec<gimple>();
enum vect_def_type dt = vect_unknown_def_type;
int j, i;
- VEC (tree, heap) *scalar_results = NULL;
+ vec<tree> scalar_results = vec<tree>();
unsigned int group_size = 1, k, ratio;
- VEC (tree, heap) *vec_initial_defs = NULL;
- VEC (gimple, heap) *phis;
+ vec<tree> vec_initial_defs = vec<tree>();
+ vec<gimple> phis;
bool slp_reduc = false;
tree new_phi_result;
gimple inner_phi = NULL;
if (slp_node)
- group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (slp_node));
+ group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
if (nested_in_vect_loop_p (loop, stmt))
{
NULL, slp_node, reduc_index);
else
{
- vec_initial_defs = VEC_alloc (tree, heap, 1);
+ vec_initial_defs.create (1);
/* For the case of reduction, vect_get_vec_def_for_operand returns
the scalar def before the loop, that defines the initial value
of the reduction variable. */
vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
&adjustment_def);
- VEC_quick_push (tree, vec_initial_defs, vec_initial_def);
+ vec_initial_defs.quick_push (vec_initial_def);
}
/* Set phi nodes arguments. */
- FOR_EACH_VEC_ELT (gimple, reduction_phis, i, phi)
+ FOR_EACH_VEC_ELT (reduction_phis, i, phi)
{
- tree vec_init_def = VEC_index (tree, vec_initial_defs, i);
- tree def = VEC_index (tree, vect_defs, i);
+ tree vec_init_def = vec_initial_defs[i];
+ tree def = vect_defs[i];
for (j = 0; j < ncopies; j++)
{
/* Set the loop-entry arg of the reduction-phi. */
}
}
- VEC_free (tree, heap, vec_initial_defs);
+ vec_initial_defs.release ();
/* 2. Create epilog code.
The reduction epilog code operates across the elements of the vector
exit_bb = single_exit (loop)->dest;
prev_phi_info = NULL;
- new_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
- FOR_EACH_VEC_ELT (tree, vect_defs, i, def)
+ new_phis.create (vect_defs.length ());
+ FOR_EACH_VEC_ELT (vect_defs, i, def)
{
for (j = 0; j < ncopies; j++)
{
phi = create_phi_node (new_def, exit_bb);
set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
if (j == 0)
- VEC_quick_push (gimple, new_phis, phi);
+ new_phis.quick_push (phi);
else
{
def = vect_get_vec_def_for_stmt_copy (dt, def);
{
loop = outer_loop;
exit_bb = single_exit (loop)->dest;
- inner_phis = VEC_alloc (gimple, heap, VEC_length (tree, vect_defs));
- FOR_EACH_VEC_ELT (gimple, new_phis, i, phi)
+ inner_phis.create (vect_defs.length ());
+ FOR_EACH_VEC_ELT (new_phis, i, phi)
{
tree new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
gimple outer_phi = create_phi_node (new_result, exit_bb);
PHI_RESULT (phi));
set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
loop_vinfo, NULL));
- VEC_quick_push (gimple, inner_phis, phi);
- VEC_replace (gimple, new_phis, i, outer_phi);
+ inner_phis.quick_push (phi);
+ new_phis[i] = outer_phi;
prev_phi_info = vinfo_for_stmt (outer_phi);
while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
{
scalar_dest = gimple_assign_lhs (orig_stmt);
scalar_type = TREE_TYPE (scalar_dest);
- scalar_results = VEC_alloc (tree, heap, group_size);
+ scalar_results.create (group_size);
new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
bitsize = TYPE_SIZE (scalar_type);
one vector. */
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
- tree first_vect = PHI_RESULT (VEC_index (gimple, new_phis, 0));
+ tree first_vect = PHI_RESULT (new_phis[0]);
tree tmp;
gimple new_vec_stmt = NULL;
vec_dest = vect_create_destination_var (scalar_dest, vectype);
- for (k = 1; k < VEC_length (gimple, new_phis); k++)
+ for (k = 1; k < new_phis.length (); k++)
{
- gimple next_phi = VEC_index (gimple, new_phis, k);
+ gimple next_phi = new_phis[k];
tree second_vect = PHI_RESULT (next_phi);
tmp = build2 (code, vectype, first_vect, second_vect);
new_phi_result = first_vect;
if (new_vec_stmt)
{
- VEC_truncate (gimple, new_phis, 0);
- VEC_safe_push (gimple, heap, new_phis, new_vec_stmt);
+ new_phis.truncate (0);
+ new_phis.safe_push (new_vec_stmt);
}
}
else
- new_phi_result = PHI_RESULT (VEC_index (gimple, new_phis, 0));
+ new_phi_result = PHI_RESULT (new_phis[0]);
/* 2.3 Create the reduction code, using one of the three schemes described
above. In SLP we simply need to extract all the elements from the
"Reduce using scalar code. ");
vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
- FOR_EACH_VEC_ELT (gimple, new_phis, i, new_phi)
+ FOR_EACH_VEC_ELT (new_phis, i, new_phi)
{
if (gimple_code (new_phi) == GIMPLE_PHI)
vec_temp = PHI_RESULT (new_phi);
/* In SLP we don't need to apply reduction operation, so we just
collect s' values in SCALAR_RESULTS. */
if (slp_reduc)
- VEC_safe_push (tree, heap, scalar_results, new_temp);
+ scalar_results.safe_push (new_temp);
for (bit_offset = element_bitsize;
bit_offset < vec_size_in_bits;
/* In SLP we don't need to apply reduction operation, so
we just collect s' values in SCALAR_RESULTS. */
new_temp = new_name;
- VEC_safe_push (tree, heap, scalar_results, new_name);
+ scalar_results.safe_push (new_name);
}
else
{
gimple new_stmt;
/* Reduce multiple scalar results in case of SLP unrolling. */
- for (j = group_size; VEC_iterate (tree, scalar_results, j, res);
+ for (j = group_size; scalar_results.iterate (j, &res);
j++)
{
- first_res = VEC_index (tree, scalar_results, j % group_size);
+ first_res = scalar_results[j % group_size];
new_stmt = gimple_build_assign_with_ops (code,
new_scalar_dest, first_res, res);
new_res = make_ssa_name (new_scalar_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_res);
gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
- VEC_replace (tree, scalar_results, j % group_size, new_res);
+ scalar_results[j % group_size] = new_res;
}
}
else
/* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
- VEC_safe_push (tree, heap, scalar_results, new_temp);
+ scalar_results.safe_push (new_temp);
extract_scalar_result = false;
}
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
- VEC_safe_push (tree, heap, scalar_results, new_temp);
+ scalar_results.safe_push (new_temp);
}
vect_finalize_reduction:
gcc_assert (!slp_reduc);
if (nested_in_vect_loop)
{
- new_phi = VEC_index (gimple, new_phis, 0);
+ new_phi = new_phis[0];
gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
new_dest = vect_create_destination_var (scalar_dest, vectype);
}
else
{
- new_temp = VEC_index (tree, scalar_results, 0);
+ new_temp = scalar_results[0];
gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
expr = build2 (code, scalar_type, new_temp, adjustment_def);
new_dest = vect_create_destination_var (scalar_dest, scalar_type);
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
if (!double_reduc)
- VEC_quick_push (tree, scalar_results, new_temp);
+ scalar_results.quick_push (new_temp);
else
- VEC_replace (tree, scalar_results, 0, new_temp);
+ scalar_results[0] = new_temp;
}
else
- VEC_replace (tree, scalar_results, 0, new_temp);
+ scalar_results[0] = new_temp;
- VEC_replace (gimple, new_phis, 0, epilog_stmt);
+ new_phis[0] = epilog_stmt;
}
/* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
exit phi node. */
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
- scalar_dest = gimple_assign_lhs (VEC_index (gimple,
- SLP_TREE_SCALAR_STMTS (slp_node),
- group_size - 1));
+ scalar_dest = gimple_assign_lhs (
+ SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
group_size = 1;
}
(GROUP_SIZE / number of new vector stmts) scalar results correspond to
the first vector stmt, etc.
(RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
- if (group_size > VEC_length (gimple, new_phis))
+ if (group_size > new_phis.length ())
{
- ratio = group_size / VEC_length (gimple, new_phis);
- gcc_assert (!(group_size % VEC_length (gimple, new_phis)));
+ ratio = group_size / new_phis.length ();
+ gcc_assert (!(group_size % new_phis.length ()));
}
else
ratio = 1;
{
if (k % ratio == 0)
{
- epilog_stmt = VEC_index (gimple, new_phis, k / ratio);
- reduction_phi = VEC_index (gimple, reduction_phis, k / ratio);
+ epilog_stmt = new_phis[k / ratio];
+ reduction_phi = reduction_phis[k / ratio];
if (double_reduc)
- inner_phi = VEC_index (gimple, inner_phis, k / ratio);
+ inner_phi = inner_phis[k / ratio];
}
if (slp_reduc)
{
- gimple current_stmt = VEC_index (gimple,
- SLP_TREE_SCALAR_STMTS (slp_node), k);
+ gimple current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];
orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
/* SLP statements can't participate in patterns. */
scalar_dest = gimple_assign_lhs (current_stmt);
}
- phis = VEC_alloc (gimple, heap, 3);
+ phis.create (3);
/* Find the loop-closed-use at the loop exit of the original scalar
result. (The reduction result is expected to have two immediate uses -
one at the latch block, and one at the loop exit). */
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
- VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
+ phis.safe_push (USE_STMT (use_p));
/* We expect to have found an exit_phi because of loop-closed-ssa
form. */
- gcc_assert (!VEC_empty (gimple, phis));
+ gcc_assert (!phis.is_empty ());
- FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
+ FOR_EACH_VEC_ELT (phis, i, exit_phi)
{
if (outer_loop)
{
}
}
- VEC_free (gimple, heap, phis);
+ phis.release ();
if (nested_in_vect_loop)
{
if (double_reduc)
continue;
}
- phis = VEC_alloc (gimple, heap, 3);
+ phis.create (3);
/* Find the loop-closed-use at the loop exit of the original scalar
result. (The reduction result is expected to have two immediate uses,
one at the latch block, and one at the loop exit). For double
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
{
if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
- VEC_safe_push (gimple, heap, phis, USE_STMT (use_p));
+ phis.safe_push (USE_STMT (use_p));
else
{
if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
{
if (!flow_bb_inside_loop_p (loop,
gimple_bb (USE_STMT (phi_use_p))))
- VEC_safe_push (gimple, heap, phis,
- USE_STMT (phi_use_p));
+ phis.safe_push (USE_STMT (phi_use_p));
}
}
}
}
- FOR_EACH_VEC_ELT (gimple, phis, i, exit_phi)
+ FOR_EACH_VEC_ELT (phis, i, exit_phi)
{
/* Replace the uses: */
orig_name = PHI_RESULT (exit_phi);
- scalar_result = VEC_index (tree, scalar_results, k);
+ scalar_result = scalar_results[k];
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
SET_USE (use_p, scalar_result);
}
- VEC_free (gimple, heap, phis);
+ phis.release ();
}
- VEC_free (tree, heap, scalar_results);
- VEC_free (gimple, heap, new_phis);
+ scalar_results.release ();
+ new_phis.release ();
}
struct loop * def_stmt_loop, *outer_loop = NULL;
tree def_arg;
gimple def_arg_stmt;
- VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vect_defs = NULL;
- VEC (gimple, heap) *phis = NULL;
+ vec<tree> vec_oprnds0 = vec<tree>();
+ vec<tree> vec_oprnds1 = vec<tree>();
+ vec<tree> vect_defs = vec<tree>();
+ vec<gimple> phis = vec<gimple>();
int vec_num;
tree def0, def1, tem, op0, op1 = NULL_TREE;
else
{
vec_num = 1;
- vec_oprnds0 = VEC_alloc (tree, heap, 1);
+ vec_oprnds0.create (1);
if (op_type == ternary_op)
- vec_oprnds1 = VEC_alloc (tree, heap, 1);
+ vec_oprnds1.create (1);
}
- phis = VEC_alloc (gimple, heap, vec_num);
- vect_defs = VEC_alloc (tree, heap, vec_num);
+ phis.create (vec_num);
+ vect_defs.create (vec_num);
if (!slp_node)
- VEC_quick_push (tree, vect_defs, NULL_TREE);
+ vect_defs.quick_push (NULL_TREE);
for (j = 0; j < ncopies; j++)
{
new_stmt_vec_info (new_phi, loop_vinfo,
NULL));
if (j == 0 || slp_node)
- VEC_quick_push (gimple, phis, new_phi);
+ phis.quick_push (new_phi);
}
}
{
gcc_assert (!slp_node);
vectorizable_condition (stmt, gsi, vec_stmt,
- PHI_RESULT (VEC_index (gimple, phis, 0)),
+ PHI_RESULT (phis[0]),
reduc_index, NULL);
/* Multiple types are not supported for condition. */
break;
{
loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
stmt, NULL);
- VEC_quick_push (tree, vec_oprnds0, loop_vec_def0);
+ vec_oprnds0.quick_push (loop_vec_def0);
if (op_type == ternary_op)
{
loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
NULL);
- VEC_quick_push (tree, vec_oprnds1, loop_vec_def1);
+ vec_oprnds1.quick_push (loop_vec_def1);
}
}
}
&dummy_stmt, &dummy, &dt);
loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
loop_vec_def0);
- VEC_replace (tree, vec_oprnds0, 0, loop_vec_def0);
+ vec_oprnds0[0] = loop_vec_def0;
if (op_type == ternary_op)
{
vect_is_simple_use (op1, stmt, loop_vinfo, NULL, &dummy_stmt,
&dummy, &dt);
loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
loop_vec_def1);
- VEC_replace (tree, vec_oprnds1, 0, loop_vec_def1);
+ vec_oprnds1[0] = loop_vec_def1;
}
}
STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
}
- FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, def0)
+ FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
{
if (slp_node)
- reduc_def = PHI_RESULT (VEC_index (gimple, phis, i));
+ reduc_def = PHI_RESULT (phis[i]);
else
{
if (!single_defuse_cycle || j == 0)
}
def1 = ((op_type == ternary_op)
- ? VEC_index (tree, vec_oprnds1, i) : NULL);
+ ? vec_oprnds1[i] : NULL);
if (op_type == binary_op)
{
if (reduc_index == 0)
if (slp_node)
{
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
- VEC_quick_push (tree, vect_defs, new_temp);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ vect_defs.quick_push (new_temp);
}
else
- VEC_replace (tree, vect_defs, 0, new_temp);
+ vect_defs[0] = new_temp;
}
if (slp_node)
if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
{
new_temp = gimple_assign_lhs (*vec_stmt);
- VEC_replace (tree, vect_defs, 0, new_temp);
+ vect_defs[0] = new_temp;
}
vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
epilog_reduc_code, phis, reduc_index,
double_reduc, slp_node);
- VEC_free (gimple, heap, phis);
- VEC_free (tree, heap, vec_oprnds0);
- if (vec_oprnds1)
- VEC_free (tree, heap, vec_oprnds1);
+ phis.release ();
+ vec_oprnds0.release ();
+ vec_oprnds1.release ();
return true;
}
#include "dumpfile.h"
/* Pattern recognition functions */
-static gimple vect_recog_widen_sum_pattern (VEC (gimple, heap) **, tree *,
+static gimple vect_recog_widen_sum_pattern (vec<gimple> *, tree *,
tree *);
-static gimple vect_recog_widen_mult_pattern (VEC (gimple, heap) **, tree *,
+static gimple vect_recog_widen_mult_pattern (vec<gimple> *, tree *,
tree *);
-static gimple vect_recog_dot_prod_pattern (VEC (gimple, heap) **, tree *,
+static gimple vect_recog_dot_prod_pattern (vec<gimple> *, tree *,
tree *);
-static gimple vect_recog_pow_pattern (VEC (gimple, heap) **, tree *, tree *);
-static gimple vect_recog_over_widening_pattern (VEC (gimple, heap) **, tree *,
+static gimple vect_recog_pow_pattern (vec<gimple> *, tree *, tree *);
+static gimple vect_recog_over_widening_pattern (vec<gimple> *, tree *,
tree *);
-static gimple vect_recog_widen_shift_pattern (VEC (gimple, heap) **,
+static gimple vect_recog_widen_shift_pattern (vec<gimple> *,
tree *, tree *);
-static gimple vect_recog_vector_vector_shift_pattern (VEC (gimple, heap) **,
+static gimple vect_recog_vector_vector_shift_pattern (vec<gimple> *,
tree *, tree *);
-static gimple vect_recog_divmod_pattern (VEC (gimple, heap) **,
+static gimple vect_recog_divmod_pattern (vec<gimple> *,
tree *, tree *);
-static gimple vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **,
+static gimple vect_recog_mixed_size_cond_pattern (vec<gimple> *,
tree *, tree *);
-static gimple vect_recog_bool_pattern (VEC (gimple, heap) **, tree *, tree *);
+static gimple vect_recog_bool_pattern (vec<gimple> *, tree *, tree *);
static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
vect_recog_widen_mult_pattern,
vect_recog_widen_sum_pattern,
inner-loop nested in an outer-loop that is being vectorized). */
static gimple
-vect_recog_dot_prod_pattern (VEC (gimple, heap) **stmts, tree *type_in,
+vect_recog_dot_prod_pattern (vec<gimple> *stmts, tree *type_in,
tree *type_out)
{
- gimple stmt, last_stmt = VEC_index (gimple, *stmts, 0);
+ gimple stmt, last_stmt = (*stmts)[0];
tree oprnd0, oprnd1;
tree oprnd00, oprnd01;
stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
static bool
vect_handle_widen_op_by_const (gimple stmt, enum tree_code code,
tree const_oprnd, tree *oprnd,
- VEC (gimple, heap) **stmts, tree type,
+ vec<gimple> *stmts, tree type,
tree *half_type, gimple def_stmt)
{
tree new_type, new_oprnd;
|| TREE_TYPE (gimple_assign_lhs (new_stmt)) != new_type)
return false;
- VEC_safe_push (gimple, heap, *stmts, def_stmt);
+ stmts->safe_push (def_stmt);
*oprnd = gimple_assign_lhs (new_stmt);
}
else
new_stmt = gimple_build_assign_with_ops (NOP_EXPR, new_oprnd, *oprnd,
NULL_TREE);
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)) = new_stmt;
- VEC_safe_push (gimple, heap, *stmts, def_stmt);
+ stmts->safe_push (def_stmt);
*oprnd = new_oprnd;
}
*/
static gimple
-vect_recog_widen_mult_pattern (VEC (gimple, heap) **stmts,
+vect_recog_widen_mult_pattern (vec<gimple> *stmts,
tree *type_in, tree *type_out)
{
- gimple last_stmt = VEC_pop (gimple, *stmts);
+ gimple last_stmt = stmts->pop ();
gimple def_stmt0, def_stmt1;
tree oprnd0, oprnd1;
tree type, half_type0, half_type1;
tree var;
enum tree_code dummy_code;
int dummy_int;
- VEC (tree, heap) *dummy_vec;
+ vec<tree> dummy_vec;
bool op1_ok;
bool promotion;
if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
- VEC_safe_push (gimple, heap, *stmts, last_stmt);
+ stmts->safe_push (last_stmt);
return pattern_stmt;
}
*/
static gimple
-vect_recog_pow_pattern (VEC (gimple, heap) **stmts, tree *type_in,
+vect_recog_pow_pattern (vec<gimple> *stmts, tree *type_in,
tree *type_out)
{
- gimple last_stmt = VEC_index (gimple, *stmts, 0);
+ gimple last_stmt = (*stmts)[0];
tree fn, base, exp = NULL;
gimple stmt;
tree var;
inner-loop nested in an outer-loop that is being vectorized). */
static gimple
-vect_recog_widen_sum_pattern (VEC (gimple, heap) **stmts, tree *type_in,
+vect_recog_widen_sum_pattern (vec<gimple> *stmts, tree *type_in,
tree *type_out)
{
- gimple stmt, last_stmt = VEC_index (gimple, *stmts, 0);
+ gimple stmt, last_stmt = (*stmts)[0];
tree oprnd0, oprnd1;
stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
tree type, half_type;
static bool
vect_operation_fits_smaller_type (gimple stmt, tree def, tree *new_type,
tree *op0, tree *op1, gimple *new_def_stmt,
- VEC (gimple, heap) **stmts)
+ vec<gimple> *stmts)
{
enum tree_code code;
tree const_oprnd, oprnd;
|| TREE_TYPE (gimple_assign_lhs (new_stmt)) != interm_type)
return false;
- VEC_safe_push (gimple, heap, *stmts, def_stmt);
+ stmts->safe_push (def_stmt);
oprnd = gimple_assign_lhs (new_stmt);
}
else
new_stmt = gimple_build_assign_with_ops (NOP_EXPR, new_oprnd,
oprnd, NULL_TREE);
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)) = new_stmt;
- VEC_safe_push (gimple, heap, *stmts, def_stmt);
+ stmts->safe_push (def_stmt);
oprnd = new_oprnd;
}
}
demotion operation. We also check that S3 and S4 have only one use. */
static gimple
-vect_recog_over_widening_pattern (VEC (gimple, heap) **stmts,
+vect_recog_over_widening_pattern (vec<gimple> *stmts,
tree *type_in, tree *type_out)
{
- gimple stmt = VEC_pop (gimple, *stmts);
+ gimple stmt = stmts->pop ();
gimple pattern_stmt = NULL, new_def_stmt, prev_stmt = NULL, use_stmt = NULL;
tree op0, op1, vectype = NULL_TREE, use_lhs, use_type;
tree var = NULL_TREE, new_type = NULL_TREE, new_oprnd;
in the sequence. Therefore, we only add the original statement to
the list if we know that it is not the last. */
if (prev_stmt)
- VEC_safe_push (gimple, heap, *stmts, prev_stmt);
+ stmts->safe_push (prev_stmt);
var = vect_recog_temp_ssa_var (new_type, NULL);
pattern_stmt
statement created for PREV_STMT. Therefore, we add PREV_STMT
to the list in order to mark it later in vect_pattern_recog_1. */
if (prev_stmt)
- VEC_safe_push (gimple, heap, *stmts, prev_stmt);
+ stmts->safe_push (prev_stmt);
}
else
{
*type_out = NULL_TREE;
}
- VEC_safe_push (gimple, heap, *stmts, use_stmt);
+ stmts->safe_push (use_stmt);
}
else
/* TODO: support general case, create a conversion to the correct type. */
WIDEN_LSHIFT_EXPR <a_t, CONST>. */
static gimple
-vect_recog_widen_shift_pattern (VEC (gimple, heap) **stmts,
+vect_recog_widen_shift_pattern (vec<gimple> *stmts,
tree *type_in, tree *type_out)
{
- gimple last_stmt = VEC_pop (gimple, *stmts);
+ gimple last_stmt = stmts->pop ();
gimple def_stmt0;
tree oprnd0, oprnd1;
tree type, half_type0;
tree var;
enum tree_code dummy_code;
int dummy_int;
- VEC (tree, heap) * dummy_vec;
+ vec<tree> dummy_vec;
gimple use_stmt;
bool promotion;
if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
- VEC_safe_push (gimple, heap, *stmts, last_stmt);
+ stmts->safe_push (last_stmt);
return pattern_stmt;
}
S3 stmt. */
static gimple
-vect_recog_vector_vector_shift_pattern (VEC (gimple, heap) **stmts,
+vect_recog_vector_vector_shift_pattern (vec<gimple> *stmts,
tree *type_in, tree *type_out)
{
- gimple last_stmt = VEC_pop (gimple, *stmts);
+ gimple last_stmt = stmts->pop ();
tree oprnd0, oprnd1, lhs, var;
gimple pattern_stmt, def_stmt;
enum tree_code rhs_code;
if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
- VEC_safe_push (gimple, heap, *stmts, last_stmt);
+ stmts->safe_push (last_stmt);
return pattern_stmt;
}
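/* Editor's sketch, not part of the patch: the recognizer signature change.
   The old "VEC (gimple, heap) **" out-parameter becomes "vec<gimple> *",
   because a vec<T> is itself a small object wrapping the storage pointer
   and can be updated in place.  This recognizer is hypothetical; its body
   mirrors the pop/safe_push protocol of the real ones above.  */

static gimple
example_recog_pattern (vec<gimple> *stmts, tree *type_in, tree *type_out)
{
  gimple last_stmt = stmts->pop ();  /* was: VEC_pop (gimple, *stmts) */
  gimple pattern_stmt = NULL;

  /* ... match LAST_STMT and build PATTERN_STMT, *TYPE_IN, *TYPE_OUT ...  */

  if (!pattern_stmt)
    return NULL;
  stmts->safe_push (last_stmt);      /* was: VEC_safe_push (gimple, heap,
					*stmts, last_stmt) */
  return pattern_stmt;
}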
S1 or modulo S4 stmt. */
static gimple
-vect_recog_divmod_pattern (VEC (gimple, heap) **stmts,
+vect_recog_divmod_pattern (vec<gimple> *stmts,
tree *type_in, tree *type_out)
{
- gimple last_stmt = VEC_pop (gimple, *stmts);
+ gimple last_stmt = stmts->pop ();
tree oprnd0, oprnd1, vectype, itype, cond;
gimple pattern_stmt, def_stmt;
enum tree_code rhs_code;
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt,
0);
- VEC_safe_push (gimple, heap, *stmts, last_stmt);
+ stmts->safe_push (last_stmt);
*type_in = vectype;
*type_out = vectype;
dump_gimple_stmt (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM, pattern_stmt, 0);
}
- VEC_safe_push (gimple, heap, *stmts, last_stmt);
+ stmts->safe_push (last_stmt);
*type_in = vectype;
*type_out = vectype;
a_T = (TYPE) a_it; */
static gimple
-vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **stmts, tree *type_in,
+vect_recog_mixed_size_cond_pattern (vec<gimple> *stmts, tree *type_in,
tree *type_out)
{
- gimple last_stmt = VEC_index (gimple, *stmts, 0);
+ gimple last_stmt = (*stmts)[0];
tree cond_expr, then_clause, else_clause;
stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt), def_stmt_info;
tree type, vectype, comp_vectype, itype = NULL_TREE, vecitype;
static tree
adjust_bool_pattern (tree var, tree out_type, tree trueval,
- VEC (gimple, heap) **stmts)
+ vec<gimple> *stmts)
{
gimple stmt = SSA_NAME_DEF_STMT (var);
enum tree_code rhs_code, def_rhs_code;
gimple tstmt;
stmt_vec_info stmt_def_vinfo = vinfo_for_stmt (def_stmt);
irhs2 = adjust_bool_pattern (rhs2, out_type, irhs1, stmts);
- tstmt = VEC_pop (gimple, *stmts);
+ tstmt = stmts->pop ();
gcc_assert (tstmt == def_stmt);
- VEC_quick_push (gimple, *stmts, stmt);
+ stmts->quick_push (stmt);
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt))
= STMT_VINFO_RELATED_STMT (stmt_def_vinfo);
gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_def_vinfo));
gimple tstmt;
stmt_vec_info stmt_def_vinfo = vinfo_for_stmt (def_stmt);
irhs1 = adjust_bool_pattern (rhs1, out_type, irhs2, stmts);
- tstmt = VEC_pop (gimple, *stmts);
+ tstmt = stmts->pop ();
gcc_assert (tstmt == def_stmt);
- VEC_quick_push (gimple, *stmts, stmt);
+ stmts->quick_push (stmt);
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt))
= STMT_VINFO_RELATED_STMT (stmt_def_vinfo);
gcc_assert (!STMT_VINFO_PATTERN_DEF_SEQ (stmt_def_vinfo));
break;
}
- VEC_safe_push (gimple, heap, *stmts, stmt);
+ stmts->safe_push (stmt);
gimple_set_location (pattern_stmt, loc);
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)) = pattern_stmt;
return gimple_assign_lhs (pattern_stmt);
but the above is more efficient. */
static gimple
-vect_recog_bool_pattern (VEC (gimple, heap) **stmts, tree *type_in,
+vect_recog_bool_pattern (vec<gimple> *stmts, tree *type_in,
tree *type_out)
{
- gimple last_stmt = VEC_pop (gimple, *stmts);
+ gimple last_stmt = stmts->pop ();
enum tree_code rhs_code;
tree var, lhs, rhs, vectype;
stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
= gimple_build_assign_with_ops (NOP_EXPR, lhs, rhs, NULL_TREE);
*type_out = vectype;
*type_in = vectype;
- VEC_safe_push (gimple, heap, *stmts, last_stmt);
+ stmts->safe_push (last_stmt);
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_bool_pattern: detected: ");
DR_STMT (STMT_VINFO_DATA_REF (stmt_vinfo)) = pattern_stmt;
*type_out = vectype;
*type_in = vectype;
- VEC_safe_push (gimple, heap, *stmts, last_stmt);
+ stmts->safe_push (last_stmt);
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_bool_pattern: detected: ");
static void
vect_pattern_recog_1 (vect_recog_func_ptr vect_recog_func,
gimple_stmt_iterator si,
- VEC (gimple, heap) **stmts_to_replace)
+ vec<gimple> *stmts_to_replace)
{
gimple stmt = gsi_stmt (si), pattern_stmt;
stmt_vec_info stmt_info;
int i;
gimple next;
- VEC_truncate (gimple, *stmts_to_replace, 0);
- VEC_quick_push (gimple, *stmts_to_replace, stmt);
+ stmts_to_replace->truncate (0);
+ stmts_to_replace->quick_push (stmt);
pattern_stmt = (* vect_recog_func) (stmts_to_replace, &type_in, &type_out);
if (!pattern_stmt)
return;
- stmt = VEC_last (gimple, *stmts_to_replace);
+ stmt = stmts_to_replace->last ();
stmt_info = vinfo_for_stmt (stmt);
loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
/* Patterns cannot be vectorized using SLP, because they change the order of
computation. */
if (loop_vinfo)
- FOR_EACH_VEC_ELT (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next)
+ FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next)
if (next == stmt)
- VEC_ordered_remove (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i);
+ LOOP_VINFO_REDUCTIONS (loop_vinfo).ordered_remove (i);
/* It is possible that additional pattern stmts are created and inserted in
STMTS_TO_REPLACE. We create a stmt_info for each of them, and mark the
relevant statements. */
- for (i = 0; VEC_iterate (gimple, *stmts_to_replace, i, stmt)
- && (unsigned) i < (VEC_length (gimple, *stmts_to_replace) - 1);
+ for (i = 0; stmts_to_replace->iterate (i, &stmt)
+ && (unsigned) i < (stmts_to_replace->length () - 1);
i++)
{
stmt_info = vinfo_for_stmt (stmt);
gimple_stmt_iterator si;
unsigned int i, j;
vect_recog_func_ptr vect_recog_func;
- VEC (gimple, heap) *stmts_to_replace = VEC_alloc (gimple, heap, 1);
+ vec<gimple> stmts_to_replace;
+ stmts_to_replace.create (1);
gimple stmt;
if (dump_enabled_p ())
}
}
- VEC_free (gimple, heap, stmts_to_replace);
+ stmts_to_replace.release ();
}
if (!node)
return;
- FOR_EACH_VEC_ELT (slp_void_p, SLP_TREE_CHILDREN (node), i, child)
+ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
vect_free_slp_tree ((slp_tree) child);
- VEC_free (slp_void_p, heap, SLP_TREE_CHILDREN (node));
- VEC_free (gimple, heap, SLP_TREE_SCALAR_STMTS (node));
-
- if (SLP_TREE_VEC_STMTS (node))
- VEC_free (gimple, heap, SLP_TREE_VEC_STMTS (node));
+ SLP_TREE_CHILDREN (node).release ();
+ SLP_TREE_SCALAR_STMTS (node).release ();
+ SLP_TREE_VEC_STMTS (node).release ();
free (node);
}
vect_free_slp_instance (slp_instance instance)
{
vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
- VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (instance));
- VEC_free (slp_tree, heap, SLP_INSTANCE_LOADS (instance));
- VEC_free (stmt_info_for_cost, heap, SLP_INSTANCE_BODY_COST_VEC (instance));
+ SLP_INSTANCE_LOAD_PERMUTATION (instance).release ();
+ SLP_INSTANCE_LOADS (instance).release ();
+ SLP_INSTANCE_BODY_COST_VEC (instance).release ();
free (instance);
}
/* Create an SLP node for SCALAR_STMTS. */
static slp_tree
-vect_create_new_slp_node (VEC (gimple, heap) *scalar_stmts)
+vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
slp_tree node;
- gimple stmt = VEC_index (gimple, scalar_stmts, 0);
+ gimple stmt = scalar_stmts[0];
unsigned int nops;
if (is_gimple_call (stmt))
node = XNEW (struct _slp_tree);
SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
- SLP_TREE_VEC_STMTS (node) = NULL;
- SLP_TREE_CHILDREN (node) = VEC_alloc (slp_void_p, heap, nops);
+ SLP_TREE_VEC_STMTS (node).create (0);
+ SLP_TREE_CHILDREN (node).create (nops);
return node;
}
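/* Editor's sketch, not part of the patch: vec<T> fields embedded in a
   struct replace "VEC (T, heap) *" pointer fields.  As in
   vect_create_new_slp_node above, create (0) performs no allocation, so
   the field reports exists () == false and plays the role the old NULL
   assignment did.  The node type here is hypothetical.  */

struct example_node
{
  vec<gimple> scalar_stmts;
  vec<gimple> vec_stmts;
};

static void
example_node_init (struct example_node *node, unsigned nops)
{
  node->vec_stmts.create (0);       /* was: node->vec_stmts = NULL; */
  node->scalar_stmts.create (nops); /* was: VEC_alloc (gimple, heap, nops); */
  gcc_assert (!node->vec_stmts.exists ());
}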
/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
operand. */
-static VEC (slp_oprnd_info, heap) *
+static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
int i;
slp_oprnd_info oprnd_info;
- VEC (slp_oprnd_info, heap) *oprnds_info;
+ vec<slp_oprnd_info> oprnds_info;
- oprnds_info = VEC_alloc (slp_oprnd_info, heap, nops);
+ oprnds_info.create (nops);
for (i = 0; i < nops; i++)
{
oprnd_info = XNEW (struct _slp_oprnd_info);
- oprnd_info->def_stmts = VEC_alloc (gimple, heap, group_size);
+ oprnd_info->def_stmts.create (group_size);
oprnd_info->first_dt = vect_uninitialized_def;
oprnd_info->first_def_type = NULL_TREE;
oprnd_info->first_const_oprnd = NULL_TREE;
oprnd_info->first_pattern = false;
- VEC_quick_push (slp_oprnd_info, oprnds_info, oprnd_info);
+ oprnds_info.quick_push (oprnd_info);
}
return oprnds_info;
/* Free operands info. */
static void
-vect_free_oprnd_info (VEC (slp_oprnd_info, heap) **oprnds_info)
+vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
int i;
slp_oprnd_info oprnd_info;
- FOR_EACH_VEC_ELT (slp_oprnd_info, *oprnds_info, i, oprnd_info)
+ FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
{
- VEC_free (gimple, heap, oprnd_info->def_stmts);
+ oprnd_info->def_stmts.release ();
XDELETE (oprnd_info);
}
- VEC_free (slp_oprnd_info, heap, *oprnds_info);
+ oprnds_info.release ();
}
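/* Editor's sketch, not part of the patch: iteration after the conversion.
   FOR_EACH_VEC_ELT loses its element-type and allocator arguments, and the
   underlying vec::iterate takes the address of the element variable, as in
   the loop above.  Taking the vec by reference lets release () clear the
   caller's copy of the descriptor.  */

static void
example_release_all (vec<slp_oprnd_info> &infos)
{
  int i;
  slp_oprnd_info info;

  /* was: FOR_EACH_VEC_ELT (slp_oprnd_info, infos, i, info)  */
  FOR_EACH_VEC_ELT (infos, i, info)
    info->def_stmts.release ();

  /* Equivalent explicit form; was: VEC_iterate (slp_oprnd_info, infos,
     i, info).  */
  for (i = 0; infos.iterate (i, &info); i++)
    gcc_assert (info->def_stmts.is_empty ());

  infos.release ();
}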
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
slp_tree slp_node, gimple stmt,
int ncopies_for_cost, bool first,
- VEC (slp_oprnd_info, heap) **oprnds_info,
+ vec<slp_oprnd_info> *oprnds_info,
stmt_vector_for_cost *prologue_cost_vec,
stmt_vector_for_cost *body_cost_vec)
{
else
oprnd = gimple_op (stmt, op_idx++);
- oprnd_info = VEC_index (slp_oprnd_info, *oprnds_info, i);
+ oprnd_info = (*oprnds_info)[i];
if (COMPARISON_CLASS_P (oprnd))
{
different_types = true;
else
{
- oprnd0_info = VEC_index (slp_oprnd_info, *oprnds_info, 0);
+ oprnd0_info = (*oprnds_info)[0];
if (is_gimple_assign (stmt)
&& (rhs_code = gimple_assign_rhs_code (stmt))
&& TREE_CODE_CLASS (rhs_code) == tcc_binary
case vect_internal_def:
if (different_types)
{
- oprnd0_info = VEC_index (slp_oprnd_info, *oprnds_info, 0);
- oprnd1_info = VEC_index (slp_oprnd_info, *oprnds_info, 0);
+ oprnd0_info = (*oprnds_info)[0];
+ oprnd1_info = (*oprnds_info)[0];
if (i == 0)
- VEC_quick_push (gimple, oprnd1_info->def_stmts, def_stmt);
+ oprnd1_info->def_stmts.quick_push (def_stmt);
else
- VEC_quick_push (gimple, oprnd0_info->def_stmts, def_stmt);
+ oprnd0_info->def_stmts.quick_push (def_stmt);
}
else
- VEC_quick_push (gimple, oprnd_info->def_stmts, def_stmt);
+ oprnd_info->def_stmts.quick_push (def_stmt);
break;
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
slp_tree *node, unsigned int group_size, int *outside_cost,
int ncopies_for_cost, unsigned int *max_nunits,
- VEC (int, heap) **load_permutation,
- VEC (slp_tree, heap) **loads,
+ vec<int> *load_permutation,
+ vec<slp_tree> *loads,
unsigned int vectorization_factor, bool *loads_permuted,
stmt_vector_for_cost *prologue_cost_vec,
stmt_vector_for_cost *body_cost_vec)
{
unsigned int i;
- VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (*node);
- gimple stmt = VEC_index (gimple, stmts, 0);
+ vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (*node);
+ gimple stmt = stmts[0];
enum tree_code first_stmt_code = ERROR_MARK, rhs_code = ERROR_MARK;
enum tree_code first_cond_code = ERROR_MARK;
tree lhs;
bool permutation = false;
unsigned int load_place;
gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
- VEC (slp_oprnd_info, heap) *oprnds_info;
+ vec<slp_oprnd_info> oprnds_info;
unsigned int nops;
slp_oprnd_info oprnd_info;
tree cond;
oprnds_info = vect_create_oprnd_info (nops, group_size);
/* For every stmt in NODE find its def stmt/s. */
- FOR_EACH_VEC_ELT (gimple, stmts, i, stmt)
+ FOR_EACH_VEC_ELT (stmts, i, stmt)
{
if (dump_enabled_p ())
{
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
scalar_type);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
}
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: no optab.");
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
icode = (int) optab_handler (optab, vec_mode);
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: "
"op not supported by target.");
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
optab_op2_mode = insn_data[icode].operand[2].mode;
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
if (rhs_code == CALL_EXPR)
{
- gimple first_stmt = VEC_index (gimple, stmts, 0);
+ gimple first_stmt = stmts[0];
if (gimple_call_num_args (stmt) != nops
|| !operand_equal_p (gimple_call_fn (first_stmt),
gimple_call_fn (stmt), 0)
stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
}
prologue_cost_vec,
body_cost_vec))
{
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
}
stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
}
stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
if (load_place != i)
permutation = true;
- VEC_safe_push (int, heap, *load_permutation, load_place);
+ load_permutation->safe_push (load_place);
/* We stop the tree when we reach a group of loads. */
stop_recursion = true;
}
/* FORNOW: Non-grouped loads are not supported. */
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
stmt, 0);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
}
&oprnds_info, prologue_cost_vec,
body_cost_vec))
{
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
}
/* Grouped loads were reached - stop the recursion. */
if (stop_recursion)
{
- VEC_safe_push (slp_tree, heap, *loads, *node);
+ loads->safe_push (*node);
if (permutation)
{
- gimple first_stmt = VEC_index (gimple, stmts, 0);
+ gimple first_stmt = stmts[0];
*loads_permuted = true;
(void) record_stmt_cost (body_cost_vec, group_size, vec_perm,
vinfo_for_stmt (first_stmt), 0, vect_body);
*loads_permuted = true;
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return true;
}
/* Create SLP_TREE nodes for the definition node/s. */
- FOR_EACH_VEC_ELT (slp_oprnd_info, oprnds_info, i, oprnd_info)
+ FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
{
slp_tree child;
prologue_cost_vec, body_cost_vec))
{
if (child)
- oprnd_info->def_stmts = NULL;
+ oprnd_info->def_stmts = vec<gimple>();
vect_free_slp_tree (child);
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return false;
}
- oprnd_info->def_stmts = NULL;
- VEC_quick_push (slp_void_p, SLP_TREE_CHILDREN (*node), child);
+ oprnd_info->def_stmts.create (0);
+ SLP_TREE_CHILDREN (*node).quick_push (child);
}
- vect_free_oprnd_info (&oprnds_info);
+ vect_free_oprnd_info (oprnds_info);
return true;
}
return;
dump_printf (dump_kind, "node ");
- FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
{
dump_printf (dump_kind, "\n\tstmt %d ", i);
dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
}
dump_printf (dump_kind, "\n");
- FOR_EACH_VEC_ELT (slp_void_p, SLP_TREE_CHILDREN (node), i, child)
+ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
vect_print_slp_tree (dump_kind, (slp_tree) child);
}
if (!node)
return;
- FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
if (j < 0 || i == j)
STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;
- FOR_EACH_VEC_ELT (slp_void_p, SLP_TREE_CHILDREN (node), i, child)
+ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
vect_mark_slp_stmts ((slp_tree) child, mark, j);
}
if (!node)
return;
- FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
{
stmt_info = vinfo_for_stmt (stmt);
gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
}
- FOR_EACH_VEC_ELT (slp_void_p, SLP_TREE_CHILDREN (node), i, child)
+ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
vect_mark_slp_stmts_relevant ((slp_tree) child);
}
static bool
vect_supported_slp_permutation_p (slp_instance instance)
{
- slp_tree node = VEC_index (slp_tree, SLP_INSTANCE_LOADS (instance), 0);
- gimple stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
+ slp_tree node = SLP_INSTANCE_LOADS (instance)[0];
+ gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
gimple first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
- VEC (slp_tree, heap) *sorted_loads = NULL;
+ vec<slp_tree> sorted_loads = vec<slp_tree>();
int index;
slp_tree *tmp_loads = NULL;
int group_size = SLP_INSTANCE_GROUP_SIZE (instance), i, j;
Sort the nodes according to the order of accesses in the chain. */
tmp_loads = (slp_tree *) xmalloc (sizeof (slp_tree) * group_size);
for (i = 0, j = 0;
- VEC_iterate (int, SLP_INSTANCE_LOAD_PERMUTATION (instance), i, index)
- && VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), j, load);
+ SLP_INSTANCE_LOAD_PERMUTATION (instance).iterate (i, &index)
+ && SLP_INSTANCE_LOADS (instance).iterate (j, &load);
i += group_size, j++)
{
- gimple scalar_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (load), 0);
+ gimple scalar_stmt = SLP_TREE_SCALAR_STMTS (load)[0];
/* Check that the loads are all in the same interleaving chain. */
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (scalar_stmt)) != first_load)
{
tmp_loads[index] = load;
}
- sorted_loads = VEC_alloc (slp_tree, heap, group_size);
+ sorted_loads.create (group_size);
for (i = 0; i < group_size; i++)
- VEC_safe_push (slp_tree, heap, sorted_loads, tmp_loads[i]);
+ sorted_loads.safe_push (tmp_loads[i]);
- VEC_free (slp_tree, heap, SLP_INSTANCE_LOADS (instance));
+ SLP_INSTANCE_LOADS (instance).release ();
SLP_INSTANCE_LOADS (instance) = sorted_loads;
free (tmp_loads);
- if (!vect_transform_slp_perm_load (stmt, NULL, NULL,
+ if (!vect_transform_slp_perm_load (stmt, vec<tree>(), NULL,
SLP_INSTANCE_UNROLLING_FACTOR (instance),
instance, true))
return false;
static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
- VEC (int, heap) *permutation)
+ vec<int> permutation)
{
gimple stmt;
- VEC (gimple, heap) *tmp_stmts;
+ vec<gimple> tmp_stmts;
unsigned int index, i;
slp_void_p child;
if (!node)
return;
- FOR_EACH_VEC_ELT (slp_void_p, SLP_TREE_CHILDREN (node), i, child)
+ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
vect_slp_rearrange_stmts ((slp_tree) child, group_size, permutation);
- gcc_assert (group_size == VEC_length (gimple, SLP_TREE_SCALAR_STMTS (node)));
- tmp_stmts = VEC_alloc (gimple, heap, group_size);
+ gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
+ tmp_stmts.create (group_size);
for (i = 0; i < group_size; i++)
- VEC_safe_push (gimple, heap, tmp_stmts, NULL);
+ tmp_stmts.safe_push (NULL);
- FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
{
- index = VEC_index (int, permutation, i);
- VEC_replace (gimple, tmp_stmts, index, stmt);
+ index = permutation[i];
+ tmp_stmts[index] = stmt;
}
- VEC_free (gimple, heap, SLP_TREE_SCALAR_STMTS (node));
+ SLP_TREE_SCALAR_STMTS (node).release ();
SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
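/* Editor's sketch, not part of the patch: read-only vec arguments.  A
   vec<T> is one pointer wide, so vect_slp_rearrange_stmts above can now
   take "vec<int> permutation" by value where the old API needed
   "VEC (int, heap) *"; copying the descriptor does not copy elements.
   Callees that may reallocate or release still take vec<T> * or vec<T> &.
   Hypothetical helper:  */

static int
example_count_identity (vec<int> permutation)
{
  int i, index, matches = 0;

  FOR_EACH_VEC_ELT (permutation, i, index)
    if (index == i)
      matches++;
  return matches;
}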
static bool
vect_supported_load_permutation_p (slp_instance slp_instn, int group_size,
- VEC (int, heap) *load_permutation)
+ vec<int> load_permutation)
{
int i = 0, j, prev = -1, next, k, number_of_groups;
bool supported, bad_permutation = false;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
- FOR_EACH_VEC_ELT (int, load_permutation, i, next)
+ FOR_EACH_VEC_ELT (load_permutation, i, next)
dump_printf (MSG_NOTE, "%d ", next);
}
permutation). */
/* Check that all the load nodes are of the same size. */
- FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (slp_instn), i, node)
+ FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
{
- if (VEC_length (gimple, SLP_TREE_SCALAR_STMTS (node))
- != (unsigned) group_size)
+ if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
return false;
- stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
+ stmt = SLP_TREE_SCALAR_STMTS (node)[0];
if (is_gimple_assign (stmt)
&& (gimple_assign_rhs_code (stmt) == REALPART_EXPR
|| gimple_assign_rhs_code (stmt) == IMAGPART_EXPR))
chains are mixed, they match the above pattern. */
if (complex_numbers)
{
- FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (slp_instn), i, node)
+ FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
{
- FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), j, stmt)
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, stmt)
{
if (j == 0)
first = stmt;
else
k = 0;
- other_complex_node = VEC_index (slp_tree,
- SLP_INSTANCE_LOADS (slp_instn), k);
- other_node_first = VEC_index (gimple,
- SLP_TREE_SCALAR_STMTS (other_complex_node), 0);
+ other_complex_node = SLP_INSTANCE_LOADS (slp_instn)[k];
+ other_node_first
+ = SLP_TREE_SCALAR_STMTS (other_complex_node)[0];
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
!= other_node_first)
/* We checked that this case is ok, so there is no need to proceed with
permutation tests. */
if (complex_numbers == 2
- && VEC_length (slp_tree, SLP_INSTANCE_LOADS (slp_instn)) == 2)
+ && SLP_INSTANCE_LOADS (slp_instn).length () == 2)
{
- VEC_free (slp_tree, heap, SLP_INSTANCE_LOADS (slp_instn));
- VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (slp_instn));
+ SLP_INSTANCE_LOADS (slp_instn).release ();
+ SLP_INSTANCE_LOAD_PERMUTATION (slp_instn).release ();
return true;
}
node = SLP_INSTANCE_TREE (slp_instn);
- stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
+ stmt = SLP_TREE_SCALAR_STMTS (node)[0];
/* LOAD_PERMUTATION is a list of indices of all the loads of the SLP
instance; not all the loads belong to the same node or interleaving
group. Hence, we need to divide them into groups according to
GROUP_SIZE. */
- number_of_groups = VEC_length (int, load_permutation) / group_size;
+ number_of_groups = load_permutation.length () / group_size;
/* Reduction (there are no data-refs in the root).
In a reduction chain the order of the loads is important. */
k = 0;
for (j = i * group_size; j < i * group_size + group_size; j++)
{
- next = VEC_index (int, load_permutation, j);
- first_group_load_index = VEC_index (int, load_permutation, k);
+ next = load_permutation[j];
+ first_group_load_index = load_permutation[k];
if (next != first_group_load_index)
{
bitmap_clear (load_index);
for (k = 0; k < group_size; k++)
{
- first_group_load_index = VEC_index (int, load_permutation, k);
+ first_group_load_index = load_permutation[k];
if (bitmap_bit_p (load_index, first_group_load_index))
{
bad_permutation = true;
according to the order of the loads. */
vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
load_permutation);
- VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (slp_instn));
+ SLP_INSTANCE_LOAD_PERMUTATION (slp_instn).release ();
return true;
}
}
/* Check that for every node in the instance the loads form a subchain. */
if (bb_vinfo)
{
- FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (slp_instn), i, node)
+ FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
{
next_load = NULL;
first_load = NULL;
- FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), j, load)
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
{
if (!first_load)
first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (load));
the first statement in every load node, is supported. */
if (!bad_permutation)
{
- FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (slp_instn), i, node)
+ FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
{
- first_load = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
+ first_load = SLP_TREE_SCALAR_STMTS (node)[0];
if (first_load
!= GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
{
if (!bad_permutation)
{
- VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (slp_instn));
+ SLP_INSTANCE_LOAD_PERMUTATION (slp_instn).release ();
return true;
}
}
/* FORNOW: the only supported permutation is 0..01..1.. of length equal to
GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
well (unless it's reduction). */
- if (VEC_length (int, load_permutation)
+ if (load_permutation.length ()
!= (unsigned int) (group_size * group_size))
return false;
for (j = 0; j < group_size; j++)
{
for (i = j * group_size, k = 0;
- VEC_iterate (int, load_permutation, i, next) && k < group_size;
+ load_permutation.iterate (i, &next) && k < group_size;
i++, k++)
{
if (i != j * group_size && next != prev)
slp_tree load_node;
gimple first_load = NULL, load;
- FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (instance), i, load_node)
- FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (load_node), j, load)
+ FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node)
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
first_load = get_earlier_stmt (load, first_load);
return first_load;
gimple last_store = NULL, store;
node = SLP_INSTANCE_TREE (instance);
- for (i = 0;
- VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, store);
- i++)
+ for (i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &store); i++)
last_store = get_later_stmt (store, last_store);
return last_store;
unsigned int vectorization_factor = 0;
int outside_cost = 0, ncopies_for_cost, i;
unsigned int max_nunits = 0;
- VEC (int, heap) *load_permutation;
- VEC (slp_tree, heap) *loads;
+ vec<int> load_permutation;
+ vec<slp_tree> loads;
struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
bool loads_permuted = false;
- VEC (gimple, heap) *scalar_stmts;
+ vec<gimple> scalar_stmts;
stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
stmt_info_for_cost *si;
{
gcc_assert (loop_vinfo);
vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
- group_size = VEC_length (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo));
+ group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
}
if (!vectype)
}
/* Create a node (a root of the SLP tree) for the packed grouped stores. */
- scalar_stmts = VEC_alloc (gimple, heap, group_size);
+ scalar_stmts.create (group_size);
next = stmt;
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
{
if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
&& STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
- VEC_safe_push (gimple, heap, scalar_stmts,
- STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
+ scalar_stmts.safe_push (
+ STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
else
- VEC_safe_push (gimple, heap, scalar_stmts, next);
+ scalar_stmts.safe_push (next);
next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
}
}
else
{
/* Collect reduction statements. */
- VEC (gimple, heap) *reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
- for (i = 0; VEC_iterate (gimple, reductions, i, next); i++)
- VEC_safe_push (gimple, heap, scalar_stmts, next);
+ vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
+ for (i = 0; reductions.iterate (i, &next); i++)
+ scalar_stmts.safe_push (next);
}
node = vect_create_new_slp_node (scalar_stmts);
GROUP_SIZE / NUNITS otherwise. */
ncopies_for_cost = unrolling_factor * group_size / nunits;
- load_permutation = VEC_alloc (int, heap, group_size * group_size);
- loads = VEC_alloc (slp_tree, heap, group_size);
- prologue_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 10);
- body_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 10);
+ load_permutation.create (group_size * group_size);
+ loads.create (group_size);
+ prologue_cost_vec.create (10);
+ body_cost_vec.create (10);
/* Build the tree for the SLP instance. */
if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
"Build SLP failed: unrolling required in basic"
" block SLP");
vect_free_slp_tree (node);
- VEC_free (stmt_info_for_cost, heap, body_cost_vec);
- VEC_free (stmt_info_for_cost, heap, prologue_cost_vec);
- VEC_free (int, heap, load_permutation);
- VEC_free (slp_tree, heap, loads);
+ body_cost_vec.release ();
+ prologue_cost_vec.release ();
+ load_permutation.release ();
+ loads.release ();
return false;
}
}
vect_free_slp_instance (new_instance);
- VEC_free (stmt_info_for_cost, heap, prologue_cost_vec);
+ prologue_cost_vec.release ();
return false;
}
= vect_find_first_load_in_slp_instance (new_instance);
}
else
- VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (new_instance));
+ SLP_INSTANCE_LOAD_PERMUTATION (new_instance).release ();
/* Record the prologue costs, which were delayed until we were
sure that SLP was successful. Unlike the body costs, we know
the final values now regardless of the loop vectorization factor. */
- FOR_EACH_VEC_ELT (stmt_info_for_cost, prologue_cost_vec, i, si)
+ FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
{
struct _stmt_vec_info *stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
si->misalign, vect_prologue);
}
- VEC_free (stmt_info_for_cost, heap, prologue_cost_vec);
+ prologue_cost_vec.release ();
if (loop_vinfo)
- VEC_safe_push (slp_instance, heap,
- LOOP_VINFO_SLP_INSTANCES (loop_vinfo),
- new_instance);
+ LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
else
- VEC_safe_push (slp_instance, heap, BB_VINFO_SLP_INSTANCES (bb_vinfo),
- new_instance);
+ BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);
if (dump_enabled_p ())
vect_print_slp_tree (MSG_NOTE, node);
}
else
{
- VEC_free (stmt_info_for_cost, heap, body_cost_vec);
- VEC_free (stmt_info_for_cost, heap, prologue_cost_vec);
+ body_cost_vec.release ();
+ prologue_cost_vec.release ();
}
/* Failed to SLP. */
/* Free the allocated memory. */
vect_free_slp_tree (node);
- VEC_free (int, heap, load_permutation);
- VEC_free (slp_tree, heap, loads);
+ load_permutation.release ();
+ loads.release ();
return false;
}
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
unsigned int i;
- VEC (gimple, heap) *grouped_stores, *reductions = NULL, *reduc_chains = NULL;
+ vec<gimple> grouped_stores;
+ vec<gimple> reductions = vec<gimple>();
+ vec<gimple> reduc_chains = vec<gimple>();
gimple first_element;
bool ok = false;
grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);
/* Find SLP sequences starting from groups of grouped stores. */
- FOR_EACH_VEC_ELT (gimple, grouped_stores, i, first_element)
+ FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element))
ok = true;
}
if (loop_vinfo
- && VEC_length (gimple, LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)) > 0)
+ && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
{
/* Find SLP sequences starting from reduction chains. */
- FOR_EACH_VEC_ELT (gimple, reduc_chains, i, first_element)
+ FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element))
ok = true;
else
}
/* Find SLP sequences starting from groups of reductions. */
- if (loop_vinfo && VEC_length (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo)) > 1
- && vect_analyze_slp_instance (loop_vinfo, bb_vinfo,
- VEC_index (gimple, reductions, 0)))
+ if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
+ && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0]))
ok = true;
return true;
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
unsigned int i, unrolling_factor = 1;
- VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
+ vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
slp_instance instance;
int decided_to_slp = 0;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ===");
- FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
+ FOR_EACH_VEC_ELT (slp_instances, i, instance)
{
/* FORNOW: SLP if you can. */
if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
vect_detect_hybrid_slp_stmts (slp_tree node)
{
int i;
- VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (node);
- gimple stmt = VEC_index (gimple, stmts, 0);
+ vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (node);
+ gimple stmt = stmts[0];
imm_use_iterator imm_iter;
gimple use_stmt;
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
else
bb = BB_VINFO_BB (bb_vinfo);
- FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
if (PURE_SLP_STMT (vinfo_for_stmt (stmt))
&& TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
== vect_reduction_def))
vect_mark_slp_stmts (node, hybrid, i);
- FOR_EACH_VEC_ELT (slp_void_p, SLP_TREE_CHILDREN (node), i, child)
+ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
vect_detect_hybrid_slp_stmts ((slp_tree) child);
}
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
{
unsigned int i;
- VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
+ vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
slp_instance instance;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ===");
- FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
+ FOR_EACH_VEC_ELT (slp_instances, i, instance)
vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance));
}
set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
}
- BB_VINFO_GROUPED_STORES (res) = VEC_alloc (gimple, heap, 10);
- BB_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 2);
+ BB_VINFO_GROUPED_STORES (res).create (10);
+ BB_VINFO_SLP_INSTANCES (res).create (2);
BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
bb->aux = res;
static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
- VEC (slp_instance, heap) *slp_instances;
+ vec<slp_instance> slp_instances;
slp_instance instance;
basic_block bb;
gimple_stmt_iterator si;
free_data_refs (BB_VINFO_DATAREFS (bb_vinfo));
free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
- VEC_free (gimple, heap, BB_VINFO_GROUPED_STORES (bb_vinfo));
+ BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
- FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
+ FOR_EACH_VEC_ELT (slp_instances, i, instance)
vect_free_slp_instance (instance);
- VEC_free (slp_instance, heap, BB_VINFO_SLP_INSTANCES (bb_vinfo));
+ BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
free (bb_vinfo);
bb->aux = NULL;
if (!node)
return true;
- FOR_EACH_VEC_ELT (slp_void_p, SLP_TREE_CHILDREN (node), i, child)
+ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
if (!vect_slp_analyze_node_operations (bb_vinfo, (slp_tree) child))
return false;
- FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
gcc_assert (stmt_info);
static bool
vect_slp_analyze_operations (bb_vec_info bb_vinfo)
{
- VEC (slp_instance, heap) *slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
+ vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
slp_instance instance;
int i;
- for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); )
+ for (i = 0; slp_instances.iterate (i, &instance); )
{
if (!vect_slp_analyze_node_operations (bb_vinfo,
SLP_INSTANCE_TREE (instance)))
{
vect_free_slp_instance (instance);
- VEC_ordered_remove (slp_instance, slp_instances, i);
+ slp_instances.ordered_remove (i);
}
else
i++;
}
- if (!VEC_length (slp_instance, slp_instances))
+ if (!slp_instances.length ())
return false;
return true;
static bool
vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
{
- VEC (slp_instance, heap) *slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
+ vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
slp_instance instance;
int i, j;
unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
stmt_info_for_cost *ci;
/* Calculate vector costs. */
- FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
+ FOR_EACH_VEC_ELT (slp_instances, i, instance)
{
body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
- FOR_EACH_VEC_ELT (stmt_info_for_cost, body_cost_vec, j, ci)
+ FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
{
stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
(void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
vect_slp_analyze_bb_1 (basic_block bb)
{
bb_vec_info bb_vinfo;
- VEC (ddr_p, heap) *ddrs;
- VEC (slp_instance, heap) *slp_instances;
+ vec<ddr_p> ddrs;
+ vec<slp_instance> slp_instances;
slp_instance instance;
int i;
int min_vf = 2;
}
ddrs = BB_VINFO_DDRS (bb_vinfo);
- if (!VEC_length (ddr_p, ddrs))
+ if (!ddrs.length ())
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
/* Mark all the statements that we want to vectorize as pure SLP and
relevant. */
- FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
+ FOR_EACH_VEC_ELT (slp_instances, i, instance)
{
vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
{
unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
- VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
+ vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
slp_instance instance;
stmt_vector_for_cost body_cost_vec;
stmt_info_for_cost *si;
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_update_slp_costs_according_to_vf ===");
- FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
+ FOR_EACH_VEC_ELT (slp_instances, i, instance)
{
/* We assume that costs are linear in ncopies. */
int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
isn't known beforehand. */
body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
- FOR_EACH_VEC_ELT (stmt_info_for_cost, body_cost_vec, j, si)
+ FOR_EACH_VEC_ELT (body_cost_vec, j, si)
(void) add_stmt_cost (data, si->count * ncopies, si->kind,
vinfo_for_stmt (si->stmt), si->misalign,
vect_body);
static void
vect_get_constant_vectors (tree op, slp_tree slp_node,
- VEC (tree, heap) **vec_oprnds,
+ vec<tree> *vec_oprnds,
unsigned int op_num, unsigned int number_of_vectors,
int reduc_index)
{
- VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
- gimple stmt = VEC_index (gimple, stmts, 0);
+ vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
+ gimple stmt = stmts[0];
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
unsigned nunits;
tree vec_cst;
unsigned j, number_of_places_left_in_vector;
tree vector_type;
tree vop;
- int group_size = VEC_length (gimple, stmts);
+ int group_size = stmts.length ();
unsigned int vec_num, i;
unsigned number_of_copies = 1;
- VEC (tree, heap) *voprnds = VEC_alloc (tree, heap, number_of_vectors);
+ vec<tree> voprnds;
+ voprnds.create (number_of_vectors);
bool constant_p, is_store;
tree neutral_op = NULL;
enum tree_code code = gimple_expr_code (stmt);
elts = XALLOCAVEC (tree, nunits);
for (j = 0; j < number_of_copies; j++)
{
- for (i = group_size - 1; VEC_iterate (gimple, stmts, i, stmt); i--)
+ for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
{
if (is_store)
op = gimple_assign_rhs1 (stmt);
vec_cst = build_vector (vector_type, elts);
else
{
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
unsigned k;
- v = VEC_alloc (constructor_elt, gc, nunits);
+ vec_alloc (v, nunits);
for (k = 0; k < nunits; ++k)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
vec_cst = build_constructor (vector_type, v);
}
- VEC_quick_push (tree, voprnds,
- vect_init_vector (stmt, vec_cst,
- vector_type, NULL));
+ voprnds.quick_push (vect_init_vector (stmt, vec_cst,
+ vector_type, NULL));
if (ctor_seq != NULL)
{
- gimple init_stmt
- = SSA_NAME_DEF_STMT (VEC_last (tree, voprnds));
+ gimple init_stmt = SSA_NAME_DEF_STMT (voprnds.last ());
gimple_stmt_iterator gsi = gsi_for_stmt (init_stmt);
gsi_insert_seq_before_without_update (&gsi, ctor_seq,
GSI_SAME_STMT);
/* Since the vectors are created in the reverse order, we should invert
them. */
- vec_num = VEC_length (tree, voprnds);
+ vec_num = voprnds.length ();
for (j = vec_num; j != 0; j--)
{
- vop = VEC_index (tree, voprnds, j - 1);
- VEC_quick_push (tree, *vec_oprnds, vop);
+ vop = voprnds[j - 1];
+ vec_oprnds->quick_push (vop);
}
- VEC_free (tree, heap, voprnds);
+ voprnds.release ();
/* In case VF is greater than the unrolling factor needed for the SLP
group of stmts, NUMBER_OF_VECTORS to be created is greater than
NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
to replicate the vectors. */
- while (number_of_vectors > VEC_length (tree, *vec_oprnds))
+ while (number_of_vectors > vec_oprnds->length ())
{
tree neutral_vec = NULL;
if (!neutral_vec)
neutral_vec = build_vector_from_val (vector_type, neutral_op);
- VEC_quick_push (tree, *vec_oprnds, neutral_vec);
+ vec_oprnds->quick_push (neutral_vec);
}
else
{
- for (i = 0; VEC_iterate (tree, *vec_oprnds, i, vop) && i < vec_num; i++)
- VEC_quick_push (tree, *vec_oprnds, vop);
+ for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
+ vec_oprnds->quick_push (vop);
}
}
}
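/* Editor's sketch, not part of the patch: GC-allocated vectors.  Unlike
   the heap flavor, "VEC (constructor_elt, gc) *" stays a pointer type,
   spelled vec<constructor_elt, va_gc> *, and VEC_alloc becomes the free
   function vec_alloc, exactly as in the CONSTRUCTOR-building hunk above.  */

static tree
example_build_ctor (tree vector_type, tree *elts, unsigned nunits)
{
  vec<constructor_elt, va_gc> *v;
  unsigned k;

  vec_alloc (v, nunits);  /* was: v = VEC_alloc (constructor_elt, gc, nunits); */
  for (k = 0; k < nunits; ++k)
    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
  return build_constructor (vector_type, v);
}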
vectorized def-stmts. */
static void
-vect_get_slp_vect_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds)
+vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
{
tree vec_oprnd;
gimple vec_def_stmt;
unsigned int i;
- gcc_assert (SLP_TREE_VEC_STMTS (slp_node));
+ gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
- FOR_EACH_VEC_ELT (gimple, SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
+ FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
{
gcc_assert (vec_def_stmt);
vec_oprnd = gimple_get_lhs (vec_def_stmt);
- VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
+ vec_oprnds->quick_push (vec_oprnd);
}
}
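/* Editor's sketch, not part of the patch: emptiness tests.  Because the
   field is no longer a pointer, the old "if (v)" and gcc_assert (v)
   checks become exists (), as in the assertion above; is_empty () tests
   the length instead.  Hypothetical predicate:  */

static bool
example_has_vec_stmts (slp_tree node)
{
  /* was: if (SLP_TREE_VEC_STMTS (node)) ...  */
  return SLP_TREE_VEC_STMTS (node).exists ()
	 && !SLP_TREE_VEC_STMTS (node).is_empty ();
}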
vect_get_slp_vect_defs () to retrieve them. */
void
-vect_get_slp_defs (VEC (tree, heap) *ops, slp_tree slp_node,
- VEC (slp_void_p, heap) **vec_oprnds, int reduc_index)
+vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
+ vec<slp_void_p> *vec_oprnds, int reduc_index)
{
gimple first_stmt, first_def;
int number_of_vects = 0, i;
unsigned int child_index = 0;
HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
slp_tree child = NULL;
- VEC (tree, heap) *vec_defs;
+ vec<tree> *vec_defs;
tree oprnd, def_lhs;
bool vectorized_defs;
- first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
- FOR_EACH_VEC_ELT (tree, ops, i, oprnd)
+ first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
+ FOR_EACH_VEC_ELT (ops, i, oprnd)
{
/* For each operand we check if it has vectorized definitions in a child
node or we need to create them (for invariants and constants). We
vect_get_constant_vectors (), and not advance CHILD_INDEX in order
to check this child node for the next operand. */
vectorized_defs = false;
- if (VEC_length (slp_void_p, SLP_TREE_CHILDREN (slp_node)) > child_index)
+ if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
{
- child = (slp_tree) VEC_index (slp_void_p,
- SLP_TREE_CHILDREN (slp_node),
- child_index);
- first_def = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (child), 0);
+ child = (slp_tree) SLP_TREE_CHILDREN (slp_node)[child_index];
+ first_def = SLP_TREE_SCALAR_STMTS (child)[0];
/* At the end of a pattern sequence we have a use of the original stmt,
so we need to compare OPRND with the original def. */
}
/* Allocate memory for vectorized defs. */
- vec_defs = VEC_alloc (tree, heap, number_of_vects);
+ vec_alloc (vec_defs, number_of_vects);
/* For reduction defs we call vect_get_constant_vectors (), since we are
looking for initial loop invariant values. */
if (vectorized_defs && reduc_index == -1)
/* The defs are already vectorized. */
- vect_get_slp_vect_defs (child, &vec_defs);
+ vect_get_slp_vect_defs (child, vec_defs);
else
/* Build vectors from scalar defs. */
- vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
+ vect_get_constant_vectors (oprnd, slp_node, vec_defs, i,
number_of_vects, reduc_index);
- VEC_quick_push (slp_void_p, *vec_oprnds, (slp_void_p) vec_defs);
+ vec_oprnds->quick_push ((slp_void_p) vec_defs);
/* For reductions, we only need initial values. */
if (reduc_index != -1)
vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
tree mask, int first_vec_indx, int second_vec_indx,
gimple_stmt_iterator *gsi, slp_tree node,
- tree vectype, VEC(tree,heap) *dr_chain,
+ tree vectype, vec<tree> dr_chain,
int ncopies, int vect_stmts_counter)
{
tree perm_dest;
/* Initialize the vect stmts of NODE to properly insert the generated
stmts later. */
- for (i = VEC_length (gimple, SLP_TREE_VEC_STMTS (node));
+ for (i = SLP_TREE_VEC_STMTS (node).length ();
i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (node), NULL);
+ SLP_TREE_VEC_STMTS (node).quick_push (NULL);
perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
for (i = 0; i < ncopies; i++)
{
- first_vec = VEC_index (tree, dr_chain, first_vec_indx);
- second_vec = VEC_index (tree, dr_chain, second_vec_indx);
+ first_vec = dr_chain[first_vec_indx];
+ second_vec = dr_chain[second_vec_indx];
/* Generate the permute statement. */
perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, perm_dest,
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
/* Store the vector statement in NODE. */
- VEC_replace (gimple, SLP_TREE_VEC_STMTS (node),
- stride * i + vect_stmts_counter, perm_stmt);
+ SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
first_vec_indx += stride;
second_vec_indx += stride;
If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
permute statements for SLP_NODE_INSTANCE. */
bool
-vect_transform_slp_perm_load (gimple stmt, VEC (tree, heap) *dr_chain,
+vect_transform_slp_perm_load (gimple stmt, vec<tree> dr_chain,
gimple_stmt_iterator *gsi, int vf,
slp_instance slp_node_instance, bool analyze_only)
{
we need the second and the third vectors: {b1,c1,a2,b2} and
{c2,a3,b3,c3}. */
- FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (slp_node_instance), i, node)
+ FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_node_instance), i, node)
{
scalar_index = 0;
index = 0;
second_vec_index = vec_index;
}
- next_scalar_stmt = VEC_index (gimple,
- SLP_TREE_SCALAR_STMTS (node), scalar_index++);
+ next_scalar_stmt
+ = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
vect_create_mask_and_perm (stmt, next_scalar_stmt,
mask_vec, first_vec_index, second_vec_index,
if (!node)
return false;
- FOR_EACH_VEC_ELT (slp_void_p, SLP_TREE_CHILDREN (node), i, child)
+ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
vect_schedule_slp_instance ((slp_tree) child, instance,
vectorization_factor);
- stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
+ stmt = SLP_TREE_SCALAR_STMTS (node)[0];
stmt_info = vinfo_for_stmt (stmt);
/* VECTYPE is the type of the destination. */
/* In case of load permutation we have to allocate vectorized statements for
all the nodes that participate in that permutation. */
- if (SLP_INSTANCE_LOAD_PERMUTATION (instance))
+ if (SLP_INSTANCE_LOAD_PERMUTATION (instance).exists ())
{
- FOR_EACH_VEC_ELT (slp_tree, SLP_INSTANCE_LOADS (instance), i, loads_node)
+ FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, loads_node)
{
- if (!SLP_TREE_VEC_STMTS (loads_node))
+ if (!SLP_TREE_VEC_STMTS (loads_node).exists ())
{
- SLP_TREE_VEC_STMTS (loads_node) = VEC_alloc (gimple, heap,
- vec_stmts_size);
+ SLP_TREE_VEC_STMTS (loads_node).create (vec_stmts_size);
SLP_TREE_NUMBER_OF_VEC_STMTS (loads_node) = vec_stmts_size;
}
}
}
- if (!SLP_TREE_VEC_STMTS (node))
+ if (!SLP_TREE_VEC_STMTS (node).exists ())
{
- SLP_TREE_VEC_STMTS (node) = VEC_alloc (gimple, heap, vec_stmts_size);
+ SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
}
if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
&& STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& !REFERENCE_CLASS_P (gimple_get_lhs (stmt))
- && SLP_INSTANCE_LOAD_PERMUTATION (instance))
+ && SLP_INSTANCE_LOAD_PERMUTATION (instance).exists ())
si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
else if (is_pattern_stmt_p (stmt_info))
si = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
if (!node)
return;
- FOR_EACH_VEC_ELT (slp_void_p, SLP_TREE_CHILDREN (node), i, child)
+ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
vect_remove_slp_scalar_calls ((slp_tree) child);
- FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
{
if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
continue;
bool
vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
- VEC (slp_instance, heap) *slp_instances;
+ vec<slp_instance> slp_instances;
slp_instance instance;
unsigned int i, vf;
bool is_store = false;
vf = 1;
}
- FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
+ FOR_EACH_VEC_ELT (slp_instances, i, instance)
{
/* Schedule the tree of INSTANCE. */
is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
"vectorizing stmts using SLP.");
}
- FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
+ FOR_EACH_VEC_ELT (slp_instances, i, instance)
{
slp_tree root = SLP_INSTANCE_TREE (instance);
gimple store;
vect_remove_slp_scalar_calls (root);
- for (j = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (root), j, store)
+ for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
&& j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
{
if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
static void
-vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
+vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
enum vect_relevant relevant, bool live_p,
bool used_in_pattern)
{
return;
}
- VEC_safe_push (gimple, heap, *worklist, stmt);
+ worklist->safe_push (stmt);
}
static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
- enum vect_relevant relevant, VEC(gimple,heap) **worklist,
+ enum vect_relevant relevant, vec<gimple> *worklist,
bool force)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
- VEC(gimple,heap) *worklist;
+ vec<gimple> worklist;
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
unsigned int nbbs = loop->num_nodes;
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_mark_stmts_to_be_vectorized ===");
- worklist = VEC_alloc (gimple, heap, 64);
+ worklist.create (64);
/* 1. Init worklist. */
for (i = 0; i < nbbs; i++)
}
/* 2. Process_worklist */
- while (VEC_length (gimple, worklist) > 0)
+ while (worklist.length () > 0)
{
use_operand_p use_p;
ssa_op_iter iter;
- stmt = VEC_pop (gimple, worklist);
+ stmt = worklist.pop ();
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of reduction.");
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
return false;
}
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of nested cycle.");
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
return false;
}
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of double reduction.");
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
return false;
}
|| !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
live_p, relevant, &worklist, false))
{
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
return false;
}
i = 2;
if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
&worklist, false))
{
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
return false;
}
}
if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
&worklist, false))
{
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
return false;
}
}
if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
&worklist, false))
{
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
return false;
}
}
if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
&worklist, true))
{
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
return false;
}
}
} /* while worklist */
- VEC_free (gimple, heap, worklist);
+ worklist.release ();
return true;
}
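
This hunk shows the canonical heap-vector conversion: a pointer to a
VEC(gimple,heap) managed through VEC_alloc/VEC_safe_push/VEC_pop/VEC_free
becomes a by-value vec<gimple> managed through member functions.  A minimal
before/after sketch of the idiom (process () is a hypothetical stand-in for
the loop body):

    /* Before: macro API over a heap-allocated vector pointer.  */
    VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
    VEC_safe_push (gimple, heap, worklist, stmt);
    while (VEC_length (gimple, worklist) > 0)
      process (VEC_pop (gimple, worklist));
    VEC_free (gimple, heap, worklist);

    /* After: by-value vec<T>; same heap storage, member functions.  */
    vec<gimple> worklist;
    worklist.create (64);
    worklist.safe_push (stmt);
    while (worklist.length () > 0)
      process (worklist.pop ());
    worklist.release ();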
{
if (slp_node)
{
- first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
+ first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
group_size = 1;
}
else
static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
- VEC(tree,heap) **vec_oprnds0,
- VEC(tree,heap) **vec_oprnds1)
+ vec<tree> *vec_oprnds0,
+ vec<tree> *vec_oprnds1)
{
- tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
+ tree vec_oprnd = vec_oprnds0->pop ();
vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
- VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
+ vec_oprnds0->quick_push (vec_oprnd);
- if (vec_oprnds1 && *vec_oprnds1)
+ if (vec_oprnds1 && vec_oprnds1->length ())
{
- vec_oprnd = VEC_pop (tree, *vec_oprnds1);
+ vec_oprnd = vec_oprnds1->pop ();
vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
- VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
+ vec_oprnds1->quick_push (vec_oprnd);
}
}
void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
- VEC (tree, heap) **vec_oprnds0,
- VEC (tree, heap) **vec_oprnds1,
+ vec<tree> *vec_oprnds0,
+ vec<tree> *vec_oprnds1,
slp_tree slp_node, int reduc_index)
{
if (slp_node)
{
int nops = (op1 == NULL_TREE) ? 1 : 2;
- VEC (tree, heap) *ops = VEC_alloc (tree, heap, nops);
- VEC (slp_void_p, heap) *vec_defs = VEC_alloc (slp_void_p, heap, nops);
+ vec<tree> ops;
+ ops.create (nops);
+ vec<slp_void_p> vec_defs;
+ vec_defs.create (nops);
- VEC_quick_push (tree, ops, op0);
+ ops.quick_push (op0);
if (op1)
- VEC_quick_push (tree, ops, op1);
+ ops.quick_push (op1);
vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
- *vec_oprnds0 = (VEC (tree, heap) *) VEC_index (slp_void_p, vec_defs, 0);
+ *vec_oprnds0 = *((vec<tree> *) vec_defs[0]);
if (op1)
- *vec_oprnds1 = (VEC (tree, heap) *) VEC_index (slp_void_p, vec_defs, 1);
+ *vec_oprnds1 = *((vec<tree> *) vec_defs[1]);
- VEC_free (tree, heap, ops);
- VEC_free (slp_void_p, heap, vec_defs);
+ ops.release ();
+ vec_defs.release ();
}
else
{
tree vec_oprnd;
- *vec_oprnds0 = VEC_alloc (tree, heap, 1);
+ vec_oprnds0->create (1);
vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
- VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
+ vec_oprnds0->quick_push (vec_oprnd);
if (op1)
{
- *vec_oprnds1 = VEC_alloc (tree, heap, 1);
+ vec_oprnds1->create (1);
vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
- VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
+ vec_oprnds1->quick_push (vec_oprnd);
}
}
}
= {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
gimple new_stmt = NULL;
int ncopies, j;
- VEC(tree, heap) *vargs = NULL;
+ vec<tree> vargs = vec<tree>();
enum { NARROW, NONE, WIDEN } modifier;
size_t i, nargs;
tree lhs;
{
/* Build argument list for the vectorized call. */
if (j == 0)
- vargs = VEC_alloc (tree, heap, nargs);
+ vargs.create (nargs);
else
- VEC_truncate (tree, vargs, 0);
+ vargs.truncate (0);
if (slp_node)
{
- VEC (slp_void_p, heap) *vec_defs
- = VEC_alloc (slp_void_p, heap, nargs);
- VEC (tree, heap) *vec_oprnds0;
+ vec<slp_void_p> vec_defs;
+ vec_defs.create (nargs);
+ vec<tree> vec_oprnds0;
for (i = 0; i < nargs; i++)
- VEC_quick_push (tree, vargs, gimple_call_arg (stmt, i));
+ vargs.quick_push (gimple_call_arg (stmt, i));
vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
- vec_oprnds0
- = (VEC (tree, heap) *) VEC_index (slp_void_p, vec_defs, 0);
+ vec_oprnds0 = *((vec<tree> *) vec_defs[0]);
/* Arguments are ready. Create the new vector stmt. */
- FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vec_oprnd0)
+ FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
{
size_t k;
for (k = 0; k < nargs; k++)
{
- VEC (tree, heap) *vec_oprndsk
- = (VEC (tree, heap) *)
- VEC_index (slp_void_p, vec_defs, k);
- VEC_replace (tree, vargs, k,
- VEC_index (tree, vec_oprndsk, i));
+ vec<tree> vec_oprndsk = *((vec<tree> *) vec_defs[k]);
+ vargs[k] = vec_oprndsk[i];
}
new_stmt = gimple_build_call_vec (fndecl, vargs);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_call_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
- new_stmt);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
for (i = 0; i < nargs; i++)
{
- VEC (tree, heap) *vec_oprndsi
- = (VEC (tree, heap) *)
- VEC_index (slp_void_p, vec_defs, i);
- VEC_free (tree, heap, vec_oprndsi);
+ vec<tree> vec_oprndsi = *((vec<tree> *) vec_defs[i]);
+ vec_oprndsi.release ();
}
- VEC_free (slp_void_p, heap, vec_defs);
+ vec_defs.release ();
continue;
}
= vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
}
- VEC_quick_push (tree, vargs, vec_oprnd0);
+ vargs.quick_push (vec_oprnd0);
}
new_stmt = gimple_build_call_vec (fndecl, vargs);
{
/* Build argument list for the vectorized call. */
if (j == 0)
- vargs = VEC_alloc (tree, heap, nargs * 2);
+ vargs.create (nargs * 2);
else
- VEC_truncate (tree, vargs, 0);
+ vargs.truncate (0);
if (slp_node)
{
- VEC (slp_void_p, heap) *vec_defs
- = VEC_alloc (slp_void_p, heap, nargs);
- VEC (tree, heap) *vec_oprnds0;
+ vec<slp_void_p> vec_defs;
+ vec_defs.create (nargs);
+ vec<tree> vec_oprnds0;
for (i = 0; i < nargs; i++)
- VEC_quick_push (tree, vargs, gimple_call_arg (stmt, i));
+ vargs.quick_push (gimple_call_arg (stmt, i));
vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
- vec_oprnds0
- = (VEC (tree, heap) *) VEC_index (slp_void_p, vec_defs, 0);
+ vec_oprnds0 = *((vec<tree> *) vec_defs[0]);
/* Arguments are ready. Create the new vector stmt. */
- for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vec_oprnd0);
- i += 2)
+ for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
{
size_t k;
- VEC_truncate (tree, vargs, 0);
+ vargs.truncate (0);
for (k = 0; k < nargs; k++)
{
- VEC (tree, heap) *vec_oprndsk
- = (VEC (tree, heap) *)
- VEC_index (slp_void_p, vec_defs, k);
- VEC_quick_push (tree, vargs,
- VEC_index (tree, vec_oprndsk, i));
- VEC_quick_push (tree, vargs,
- VEC_index (tree, vec_oprndsk, i + 1));
+ vec<tree> vec_oprndsk = *((vec<tree> *) vec_defs[k]);
+ vargs.quick_push (vec_oprndsk[i]);
+ vargs.quick_push (vec_oprndsk[i + 1]);
}
new_stmt = gimple_build_call_vec (fndecl, vargs);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_call_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
- new_stmt);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
for (i = 0; i < nargs; i++)
{
- VEC (tree, heap) *vec_oprndsi
- = (VEC (tree, heap) *)
- VEC_index (slp_void_p, vec_defs, i);
- VEC_free (tree, heap, vec_oprndsi);
+ vec<tree> vec_oprndsi = *((vec<tree> *) vec_defs[i]);
+ vec_oprndsi.release ();
}
- VEC_free (slp_void_p, heap, vec_defs);
+ vec_defs.release ();
continue;
}
= vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
}
- VEC_quick_push (tree, vargs, vec_oprnd0);
- VEC_quick_push (tree, vargs, vec_oprnd1);
+ vargs.quick_push (vec_oprnd0);
+ vargs.quick_push (vec_oprnd1);
}
new_stmt = gimple_build_call_vec (fndecl, vargs);
return false;
}
- VEC_free (tree, heap, vargs);
+ vargs.release ();
/* Update the exception handling table with the vector stmt if necessary. */
if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
static void
vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
- VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
+ vec<tree> *vec_oprnds, int multi_step_cvt)
{
tree vec_oprnd;
else
vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
- VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
+ vec_oprnds->quick_push (vec_oprnd);
/* Get second vector operand. */
vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
- VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
+ vec_oprnds->quick_push (vec_oprnd);
*oprnd = vec_oprnd;
recursively. */
static void
-vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
+vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
int multi_step_cvt, gimple stmt,
- VEC (tree, heap) *vec_dsts,
+ vec<tree> vec_dsts,
gimple_stmt_iterator *gsi,
slp_tree slp_node, enum tree_code code,
stmt_vec_info *prev_stmt_info)
gimple new_stmt;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- vec_dest = VEC_pop (tree, vec_dsts);
+ vec_dest = vec_dsts.pop ();
- for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
+ for (i = 0; i < vec_oprnds->length (); i += 2)
{
/* Create demotion operation. */
- vop0 = VEC_index (tree, *vec_oprnds, i);
- vop1 = VEC_index (tree, *vec_oprnds, i + 1);
+ vop0 = (*vec_oprnds)[i];
+ vop1 = (*vec_oprnds)[i + 1];
new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
new_tmp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_tmp);
if (multi_step_cvt)
/* Store the resulting vector for next recursive call. */
- VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
+ (*vec_oprnds)[i/2] = new_tmp;
else
{
/* This is the last step of the conversion sequence. Store the
vectors in SLP_NODE or in vector info of the scalar statement
(or in STMT_VINFO_RELATED_STMT chain). */
if (slp_node)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
else
{
if (!*prev_stmt_info)
{
/* At each level of recursion we have half of the operands we had at the
previous level. */
- VEC_truncate (tree, *vec_oprnds, (i+1)/2);
+ vec_oprnds->truncate ((i+1)/2);
vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
stmt, vec_dsts, gsi, slp_node,
VEC_PACK_TRUNC_EXPR,
prev_stmt_info);
}
- VEC_quick_push (tree, vec_dsts, vec_dest);
+ vec_dsts.quick_push (vec_dest);
}
the resulting vectors and call the function recursively. */
static void
-vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
- VEC (tree, heap) **vec_oprnds1,
+vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
+ vec<tree> *vec_oprnds1,
gimple stmt, tree vec_dest,
gimple_stmt_iterator *gsi,
enum tree_code code1,
int i;
tree vop0, vop1, new_tmp1, new_tmp2;
gimple new_stmt1, new_stmt2;
- VEC (tree, heap) *vec_tmp = NULL;
+ vec<tree> vec_tmp = vec<tree>();
- vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
- FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
+ vec_tmp.create (vec_oprnds0->length () * 2);
+ FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
{
if (op_type == binary_op)
- vop1 = VEC_index (tree, *vec_oprnds1, i);
+ vop1 = (*vec_oprnds1)[i];
else
vop1 = NULL_TREE;
}
/* Store the results for the next step. */
- VEC_quick_push (tree, vec_tmp, new_tmp1);
- VEC_quick_push (tree, vec_tmp, new_tmp2);
+ vec_tmp.quick_push (new_tmp1);
+ vec_tmp.quick_push (new_tmp2);
}
- VEC_free (tree, heap, *vec_oprnds0);
+ vec_oprnds0->release ();
*vec_oprnds0 = vec_tmp;
}
int ncopies, i, j;
tree lhs_type, rhs_type;
enum { NARROW, NONE, WIDEN } modifier;
- VEC (tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
+ vec<tree> vec_oprnds0 = vec<tree>();
+ vec<tree> vec_oprnds1 = vec<tree>();
tree vop0;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
int multi_step_cvt = 0;
- VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL;
+ vec<tree> vec_dsts = vec<tree>();
+ vec<tree> interm_types = vec<tree>();
tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
int op_type;
enum machine_mode rhs_mode;
else
{
multi_step_cvt++;
- VEC_safe_push (tree, heap, interm_types, cvt_type);
+ interm_types.safe_push (cvt_type);
cvt_type = NULL_TREE;
}
break;
STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
}
- VEC_free (tree, heap, interm_types);
+ interm_types.release ();
return true;
}
We create vector destinations for the intermediate type (TYPES) received
from supportable_*_operation, and store them in the correct order
for future use in vect_create_vectorized_*_stmts (). */
- vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
+ vec_dsts.create (multi_step_cvt + 1);
vec_dest = vect_create_destination_var (scalar_dest,
(cvt_type && modifier == WIDEN)
? cvt_type : vectype_out);
- VEC_quick_push (tree, vec_dsts, vec_dest);
+ vec_dsts.quick_push (vec_dest);
if (multi_step_cvt)
{
- for (i = VEC_length (tree, interm_types) - 1;
- VEC_iterate (tree, interm_types, i, intermediate_type); i--)
+ for (i = interm_types.length () - 1;
+ interm_types.iterate (i, &intermediate_type); i--)
{
vec_dest = vect_create_destination_var (scalar_dest,
intermediate_type);
- VEC_quick_push (tree, vec_dsts, vec_dest);
+ vec_dsts.quick_push (vec_dest);
}
}
if (!slp_node)
{
if (modifier == NONE)
- vec_oprnds0 = VEC_alloc (tree, heap, 1);
+ vec_oprnds0.create (1);
else if (modifier == WIDEN)
{
- vec_oprnds0 = VEC_alloc (tree, heap,
- (multi_step_cvt
- ? vect_pow2 (multi_step_cvt) : 1));
+ vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
if (op_type == binary_op)
- vec_oprnds1 = VEC_alloc (tree, heap, 1);
+ vec_oprnds1.create (1);
}
else
- vec_oprnds0 = VEC_alloc (tree, heap,
- 2 * (multi_step_cvt
- ? vect_pow2 (multi_step_cvt) : 1));
+ vec_oprnds0.create (2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
}
else if (code == WIDEN_LSHIFT_EXPR)
- vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
+ vec_oprnds1.create (slp_node->vec_stmts_size);
last_oprnd = op0;
prev_stmt_info = NULL;
else
vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
- FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
+ FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
{
/* Arguments are ready, create the new vector stmt. */
if (code1 == CALL_EXPR)
vect_finish_stmt_generation (stmt, new_stmt, gsi);
if (slp_node)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
- new_stmt);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
if (j == 0)
for SLP_NODE. We check during the analysis that all
the shift arguments are the same. */
for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
- VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+ vec_oprnds1.quick_push (vec_oprnd1);
vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
slp_node, -1);
else
{
vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
- VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
+ vec_oprnds0.quick_push (vec_oprnd0);
if (op_type == binary_op)
{
if (code == WIDEN_LSHIFT_EXPR)
else
vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
NULL);
- VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+ vec_oprnds1.quick_push (vec_oprnd1);
}
}
}
else
{
vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
- VEC_truncate (tree, vec_oprnds0, 0);
- VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
+ vec_oprnds0.truncate (0);
+ vec_oprnds0.quick_push (vec_oprnd0);
if (op_type == binary_op)
{
if (code == WIDEN_LSHIFT_EXPR)
else
vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
vec_oprnd1);
- VEC_truncate (tree, vec_oprnds1, 0);
- VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+ vec_oprnds1.truncate (0);
+ vec_oprnds1.quick_push (vec_oprnd1);
}
}
/* Arguments are ready. Create the new vector stmts. */
for (i = multi_step_cvt; i >= 0; i--)
{
- tree this_dest = VEC_index (tree, vec_dsts, i);
+ tree this_dest = vec_dsts[i];
enum tree_code c1 = code1, c2 = code2;
if (i == 0 && codecvt2 != ERROR_MARK)
{
op_type);
}
- FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
+ FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
{
if (cvt_type)
{
new_stmt = SSA_NAME_DEF_STMT (vop0);
if (slp_node)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
- new_stmt);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
else
{
if (!prev_stmt_info)
slp_node, -1);
else
{
- VEC_truncate (tree, vec_oprnds0, 0);
+ vec_oprnds0.truncate (0);
vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
vect_pow2 (multi_step_cvt) - 1);
}
/* Arguments are ready. Create the new vector stmts. */
if (cvt_type)
- FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
+ FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
{
if (codecvt1 == CALL_EXPR)
{
}
vect_finish_stmt_generation (stmt, new_stmt, gsi);
- VEC_replace (tree, vec_oprnds0, i, new_temp);
+ vec_oprnds0[i] = new_temp;
}
vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
break;
}
- VEC_free (tree, heap, vec_oprnds0);
- VEC_free (tree, heap, vec_oprnds1);
- VEC_free (tree, heap, vec_dsts);
- VEC_free (tree, heap, interm_types);
+ vec_oprnds0.release ();
+ vec_oprnds1.release ();
+ vec_dsts.release ();
+ interm_types.release ();
return true;
}
unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
int ncopies;
int i, j;
- VEC(tree,heap) *vec_oprnds = NULL;
+ vec<tree> vec_oprnds = vec<tree>();
tree vop;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
gimple new_stmt = NULL;
vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
/* Arguments are ready. create the new vector stmt. */
- FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
+ FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
{
if (CONVERT_EXPR_CODE_P (code)
|| code == VIEW_CONVERT_EXPR)
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
if (slp_node)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
if (slp_node)
prev_stmt_info = vinfo_for_stmt (new_stmt);
}
- VEC_free (tree, heap, vec_oprnds);
+ vec_oprnds.release ();
return true;
}
tree op1_vectype;
int ncopies;
int j, i;
- VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
+ vec<tree> vec_oprnds0 = vec<tree>();
+ vec<tree> vec_oprnds1 = vec<tree>();
tree vop0, vop1;
unsigned int k;
bool scalar_shift_arg = true;
a scalar shift. */
if (slp_node)
{
- VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
+ vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
gimple slpstmt;
- FOR_EACH_VEC_ELT (gimple, stmts, k, slpstmt)
+ FOR_EACH_VEC_ELT (stmts, k, slpstmt)
if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
scalar_shift_arg = false;
}
allocate VEC_OPRNDS1 only in case of binary operation. */
if (!slp_node)
{
- vec_oprnds0 = VEC_alloc (tree, heap, 1);
- vec_oprnds1 = VEC_alloc (tree, heap, 1);
+ vec_oprnds0.create (1);
+ vec_oprnds1.create (1);
}
else if (scalar_shift_arg)
- vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
+ vec_oprnds1.create (slp_node->vec_stmts_size);
prev_stmt_info = NULL;
for (j = 0; j < ncopies; j++)
dump_printf_loc (MSG_NOTE, vect_location,
"operand 1 using scalar mode.");
vec_oprnd1 = op1;
- VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+ vec_oprnds1.quick_push (vec_oprnd1);
if (slp_node)
{
/* Store vec_oprnd1 for every vector stmt to be created
TODO: Allow different constants for different vector
stmts generated for an SLP instance. */
for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
- VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+ vec_oprnds1.quick_push (vec_oprnd1);
}
}
}
vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
/* Arguments are ready. Create the new vector stmt. */
- FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
+ FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
{
- vop1 = VEC_index (tree, vec_oprnds1, i);
+ vop1 = vec_oprnds1[i];
new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
if (slp_node)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
if (slp_node)
prev_stmt_info = vinfo_for_stmt (new_stmt);
}
- VEC_free (tree, heap, vec_oprnds0);
- VEC_free (tree, heap, vec_oprnds1);
+ vec_oprnds0.release ();
+ vec_oprnds1.release ();
return true;
}
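
Throughout these hunks FOR_EACH_VEC_ELT drops its leading type argument,
since the element type is now deduced from the vec<T> itself, and plain
indexing replaces VEC_index.  A sketch of the new paired-element walk
(process_pair () is a hypothetical stand-in; both vectors are assumed to
have been filled to equal length):

    vec<tree> a, b;
    unsigned int i;
    tree va;
    FOR_EACH_VEC_ELT (a, i, va)    /* was: FOR_EACH_VEC_ELT (tree, a, i, va) */
      process_pair (va, b[i]);     /* b[i] replaces VEC_index (tree, b, i) */
    /* The member form of iteration now takes the element's address:
       for (i = 0; a.iterate (i, &va); i++) ...  */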
tree vectype_out;
int ncopies;
int j, i;
- VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL;
+ vec<tree> vec_oprnds0 = vec<tree>();
+ vec<tree> vec_oprnds1 = vec<tree>();
+ vec<tree> vec_oprnds2 = vec<tree>();
tree vop0, vop1, vop2;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
int vf;
slp_node, -1);
if (op_type == ternary_op)
{
- vec_oprnds2 = VEC_alloc (tree, heap, 1);
- VEC_quick_push (tree, vec_oprnds2,
- vect_get_vec_def_for_operand (op2, stmt, NULL));
+ vec_oprnds2.create (1);
+ vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
+ stmt,
+ NULL));
}
}
else
vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
if (op_type == ternary_op)
{
- tree vec_oprnd = VEC_pop (tree, vec_oprnds2);
- VEC_quick_push (tree, vec_oprnds2,
- vect_get_vec_def_for_stmt_copy (dt[2],
- vec_oprnd));
+ tree vec_oprnd = vec_oprnds2.pop ();
+ vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
+ vec_oprnd));
}
}
/* Arguments are ready. Create the new vector stmt. */
- FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
+ FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
{
vop1 = ((op_type == binary_op || op_type == ternary_op)
- ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE);
+ ? vec_oprnds1[i] : NULL_TREE);
vop2 = ((op_type == ternary_op)
- ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE);
+ ? vec_oprnds2[i] : NULL_TREE);
new_stmt = gimple_build_assign_with_ops (code, vec_dest,
vop0, vop1, vop2);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
if (slp_node)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
if (slp_node)
prev_stmt_info = vinfo_for_stmt (new_stmt);
}
- VEC_free (tree, heap, vec_oprnds0);
- if (vec_oprnds1)
- VEC_free (tree, heap, vec_oprnds1);
- if (vec_oprnds2)
- VEC_free (tree, heap, vec_oprnds2);
+ vec_oprnds0.release ();
+ vec_oprnds1.release ();
+ vec_oprnds2.release ();
return true;
}
bool grouped_store = false;
bool store_lanes_p = false;
unsigned int group_size, i;
- VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
+ vec<tree> dr_chain = vec<tree>();
+ vec<tree> oprnds = vec<tree>();
+ vec<tree> result_chain = vec<tree>();
bool inv_p;
- VEC(tree,heap) *vec_oprnds = NULL;
+ vec<tree> vec_oprnds = vec<tree>();
bool slp = (slp_node != NULL);
unsigned int vec_num;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
/* VEC_NUM is the number of vect stmts to be created for this
group. */
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
- first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
+ first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
op = gimple_assign_rhs1 (first_stmt);
}
dump_printf_loc (MSG_NOTE, vect_location,
"transform store. ncopies = %d", ncopies);
- dr_chain = VEC_alloc (tree, heap, group_size);
- oprnds = VEC_alloc (tree, heap, group_size);
+ dr_chain.create (group_size);
+ oprnds.create (group_size);
alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
gcc_assert (alignment_support_scheme);
vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
NULL, slp_node, -1);
- vec_oprnd = VEC_index (tree, vec_oprnds, 0);
+ vec_oprnd = vec_oprnds[0];
}
else
{
vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
NULL);
- VEC_quick_push(tree, dr_chain, vec_oprnd);
- VEC_quick_push(tree, oprnds, vec_oprnd);
+ dr_chain.quick_push (vec_oprnd);
+ oprnds.quick_push (vec_oprnd);
next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
}
}
OPRNDS are of size 1. */
for (i = 0; i < group_size; i++)
{
- op = VEC_index (tree, oprnds, i);
+ op = oprnds[i];
vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt,
&def, &dt);
vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
- VEC_replace(tree, dr_chain, i, vec_oprnd);
- VEC_replace(tree, oprnds, i, vec_oprnd);
+ dr_chain[i] = vec_oprnd;
+ oprnds[i] = vec_oprnd;
}
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
TYPE_SIZE_UNIT (aggr_type));
vec_array = create_vector_array (vectype, vec_num);
for (i = 0; i < vec_num; i++)
{
- vec_oprnd = VEC_index (tree, dr_chain, i);
+ vec_oprnd = dr_chain[i];
write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
}
new_stmt = NULL;
if (grouped_store)
{
- result_chain = VEC_alloc (tree, heap, group_size);
+ result_chain.create (group_size);
/* Permute. */
vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
&result_chain);
stmt, NULL_TREE);
if (slp)
- vec_oprnd = VEC_index (tree, vec_oprnds, i);
+ vec_oprnd = vec_oprnds[i];
else if (grouped_store)
/* For grouped stores vectorized defs are interleaved in
vect_permute_store_chain(). */
- vec_oprnd = VEC_index (tree, result_chain, i);
+ vec_oprnd = result_chain[i];
data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
build_int_cst (reference_alias_ptr_type
}
}
- VEC_free (tree, heap, dr_chain);
- VEC_free (tree, heap, oprnds);
- if (result_chain)
- VEC_free (tree, heap, result_chain);
- if (vec_oprnds)
- VEC_free (tree, heap, vec_oprnds);
+ dr_chain.release ();
+ oprnds.release ();
+ result_chain.release ();
+ vec_oprnds.release ();
return true;
}
tree offset = NULL_TREE;
tree realignment_token = NULL_TREE;
gimple phi = NULL;
- VEC(tree,heap) *dr_chain = NULL;
+ vec<tree> dr_chain = vec<tree>();
bool grouped_load = false;
bool load_lanes_p = false;
gimple first_stmt;
tree ref = DR_REF (dr);
tree ivstep;
tree running_off;
- VEC(constructor_elt, gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
gimple_seq stmts = NULL;
gcc_assert (stride_base && stride_step);
{
tree vec_inv;
- v = VEC_alloc (constructor_elt, gc, nunits);
+ vec_alloc (v, nunits);
for (i = 0; i < nunits; i++)
{
tree newref, newoff;
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
if (slp
- && !SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance)
- && first_stmt != VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0))
- first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
+ && !SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance).exists ()
+ && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
+ first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
/* Check if the chain of loads is already vectorized. */
if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
{
grouped_load = false;
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
- if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
+ if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance).exists ())
slp_perm = true;
}
else
TYPE_SIZE_UNIT (aggr_type));
if (grouped_load || slp_perm)
- dr_chain = VEC_alloc (tree, heap, vec_num);
+ dr_chain.create (vec_num);
if (load_lanes_p)
{
{
new_temp = read_vector_array (stmt, gsi, scalar_dest,
vec_array, i);
- VEC_quick_push (tree, dr_chain, new_temp);
+ dr_chain.quick_push (new_temp);
}
/* Record the mapping between SSA_NAMEs and statements. */
/* Collect vector loads and later create their permutation in
vect_transform_grouped_load (). */
if (grouped_load || slp_perm)
- VEC_quick_push (tree, dr_chain, new_temp);
+ dr_chain.quick_push (new_temp);
/* Store vector loads in the corresponding SLP_NODE. */
if (slp && !slp_perm)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
- new_stmt);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
}
if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
slp_node_instance, false))
{
- VEC_free (tree, heap, dr_chain);
+ dr_chain.release ();
return false;
}
}
prev_stmt_info = vinfo_for_stmt (new_stmt);
}
}
- if (dr_chain)
- VEC_free (tree, heap, dr_chain);
+ dr_chain.release ();
}
return true;
stmt_vec_info prev_stmt_info = NULL;
int i, j;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
- VEC (tree, heap) *vec_oprnds2 = NULL, *vec_oprnds3 = NULL;
+ vec<tree> vec_oprnds0 = vec<tree>();
+ vec<tree> vec_oprnds1 = vec<tree>();
+ vec<tree> vec_oprnds2 = vec<tree>();
+ vec<tree> vec_oprnds3 = vec<tree>();
tree vec_cmp_type = vectype;
if (slp_node || PURE_SLP_STMT (stmt_info))
if (!slp_node)
{
- vec_oprnds0 = VEC_alloc (tree, heap, 1);
- vec_oprnds1 = VEC_alloc (tree, heap, 1);
- vec_oprnds2 = VEC_alloc (tree, heap, 1);
- vec_oprnds3 = VEC_alloc (tree, heap, 1);
+ vec_oprnds0.create (1);
+ vec_oprnds1.create (1);
+ vec_oprnds2.create (1);
+ vec_oprnds3.create (1);
}
/* Handle def. */
{
if (slp_node)
{
- VEC (tree, heap) *ops = VEC_alloc (tree, heap, 4);
- VEC (slp_void_p, heap) *vec_defs;
-
- vec_defs = VEC_alloc (slp_void_p, heap, 4);
- VEC_safe_push (tree, heap, ops, TREE_OPERAND (cond_expr, 0));
- VEC_safe_push (tree, heap, ops, TREE_OPERAND (cond_expr, 1));
- VEC_safe_push (tree, heap, ops, then_clause);
- VEC_safe_push (tree, heap, ops, else_clause);
+ vec<tree> ops;
+ ops.create (4);
+ vec<slp_void_p> vec_defs;
+
+ vec_defs.create (4);
+ ops.safe_push (TREE_OPERAND (cond_expr, 0));
+ ops.safe_push (TREE_OPERAND (cond_expr, 1));
+ ops.safe_push (then_clause);
+ ops.safe_push (else_clause);
vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
- vec_oprnds3 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs);
- vec_oprnds2 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs);
- vec_oprnds1 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs);
- vec_oprnds0 = (VEC (tree, heap) *) VEC_pop (slp_void_p, vec_defs);
+ vec_oprnds3 = *((vec<tree> *) vec_defs.pop ());
+ vec_oprnds2 = *((vec<tree> *) vec_defs.pop ());
+ vec_oprnds1 = *((vec<tree> *) vec_defs.pop ());
+ vec_oprnds0 = *((vec<tree> *) vec_defs.pop ());
- VEC_free (tree, heap, ops);
- VEC_free (slp_void_p, heap, vec_defs);
+ ops.release ();
+ vec_defs.release ();
}
else
{
else
{
vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
- VEC_pop (tree, vec_oprnds0));
+ vec_oprnds0.pop ());
vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
- VEC_pop (tree, vec_oprnds1));
+ vec_oprnds1.pop ());
vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
- VEC_pop (tree, vec_oprnds2));
+ vec_oprnds2.pop ());
vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
- VEC_pop (tree, vec_oprnds3));
+ vec_oprnds3.pop ());
}
if (!slp_node)
{
- VEC_quick_push (tree, vec_oprnds0, vec_cond_lhs);
- VEC_quick_push (tree, vec_oprnds1, vec_cond_rhs);
- VEC_quick_push (tree, vec_oprnds2, vec_then_clause);
- VEC_quick_push (tree, vec_oprnds3, vec_else_clause);
+ vec_oprnds0.quick_push (vec_cond_lhs);
+ vec_oprnds1.quick_push (vec_cond_rhs);
+ vec_oprnds2.quick_push (vec_then_clause);
+ vec_oprnds3.quick_push (vec_else_clause);
}
/* Arguments are ready. Create the new vector stmt. */
- FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vec_cond_lhs)
+ FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
{
- vec_cond_rhs = VEC_index (tree, vec_oprnds1, i);
- vec_then_clause = VEC_index (tree, vec_oprnds2, i);
- vec_else_clause = VEC_index (tree, vec_oprnds3, i);
+ vec_cond_rhs = vec_oprnds1[i];
+ vec_then_clause = vec_oprnds2[i];
+ vec_else_clause = vec_oprnds3[i];
vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
vec_cond_lhs, vec_cond_rhs);
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
if (slp_node)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+ SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
if (slp_node)
prev_stmt_info = vinfo_for_stmt (new_stmt);
}
- VEC_free (tree, heap, vec_oprnds0);
- VEC_free (tree, heap, vec_oprnds1);
- VEC_free (tree, heap, vec_oprnds2);
- VEC_free (tree, heap, vec_oprnds3);
+ vec_oprnds0.release ();
+ vec_oprnds1.release ();
+ vec_oprnds2.release ();
+ vec_oprnds3.release ();
return true;
}
else
STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
- STMT_VINFO_SAME_ALIGN_REFS (res) = NULL;
+ STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
STMT_SLP_TYPE (res) = loop_vect;
GROUP_FIRST_ELEMENT (res) = NULL;
GROUP_NEXT_ELEMENT (res) = NULL;
void
init_stmt_vec_info_vec (void)
{
- gcc_assert (!stmt_vec_info_vec);
- stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
+ gcc_assert (!stmt_vec_info_vec.exists ());
+ stmt_vec_info_vec.create (50);
}
void
free_stmt_vec_info_vec (void)
{
- gcc_assert (stmt_vec_info_vec);
- VEC_free (vec_void_p, heap, stmt_vec_info_vec);
+ gcc_assert (stmt_vec_info_vec.exists ());
+ stmt_vec_info_vec.release ();
}
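
Because these vectors are now held by value, emptiness can no longer be
tested by comparing a pointer against NULL; the two functions above switch
to the exists () predicate instead.  A sketch of the lifecycle, assuming
the vec.h from this patch is in scope:

    vec<vec_void_p> infos = vec<vec_void_p>();  /* exists () is false */
    gcc_assert (!infos.exists ());
    infos.create (50);                   /* allocates; exists () is true */
    gcc_assert (infos.exists ());
    infos.release ();                    /* frees; back to non-existent */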
}
}
- VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
+ STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
set_vinfo_for_stmt (stmt, NULL);
free (stmt_info);
}
tree vectype_out, tree vectype_in,
enum tree_code *code1, enum tree_code *code2,
int *multi_step_cvt,
- VEC (tree, heap) **interm_types)
+ vec<tree> *interm_types)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
intermediate steps in promotion sequence. We try
MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
not. */
- *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
+ interm_types->create (MAX_INTERM_CVT_STEPS);
for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
{
intermediate_mode = insn_data[icode1].operand[0].mode;
== CODE_FOR_nothing))
break;
- VEC_quick_push (tree, *interm_types, intermediate_type);
+ interm_types->quick_push (intermediate_type);
(*multi_step_cvt)++;
if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
prev_mode = intermediate_mode;
}
- VEC_free (tree, heap, *interm_types);
+ interm_types->release ();
return false;
}
supportable_narrowing_operation (enum tree_code code,
tree vectype_out, tree vectype_in,
enum tree_code *code1, int *multi_step_cvt,
- VEC (tree, heap) **interm_types)
+ vec<tree> *interm_types)
{
enum machine_mode vec_mode;
enum insn_code icode1;
/* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
intermediate steps in promotion sequence. We try
MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
- *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
+ interm_types->create (MAX_INTERM_CVT_STEPS);
for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
{
intermediate_mode = insn_data[icode1].operand[0].mode;
== CODE_FOR_nothing))
break;
- VEC_quick_push (tree, *interm_types, intermediate_type);
+ interm_types->quick_push (intermediate_type);
(*multi_step_cvt)++;
if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
optab1 = interm_optab;
}
- VEC_free (tree, heap, *interm_types);
+ interm_types->release ();
return false;
}
LOC vect_location;
/* Vector mapping GIMPLE stmt to stmt_vec_info. */
-VEC(vec_void_p,heap) *stmt_vec_info_vec;
+vec<vec_void_p> stmt_vec_info_vec;
\f
/* Function vectorize_loops.
int misalign;
} stmt_info_for_cost;
-DEF_VEC_O (stmt_info_for_cost);
-DEF_VEC_ALLOC_O (stmt_info_for_cost, heap);
-typedef VEC(stmt_info_for_cost, heap) *stmt_vector_for_cost;
+typedef vec<stmt_info_for_cost> stmt_vector_for_cost;
static inline void
add_stmt_info_to_vec (stmt_vector_for_cost *stmt_cost_vec, int count,
si.kind = kind;
si.stmt = stmt;
si.misalign = misalign;
- VEC_safe_push (stmt_info_for_cost, heap, *stmt_cost_vec, si);
+ stmt_cost_vec->safe_push (si);
}
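
With the template API an object vector like this no longer needs the
DEF_VEC_O/DEF_VEC_ALLOC_O registration deleted above: any POD type can
instantiate vec<T> directly, and safe_push copies the element in.  An
illustrative use of the stmt_vector_for_cost typedef (field values are
made up; vector_stmt is one of the vect_cost_for_stmt enumerators):

    stmt_vector_for_cost costs = vec<stmt_info_for_cost>();
    stmt_info_for_cost si = {};
    si.kind = vector_stmt;
    si.stmt = NULL;
    si.misalign = 0;
    costs.safe_push (si);       /* grows on demand, then copies SI in */
    costs.release ();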
/************************************************************************
SLP
************************************************************************/
typedef void *slp_void_p;
-DEF_VEC_P (slp_void_p);
-DEF_VEC_ALLOC_P (slp_void_p, heap);
/* A computation tree of an SLP instance. Each node corresponds to a group of
stmts to be packed in a SIMD stmt. */
typedef struct _slp_tree {
 /* Nodes that contain def-stmts of the operands of this node's stmts. */
- VEC (slp_void_p, heap) *children;
+ vec<slp_void_p> children;
/* A group of scalar stmts to be vectorized together. */
- VEC (gimple, heap) *stmts;
+ vec<gimple> stmts;
/* Vectorized stmt/s. */
- VEC (gimple, heap) *vec_stmts;
+ vec<gimple> vec_stmts;
/* Number of vector stmts that are created to replace the group of scalar
stmts. It is calculated during the transformation phase as the number of
scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
unsigned int vec_stmts_size;
} *slp_tree;
-DEF_VEC_P(slp_tree);
-DEF_VEC_ALLOC_P(slp_tree, heap);
/* SLP instance is a sequence of stmts in a loop that can be packed into
SIMD stmts. */
 /* Loads permutation relative to the stores, NULL if there is no
permutation. */
- VEC (int, heap) *load_permutation;
+ vec<int> load_permutation;
/* The group of nodes that contain loads of this SLP instance. */
- VEC (slp_tree, heap) *loads;
+ vec<slp_tree> loads;
/* The first scalar load of the instance. The created vector loads will be
inserted before this statement. */
gimple first_load;
} *slp_instance;
-DEF_VEC_P(slp_instance);
-DEF_VEC_ALLOC_P(slp_instance, heap);
/* Access Functions. */
#define SLP_INSTANCE_TREE(S) (S)->root
typedef struct _slp_oprnd_info
{
/* Def-stmts for the operands. */
- VEC (gimple, heap) *def_stmts;
+ vec<gimple> def_stmts;
/* Information about the first statement, its vector def-type, type, the
operand itself in case it's constant, and an indication if it's a pattern
stmt. */
bool first_pattern;
} *slp_oprnd_info;
-DEF_VEC_P(slp_oprnd_info);
-DEF_VEC_ALLOC_P(slp_oprnd_info, heap);
typedef struct _vect_peel_info
int ptr_mask;
/* The loop nest in which the data dependences are computed. */
- VEC (loop_p, heap) *loop_nest;
+ vec<loop_p> loop_nest;
/* All data references in the loop. */
- VEC (data_reference_p, heap) *datarefs;
+ vec<data_reference_p> datarefs;
/* All data dependences in the loop. */
- VEC (ddr_p, heap) *ddrs;
+ vec<ddr_p> ddrs;
/* Data Dependence Relations defining address ranges that are candidates
for a run-time aliasing check. */
- VEC (ddr_p, heap) *may_alias_ddrs;
+ vec<ddr_p> may_alias_ddrs;
/* Statements in the loop that have data references that are candidates for a
runtime (loop versioning) misalignment check. */
- VEC(gimple,heap) *may_misalign_stmts;
+ vec<gimple> may_misalign_stmts;
/* All interleaving chains of stores in the loop, represented by the first
stmt in the chain. */
- VEC(gimple, heap) *grouped_stores;
+ vec<gimple> grouped_stores;
/* All SLP instances in the loop. This is a subset of the set of GROUP_STORES
of the loop. */
- VEC(slp_instance, heap) *slp_instances;
+ vec<slp_instance> slp_instances;
 /* The unrolling factor needed to SLP the loop. In case pure SLP is
applied to the loop, i.e., no unrolling is needed, this is 1. */
unsigned slp_unrolling_factor;
/* Reduction cycles detected in the loop. Used in loop-aware SLP. */
- VEC (gimple, heap) *reductions;
+ vec<gimple> reductions;
/* All reduction chains in the loop, represented by the first
stmt in the chain. */
- VEC (gimple, heap) *reduction_chains;
+ vec<gimple> reduction_chains;
/* Hash table used to choose the best peeling option. */
htab_t peeling_htab;
#define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \
-VEC_length (gimple, (L)->may_misalign_stmts) > 0
+(L)->may_misalign_stmts.length () > 0
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \
-VEC_length (ddr_p, (L)->may_alias_ddrs) > 0
+(L)->may_alias_ddrs.length () > 0
#define NITERS_KNOWN_P(n) \
(host_integerp ((n),0) \
basic_block bb;
/* All interleaving chains of stores in the basic block, represented by the
first stmt in the chain. */
- VEC(gimple, heap) *grouped_stores;
+ vec<gimple> grouped_stores;
/* All SLP instances in the basic block. This is a subset of the set of
GROUP_STORES of the basic block. */
- VEC(slp_instance, heap) *slp_instances;
+ vec<slp_instance> slp_instances;
/* All data references in the basic block. */
- VEC (data_reference_p, heap) *datarefs;
+ vec<data_reference_p> datarefs;
/* All data dependences in the basic block. */
- VEC (ddr_p, heap) *ddrs;
+ vec<ddr_p> ddrs;
/* Cost data used by the target cost model. */
void *target_cost_data;
typedef struct data_reference *dr_p;
-DEF_VEC_P(dr_p);
-DEF_VEC_ALLOC_P(dr_p,heap);
typedef struct _stmt_vec_info {
/* List of datarefs that are known to have the same alignment as the dataref
of this stmt. */
- VEC(dr_p,heap) *same_align_refs;
+ vec<dr_p> same_align_refs;
/* Classify the def of this stmt. */
enum vect_def_type def_type;
/* Avoid GTY(()) on stmt_vec_info. */
typedef void *vec_void_p;
-DEF_VEC_P (vec_void_p);
-DEF_VEC_ALLOC_P (vec_void_p, heap);
-extern VEC(vec_void_p,heap) *stmt_vec_info_vec;
+extern vec<vec_void_p> stmt_vec_info_vec;
void init_stmt_vec_info_vec (void);
void free_stmt_vec_info_vec (void);
if (uid == 0)
return NULL;
- return (stmt_vec_info) VEC_index (vec_void_p, stmt_vec_info_vec, uid - 1);
+ return (stmt_vec_info) stmt_vec_info_vec[uid - 1];
}
/* Set vectorizer information INFO for STMT. */
if (uid == 0)
{
gcc_checking_assert (info);
- uid = VEC_length (vec_void_p, stmt_vec_info_vec) + 1;
+ uid = stmt_vec_info_vec.length () + 1;
gimple_set_uid (stmt, uid);
- VEC_safe_push (vec_void_p, heap, stmt_vec_info_vec, (vec_void_p) info);
+ stmt_vec_info_vec.safe_push ((vec_void_p) info);
}
else
- VEC_replace (vec_void_p, stmt_vec_info_vec, uid - 1, (vec_void_p) info);
+ stmt_vec_info_vec[uid - 1] = (vec_void_p) info;
}
/* Return the earlier statement between STMT1 and STMT2. */
if (uid1 == 0 || uid2 == 0)
return NULL;
- gcc_checking_assert (uid1 <= VEC_length (vec_void_p, stmt_vec_info_vec)
- && uid2 <= VEC_length (vec_void_p, stmt_vec_info_vec));
+ gcc_checking_assert (uid1 <= stmt_vec_info_vec.length ()
+ && uid2 <= stmt_vec_info_vec.length ());
if (uid1 < uid2)
return stmt1;
if (uid1 == 0 || uid2 == 0)
return NULL;
- gcc_assert (uid1 <= VEC_length (vec_void_p, stmt_vec_info_vec));
- gcc_assert (uid2 <= VEC_length (vec_void_p, stmt_vec_info_vec));
+ gcc_assert (uid1 <= stmt_vec_info_vec.length ());
+ gcc_assert (uid2 <= stmt_vec_info_vec.length ());
if (uid1 > uid2)
return stmt1;
tree *, enum vect_def_type *, tree *);
extern bool supportable_widening_operation (enum tree_code, gimple, tree, tree,
enum tree_code *, enum tree_code *,
- int *, VEC (tree, heap) **);
+ int *, vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
enum tree_code *,
- int *, VEC (tree, heap) **);
+ int *, vec<tree> *);
extern stmt_vec_info new_stmt_vec_info (gimple stmt, loop_vec_info,
bb_vec_info);
extern void free_stmt_vec_info (gimple stmt);
extern void vect_get_store_cost (struct data_reference *, int,
unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (enum tree_code, tree);
-extern void vect_get_vec_defs (tree, tree, gimple, VEC (tree, heap) **,
- VEC (tree, heap) **, slp_tree, int);
+extern void vect_get_vec_defs (tree, tree, gimple, vec<tree> *,
+ vec<tree> *, slp_tree, int);
extern tree vect_gen_perm_mask (tree, unsigned char *);
/* In tree-vect-data-refs.c. */
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_grouped_load_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT);
-extern void vect_permute_store_chain (VEC(tree,heap) *,unsigned int, gimple,
- gimple_stmt_iterator *, VEC(tree,heap) **);
+extern void vect_permute_store_chain (vec<tree>, unsigned int, gimple,
+ gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (gimple, gimple_stmt_iterator *, tree *,
enum dr_alignment_support, tree,
struct loop **);
-extern void vect_transform_grouped_load (gimple, VEC(tree,heap) *, int,
+extern void vect_transform_grouped_load (gimple, vec<tree>, int,
gimple_stmt_iterator *);
-extern void vect_record_grouped_load_vectors (gimple, VEC(tree,heap) *);
+extern void vect_record_grouped_load_vectors (gimple, vec<tree>);
extern int vect_get_place_in_interleaving_chain (gimple, gimple);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_create_addr_base_for_vector_ref (gimple, gimple_seq *,
/* In tree-vect-slp.c. */
extern void vect_free_slp_instance (slp_instance);
-extern bool vect_transform_slp_perm_load (gimple, VEC (tree, heap) *,
+extern bool vect_transform_slp_perm_load (gimple, vec<tree>,
gimple_stmt_iterator *, int,
slp_instance, bool);
extern bool vect_schedule_slp (loop_vec_info, bb_vec_info);
extern bool vect_analyze_slp (loop_vec_info, bb_vec_info);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
-extern void vect_get_slp_defs (VEC (tree, heap) *, slp_tree,
- VEC (slp_void_p, heap) **, int);
+extern void vect_get_slp_defs (vec<tree>, slp_tree,
+ vec<slp_void_p> *, int);
extern LOC find_bb_location (basic_block);
extern bb_vec_info vect_slp_analyze_bb (basic_block);
/* Pattern recognition functions.
Additional pattern recognition functions can (and will) be added
in the future. */
-typedef gimple (* vect_recog_func_ptr) (VEC (gimple, heap) **, tree *, tree *);
+typedef gimple (* vect_recog_func_ptr) (vec<gimple> *, tree *, tree *);
#define NUM_PATTERNS 10
void vect_pattern_recog (loop_vec_info, bb_vec_info);
tree vec;
} switch_update;
-static VEC (edge, heap) *to_remove_edges;
-DEF_VEC_O(switch_update);
-DEF_VEC_ALLOC_O(switch_update, heap);
-static VEC (switch_update, heap) *to_update_switch_stmts;
+static vec<edge> to_remove_edges;
+static vec<switch_update> to_update_switch_stmts;
/* Return the maximum value for TYPE. */
{
fprintf (dump_file, "removing unreachable case label\n");
}
- VEC_safe_push (edge, heap, to_remove_edges, e);
+ to_remove_edges.safe_push (e);
e->flags &= ~EDGE_EXECUTABLE;
}
/* And queue an update for the stmt. */
su.stmt = stmt;
su.vec = vec2;
- VEC_safe_push (switch_update, heap, to_update_switch_stmts, su);
+ to_update_switch_stmts.safe_push (su);
return false;
}
A NULL entry is used to mark the end of pairs which need to be
restored. */
-static VEC(tree,heap) *equiv_stack;
+static vec<tree> equiv_stack;
/* A trivial wrapper so that we can present the generic jump threading
code with a simple API for simplifying statements. STMT is the
/* Do not thread across edges we are about to remove. Just marking
them as EDGE_DFS_BACK will do. */
- FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
+ FOR_EACH_VEC_ELT (to_remove_edges, i, e)
e->flags |= EDGE_DFS_BACK;
/* Allocate our unwinder stack to unwind any temporary equivalences
that might be recorded. */
- equiv_stack = VEC_alloc (tree, heap, 20);
+ equiv_stack.create (20);
/* To avoid lots of silly node creation, we create a single
conditional and just modify it in-place when attempting to
finalize_jump_threads (void)
{
thread_through_all_blocks (false);
- VEC_free (tree, heap, equiv_stack);
+ equiv_stack.release ();
}
insert_range_assertions ();
- to_remove_edges = VEC_alloc (edge, heap, 10);
- to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
+ to_remove_edges.create (10);
+ to_update_switch_stmts.create (5);
threadedge_initialize_values ();
vrp_initialize ();
/* Remove dead edges from SWITCH_EXPR optimization. This leaves the
CFG in a broken state and requires a cfg_cleanup run. */
- FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
+ FOR_EACH_VEC_ELT (to_remove_edges, i, e)
remove_edge (e);
/* Update SWITCH_EXPR case label vector. */
- FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
+ FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
{
size_t j;
size_t n = TREE_VEC_LENGTH (su->vec);
CASE_HIGH (label) = NULL_TREE;
}
- if (VEC_length (edge, to_remove_edges) > 0)
+ if (to_remove_edges.length () > 0)
free_dominance_info (CDI_DOMINATORS);
- VEC_free (edge, heap, to_remove_edges);
- VEC_free (switch_update, heap, to_update_switch_stmts);
+ to_remove_edges.release ();
+ to_update_switch_stmts.release ();
threadedge_finalize_values ();
scev_finalize ();
{
case TREE_BINFO:
return (offsetof (struct tree_binfo, base_binfos)
- + VEC_embedded_size (tree, BINFO_N_BASE_BINFOS (node)));
+ + vec<tree, va_gc>
+ ::embedded_size (BINFO_N_BASE_BINFOS (node)));
case TREE_VEC:
return (sizeof (struct tree_vec)
are extracted from V, a vector of CONSTRUCTOR_ELT. */
tree
-build_vector_from_ctor (tree type, VEC(constructor_elt,gc) *v)
+build_vector_from_ctor (tree type, vec<constructor_elt, va_gc> *v)
{
tree *vec = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (type));
unsigned HOST_WIDE_INT idx;
}
else
{
- VEC(constructor_elt, gc) *v = VEC_alloc (constructor_elt, gc, nunits);
+ vec<constructor_elt, va_gc> *v;
+ vec_alloc (v, nunits);
for (i = 0; i < nunits; ++i)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, sc);
return build_constructor (vectype, v);
}
/* Return a new CONSTRUCTOR node whose type is TYPE and whose values
- are in the VEC pointed to by VALS. */
+ are in the vec pointed to by VALS. */
tree
-build_constructor (tree type, VEC(constructor_elt,gc) *vals)
+build_constructor (tree type, vec<constructor_elt, va_gc> *vals)
{
tree c = make_node (CONSTRUCTOR);
unsigned int i;
TREE_TYPE (c) = type;
CONSTRUCTOR_ELTS (c) = vals;
- FOR_EACH_VEC_ELT (constructor_elt, vals, i, elt)
+ FOR_EACH_VEC_SAFE_ELT (vals, i, elt)
{
/* Mostly ctors will have elts that don't have side-effects, so
the usual case is to scan all the elements. Hence a single
tree
build_constructor_single (tree type, tree index, tree value)
{
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
constructor_elt elt = {index, value};
- v = VEC_alloc (constructor_elt, gc, 1);
- VEC_quick_push (constructor_elt, v, elt);
+ vec_alloc (v, 1);
+ v->quick_push (elt);
return build_constructor (type, v);
}
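
For GC-allocated object vectors such as the constructor_elt list, the patch
pairs vec_alloc (allocate exactly N slots in GC memory through the vector
pointer) with quick_push (no growth check, valid because the space was
preallocated).  A sketch mirroring build_constructor_single above
(some_type stands in for a real type node):

    vec<constructor_elt, va_gc> *v;
    vec_alloc (v, 2);                    /* room for exactly two elements */
    constructor_elt e0 = {NULL_TREE, integer_zero_node};
    constructor_elt e1 = {NULL_TREE, integer_one_node};
    v->quick_push (e0);                  /* no reallocation can be needed */
    v->quick_push (e1);
    tree ctor = build_constructor (some_type, v);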
build_constructor_from_list (tree type, tree vals)
{
tree t;
- VEC(constructor_elt,gc) *v = NULL;
+ vec<constructor_elt, va_gc> *v = NULL;
if (vals)
{
- v = VEC_alloc (constructor_elt, gc, list_length (vals));
+ vec_alloc (v, list_length (vals));
for (t = vals; t; t = TREE_CHAIN (t))
CONSTRUCTOR_APPEND_ELT (v, TREE_PURPOSE (t), TREE_VALUE (t));
}
{
tree t;
size_t length = (offsetof (struct tree_binfo, base_binfos)
- + VEC_embedded_size (tree, base_binfos));
+ + vec<tree, va_gc>::embedded_size (base_binfos));
record_node_allocation_statistics (TREE_BINFO, length);
TREE_SET_CODE (t, TREE_BINFO);
- VEC_embedded_init (tree, BINFO_BASE_BINFOS (t), base_binfos);
+ BINFO_BASE_BINFOS (t)->embedded_init (base_binfos);
return t;
}
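
TREE_BINFO is the one spot in these hunks that uses the embedded (vl_embed)
layout: the vector lives inside the tree node itself, so its size is folded
into the node's allocation with embedded_size and its header is set up with
embedded_init, replacing VEC_embedded_size/VEC_embedded_init.  A sketch of
the allocation arithmetic (ggc_alloc_cleared stands in for whichever ggc
allocator make_tree_binfo actually calls):

    size_t length = offsetof (struct tree_binfo, base_binfos)
                    + vec<tree, va_gc>::embedded_size (n_bases);
    tree t = (tree) ggc_alloc_cleared (length);
    TREE_SET_CODE (t, TREE_BINFO);
    BINFO_BASE_BINFOS (t)->embedded_init (n_bases); /* capacity n_bases, length 0 */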
/* Return true if ELEM is in V. */
bool
-vec_member (const_tree elem, VEC(tree,gc) *v)
+vec_member (const_tree elem, vec<tree, va_gc> *v)
{
unsigned ix;
tree t;
- FOR_EACH_VEC_ELT (tree, v, ix, t)
+ FOR_EACH_VEC_SAFE_ELT (v, ix, t)
if (elem == t)
return true;
return false;
/* Build a chain of TREE_LIST nodes from a vector. */
tree
-build_tree_list_vec_stat (const VEC(tree,gc) *vec MEM_STAT_DECL)
+build_tree_list_vec_stat (const vec<tree, va_gc> *vec MEM_STAT_DECL)
{
tree ret = NULL_TREE;
tree *pp = &ret;
unsigned int i;
tree t;
- FOR_EACH_VEC_ELT (tree, vec, i, t)
+ FOR_EACH_VEC_SAFE_ELT (vec, i, t)
{
*pp = build_tree_list_stat (NULL, t PASS_MEM_STAT);
pp = &TREE_CHAIN (*pp);
/* Return the values of the elements of a CONSTRUCTOR as a vector of
trees. */
-VEC(tree,gc) *
+vec<tree, va_gc> *
ctor_to_vec (tree ctor)
{
- VEC(tree, gc) *vec = VEC_alloc (tree, gc, CONSTRUCTOR_NELTS (ctor));
+ vec<tree, va_gc> *vec;
+ vec_alloc (vec, CONSTRUCTOR_NELTS (ctor));
unsigned int ix;
tree val;
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (ctor), ix, val)
- VEC_quick_push (tree, vec, val);
+ vec->quick_push (val);
return vec;
}
/* Push tree EXP onto vector QUEUE if it is not already present. */
static void
-push_without_duplicates (tree exp, VEC (tree, heap) **queue)
+push_without_duplicates (tree exp, vec<tree> *queue)
{
unsigned int i;
tree iter;
- FOR_EACH_VEC_ELT (tree, *queue, i, iter)
+ FOR_EACH_VEC_ELT (*queue, i, iter)
if (simple_cst_equal (iter, exp) == 1)
break;
if (!iter)
- VEC_safe_push (tree, heap, *queue, exp);
+ queue->safe_push (exp);
}
/* Given a tree EXP, find all occurrences of references to fields
argument list. */
void
-find_placeholder_in_expr (tree exp, VEC (tree, heap) **refs)
+find_placeholder_in_expr (tree exp, vec<tree> *refs)
{
enum tree_code code = TREE_CODE (exp);
tree inner;
}
/* Similar to build_nt, but for creating a CALL_EXPR object with a
- tree VEC. */
+ tree vec. */
tree
-build_nt_call_vec (tree fn, VEC(tree,gc) *args)
+build_nt_call_vec (tree fn, vec<tree, va_gc> *args)
{
tree ret, t;
unsigned int ix;
- ret = build_vl_exp (CALL_EXPR, VEC_length (tree, args) + 3);
+ ret = build_vl_exp (CALL_EXPR, vec_safe_length (args) + 3);
CALL_EXPR_FN (ret) = fn;
CALL_EXPR_STATIC_CHAIN (ret) = NULL_TREE;
- FOR_EACH_VEC_ELT (tree, args, ix, t)
+ FOR_EACH_VEC_SAFE_ELT (args, ix, t)
CALL_EXPR_ARG (ret, ix) = t;
return ret;
}
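
Garbage-collected vectors keep the pointer form (vec<T, va_gc> *) because
GGC must own the allocation, so they go through the free functions
vec_safe_push/vec_safe_length and the FOR_EACH_VEC_SAFE_ELT walker, all of
which tolerate a NULL pointer.  A minimal sketch of the idiom used by
build_nt_call_vec above:

    vec<tree, va_gc> *args = NULL;            /* NULL means no elements yet */
    gcc_assert (vec_safe_length (args) == 0); /* safe on NULL */
    vec_safe_push (args, integer_zero_node);  /* first push allocates in GC memory */

    unsigned int ix;
    tree t;
    FOR_EACH_VEC_SAFE_ELT (args, ix, t)       /* safe even when ARGS is NULL */
      gcc_assert (t != NULL_TREE);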
return decl;
}
-VEC(tree,gc) *all_translation_units;
+vec<tree, va_gc> *all_translation_units;
/* Builds a new translation-unit decl with name NAME, queues it in the
global list of translation-unit decls and returns it. */
tree tu = build_decl (UNKNOWN_LOCATION, TRANSLATION_UNIT_DECL,
name, NULL_TREE);
TRANSLATION_UNIT_LANGUAGE (tu) = lang_hooks.name;
- VEC_safe_push (tree, gc, all_translation_units, tu);
+ vec_safe_push (all_translation_units, tu);
return tu;
}
BINFO_INHERITANCE_CHAIN (binfo) = NULL_TREE;
BINFO_SUBVTT_INDEX (binfo) = NULL_TREE;
- FOR_EACH_VEC_ELT (tree, BINFO_BASE_BINFOS (binfo), i, t)
+ FOR_EACH_VEC_ELT (*BINFO_BASE_BINFOS (binfo), i, t)
free_lang_data_in_binfo (t);
}
struct free_lang_data_d
{
/* Worklist to avoid excessive recursion. */
- VEC(tree,heap) *worklist;
+ vec<tree> worklist;
/* Set of traversed objects. Used to avoid duplicate visits. */
struct pointer_set_t *pset;
/* Array of symbols to process with free_lang_data_in_decl. */
- VEC(tree,heap) *decls;
+ vec<tree> decls;
/* Array of types to process with free_lang_data_in_type. */
- VEC(tree,heap) *types;
+ vec<tree> types;
};
{
if (DECL_P (t))
{
- VEC_safe_push (tree, heap, fld->decls, t);
+ fld->decls.safe_push (t);
if (debug_info_level > DINFO_LEVEL_TERSE)
save_debug_info_for_decl (t);
}
else if (TYPE_P (t))
{
- VEC_safe_push (tree, heap, fld->types, t);
+ fld->types.safe_push (t);
if (debug_info_level > DINFO_LEVEL_TERSE)
save_debug_info_for_type (t);
}
fld_worklist_push (tree t, struct free_lang_data_d *fld)
{
if (t && !is_lang_specific (t) && !pointer_set_contains (fld->pset, t))
- VEC_safe_push (tree, heap, fld->worklist, (t));
+ fld->worklist.safe_push (t);
}
{
unsigned i;
tree tem;
- for (i = 0; VEC_iterate (tree, BINFO_BASE_BINFOS (TYPE_BINFO (t)),
- i, tem); ++i)
+ FOR_EACH_VEC_ELT (*BINFO_BASE_BINFOS (TYPE_BINFO (t)), i, tem)
fld_worklist_push (TREE_TYPE (tem), fld);
tem = BINFO_VIRTUALS (TYPE_BINFO (t));
if (tem
{
if (!pointer_set_contains (fld->pset, t))
walk_tree (&t, find_decls_types_r, fld, fld->pset);
- if (VEC_empty (tree, fld->worklist))
+ if (fld->worklist.is_empty ())
break;
- t = VEC_pop (tree, fld->worklist);
+ t = fld->worklist.pop ();
}
}
/* Initialize sets and arrays to store referenced decls and types. */
fld.pset = pointer_set_create ();
- fld.worklist = NULL;
- fld.decls = VEC_alloc (tree, heap, 100);
- fld.types = VEC_alloc (tree, heap, 100);
+ fld.worklist.create (0);
+ fld.decls.create (100);
+ fld.types.create (100);
/* Find decls and types in the body of every function in the callgraph. */
FOR_EACH_FUNCTION (n)
find_decls_types_in_node (n, &fld);
- FOR_EACH_VEC_ELT (alias_pair, alias_pairs, i, p)
+ FOR_EACH_VEC_SAFE_ELT (alias_pairs, i, p)
find_decls_types (p->decl, &fld);
/* Find decls and types in every varpool symbol. */
/* Set the assembler name on every decl found. We need to do this
now because free_lang_data_in_decl will invalidate data needed
for mangling. This breaks mangling on interdependent decls. */
- FOR_EACH_VEC_ELT (tree, fld.decls, i, t)
+ FOR_EACH_VEC_ELT (fld.decls, i, t)
assign_assembler_name_if_neeeded (t);
/* Traverse every decl found freeing its language data. */
- FOR_EACH_VEC_ELT (tree, fld.decls, i, t)
+ FOR_EACH_VEC_ELT (fld.decls, i, t)
free_lang_data_in_decl (t);
/* Traverse every type found freeing its language data. */
- FOR_EACH_VEC_ELT (tree, fld.types, i, t)
+ FOR_EACH_VEC_ELT (fld.types, i, t)
free_lang_data_in_type (t);
pointer_set_destroy (fld.pset);
- VEC_free (tree, heap, fld.worklist);
- VEC_free (tree, heap, fld.decls);
- VEC_free (tree, heap, fld.types);
+ fld.worklist.release ();
+ fld.decls.release ();
+ fld.types.release ();
}
/* Lookup a vector of debug arguments for FROM, and return it if we
find one. */
-VEC(tree, gc) **
+vec<tree, va_gc> **
decl_debug_args_lookup (tree from)
{
struct tree_vec_map *h, in;
/* Insert a mapping FROM->empty vector of debug arguments in the value
expression hashtable. */
-VEC(tree, gc) **
+vec<tree, va_gc> **
decl_debug_args_insert (tree from)
{
struct tree_vec_map *h;
case CONSTRUCTOR:
{
unsigned HOST_WIDE_INT idx;
- VEC(constructor_elt, gc) *v1 = CONSTRUCTOR_ELTS (t1);
- VEC(constructor_elt, gc) *v2 = CONSTRUCTOR_ELTS (t2);
+ vec<constructor_elt, va_gc> *v1 = CONSTRUCTOR_ELTS (t1);
+ vec<constructor_elt, va_gc> *v2 = CONSTRUCTOR_ELTS (t2);
- if (VEC_length (constructor_elt, v1) != VEC_length (constructor_elt, v2))
+ if (vec_safe_length (v1) != vec_safe_length (v2))
return false;
- for (idx = 0; idx < VEC_length (constructor_elt, v1); ++idx)
+ for (idx = 0; idx < vec_safe_length (v1); ++idx)
/* ??? Should we handle also fields here? */
- if (!simple_cst_equal (VEC_index (constructor_elt, v1, idx).value,
- VEC_index (constructor_elt, v2, idx).value))
+ if (!simple_cst_equal ((*v1)[idx].value, (*v2)[idx].value))
return false;
return true;
}
return t;
}
-/* Like build_call_array, but takes a VEC. */
+/* Like build_call_array, but takes a vec. */
tree
-build_call_vec (tree return_type, tree fn, VEC(tree,gc) *args)
+build_call_vec (tree return_type, tree fn, vec<tree, va_gc> *args)
{
tree ret, t;
unsigned int ix;
- ret = build_call_1 (return_type, fn, VEC_length (tree, args));
- FOR_EACH_VEC_ELT (tree, args, ix, t)
+ ret = build_call_1 (return_type, fn, vec_safe_length (args));
+ FOR_EACH_VEC_SAFE_ELT (args, ix, t)
CALL_EXPR_ARG (ret, ix) = t;
process_call_operands (ret);
return ret;
unsigned HOST_WIDE_INT idx;
constructor_elt *ce;
- for (idx = 0;
- VEC_iterate(constructor_elt, CONSTRUCTOR_ELTS (*tp), idx, ce);
- idx++)
+ for (idx = 0; vec_safe_iterate (CONSTRUCTOR_ELTS (*tp), idx, &ce); idx++)
WALK_SUBTREE (ce->value);
}
break;
#include "input.h"
#include "statistics.h"
#include "vec.h"
-#include "vecir.h"
#include "double-int.h"
#include "real.h"
#include "fixed-value.h"
} alias_pair;
/* Define gc'd vector type. */
-DEF_VEC_O(alias_pair);
-DEF_VEC_ALLOC_O(alias_pair,gc);
-extern GTY(()) VEC(alias_pair,gc) * alias_pairs;
+extern GTY(()) vec<alias_pair, va_gc> *alias_pairs;
\f
/* Classify which part of the compiler has defined a given builtin function.
/* In a CONSTRUCTOR node. */
#define CONSTRUCTOR_ELTS(NODE) (CONSTRUCTOR_CHECK (NODE)->constructor.elts)
#define CONSTRUCTOR_ELT(NODE,IDX) \
- (&VEC_index (constructor_elt, CONSTRUCTOR_ELTS (NODE), IDX))
+ (&(*CONSTRUCTOR_ELTS (NODE))[IDX])
#define CONSTRUCTOR_NELTS(NODE) \
- (VEC_length (constructor_elt, CONSTRUCTOR_ELTS (NODE)))
+ (vec_safe_length (CONSTRUCTOR_ELTS (NODE)))
/* Iterate through the vector V of CONSTRUCTOR_ELT elements, yielding the
value of each element (stored within VAL). IX must be a scratch variable
of unsigned integer type. */
#define FOR_EACH_CONSTRUCTOR_VALUE(V, IX, VAL) \
- for (IX = 0; (IX >= VEC_length (constructor_elt, V)) \
+ for (IX = 0; (IX >= vec_safe_length (V)) \
? false \
- : ((VAL = VEC_index (constructor_elt, V, IX).value), \
+ : ((VAL = (*(V))[IX].value), \
true); \
(IX)++)
the value of each element (stored within VAL) and its index (stored
within INDEX). IX must be a scratch variable of unsigned integer type. */
#define FOR_EACH_CONSTRUCTOR_ELT(V, IX, INDEX, VAL) \
- for (IX = 0; (IX >= VEC_length (constructor_elt, V)) \
+ for (IX = 0; (IX >= vec_safe_length (V)) \
? false \
- : (((void) (VAL = VEC_index (constructor_elt, V, IX).value)), \
- (INDEX = VEC_index (constructor_elt, V, IX).index), \
+ : (((void) (VAL = (*V)[IX].value)), \
+ (INDEX = (*V)[IX].index), \
true); \
(IX)++)
#define CONSTRUCTOR_APPEND_ELT(V, INDEX, VALUE) \
do { \
constructor_elt _ce___ = {INDEX, VALUE}; \
- VEC_safe_push (constructor_elt, gc, V, _ce___); \
+ vec_safe_push ((V), _ce___); \
} while (0)
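For illustration, a sketch of how these constructor macros compose after the conversion. The element values are placeholders, 'type' is assumed in scope, and build_constructor is the tree.h entry point converted a little further on in this patch:

    vec<constructor_elt, va_gc> *elts = NULL;  /* NULL is a valid empty vector.  */
    CONSTRUCTOR_APPEND_ELT (elts, NULL_TREE, integer_zero_node);
    CONSTRUCTOR_APPEND_ELT (elts, NULL_TREE, integer_one_node);
    tree ctor = build_constructor (type, elts);

    unsigned ix;
    tree val;
    FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, val)
      gcc_assert (val != NULL_TREE);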
/* True if NODE, a FIELD_DECL, is to be processed as a bitfield for
tree value;
} constructor_elt;
-DEF_VEC_O(constructor_elt);
-DEF_VEC_ALLOC_O(constructor_elt,gc);
struct GTY(()) tree_constructor {
struct tree_typed typed;
- VEC(constructor_elt,gc) *elts;
+ vec<constructor_elt, va_gc> *elts;
};
/* Define fields and accessors for some nodes that represent expressions. */
#define BLOCK_NONLOCALIZED_VARS(NODE) \
(BLOCK_CHECK (NODE)->block.nonlocalized_vars)
#define BLOCK_NUM_NONLOCALIZED_VARS(NODE) \
- VEC_length (tree, BLOCK_NONLOCALIZED_VARS (NODE))
-#define BLOCK_NONLOCALIZED_VAR(NODE,N) \
- VEC_index (tree, BLOCK_NONLOCALIZED_VARS (NODE), N)
+ vec_safe_length (BLOCK_NONLOCALIZED_VARS (NODE))
+#define BLOCK_NONLOCALIZED_VAR(NODE,N) (*BLOCK_NONLOCALIZED_VARS (NODE))[N]
#define BLOCK_SUBBLOCKS(NODE) (BLOCK_CHECK (NODE)->block.subblocks)
#define BLOCK_SUPERCONTEXT(NODE) (BLOCK_CHECK (NODE)->block.supercontext)
#define BLOCK_CHAIN(NODE) (BLOCK_CHECK (NODE)->block.chain)
location_t locus;
tree vars;
- VEC(tree,gc) *nonlocalized_vars;
+ vec<tree, va_gc> *nonlocalized_vars;
tree subblocks;
tree supercontext;
#define BINFO_BASE_BINFOS(NODE) (&TREE_BINFO_CHECK(NODE)->binfo.base_binfos)
/* The number of basetypes for NODE. */
-#define BINFO_N_BASE_BINFOS(NODE) (VEC_length (tree, BINFO_BASE_BINFOS (NODE)))
+#define BINFO_N_BASE_BINFOS(NODE) (BINFO_BASE_BINFOS (NODE)->length ())
/* Accessor macro to get to the Nth base binfo of this binfo. */
#define BINFO_BASE_BINFO(NODE,N) \
- (VEC_index (tree, BINFO_BASE_BINFOS (NODE), (N)))
+ ((*BINFO_BASE_BINFOS (NODE))[(N)])
#define BINFO_BASE_ITERATE(NODE,N,B) \
- (VEC_iterate (tree, BINFO_BASE_BINFOS (NODE), (N), (B)))
+ (BINFO_BASE_BINFOS (NODE)->iterate ((N), &(B)))
#define BINFO_BASE_APPEND(NODE,T) \
- (VEC_quick_push (tree, BINFO_BASE_BINFOS (NODE), (T)))
+ (BINFO_BASE_BINFOS (NODE)->quick_push ((T)))
/* For a BINFO record describing a virtual base class, i.e., one where
TREE_VIA_VIRTUAL is set, this field assists in locating the virtual
#define BINFO_BASE_ACCESSES(NODE) (TREE_BINFO_CHECK(NODE)->binfo.base_accesses)
#define BINFO_BASE_ACCESS(NODE,N) \
- VEC_index (tree, BINFO_BASE_ACCESSES (NODE), (N))
+ (*BINFO_BASE_ACCESSES (NODE))[(N)]
#define BINFO_BASE_ACCESS_APPEND(NODE,T) \
- VEC_quick_push (tree, BINFO_BASE_ACCESSES (NODE), (T))
+ BINFO_BASE_ACCESSES (NODE)->quick_push ((T))
/* The index in the VTT where this subobject's sub-VTT can be found.
NULL_TREE if there is no sub-VTT. */
tree vtable;
tree virtuals;
tree vptr_field;
- VEC(tree,gc) *base_accesses;
+ vec<tree, va_gc> *base_accesses;
tree inheritance;
tree vtt_subvtt;
tree vtt_vptr;
- VEC(tree,none) base_binfos;
+ vec<tree, va_gc> base_binfos;
};
\f
#define DECL_DISREGARD_INLINE_LIMITS(NODE) \
(FUNCTION_DECL_CHECK (NODE)->function_decl.disregard_inline_limits)
-extern VEC(tree, gc) **decl_debug_args_lookup (tree);
-extern VEC(tree, gc) **decl_debug_args_insert (tree);
+extern vec<tree, va_gc> **decl_debug_args_lookup (tree);
+extern vec<tree, va_gc> **decl_debug_args_insert (tree);
/* Nonzero if a FUNCTION_DECL has DEBUG arguments attached to it. */
#define DECL_HAS_DEBUG_ARGS_P(NODE) \
};
/* A vector of all translation-units. */
-extern GTY (()) VEC(tree,gc) *all_translation_units;
+extern GTY (()) vec<tree, va_gc> *all_translation_units;
/* For a TYPE_DECL, holds the "original" type. (TREE_TYPE has the copy.) */
#define DECL_ORIGINAL_TYPE(NODE) \
/* Construct various types of nodes. */
extern tree build_nt (enum tree_code, ...);
-extern tree build_nt_call_vec (tree, VEC(tree,gc) *);
+extern tree build_nt_call_vec (tree, vec<tree, va_gc> *);
extern tree build0_stat (enum tree_code, tree MEM_STAT_DECL);
#define build0(c,t) build0_stat (c,t MEM_STAT_INFO)
#define make_vector(n) make_vector_stat (n MEM_STAT_INFO)
extern tree build_vector_stat (tree, tree * MEM_STAT_DECL);
#define build_vector(t,v) build_vector_stat (t, v MEM_STAT_INFO)
-extern tree build_vector_from_ctor (tree, VEC(constructor_elt,gc) *);
+extern tree build_vector_from_ctor (tree, vec<constructor_elt, va_gc> *);
extern tree build_vector_from_val (tree, tree);
-extern tree build_constructor (tree, VEC(constructor_elt,gc) *);
+extern tree build_constructor (tree, vec<constructor_elt, va_gc> *);
extern tree build_constructor_single (tree, tree, tree);
extern tree build_constructor_from_list (tree, tree);
extern tree build_real_from_int_cst (tree, const_tree);
extern tree build_string (int, const char *);
extern tree build_tree_list_stat (tree, tree MEM_STAT_DECL);
#define build_tree_list(t,q) build_tree_list_stat(t,q MEM_STAT_INFO)
-extern tree build_tree_list_vec_stat (const VEC(tree,gc) * MEM_STAT_DECL);
+extern tree build_tree_list_vec_stat (const vec<tree, va_gc> *MEM_STAT_DECL);
#define build_tree_list_vec(v) build_tree_list_vec_stat (v MEM_STAT_INFO)
extern tree build_decl_stat (location_t, enum tree_code,
tree, tree MEM_STAT_DECL);
#define build_call_array(T1,T2,N,T3)\
build_call_array_loc (UNKNOWN_LOCATION, T1, T2, N, T3)
extern tree build_call_array_loc (location_t, tree, tree, int, const tree *);
-extern tree build_call_vec (tree, tree, VEC(tree,gc) *);
+extern tree build_call_vec (tree, tree, vec<tree, va_gc> *);
/* Construct various nodes representing data types. */
extern tree build_function_type_array (tree, int, tree *);
extern tree build_varargs_function_type_array (tree, int, tree *);
#define build_function_type_vec(RET, V) \
- build_function_type_array (RET, VEC_length (tree, V), VEC_address (tree, V))
+ build_function_type_array (RET, vec_safe_length (V), vec_safe_address (V))
#define build_varargs_function_type_vec(RET, V) \
- build_varargs_function_type_array (RET, VEC_length (tree, V), \
- VEC_address (tree, V))
+ build_varargs_function_type_array (RET, vec_safe_length (V), \
+ vec_safe_address (V))
extern tree build_method_type_directly (tree, tree, tree);
extern tree build_method_type (tree, tree);
extern tree build_offset_type (tree, tree);
extern tree value_member (tree, tree);
extern tree purpose_member (const_tree, tree);
-extern bool vec_member (const_tree, VEC(tree,gc) *);
+extern bool vec_member (const_tree, vec<tree, va_gc> *);
extern tree chain_index (int, tree);
extern int attribute_list_equal (const_tree, const_tree);
tree prev_field;
/* The static variables (i.e., class variables, as opposed to
instance variables) encountered in T. */
- VEC(tree,gc) *pending_statics;
+ vec<tree, va_gc> *pending_statics;
/* Bits remaining in the current alignment group */
int remaining_in_alignment;
/* True if we've seen a packed field that didn't have normal
/* Given a CONSTRUCTOR CTOR, return the element values as a vector. */
-extern VEC(tree,gc) *ctor_to_vec (tree);
+extern vec<tree, va_gc> *ctor_to_vec (tree);
extern bool categorize_ctor_elements (const_tree, HOST_WIDE_INT *,
HOST_WIDE_INT *, bool *);
or CALL_EXPRs with PLACEHOLDER_EXPRs occurring only in their
argument list. */
-extern void find_placeholder_in_expr (tree, VEC (tree, heap) **);
+extern void find_placeholder_in_expr (tree, vec<tree> *);
/* This macro calls the above function but short-circuits the common
case of a constant to save time and also checks for NULL. */
extern enum built_in_function builtin_mathfn_code (const_tree);
extern tree fold_builtin_call_array (location_t, tree, tree, int, tree *);
extern tree build_call_expr_loc_array (location_t, tree, int, tree *);
-extern tree build_call_expr_loc_vec (location_t, tree, VEC(tree,gc) *);
+extern tree build_call_expr_loc_vec (location_t, tree, vec<tree, va_gc> *);
extern tree build_call_expr_loc (location_t, tree, int, ...);
extern tree build_call_expr (tree, int, ...);
extern tree mathfn_built_in (tree, enum built_in_function fn);
/* In print-tree.c */
extern void debug_tree (tree);
-extern void debug_vec_tree (VEC(tree,gc) *);
+extern void debug_vec_tree (vec<tree, va_gc> *);
#ifdef BUFSIZ
extern void dump_addr (FILE*, const char *, const void *);
extern void print_node (FILE *, const char *, tree, int);
-extern void print_vec_tree (FILE *, const char *, VEC(tree,gc) *, int);
+extern void print_vec_tree (FILE *, const char *, vec<tree, va_gc> *, int);
extern void print_node_brief (FILE *, const char *, const_tree, int);
extern void indent_to (FILE *, int);
#endif
struct GTY(()) tree_vec_map {
struct tree_map_base base;
- VEC(tree,gc) *to;
+ vec<tree, va_gc> *to;
};
#define tree_vec_map_eq tree_map_base_eq
to profile. There are different histogram types (see HIST_TYPE_* in
value-prof.h) and each transformation can request one or more histogram
types per GIMPLE statement. The function gimple_find_values_to_profile()
- collects the values to profile in a VEC, and adds the number of counters
+ collects the values to profile in a vec, and adds the number of counters
required for the different histogram types.
For a -fprofile-generate run, the statements for which values should be
return true;
}
-static VEC(cgraph_node_ptr, heap) *cgraph_node_map = NULL;
+static vec<cgraph_node_ptr> cgraph_node_map
+ = vec<cgraph_node_ptr>();
/* Initialize map from FUNCDEF_NO to CGRAPH_NODE. */
struct cgraph_node *n;
if (get_last_funcdef_no ())
- VEC_safe_grow_cleared (cgraph_node_ptr, heap,
- cgraph_node_map, get_last_funcdef_no ());
+ cgraph_node_map.safe_grow_cleared (get_last_funcdef_no ());
FOR_EACH_FUNCTION (n)
{
if (DECL_STRUCT_FUNCTION (n->symbol.decl))
- VEC_replace (cgraph_node_ptr, cgraph_node_map,
- DECL_STRUCT_FUNCTION (n->symbol.decl)->funcdef_no, n);
+ cgraph_node_map[DECL_STRUCT_FUNCTION (n->symbol.decl)->funcdef_no] = n;
}
}
void
del_node_map (void)
{
- VEC_free (cgraph_node_ptr, heap, cgraph_node_map);
- cgraph_node_map = NULL;
+ cgraph_node_map.release ();
}
/* Return cgraph node for function with pid */
find_func_by_funcdef_no (int func_id)
{
int max_id = get_last_funcdef_no ();
- if (func_id >= max_id || VEC_index (cgraph_node_ptr,
- cgraph_node_map,
- func_id) == NULL)
+ if (func_id >= max_id || cgraph_node_map[func_id] == NULL)
{
if (flag_profile_correction)
inform (DECL_SOURCE_LOCATION (current_function_decl),
return NULL;
}
- return VEC_index (cgraph_node_ptr, cgraph_node_map, func_id);
+ return cgraph_node_map[func_id];
}
/* Perform sanity check on the indirect call target. Due to race conditions,
divisor = gimple_assign_rhs2 (stmt);
op0 = gimple_assign_rhs1 (stmt);
- VEC_reserve (histogram_value, heap, *values, 3);
+ values->reserve (3);
if (TREE_CODE (divisor) == SSA_NAME)
/* Check for the case where the divisor is the same value most
of the time. */
- VEC_quick_push (histogram_value, *values,
- gimple_alloc_histogram_value (cfun,
+ values->quick_push (gimple_alloc_histogram_value (cfun,
HIST_TYPE_SINGLE_VALUE,
stmt, divisor));
{
tree val;
/* Check for a special case where the divisor is power of 2. */
- VEC_quick_push (histogram_value, *values,
- gimple_alloc_histogram_value (cfun, HIST_TYPE_POW2,
- stmt, divisor));
+ values->quick_push (gimple_alloc_histogram_value (cfun,
+ HIST_TYPE_POW2,
+ stmt, divisor));
val = build2 (TRUNC_DIV_EXPR, type, op0, divisor);
hist = gimple_alloc_histogram_value (cfun, HIST_TYPE_INTERVAL,
stmt, val);
hist->hdata.intvl.int_start = 0;
hist->hdata.intvl.steps = 2;
- VEC_quick_push (histogram_value, *values, hist);
+ values->quick_push (hist);
}
return;
callee = gimple_call_fn (stmt);
- VEC_reserve (histogram_value, heap, *values, 3);
+ values->reserve (3);
- VEC_quick_push (histogram_value, *values,
- gimple_alloc_histogram_value (cfun, HIST_TYPE_INDIR_CALL,
- stmt, callee));
+ values->quick_push (gimple_alloc_histogram_value (cfun, HIST_TYPE_INDIR_CALL,
+ stmt, callee));
return;
}
if (TREE_CODE (blck_size) != INTEGER_CST)
{
- VEC_safe_push (histogram_value, heap, *values,
- gimple_alloc_histogram_value (cfun, HIST_TYPE_SINGLE_VALUE,
- stmt, blck_size));
- VEC_safe_push (histogram_value, heap, *values,
- gimple_alloc_histogram_value (cfun, HIST_TYPE_AVERAGE,
- stmt, blck_size));
+ values->safe_push (gimple_alloc_histogram_value (cfun,
+ HIST_TYPE_SINGLE_VALUE,
+ stmt, blck_size));
+ values->safe_push (gimple_alloc_histogram_value (cfun, HIST_TYPE_AVERAGE,
+ stmt, blck_size));
}
if (TREE_CODE (blck_size) != INTEGER_CST)
- VEC_safe_push (histogram_value, heap, *values,
- gimple_alloc_histogram_value (cfun, HIST_TYPE_IOR,
- stmt, dest));
+ values->safe_push (gimple_alloc_histogram_value (cfun, HIST_TYPE_IOR,
+ stmt, dest));
}
/* Find values inside STMT for that we want to measure histograms and adds
unsigned i;
histogram_value hist = NULL;
- *values = NULL;
+ values->create (0);
FOR_EACH_BB (bb)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
gimple_values_to_profile (gsi_stmt (gsi), values);
- FOR_EACH_VEC_ELT (histogram_value, *values, i, hist)
+ FOR_EACH_VEC_ELT (*values, i, hist)
{
switch (hist->type)
{
typedef struct histogram_value_t *histogram_value;
typedef const struct histogram_value_t *const_histogram_value;
-DEF_VEC_P(histogram_value);
-DEF_VEC_ALLOC_P(histogram_value,heap);
-typedef VEC(histogram_value,heap) *histogram_values;
+typedef vec<histogram_value> histogram_values;
extern void gimple_find_values_to_profile (histogram_values *);
extern bool gimple_value_profile_transformations (void);
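Since histogram_values is now a plain vec<histogram_value> rather than a pointer typedef, the caller owns the storage directly. A hedged sketch of the expected calling pattern, given that gimple_find_values_to_profile create()s the vector itself:

    histogram_values values;                  /* vec<histogram_value>, heap.  */
    gimple_find_values_to_profile (&values);  /* Initializes and fills it.  */
    /* ... inspect or transform the collected values ...  */
    values.release ();                        /* Caller frees the storage.  */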
} u;
} micro_operation;
-DEF_VEC_O(micro_operation);
-DEF_VEC_ALLOC_O(micro_operation,heap);
/* A declaration of a variable, or an RTL value being handled like a
declaration. */
typedef struct variable_tracking_info_def
{
/* The vector of micro operations. */
- VEC(micro_operation, heap) *mos;
+ vec<micro_operation> mos;
/* The IN and OUT set for dataflow analysis. */
dataflow_set in;
struct loc_exp_dep_s **pprev;
} loc_exp_dep;
-DEF_VEC_O (loc_exp_dep);
/* This data structure holds information about the depth of a variable
expansion. */
/* The depth of the cur_loc expression. */
expand_depth depth;
/* Dependencies actively used when expand FROM into cur_loc. */
- VEC (loc_exp_dep, none) deps;
+ vec<loc_exp_dep, va_heap, vl_embed> deps;
};
/* Structure describing one part of variable. */
rtx incoming;
} parm_reg_t;
-DEF_VEC_O(parm_reg_t);
-DEF_VEC_ALLOC_O(parm_reg_t, gc);
/* Vector of windowed parameter registers, if any. */
-static VEC(parm_reg_t, gc) *windowed_parm_regs = NULL;
+static vec<parm_reg_t, va_gc> *windowed_parm_regs = NULL;
#endif
/* Variable used to tell whether cselib_process_insn called our hook. */
if (RTX_FRAME_RELATED_P (insn)
&& find_reg_note (insn, REG_CFA_WINDOW_SAVE, NULL_RTX))
{
- unsigned int i, nregs = VEC_length(parm_reg_t, windowed_parm_regs);
+ unsigned int i, nregs = vec_safe_length (windowed_parm_regs);
rtx rtl = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs * 2));
parm_reg_t *p;
- FOR_EACH_VEC_ELT (parm_reg_t, windowed_parm_regs, i, p)
+ FOR_EACH_VEC_SAFE_ELT (windowed_parm_regs, i, p)
{
XVECEXP (rtl, 0, i * 2)
= gen_rtx_SET (VOIDmode, p->incoming, p->outgoing);
enum micro_operation_type mopt, FILE *out)
{
fprintf (out, "bb %i op %i insn %i %s ",
- bb->index, VEC_length (micro_operation, VTI (bb)->mos),
+ bb->index, VTI (bb)->mos.length (),
INSN_UID (insn), micro_operation_type_name[mopt]);
print_inline_rtx (out, x, 2);
fputc ('\n', out);
(RTL_FLAG_CHECK1 ("VAL_EXPR_IS_CLOBBERED", (x), CONCAT)->unchanging)
/* All preserved VALUEs. */
-static VEC (rtx, heap) *preserved_values;
+static vec<rtx> preserved_values;
/* Ensure VAL is preserved and remember it in a vector for vt_emit_notes. */
preserve_value (cselib_val *val)
{
cselib_preserve_value (val);
- VEC_safe_push (rtx, heap, preserved_values, val->val_rtx);
+ preserved_values.safe_push (val->val_rtx);
}
/* Helper function for MO_VAL_LOC handling. Return non-zero if
if (dump_file && (dump_flags & TDF_DETAILS))
log_op_type (mo.u.loc, cui->bb, cui->insn, mo.type, dump_file);
- VEC_safe_push (micro_operation, heap, VTI (bb)->mos, mo);
+ VTI (bb)->mos.safe_push (mo);
}
return 0;
if (dump_file && (dump_flags & TDF_DETAILS))
log_op_type (moa.u.loc, cui->bb, cui->insn,
moa.type, dump_file);
- VEC_safe_push (micro_operation, heap, VTI (bb)->mos, moa);
+ VTI (bb)->mos.safe_push (moa);
}
resolve = false;
log_and_return:
if (dump_file && (dump_flags & TDF_DETAILS))
log_op_type (mo.u.loc, cui->bb, cui->insn, mo.type, dump_file);
- VEC_safe_push (micro_operation, heap, VTI (bb)->mos, mo);
+ VTI (bb)->mos.safe_push (mo);
}
/* Arguments to the call. */
&& TREE_CODE (fndecl) == FUNCTION_DECL
&& DECL_HAS_DEBUG_ARGS_P (fndecl))
{
- VEC(tree, gc) **debug_args = decl_debug_args_lookup (fndecl);
+ vec<tree, va_gc> **debug_args = decl_debug_args_lookup (fndecl);
if (debug_args)
{
unsigned int ix;
tree param;
- for (ix = 0; VEC_iterate (tree, *debug_args, ix, param); ix += 2)
+ for (ix = 0; vec_safe_iterate (*debug_args, ix, &param); ix += 2)
{
rtx item;
- tree dtemp = VEC_index (tree, *debug_args, ix + 1);
+ tree dtemp = (**debug_args)[ix + 1];
enum machine_mode mode = DECL_MODE (dtemp);
item = gen_rtx_DEBUG_PARAMETER_REF (mode, param);
item = gen_rtx_CONCAT (mode, item, DECL_RTL_KNOWN_SET (dtemp));
cui.sets = sets;
cui.n_sets = n_sets;
- n1 = VEC_length (micro_operation, VTI (bb)->mos);
+ n1 = VTI (bb)->mos.length ();
cui.store_p = false;
note_uses (&PATTERN (insn), add_uses_1, &cui);
- n2 = VEC_length (micro_operation, VTI (bb)->mos) - 1;
- mos = VEC_address (micro_operation, VTI (bb)->mos);
+ n2 = VTI (bb)->mos.length () - 1;
+ mos = VTI (bb)->mos.address ();
/* Order the MO_USEs to be before MO_USE_NO_VARs and MO_VAL_USE, and
MO_VAL_LOC last. */
}
}
- n2 = VEC_length (micro_operation, VTI (bb)->mos) - 1;
+ n2 = VTI (bb)->mos.length () - 1;
while (n1 < n2)
{
while (n1 < n2 && mos[n1].type != MO_VAL_LOC)
if (dump_file && (dump_flags & TDF_DETAILS))
log_op_type (PATTERN (insn), bb, insn, mo.type, dump_file);
- VEC_safe_push (micro_operation, heap, VTI (bb)->mos, mo);
+ VTI (bb)->mos.safe_push (mo);
}
- n1 = VEC_length (micro_operation, VTI (bb)->mos);
+ n1 = VTI (bb)->mos.length ();
/* This will record NEXT_INSN (insn), such that we can
insert notes before it without worrying about any
notes that MO_USEs might emit after the insn. */
cui.store_p = true;
note_stores (PATTERN (insn), add_stores, &cui);
- n2 = VEC_length (micro_operation, VTI (bb)->mos) - 1;
- mos = VEC_address (micro_operation, VTI (bb)->mos);
+ n2 = VTI (bb)->mos.length () - 1;
+ mos = VTI (bb)->mos.address ();
/* Order the MO_VAL_USEs first (note_stores does nothing
on DEBUG_INSNs, so there are no MO_VAL_LOCs from this
}
}
- n2 = VEC_length (micro_operation, VTI (bb)->mos) - 1;
+ n2 = VTI (bb)->mos.length () - 1;
while (n1 < n2)
{
while (n1 < n2 && mos[n1].type == MO_CLOBBER)
dataflow_set_copy (&old_out, out);
dataflow_set_copy (out, in);
- FOR_EACH_VEC_ELT (micro_operation, VTI (bb)->mos, i, mo)
+ FOR_EACH_VEC_ELT (VTI (bb)->mos, i, mo)
{
rtx insn = mo->insn;
delete_slot_part (set, loc, slot, offset);
}
-DEF_VEC_P (variable);
-DEF_VEC_ALLOC_P (variable, heap);
-
-DEF_VEC_ALLOC_P_STACK (rtx);
-#define VEC_rtx_stack_alloc(alloc) VEC_stack_alloc (rtx, alloc)
/* Structure for passing some other parameters to function
vt_expand_loc_callback. */
/* Stack of values and debug_exprs under expansion, and their
children. */
- VEC (rtx, stack) *expanding;
+ vec<rtx, va_stack> expanding;
/* Stack of values and debug_exprs whose expansion hit recursion
cycles. They will have VALUE_RECURSED_INTO marked when added to
resolves to a valid location. So, if the flag remains set at the
end of the search, we know no valid location for this one can
possibly exist. */
- VEC (rtx, stack) *pending;
+ vec<rtx, va_stack> pending;
/* The maximum depth among the sub-expressions under expansion.
Zero indicates no expansion so far. */
in the algorithm, so we instead leave an assertion to catch
errors. */
gcc_checking_assert (!count
- || VEC_empty (loc_exp_dep, VAR_LOC_DEP_VEC (var)));
+ || VAR_LOC_DEP_VEC (var) == NULL
+ || VAR_LOC_DEP_VEC (var)->is_empty ());
- if (VAR_LOC_1PAUX (var)
- && VEC_space (loc_exp_dep, VAR_LOC_DEP_VEC (var), count))
+ if (VAR_LOC_1PAUX (var) && VAR_LOC_DEP_VEC (var)->space (count))
return;
allocsize = offsetof (struct onepart_aux, deps)
- + VEC_embedded_size (loc_exp_dep, count);
+ + vec<loc_exp_dep, va_heap, vl_embed>::embedded_size (count);
if (VAR_LOC_1PAUX (var))
{
VAR_LOC_DEPTH (var).complexity = 0;
VAR_LOC_DEPTH (var).entryvals = 0;
}
- VEC_embedded_init (loc_exp_dep, VAR_LOC_DEP_VEC (var), count);
+ VAR_LOC_DEP_VEC (var)->embedded_init (count);
}
/* Remove all entries from the vector of active dependencies of VAR,
static void
loc_exp_dep_clear (variable var)
{
- while (!VEC_empty (loc_exp_dep, VAR_LOC_DEP_VEC (var)))
+ while (VAR_LOC_DEP_VEC (var) && !VAR_LOC_DEP_VEC (var)->is_empty ())
{
- loc_exp_dep *led = &VEC_last (loc_exp_dep, VAR_LOC_DEP_VEC (var));
+ loc_exp_dep *led = &VAR_LOC_DEP_VEC (var)->last ();
if (led->next)
led->next->pprev = led->pprev;
if (led->pprev)
*led->pprev = led->next;
- VEC_pop (loc_exp_dep, VAR_LOC_DEP_VEC (var));
+ VAR_LOC_DEP_VEC (var)->pop ();
}
}
{
loc_exp_dep empty;
memset (&empty, 0, sizeof (empty));
- VEC_quick_push (loc_exp_dep, VAR_LOC_DEP_VEC (var), empty);
- led = &VEC_last (loc_exp_dep, VAR_LOC_DEP_VEC (var));
+ VAR_LOC_DEP_VEC (var)->quick_push (empty);
+ led = &VAR_LOC_DEP_VEC (var)->last ();
}
led->dv = var->dv;
led->value = x;
{
bool pending_recursion = false;
- gcc_checking_assert (VEC_empty (loc_exp_dep, VAR_LOC_DEP_VEC (var)));
+ gcc_checking_assert (VAR_LOC_DEP_VEC (var) == NULL
+ || VAR_LOC_DEP_VEC (var)->is_empty ());
/* Set up all dependencies from last_child (as set up at the end of
the loop above) to the end. */
}
first_child = result_first_child = last_child
- = VEC_length (rtx, elcd->expanding);
+ = elcd->expanding.length ();
wanted_entryvals = found_entryvals;
elcd->depth.complexity = elcd->depth.entryvals = 0;
result = cselib_expand_value_rtx_cb (loc_from, regs, EXPR_DEPTH,
vt_expand_loc_callback, data);
- last_child = VEC_length (rtx, elcd->expanding);
+ last_child = elcd->expanding.length ();
if (result)
{
attempted locs as dependencies, so that we retry the
expansion should any of them change, in the hope it can give
us a new entry without an ENTRY_VALUE? */
- VEC_truncate (rtx, elcd->expanding, first_child);
+ elcd->expanding.truncate (first_child);
goto retry;
}
/* Register all encountered dependencies as active. */
pending_recursion = loc_exp_dep_set
- (var, result, VEC_address (rtx, elcd->expanding) + result_first_child,
+ (var, result, elcd->expanding.address () + result_first_child,
last_child - result_first_child, elcd->vars);
- VEC_truncate (rtx, elcd->expanding, first_child);
+ elcd->expanding.truncate (first_child);
/* Record where the expansion came from. */
gcc_checking_assert (!result || !pending_recursion);
return x;
}
- VEC_safe_push (rtx, stack, elcd->expanding, x);
+ elcd->expanding.safe_push (x);
/* Check that VALUE_RECURSED_INTO implies NO_LOC_P. */
gcc_checking_assert (!VALUE_RECURSED_INTO (x) || NO_LOC_P (x));
if (pending_recursion)
{
gcc_checking_assert (!result);
- VEC_safe_push (rtx, stack, elcd->pending, x);
+ elcd->pending.safe_push (x);
}
else
{
This function performs this finalization of NULL locations. */
static void
-resolve_expansions_pending_recursion (VEC (rtx, stack) *pending)
+resolve_expansions_pending_recursion (vec<rtx, va_stack> pending)
{
- while (!VEC_empty (rtx, pending))
+ while (!pending.is_empty ())
{
- rtx x = VEC_pop (rtx, pending);
+ rtx x = pending.pop ();
decl_or_value dv;
if (!VALUE_RECURSED_INTO (x))
}
/* Initialize expand_loc_callback_data D with variable hash table V.
- It must be a macro because of alloca (VEC stack). */
+ It must be a macro because of alloca (vec stack). */
#define INIT_ELCD(d, v) \
do \
{ \
(d).vars = (v); \
- (d).expanding = VEC_alloc (rtx, stack, 4); \
- (d).pending = VEC_alloc (rtx, stack, 4); \
+ vec_stack_alloc (rtx, (d).expanding, 4); \
+ vec_stack_alloc (rtx, (d).pending, 4); \
(d).depth.complexity = (d).depth.entryvals = 0; \
} \
while (0)
do \
{ \
resolve_expansions_pending_recursion ((d).pending); \
- VEC_free (rtx, stack, (d).pending); \
- VEC_free (rtx, stack, (d).expanding); \
+ (d).pending.release (); \
+ (d).expanding.release (); \
\
if ((l) && MEM_P (l)) \
(l) = targetm.delegitimize_address (l); \
loc = vt_expand_var_loc_chain (var, scratch_regs, &data, NULL);
- gcc_checking_assert (VEC_empty (rtx, data.expanding));
+ gcc_checking_assert (data.expanding.is_empty ());
FINI_ELCD (data, loc);
static int
values_to_stack (void **slot, void *data)
{
- VEC (rtx, stack) **changed_values_stack = (VEC (rtx, stack) **)data;
+ vec<rtx, va_stack> *changed_values_stack = (vec<rtx, va_stack> *) data;
variable var = (variable) *slot;
if (var->onepart == ONEPART_VALUE)
- VEC_safe_push (rtx, stack, *changed_values_stack, dv_as_value (var->dv));
+ changed_values_stack->safe_push (dv_as_value (var->dv));
else if (var->onepart == ONEPART_DEXPR)
- VEC_safe_push (rtx, stack, *changed_values_stack,
- DECL_RTL_KNOWN_SET (dv_as_decl (var->dv)));
+ changed_values_stack->safe_push (DECL_RTL_KNOWN_SET (dv_as_decl (var->dv)));
return 1;
}
static void
notify_dependents_of_changed_value (rtx val, htab_t htab,
- VEC (rtx, stack) **changed_values_stack)
+ vec<rtx, va_stack> *changed_values_stack)
{
void **slot;
variable var;
case ONEPART_VALUE:
case ONEPART_DEXPR:
set_dv_changed (ldv, true);
- VEC_safe_push (rtx, stack, *changed_values_stack, dv_as_rtx (ldv));
+ changed_values_stack->safe_push (dv_as_rtx (ldv));
break;
case ONEPART_VDECL:
{
int i, n;
rtx val;
- VEC (rtx, stack) *changed_values_stack = VEC_alloc (rtx, stack, 20);
+ vec<rtx, va_stack> changed_values_stack;
+
+ vec_stack_alloc (rtx, changed_values_stack, 20);
/* Move values from changed_variables to changed_values_stack. */
htab_traverse (changed_variables, values_to_stack, &changed_values_stack);
/* Back-propagate change notifications in values while popping
them from the stack. */
- for (n = i = VEC_length (rtx, changed_values_stack);
- i > 0; i = VEC_length (rtx, changed_values_stack))
+ for (n = i = changed_values_stack.length ();
+ i > 0; i = changed_values_stack.length ())
{
- val = VEC_pop (rtx, changed_values_stack);
+ val = changed_values_stack.pop ();
notify_dependents_of_changed_value (val, htab, &changed_values_stack);
/* This condition will hold when visiting each of the entries
}
}
- VEC_free (rtx, stack, changed_values_stack);
+ changed_values_stack.release ();
}
/* Emit NOTE_INSN_VAR_LOCATION note for each variable from a chain
dataflow_set_clear (set);
dataflow_set_copy (set, &VTI (bb)->in);
- FOR_EACH_VEC_ELT (micro_operation, VTI (bb)->mos, i, mo)
+ FOR_EACH_VEC_ELT (VTI (bb)->mos, i, mo)
{
rtx insn = mo->insn;
rtx next_insn = next_non_note_insn_var_location (insn);
= gen_rtx_REG_offset (incoming, GET_MODE (incoming),
OUTGOING_REGNO (REGNO (incoming)), 0);
p.outgoing = incoming;
- VEC_safe_push (parm_reg_t, gc, windowed_parm_regs, p);
+ vec_safe_push (windowed_parm_regs, p);
}
else if (MEM_P (incoming)
&& REG_P (XEXP (incoming, 0))
p.incoming = reg;
reg = gen_raw_REG (GET_MODE (reg), OUTGOING_REGNO (REGNO (reg)));
p.outgoing = reg;
- VEC_safe_push (parm_reg_t, gc, windowed_parm_regs, p);
+ vec_safe_push (windowed_parm_regs, p);
incoming = replace_equiv_address_nv (incoming, reg);
}
}
scratch_regs = BITMAP_ALLOC (NULL);
valvar_pool = create_alloc_pool ("small variable_def pool",
sizeof (struct variable_def), 256);
- preserved_values = VEC_alloc (rtx, heap, 256);
+ preserved_values.create (256);
}
else
{
if (dump_file && (dump_flags & TDF_DETAILS))
log_op_type (PATTERN (insn), bb, insn,
MO_ADJUST, dump_file);
- VEC_safe_push (micro_operation, heap, VTI (bb)->mos,
- mo);
+ VTI (bb)->mos.safe_push (mo);
VTI (bb)->out.stack_adjust += pre;
}
}
if (dump_file && (dump_flags & TDF_DETAILS))
log_op_type (PATTERN (insn), bb, insn,
MO_ADJUST, dump_file);
- VEC_safe_push (micro_operation, heap, VTI (bb)->mos,
- mo);
+ VTI (bb)->mos.safe_push (mo);
VTI (bb)->out.stack_adjust += post;
}
FOR_EACH_BB (bb)
{
- VEC_free (micro_operation, heap, VTI (bb)->mos);
+ VTI (bb)->mos.release ();
}
FOR_ALL_BB (bb)
free_alloc_pool (loc_exp_dep_pool);
loc_exp_dep_pool = NULL;
free_alloc_pool (valvar_pool);
- VEC_free (rtx, heap, preserved_values);
+ preserved_values.release ();
cselib_finish ();
BITMAP_FREE (scratch_regs);
scratch_regs = NULL;
}
#ifdef HAVE_window_save
- VEC_free (parm_reg_t, gc, windowed_parm_regs);
+ vec_free (windowed_parm_regs);
#endif
if (vui_vec)
case CONSTRUCTOR:
{
- VEC(constructor_elt, gc) *v1, *v2;
+ vec<constructor_elt, va_gc> *v1, *v2;
unsigned HOST_WIDE_INT idx;
typecode = TREE_CODE (TREE_TYPE (t1));
v1 = CONSTRUCTOR_ELTS (t1);
v2 = CONSTRUCTOR_ELTS (t2);
- if (VEC_length (constructor_elt, v1)
- != VEC_length (constructor_elt, v2))
- return 0;
+ if (vec_safe_length (v1) != vec_safe_length (v2))
+ return 0;
- for (idx = 0; idx < VEC_length (constructor_elt, v1); ++idx)
+ for (idx = 0; idx < vec_safe_length (v1); ++idx)
{
- constructor_elt *c1 = &VEC_index (constructor_elt, v1, idx);
- constructor_elt *c2 = &VEC_index (constructor_elt, v2, idx);
+ constructor_elt *c1 = &(*v1)[idx];
+ constructor_elt *c2 = &(*v2)[idx];
/* Check that each value is the same... */
if (!compare_constant (c1->value, c2->value))
case CONSTRUCTOR:
{
tree copy = copy_node (exp);
- VEC(constructor_elt, gc) *v;
+ vec<constructor_elt, va_gc> *v;
unsigned HOST_WIDE_INT idx;
tree purpose, value;
- v = VEC_alloc(constructor_elt, gc, VEC_length(constructor_elt,
- CONSTRUCTOR_ELTS (exp)));
+ vec_alloc (v, vec_safe_length (CONSTRUCTOR_ELTS (exp)));
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, purpose, value)
{
constructor_elt ce = {purpose, copy_constant (value)};
- VEC_quick_push (constructor_elt, v, ce);
+ v->quick_push (ce);
}
CONSTRUCTOR_ELTS (copy) = v;
return copy;
/* Allow a constructor with no elements for any data type.
This means to fill the space with zeros. */
if (TREE_CODE (exp) == CONSTRUCTOR
- && VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (exp)))
+ && vec_safe_is_empty (CONSTRUCTOR_ELTS (exp)))
{
assemble_zeros (size);
return;
local.field = TYPE_FIELDS (local.type);
for (cnt = 0;
- VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (exp), cnt, ce);
+ vec_safe_iterate (CONSTRUCTOR_ELTS (exp), cnt, &ce);
cnt++, local.field = local.field ? DECL_CHAIN (local.field) : 0)
{
local.val = ce->value;
targetm.asm_out.globalize_decl_name (asm_out_file, decl);
}
-VEC(alias_pair,gc) *alias_pairs;
+vec<alias_pair, va_gc> *alias_pairs;
/* Output the assembler code for a define (equate) using ASM_OUTPUT_DEF
or ASM_OUTPUT_DEF_FROM_DECLS. The function defines the symbol whose
else
{
alias_pair p = {decl, target};
- VEC_safe_push (alias_pair, gc, alias_pairs, p);
+ vec_safe_push (alias_pairs, p);
}
}
tree to;
} tm_alias_pair;
-DEF_VEC_O(tm_alias_pair);
-DEF_VEC_ALLOC_O(tm_alias_pair,heap);
/* Helper function for finish_tm_clone_pairs. Dump a hash table entry
into a VEC in INFO. */
dump_tm_clone_to_vec (void **slot, void *info)
{
struct tree_map *map = (struct tree_map *) *slot;
- VEC(tm_alias_pair,heap) **tm_alias_pairs = (VEC(tm_alias_pair, heap) **) info;
+ vec<tm_alias_pair> *tm_alias_pairs = (vec<tm_alias_pair> *) info;
tm_alias_pair p = {DECL_UID (map->base.from), map->base.from, map->to};
- VEC_safe_push (tm_alias_pair, heap, *tm_alias_pairs, p);
+ tm_alias_pairs->safe_push (p);
return 1;
}
/* Dump the actual pairs to the .tm_clone_table section. */
static void
-dump_tm_clone_pairs (VEC(tm_alias_pair,heap) *tm_alias_pairs)
+dump_tm_clone_pairs (vec<tm_alias_pair> tm_alias_pairs)
{
unsigned i;
tm_alias_pair *p;
bool switched = false;
- FOR_EACH_VEC_ELT (tm_alias_pair, tm_alias_pairs, i, p)
+ FOR_EACH_VEC_ELT (tm_alias_pairs, i, p)
{
tree src = p->from;
tree dst = p->to;
void
finish_tm_clone_pairs (void)
{
- VEC(tm_alias_pair,heap) *tm_alias_pairs = NULL;
+ vec<tm_alias_pair> tm_alias_pairs = vec<tm_alias_pair>();
if (tm_clone_hash == NULL)
return;
htab_traverse_noresize (tm_clone_hash, dump_tm_clone_to_vec,
(void *) &tm_alias_pairs);
/* Sort it. */
- VEC_qsort (tm_alias_pair, tm_alias_pairs, tm_alias_pair_cmp);
+ tm_alias_pairs.qsort (tm_alias_pair_cmp);
/* Dump it. */
dump_tm_clone_pairs (tm_alias_pairs);
htab_delete (tm_clone_hash);
tm_clone_hash = NULL;
- VEC_free (tm_alias_pair, heap, tm_alias_pairs);
+ tm_alias_pairs.release ();
}
block->alignment = MAX (block->alignment, alignment);
block->size = offset + size;
- VEC_safe_push (rtx, gc, block->objects, symbol);
+ vec_safe_push (block->objects, symbol);
}
/* Return the anchor that should be used to address byte offset OFFSET
/* Do a binary search to see if there's already an anchor we can use.
Set BEGIN to the new anchor's index if not. */
begin = 0;
- end = VEC_length (rtx, block->anchors);
+ end = vec_safe_length (block->anchors);
while (begin != end)
{
middle = (end + begin) / 2;
- anchor = VEC_index (rtx, block->anchors, middle);
+ anchor = (*block->anchors)[middle];
if (SYMBOL_REF_BLOCK_OFFSET (anchor) > offset)
end = middle;
else if (SYMBOL_REF_BLOCK_OFFSET (anchor) < offset)
SYMBOL_REF_FLAGS (anchor) |= model << SYMBOL_FLAG_TLS_SHIFT;
/* Insert it at index BEGIN. */
- VEC_safe_insert (rtx, gc, block->anchors, begin, anchor);
+ vec_safe_insert (block->anchors, begin, anchor);
return anchor;
}
tree decl;
rtx symbol;
- if (block->objects == NULL)
+ if (!block->objects)
return;
/* Switch to the section and make sure that the first byte is
/* Define the values of all anchors relative to the current section
position. */
- FOR_EACH_VEC_ELT (rtx, block->anchors, i, symbol)
+ FOR_EACH_VEC_SAFE_ELT (block->anchors, i, symbol)
targetm.asm_out.output_anchor (symbol);
/* Output the objects themselves. */
offset = 0;
- FOR_EACH_VEC_ELT (rtx, block->objects, i, symbol)
+ FOR_EACH_VEC_ELT (*block->objects, i, symbol)
{
/* Move to the object's offset, padding with zeros if necessary. */
assemble_zeros (SYMBOL_REF_BLOCK_OFFSET (symbol) - offset);
node->alias = false;
continue;
}
- if (!VEC_length (ipa_ref_t, node->symbol.ref_list.references))
+ if (!vec_safe_length (node->symbol.ref_list.references))
ipa_record_reference ((symtab_node)node, (symtab_node)tgt, IPA_REF_ALIAS, NULL);
if (node->extra_name_alias)
{
}
/* Account the overhead. */
-static void
-register_overhead (struct vec_prefix *ptr, size_t size,
- const char *name, int line, const char *function)
+
+void
+vec_prefix::register_overhead (size_t size, const char *name, int line,
+ const char *function)
{
struct vec_descriptor *loc = vec_descriptor (name, line, function);
struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
PTR *slot;
- p->ptr = ptr;
+ p->ptr = this;
p->loc = loc;
p->allocated = size;
if (!ptr_hash)
ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
- slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
+ slot = htab_find_slot_with_hash (ptr_hash, this, htab_hash_pointer (this),
+ INSERT);
gcc_assert (!*slot);
*slot = p;
loc->times++;
}
-/* Notice that the pointer has been freed. */
-static void
-free_overhead (struct vec_prefix *ptr)
+
+/* Notice that the memory allocated for the vector has been freed. */
+
+void
+vec_prefix::release_overhead (void)
{
- PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
+ PTR *slot = htab_find_slot_with_hash (ptr_hash, this,
+ htab_hash_pointer (this),
NO_INSERT);
struct ptr_hash_entry *p = (struct ptr_hash_entry *) *slot;
p->loc->allocated -= p->allocated;
htab_clear_slot (ptr_hash, slot);
- free (p);
+ ::free (p);
}
-void
-vec_heap_free (void *ptr)
-{
- if (GATHER_STATISTICS)
- free_overhead ((struct vec_prefix *)ptr);
- free (ptr);
-}
-/* Calculate the new ALLOC value, making sure that RESERVE slots are
- free. If EXACT grow exactly, otherwise grow exponentially. */
+/* Calculate the number of slots to reserve for a vector, making sure
+   that RESERVE slots are free.  If EXACT, grow exactly, otherwise grow
+   exponentially.  PFX is the control data for the vector.  */
-static inline unsigned
-calculate_allocation (const struct vec_prefix *pfx, int reserve, bool exact)
+unsigned
+vec_prefix::calculate_allocation (vec_prefix *pfx, unsigned reserve, bool exact)
{
unsigned alloc = 0;
unsigned num = 0;
- gcc_assert (reserve >= 0);
-
if (pfx)
{
alloc = pfx->alloc_;
num = pfx->num_;
}
else if (!reserve)
- /* If there's no prefix, and we've not requested anything, then we
+ /* If there's no vector, and we've not requested anything, then we
will create a NULL vector. */
return 0;
/* We must have run out of room. */
- gcc_assert (alloc - num < (unsigned) reserve);
+ gcc_assert (alloc - num < reserve);
if (exact)
/* Exact size. */
return alloc;
}
-/* Ensure there are at least RESERVE free slots in VEC. If EXACT grow
- exactly, else grow exponentially. As a special case, if VEC is
- NULL and RESERVE is 0, no vector will be created. The vector's
- trailing array is at VEC_OFFSET offset and consists of ELT_SIZE
- sized elements. */
-
-void *
-vec_gc_o_reserve_1 (void *vec, int reserve, size_t vec_offset, size_t elt_size,
- bool exact MEM_STAT_DECL)
-{
- struct vec_prefix *pfx = (struct vec_prefix *) vec;
- unsigned alloc = calculate_allocation (pfx, reserve, exact);
- size_t size;
-
- if (!alloc)
- {
- if (pfx)
- ggc_free (pfx);
- return NULL;
- }
-
- /* Calculate the amount of space we want. */
- size = vec_offset + alloc * elt_size;
- /* Ask the allocator how much space it will really give us. */
- size = ggc_round_alloc_size (size);
- /* Adjust the number of slots accordingly. */
- alloc = (size - vec_offset) / elt_size;
- /* And finally, recalculate the amount of space we ask for. */
- size = vec_offset + alloc * elt_size;
-
- vec = ggc_realloc_stat (vec, size PASS_MEM_STAT);
-
- ((struct vec_prefix *)vec)->alloc_ = alloc;
- if (!pfx)
- ((struct vec_prefix *)vec)->num_ = 0;
-
- return vec;
-}
-
-
-/* As for vec_gc_o_reserve_1, but for heap allocated vectors. */
-
-void *
-vec_heap_o_reserve_1 (void *vec, int reserve, size_t vec_offset,
- size_t elt_size, bool exact MEM_STAT_DECL)
-{
- struct vec_prefix *pfx = (struct vec_prefix *) vec;
- unsigned alloc = calculate_allocation (pfx, reserve, exact);
-
- if (!alloc)
- {
- if (pfx)
- vec_heap_free (pfx);
- return NULL;
- }
-
- if (GATHER_STATISTICS && vec)
- free_overhead (pfx);
-
- vec = xrealloc (vec, vec_offset + alloc * elt_size);
- ((struct vec_prefix *)vec)->alloc_ = alloc;
- if (!pfx)
- ((struct vec_prefix *)vec)->num_ = 0;
- if (GATHER_STATISTICS && vec)
- register_overhead ((struct vec_prefix *)vec,
- vec_offset + alloc * elt_size FINAL_PASS_MEM_STAT);
-
- return vec;
-}
-
/* Stack vectors are a little different. VEC_alloc turns into a call
- to vec_stack_p_reserve_exact1 and passes in space allocated via a
+ to vec<T, A>::stack_reserve and passes in space allocated via a
call to alloca. We record that pointer so that we know that we
shouldn't free it. If the vector is resized, we resize it on the
heap. We record the pointers in a vector and search it in LIFO
the end should normally be efficient even if they are used in a
recursive function. */
-typedef void *void_p;
-DEF_VEC_P(void_p);
-DEF_VEC_ALLOC_P(void_p,heap);
-
-static VEC(void_p,heap) *stack_vecs;
+static vec<void *> stack_vecs;
-/* Allocate a vector which uses alloca for the initial allocation.
- SPACE is space allocated using alloca, ALLOC is the number of
- entries allocated. */
+/* Add a stack vector to STACK_VECS. */
-void *
-vec_stack_p_reserve_exact_1 (int alloc, void *space)
+void
+register_stack_vec (void *vec)
{
- struct vec_prefix *pfx = (struct vec_prefix *) space;
-
- VEC_safe_push (void_p, heap, stack_vecs, space);
-
- pfx->num_ = 0;
- pfx->alloc_ = alloc;
-
- return space;
+ stack_vecs.safe_push (vec);
}
-/* Grow a vector allocated using alloca. When this happens, we switch
- back to heap allocation. We remove the vector from stack_vecs, if
- it is there, since we no longer need to avoid freeing it. */
-
-static void *
-vec_stack_o_reserve_1 (void *vec, int reserve, size_t vec_offset,
- size_t elt_size, bool exact MEM_STAT_DECL)
-{
- bool found;
- unsigned int ix;
- void *newvec;
-
- found = false;
- for (ix = VEC_length (void_p, stack_vecs); ix > 0; --ix)
- {
- if (VEC_index (void_p, stack_vecs, ix - 1) == vec)
- {
- VEC_unordered_remove (void_p, stack_vecs, ix - 1);
- found = true;
- break;
- }
- }
-
- if (!found)
- {
- /* VEC is already on the heap. */
- return vec_heap_o_reserve_1 (vec, reserve, vec_offset, elt_size,
- exact PASS_MEM_STAT);
- }
-
- /* Move VEC to the heap. */
- reserve += ((struct vec_prefix *) vec)->num_;
- newvec = vec_heap_o_reserve_1 (NULL, reserve, vec_offset, elt_size,
- exact PASS_MEM_STAT);
- if (newvec && vec)
- {
- ((struct vec_prefix *) newvec)->num_ = ((struct vec_prefix *) vec)->num_;
- memcpy (((struct vec_prefix *) newvec)+1,
- ((struct vec_prefix *) vec)+1,
- ((struct vec_prefix *) vec)->num_ * elt_size);
- }
- return newvec;
-}
-/* Grow a vector allocated on the stack. */
+/* If VEC is registered in STACK_VECS, return its index.
+ Otherwise, return -1. */
-void *
-vec_stack_o_reserve (void *vec, int reserve, size_t vec_offset,
- size_t elt_size MEM_STAT_DECL)
+int
+stack_vec_register_index (void *vec)
{
- return vec_stack_o_reserve_1 (vec, reserve, vec_offset, elt_size, false
- PASS_MEM_STAT);
+ for (unsigned ix = stack_vecs.length (); ix > 0; --ix)
+ if (stack_vecs[ix - 1] == vec)
+ return static_cast<int> (ix - 1);
+ return -1;
}
-/* Exact version of vec_stack_o_reserve. */
-void *
-vec_stack_o_reserve_exact (void *vec, int reserve, size_t vec_offset,
- size_t elt_size MEM_STAT_DECL)
-{
- return vec_stack_o_reserve_1 (vec, reserve, vec_offset, elt_size, true
- PASS_MEM_STAT);
-}
-
-/* Free a vector allocated on the stack. Don't actually free it if we
- find it in the hash table. */
+/* Remove vector at slot IX from the list of registered stack vectors. */
void
-vec_stack_free (void *vec)
+unregister_stack_vec (unsigned ix)
{
- unsigned int ix;
-
- for (ix = VEC_length (void_p, stack_vecs); ix > 0; --ix)
- {
- if (VEC_index (void_p, stack_vecs, ix - 1) == vec)
- {
- VEC_unordered_remove (void_p, stack_vecs, ix - 1);
- return;
- }
- }
-
- /* VEC was not on the list of vecs allocated on the stack, so it
- must be allocated on the heap. */
- vec_heap_free (vec);
+ stack_vecs.unordered_remove (ix);
}
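Taken together, these three helpers implement the protocol described in the comment above. A hedged sketch (not the actual va_stack implementation in vec.h) of how release-side code can use the registry to tell stack storage from heap storage:

    /* Sketch: release a vector V that may still live in alloca'd space.  */
    static void
    release_maybe_stack_vec (void *v)
    {
      int ix = stack_vec_register_index (v);
      if (ix >= 0)
        unregister_stack_vec ((unsigned) ix);  /* Still on the stack; just
                                                  forget it, do not free.  */
      else
        free (v);  /* Already migrated to the heap on an earlier resize.  */
    }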
-#if ENABLE_CHECKING
-/* Issue a vector domain error, and then fall over. */
-
-void
-vec_assert_fail (const char *op, const char *struct_name,
- const char *file, unsigned int line, const char *function)
-{
- internal_error ("vector %s %s domain error, in %s at %s:%u",
- struct_name, op, function, trim_filename (file), line);
-}
-#endif
/* Helper for qsort; sort descriptors by amount of memory consumed. */
+
static int
cmp_statistic (const void *loc1, const void *loc2)
{
diff = l1->times - l2->times;
return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}
+
+
/* Collect array of the descriptors from hashtable. */
+
static struct vec_descriptor **loc_array;
static int
add_statistics (void **slot, void *b)
#ifndef GCC_VEC_H
#define GCC_VEC_H
-#include "statistics.h" /* For MEM_STAT_DECL. */
+/* FIXME - When compiling some of the gen* binaries, we cannot enable GC
+ support because the headers generated by gengtype are still not
+ present. In particular, the header file gtype-desc.h is missing,
+ so compilation may fail if we try to include ggc.h.
+
+ Since we use some of those declarations, we need to provide them
+ (even if the GC-based templates are not used). This is not a
+ problem because the code that runs before gengtype is built will
+ never need to use GC vectors. But it does force us to declare
+ these functions more than once. */
+#ifdef GENERATOR_FILE
+#define VEC_GC_ENABLED 0
+#else
+#define VEC_GC_ENABLED 1
+#endif // GENERATOR_FILE
+
+#include "statistics.h" // For CXX_MEM_STAT_INFO.
+
+#if VEC_GC_ENABLED
+#include "ggc.h"
+#else
+# ifndef GCC_GGC_H
+ /* Even if we think that GC is not enabled, the test that sets it is
+ weak. There are files compiled with -DGENERATOR_FILE that already
+ include ggc.h. We only need to provide these definitions if ggc.h
+ has not been included. Sigh. */
+ extern void ggc_free (void *);
+ extern size_t ggc_round_alloc_size (size_t requested_size);
+ extern void *ggc_internal_cleared_alloc_stat (size_t MEM_STAT_DECL)
+ ATTRIBUTE_MALLOC;
+ extern void *ggc_realloc_stat (void *, size_t MEM_STAT_DECL);
+# endif // GCC_GGC_H
+#endif // VEC_GC_ENABLED
/* Templated vector type and associated interfaces.
reference. Because the iterator will be inlined, the address-of
can be optimized away.
- The vectors are implemented using the trailing array idiom, thus
- they are not resizeable without changing the address of the vector
- object itself. This means you cannot have variables or fields of
- vector type -- always use a pointer to a vector. The one exception
- is the final field of a structure, which could be a vector type.
- You will have to use the embedded_size & embedded_init calls to
- create such objects, and they will probably not be resizeable (so
- don't use the 'safe' allocation variants). The trailing array
- idiom is used (rather than a pointer to an array of data), because,
- if we allow NULL to also represent an empty vector, empty vectors
- occupy minimal space in the structure containing them.
-
Each operation that increases the number of active elements is
available in 'quick' and 'safe' variants. The former presumes that
there is sufficient allocated space for the operation to succeed
'lower_bound' function will determine where to place an item in the
array using insert that will maintain sorted order.
- When a vector type is defined, first a non-memory managed version
- is created. You can then define either or both garbage collected
- and heap allocated versions. The allocation mechanism is specified
- when the vector is allocated. This can occur via the VEC_alloc
- call or one of the VEC_safe_* functions that add elements to a
- vector. If the vector is NULL, it will be allocated using the
- allocation strategy selected in the call. The valid allocations
- are defined in enum vec_allocation_t.
+ Vectors are template types with three arguments: the type of the
+ elements in the vector, the allocation strategy, and the physical
+ layout to use.
+
+ Four allocation strategies are supported:
+
+ - Heap: allocation is done using malloc/free. This is the
+ default allocation strategy.
+
+ - Stack: allocation is done using alloca.
+
+ - GC: allocation is done using ggc_alloc/ggc_free.
+
+ - GC atomic: same as GC with the exception that the elements
+ themselves are assumed to be of an atomic type that does
+ not need to be garbage collected. This means that marking
+ routines do not need to traverse the array marking the
+ individual elements. This increases the performance of
+ GC activities.
+
+ Two physical layouts are supported:
+
+ - Embedded: The vector is structured using the trailing array
+ idiom. The last member of the structure is an array of size
+ 1. When the vector is initially allocated, a single memory
+ block is created to hold the vector's control data and the
+ array of elements. These vectors cannot grow without
+ reallocation (see discussion on embeddable vectors below).
+
+ - Space efficient: The vector is structured as a pointer to an
+ embedded vector. This is the default layout. It means that
+ vectors occupy a single word of storage before initial
+ allocation. Vectors are allowed to grow (the internal
+ pointer is reallocated but the main vector instance does not
+ need to relocate).
+
+ The type, allocation and layout are specified when the vector is
+ declared.
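For example, under the defaults established by this patch (va_heap and va_stack default to the vl_ptr layout, va_gc to vl_embed), typical declarations look like:

    vec<tree> v1;                      /* Heap allocation, vl_ptr layout.  */
    vec<tree, va_gc> *v2;              /* GC allocation, embedded layout.  */
    vec<rtx, va_stack> v3;             /* alloca-based allocation.  */
    vec<tree, va_heap, vl_embed> *v4;  /* Heap allocation, embedded layout,
                                          spelled out explicitly.  */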
If you need to directly manipulate a vector, then the 'address'
accessor will return the address of the start of the vector. Also
the 'space' predicate will tell you whether there is spare capacity
in the vector. You will not normally need to use these two functions.
- Variables of vector type are of type vec_t<ETYPE> where ETYPE is
- the type of the elements of the vector. Due to the way GTY works,
- you must annotate any structures you wish to insert or reference
- from a vector with a GTY(()) tag. You need to do this even if you
- never use the GC allocated variants.
+ Notes on the different layout strategies
+
+ * Embeddable vectors (vec<T, A, vl_embed>)
+
+ These vectors are suitable to be embedded in other data
+ structures so that they can be pre-allocated in a contiguous
+ memory block.
+
+ Embeddable vectors are implemented using the trailing array
+ idiom, thus they are not resizeable without changing the address
+ of the vector object itself. This means you cannot have
+ variables or fields of embeddable vector type -- always use a
+ pointer to a vector. The one exception is the final field of a
+ structure, which could be a vector type.
+
+ You will have to use the embedded_size & embedded_init calls to
+ create such objects, and they will not be resizeable (so the
+ 'safe' allocation variants are not available).
+
+ Properties of embeddable vectors:
+
+ - The whole vector and control data are allocated in a single
+ contiguous block. It uses the trailing-vector idiom, so
+ allocation must reserve enough space for all the elements
+ in the vector plus its control data.
+ - The vector cannot be re-allocated.
+ - The vector cannot grow nor shrink.
+ - No indirections needed for access/manipulation.
+ - It requires 2 words of storage (prior to vector allocation).
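A sketch of the embedded_size/embedded_init protocol for a vector of n elements, assuming heap storage obtained with xmalloc (n and the pushed value are placeholders):

    size_t sz = vec<tree, va_heap, vl_embed>::embedded_size (n);
    vec<tree, va_heap, vl_embed> *v
      = static_cast <vec<tree, va_heap, vl_embed> *> (xmalloc (sz));
    v->embedded_init (n);     /* Control data: capacity n, length 0.  */
    for (unsigned i = 0; i < n; i++)
      v->quick_push (elt);    /* 'quick' only: the block cannot grow.  */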
+
+
+ * Space efficient vector (vec<T, A, vl_ptr>)
+
+ These vectors can grow dynamically and are allocated together
+ with their control data. They are suited to be included in data
+ structures. Prior to initial allocation, they only take a single
+ word of storage.
+
+ These vectors are implemented as a pointer to embeddable vectors.
+ The semantics allow for this pointer to be NULL to represent
+ empty vectors. This way, empty vectors occupy minimal space in
+ the structure containing them.
+
+ Properties:
+
+ - The whole vector and control data are allocated in a single
+ contiguous block.
+ - The whole vector may be re-allocated.
+ - Vector data may grow and shrink.
+ - Access and manipulation requires a pointer test and
+ indirection.
+ - It requires 1 word of storage (prior to vector allocation).
An example of their use would be,
struct my_struct {
- vec_t<tree> *v; // A (pointer to) a vector of tree pointers.
+ // A space-efficient vector of tree pointers in GC memory.
+ vec<tree, va_gc, vl_ptr> v;
};
struct my_struct *s;
- if (VEC_length(tree,s->v)) { we have some contents }
- VEC_safe_push(tree,gc,s->v,decl); // append some decl onto the end
- for (ix = 0; VEC_iterate(tree,s->v,ix,elt); ix++)
+ if (s->v.length ()) { we have some contents }
+ s->v.safe_push (decl); // append some decl onto the end
+ for (ix = 0; s->v.iterate (ix, &elt); ix++)
{ do something with elt }
*/
-#if ENABLE_CHECKING
-#define ALONE_VEC_CHECK_INFO __FILE__, __LINE__, __FUNCTION__
-#define VEC_CHECK_INFO , ALONE_VEC_CHECK_INFO
-#define ALONE_VEC_CHECK_DECL const char *file_, unsigned line_, const char *function_
-#define VEC_CHECK_DECL , ALONE_VEC_CHECK_DECL
-#define ALONE_VEC_CHECK_PASS file_, line_, function_
-#define VEC_CHECK_PASS , ALONE_VEC_CHECK_PASS
-
-#define VEC_ASSERT(EXPR,OP,T,A) \
- (void)((EXPR) ? 0 : (VEC_ASSERT_FAIL(OP,VEC(T,A)), 0))
-
-extern void vec_assert_fail (const char *, const char * VEC_CHECK_DECL)
- ATTRIBUTE_NORETURN;
-#define VEC_ASSERT_FAIL(OP,VEC) vec_assert_fail (OP,#VEC VEC_CHECK_PASS)
-#else
-#define ALONE_VEC_CHECK_INFO
-#define VEC_CHECK_INFO
-#define ALONE_VEC_CHECK_DECL void
-#define VEC_CHECK_DECL
-#define ALONE_VEC_CHECK_PASS
-#define VEC_CHECK_PASS
-#define VEC_ASSERT(EXPR,OP,T,A) (void)(EXPR)
-#endif
+/* Support function for statistics. */
+extern void dump_vec_loc_statistics (void);
-#define VEC(T,A) vec_t<T>
-enum vec_allocation_t { heap, gc, stack };
+/* Control data for vectors. This contains the number of allocated
+ and used slots inside a vector. */
-struct vec_prefix
+class vec_prefix
{
- unsigned num_;
+protected:
+ /* Memory allocation support routines in vec.c. */
+ void register_overhead (size_t, const char *, int, const char *);
+ void release_overhead (void);
+ static unsigned calculate_allocation (vec_prefix *, unsigned, bool);
+
+ /* Note that vec_prefix should be a base class for vec, but we use
+ offsetof() on vector fields of tree structures (e.g.,
+ tree_binfo::base_binfos), and offsetof only supports base types.
+
+ To compensate, we make vec_prefix a field inside vec and make
+ vec a friend class of vec_prefix so it can access its fields. */
+ template <typename, typename, typename> friend class vec;
+
+ /* The allocator types also need access to our internals. */
+ friend struct va_gc;
+ friend struct va_gc_atomic;
+ friend struct va_heap;
+ friend struct va_stack;
+
unsigned alloc_;
+ unsigned num_;
};
-/* Vector type, user visible. */
-template<typename T>
-struct GTY(()) vec_t
-{
- unsigned length (void) const;
- bool empty (void) const;
- T *address (void);
- T &last (ALONE_VEC_CHECK_DECL);
- const T &operator[] (unsigned) const;
- T &operator[] (unsigned);
- void embedded_init (int, int = 0);
-
- template<enum vec_allocation_t A>
- vec_t<T> *copy (ALONE_MEM_STAT_DECL);
-
- bool space (int VEC_CHECK_DECL);
- void splice (vec_t<T> * VEC_CHECK_DECL);
- T *quick_push (const T & VEC_CHECK_DECL);
- T &pop (ALONE_VEC_CHECK_DECL);
- void truncate (unsigned VEC_CHECK_DECL);
- void replace (unsigned, const T & VEC_CHECK_DECL);
- void quick_insert (unsigned, const T & VEC_CHECK_DECL);
- void ordered_remove (unsigned VEC_CHECK_DECL);
- void unordered_remove (unsigned VEC_CHECK_DECL);
- void block_remove (unsigned, unsigned VEC_CHECK_DECL);
- unsigned lower_bound (T, bool (*)(const T &, const T &)) const;
-
- /* Class-static member functions. Some of these will become member
- functions of a future handler class wrapping vec_t. */
- static size_t embedded_size (int);
+template<typename, typename, typename> class vec;
- template<enum vec_allocation_t A>
- static vec_t<T> *alloc (int MEM_STAT_DECL);
+/* Valid vector layouts
- static vec_t<T> *alloc (int, vec_t<T> *);
+ vl_embed - Embeddable vector that uses the trailing array idiom.
+ vl_ptr - Space efficient vector that uses a pointer to an
+ embeddable vector. */
+struct vl_embed { };
+struct vl_ptr { };
- template<enum vec_allocation_t A>
- static void free (vec_t<T> **);
- template<enum vec_allocation_t A>
- static vec_t<T> *reserve_exact (vec_t<T> *, int MEM_STAT_DECL);
+/* Types of supported allocations
- template<enum vec_allocation_t A>
- static bool reserve_exact (vec_t<T> **, int VEC_CHECK_DECL MEM_STAT_DECL);
+ va_heap - Allocation uses malloc/free.
+ va_gc - Allocation uses ggc_alloc.
+ va_gc_atomic - Same as GC, but individual elements of the array
+ do not need to be marked during collection.
+ va_stack - Allocation uses alloca. */
- template<enum vec_allocation_t A>
- static vec_t<T> *reserve (vec_t<T> *, int MEM_STAT_DECL);
+/* Allocator type for heap vectors. */
+struct va_heap
+{
+ /* Heap vectors are frequently regular instances, so use the vl_ptr
+ layout for them. */
+ typedef vl_ptr default_layout;
- template<enum vec_allocation_t A>
- static bool reserve (vec_t<T> **, int VEC_CHECK_DECL MEM_STAT_DECL);
+ template<typename T>
+ static void reserve (vec<T, va_heap, vl_embed> *&, unsigned, bool
+ CXX_MEM_STAT_INFO);
- template<enum vec_allocation_t A>
- static void safe_splice (vec_t<T> **, vec_t<T> * VEC_CHECK_DECL
- MEM_STAT_DECL);
+ template<typename T>
+ static void release (vec<T, va_heap, vl_embed> *&);
+};
- template<enum vec_allocation_t A>
- static T *safe_push (vec_t<T> **, const T & VEC_CHECK_DECL MEM_STAT_DECL);
- template<enum vec_allocation_t A>
- static void safe_grow (vec_t<T> **, int VEC_CHECK_DECL MEM_STAT_DECL);
+/* Allocator for heap memory. Ensure there are at least RESERVE free
+ slots in V. If EXACT is true, grow exactly, else grow
+ exponentially. As a special case, if the vector had not been
+ allocated and RESERVE is 0, no vector will be created. */
- template<enum vec_allocation_t A>
- static void safe_grow_cleared (vec_t<T> **, int VEC_CHECK_DECL MEM_STAT_DECL);
+template<typename T>
+inline void
+va_heap::reserve (vec<T, va_heap, vl_embed> *&v, unsigned reserve, bool exact
+ MEM_STAT_DECL)
+{
+ unsigned alloc = vec_prefix::calculate_allocation (v ? &v->pfx_ : 0, reserve,
+ exact);
+ if (!alloc)
+ {
+ release (v);
+ return;
+ }
- template<enum vec_allocation_t A>
- static void safe_insert (vec_t<T> **, unsigned, const T & VEC_CHECK_DECL
- MEM_STAT_DECL);
+ if (GATHER_STATISTICS && v)
+ v->pfx_.release_overhead ();
- static bool iterate (const vec_t<T> *, unsigned, T *);
- static bool iterate (const vec_t<T> *, unsigned, T **);
+ size_t size = vec<T, va_heap, vl_embed>::embedded_size (alloc);
+ unsigned nelem = v ? v->length () : 0;
+ v = static_cast <vec<T, va_heap, vl_embed> *> (xrealloc (v, size));
+ v->embedded_init (alloc, nelem);
- vec_prefix prefix_;
- T vec_[1];
-};
+ if (GATHER_STATISTICS)
+ v->pfx_.register_overhead (size FINAL_PASS_MEM_STAT);
+}
-/* Garbage collection support for vec_t. */
+/* Free the heap space allocated for vector V. */
template<typename T>
void
-gt_ggc_mx (vec_t<T> *v)
+va_heap::release (vec<T, va_heap, vl_embed> *&v)
{
- extern void gt_ggc_mx (T &);
- for (unsigned i = 0; i < v->length (); i++)
- gt_ggc_mx ((*v)[i]);
+ if (v == NULL)
+ return;
+
+ if (GATHER_STATISTICS)
+ v->pfx_.release_overhead ();
+ ::free (v);
+ v = NULL;
}
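
   As a concrete sketch of these two entry points (the names are
   illustrative; client code would normally reach them through the
   vec_safe_* wrappers defined below rather than calling them directly):

     vec<int, va_heap, vl_embed> *scratch = NULL;
     va_heap::reserve (scratch, 16, true);  /* grow exactly to 16 slots */
     scratch->quick_push (42);
     va_heap::release (scratch);            /* frees and nulls the pointer */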
-/* PCH support for vec_t. */
+/* Allocator type for GC vectors. Notice that we need the structure
+ declaration even if GC is not enabled. */
-template<typename T>
-void
-gt_pch_nx (vec_t<T> *v)
+struct va_gc
{
- extern void gt_pch_nx (T &);
- for (unsigned i = 0; i < v->length (); i++)
- gt_pch_nx ((*v)[i]);
-}
+ /* Use vl_embed as the default layout for GC vectors. Due to GTY
+ limitations, GC vectors must always be pointers, so it is more
+ efficient to use a pointer to the vl_embed layout, rather than
+ using a pointer to a pointer as would be the case with vl_ptr. */
+ typedef vl_embed default_layout;
+
+ template<typename T, typename A>
+ static void reserve (vec<T, A, vl_embed> *&, unsigned, bool
+ CXX_MEM_STAT_INFO);
+
+ template<typename T, typename A>
+ static void release (vec<T, A, vl_embed> *&v) { v = NULL; }
+};
-template<typename T>
-void
-gt_pch_nx (vec_t<T *> *v, gt_pointer_operator op, void *cookie)
-{
- for (unsigned i = 0; i < v->length (); i++)
- op (&((*v)[i]), cookie);
-}
-template<typename T>
+/* Allocator for GC memory. Ensure there are at least RESERVE free
+ slots in V. If EXACT is true, grow exactly, else grow
+ exponentially. As a special case, if the vector had not been
+ allocated and RESERVE is 0, no vector will be created. */
+
+template<typename T, typename A>
void
-gt_pch_nx (vec_t<T> *v, gt_pointer_operator op, void *cookie)
+va_gc::reserve (vec<T, A, vl_embed> *&v, unsigned reserve, bool exact
+ MEM_STAT_DECL)
{
- extern void gt_pch_nx (T *, gt_pointer_operator, void *);
- for (unsigned i = 0; i < v->length (); i++)
- gt_pch_nx (&((*v)[i]), op, cookie);
-}
+ unsigned alloc = vec_prefix::calculate_allocation (v ? &v->pfx_ : 0, reserve,
+ exact);
+ if (!alloc)
+ {
+ ::ggc_free (v);
+ v = NULL;
+ return;
+ }
+ /* Calculate the amount of space we want. */
+ size_t size = vec<T, A, vl_embed>::embedded_size (alloc);
-/* FIXME. Remove these definitions and update all calling sites after
- the handler class for vec_t is implemented. */
-
-/* Vector of integer-like object. */
-#define DEF_VEC_I(T) struct vec_swallow_trailing_semi
-#define DEF_VEC_ALLOC_I(T,A) struct vec_swallow_trailing_semi
-
-/* Vector of pointer to object. */
-#define DEF_VEC_P(T) struct vec_swallow_trailing_semi
-#define DEF_VEC_ALLOC_P(T,A) struct vec_swallow_trailing_semi
-
-/* Vector of object. */
-#define DEF_VEC_O(T) struct vec_swallow_trailing_semi
-#define DEF_VEC_ALLOC_O(T,A) struct vec_swallow_trailing_semi
-
-/* Vectors on the stack. */
-#define DEF_VEC_ALLOC_P_STACK(T) struct vec_swallow_trailing_semi
-#define DEF_VEC_ALLOC_O_STACK(T) struct vec_swallow_trailing_semi
-#define DEF_VEC_ALLOC_I_STACK(T) struct vec_swallow_trailing_semi
-
-/* Vectors of atomic types. Atomic types do not need to have its
- elements marked for GC and PCH. To avoid unnecessary traversals,
- we provide template instantiations for the GC/PCH functions that
- do not traverse the vector.
-
- FIXME cxx-conversion - Once vec_t users are converted this can
- be provided in some other way (e.g., adding an additional template
- parameter to the vec_t class). */
-#define DEF_VEC_A(TYPE) \
-template<typename T> \
-void \
-gt_ggc_mx (vec_t<TYPE> *v ATTRIBUTE_UNUSED) \
-{ \
-} \
- \
-template<typename T> \
-void \
-gt_pch_nx (vec_t<TYPE> *v ATTRIBUTE_UNUSED) \
-{ \
-} \
- \
-template<typename T> \
-void \
-gt_pch_nx (vec_t<TYPE> *v ATTRIBUTE_UNUSED, \
- gt_pointer_operator op ATTRIBUTE_UNUSED, \
- void *cookie ATTRIBUTE_UNUSED) \
-{ \
-} \
-struct vec_swallow_trailing_semi
-
-#define DEF_VEC_ALLOC_A(T,A) struct vec_swallow_trailing_semi
-
-/* Support functions for stack vectors. */
-extern void *vec_stack_p_reserve_exact_1 (int, void *);
-extern void *vec_stack_o_reserve (void *, int, size_t, size_t MEM_STAT_DECL);
-extern void *vec_stack_o_reserve_exact (void *, int, size_t, size_t
- MEM_STAT_DECL);
-extern void vec_stack_free (void *);
+ /* Ask the allocator how much space it will really give us. */
+ size = ggc_round_alloc_size (size);
-extern void dump_vec_loc_statistics (void);
-extern void ggc_free (void *);
-extern void vec_heap_free (void *);
+ /* Adjust the number of slots accordingly. */
+ size_t vec_offset = sizeof (vec_prefix);
+ size_t elt_size = sizeof (T);
+ alloc = (size - vec_offset) / elt_size;
+ /* And finally, recalculate the amount of space we ask for. */
+ size = vec_offset + alloc * elt_size;
-/* API compatibility macros (to be removed). */
-#define VEC_length(T,V) \
- ((V) ? (V)->length () : 0)
+ unsigned nelem = v ? v->length () : 0;
+ v = static_cast <vec<T, A, vl_embed> *> (ggc_realloc_stat (v, size));
+ v->embedded_init (alloc, nelem);
+}
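
   To make the slot recalculation concrete, a worked example assuming
   sizeof (vec_prefix) == 8 and T == int (so elt_size == 4): a request
   for 17 slots asks ggc for 8 + 17*4 = 76 bytes.  If
   ggc_round_alloc_size rounds that up to 80 bytes, the usable slot
   count becomes (80 - 8) / 4 = 18, and the final request is
   8 + 18*4 = 80 bytes, so none of the rounded-up space is wasted.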
-#define VEC_empty(T,V) \
- ((V) ? (V)->empty () : true)
-#define VEC_address(T,V) \
- vec_address<T> (V)
+/* Allocator type for GC vectors whose elements are atomic with
+ respect to collection: the elements need no marking, so allocation
+ and deallocation are inherited unchanged from va_gc. */
+struct va_gc_atomic : va_gc
+{
+};
-/* FIXME. For now, we need to continue expanding VEC_address into a
- function call. Otherwise, the warning machinery for -Wnonnull gets
- confused thinking that VEC_address may return null in calls to
- memcpy and qsort. This will disappear once vec_address becomes
- a member function for a handler class wrapping vec_t. */
-template<typename T>
-static inline T *
-vec_address (vec_t<T> *vec)
+/* Allocator type for stack vectors. */
+struct va_stack
{
- return vec ? vec->address() : NULL;
-}
+ /* Use vl_ptr as the default layout for stack vectors. */
+ typedef vl_ptr default_layout;
-#define VEC_last(T,V) \
- ((V)->last (ALONE_VEC_CHECK_INFO))
+ template<typename T>
+ static void alloc (vec<T, va_stack, vl_ptr>&, unsigned,
+ vec<T, va_stack, vl_embed> *);
-#define VEC_index(T,V,I) \
- ((*(V))[I])
+ template <typename T>
+ static void reserve (vec<T, va_stack, vl_embed> *&, unsigned, bool
+ CXX_MEM_STAT_INFO);
-#define VEC_iterate(T,V,I,P) \
- (vec_t<T>::iterate(V, I, &(P)))
+ template <typename T>
+ static void release (vec<T, va_stack, vl_embed> *&);
+};
+
+/* Helper functions to keep track of vectors allocated on the stack. */
+void register_stack_vec (void *);
+int stack_vec_register_index (void *);
+void unregister_stack_vec (unsigned);
-#define VEC_embedded_size(T,N) \
- (vec_t<T>::embedded_size (N))
+/* Allocate a vector V which uses alloca for the initial allocation.
+ SPACE is space allocated using alloca. NELEMS is the number of
+ entries allocated. */
+
+template<typename T>
+void
+va_stack::alloc (vec<T, va_stack, vl_ptr> &v, unsigned nelems,
+ vec<T, va_stack, vl_embed> *space)
+{
+ v.vec_ = space;
+ register_stack_vec (static_cast<void *> (v.vec_));
+ v.vec_->embedded_init (nelems, 0);
+}
-#define VEC_embedded_init(T,V,N) \
- ((V)->embedded_init (N))
-#define VEC_free(T,A,V) \
- (vec_t<T>::free<A> (&(V)))
+/* Reserve NELEMS slots for a vector initially allocated on the stack.
+ When this happens, we switch back to heap allocation. We remove
+ the vector from stack_vecs, if it is there, since we no longer need
+ to avoid freeing it. If EXACT is true, grow exactly, otherwise
+ grow exponentially. */
-#define VEC_copy(T,A,V) \
- ((V)->copy<A> (ALONE_MEM_STAT_INFO))
+template<typename T>
+void
+va_stack::reserve (vec<T, va_stack, vl_embed> *&v, unsigned nelems, bool exact
+ MEM_STAT_DECL)
+{
+ int ix = stack_vec_register_index (static_cast<void *> (v));
+ if (ix >= 0)
+ unregister_stack_vec (ix);
+ else
+ {
+ /* V is already on the heap. */
+ va_heap::reserve (reinterpret_cast<vec<T, va_heap, vl_embed> *&> (v),
+ nelems, exact);
+ return;
+ }
-#define VEC_space(T,V,R) \
- ((V) ? (V)->space (R VEC_CHECK_INFO) : (R) == 0)
+ /* Move V to the heap. */
+ nelems += v->pfx_.num_;
+ vec<T, va_stack, vl_embed> *oldvec = v;
+ v = NULL;
+ va_heap::reserve (reinterpret_cast<vec<T, va_heap, vl_embed> *&>(v), nelems,
+ exact);
+ if (v && oldvec)
+ {
+ v->pfx_.num_ = oldvec->length ();
+ memcpy (v->data_, oldvec->data_, oldvec->length () * sizeof (T));
+ }
+}
-#define VEC_reserve(T,A,V,R) \
- (vec_t<T>::reserve<A> (&(V), (int)(R) VEC_CHECK_INFO MEM_STAT_INFO))
-#define VEC_reserve_exact(T,A,V,R) \
- (vec_t<T>::reserve_exact<A> (&(V), R VEC_CHECK_INFO MEM_STAT_INFO))
+/* Free a vector allocated on the stack. Don't actually free it if we
+ find it in the hash table. */
-#define VEC_splice(T,DST,SRC) \
- (DST)->splice (SRC VEC_CHECK_INFO)
+template<typename T>
+void
+va_stack::release (vec<T, va_stack, vl_embed> *&v)
+{
+ int ix = stack_vec_register_index (static_cast<void *> (v));
+ if (ix >= 0)
+ {
+ unregister_stack_vec (ix);
+ v = NULL;
+ }
+ else
+ {
+ /* The vector was not on the list of vectors allocated on the stack, so it
+ must be allocated on the heap. */
+ va_heap::release (reinterpret_cast<vec<T, va_heap, vl_embed> *&> (v));
+ }
+}
-#define VEC_safe_splice(T,A,DST,SRC) \
- vec_t<T>::safe_splice<A> (&(DST), SRC VEC_CHECK_INFO MEM_STAT_INFO)
-#define VEC_quick_push(T,V,O) \
- ((V)->quick_push (O VEC_CHECK_INFO))
+/* Generic vector template. Default values for A and L indicate the
+ most commonly used strategies.
-#define VEC_safe_push(T,A,V,O) \
- (vec_t<T>::safe_push<A> (&(V), O VEC_CHECK_INFO MEM_STAT_INFO))
+ FIXME - Ideally, they would all be vl_ptr to encourage using regular
+ instances for vectors, but the existing GTY machinery is limited
+ in that it can only deal with GC objects that are pointers
+ themselves.
-#define VEC_pop(T,V) \
- ((V)->pop (ALONE_VEC_CHECK_INFO))
+ This means that vector operations that need to deal with
+ potentially NULL pointers must be provided as free
+ functions (see the vec_safe_* functions below). */
+template<typename T,
+ typename A = va_heap,
+ typename L = typename A::default_layout>
+class GTY((user)) vec
+{
+};
-#define VEC_truncate(T,V,I) \
- (V \
- ? (V)->truncate ((unsigned)(I) VEC_CHECK_INFO) \
- : gcc_assert ((I) == 0))
-#define VEC_safe_grow(T,A,V,I) \
- (vec_t<T>::safe_grow<A> (&(V), (int)(I) VEC_CHECK_INFO MEM_STAT_INFO))
+/* Embeddable vector. These vectors are suitable to be embedded
+ in other data structures so that they can be pre-allocated in a
+ contiguous memory block.
-#define VEC_safe_grow_cleared(T,A,V,I) \
- (vec_t<T>::safe_grow_cleared<A> (&(V), (int)(I) \
- VEC_CHECK_INFO MEM_STAT_INFO))
+ Embeddable vectors are implemented using the trailing array idiom,
+ thus they are not resizeable without changing the address of the
+ vector object itself. This means you cannot have variables or
+ fields of embeddable vector type -- always use a pointer to a
+ vector. The one exception is the final field of a structure, which
+ could be a vector type.
-#define VEC_replace(T,V,I,O) \
- ((V)->replace ((unsigned)(I), O VEC_CHECK_INFO))
+ You will have to use the embedded_size & embedded_init calls to
+ create such objects, and they will not be resizeable (so the 'safe'
+ allocation variants are not available).
+
+ Properties:
+
+ - The whole vector and control data are allocated in a single
+ contiguous block. It uses the trailing-vector idiom, so
+ allocation must reserve enough space for all the elements
+ in the vector plus its control data.
+ - The vector cannot be re-allocated.
+ - The vector cannot grow nor shrink.
+ - No indirections needed for access/manipulation.
+ - It requires 2 words of storage (prior to vector allocation). */
+
+template<typename T, typename A>
+class GTY((user)) vec<T, A, vl_embed>
+{
+public:
+ unsigned allocated (void) const { return pfx_.alloc_; }
+ unsigned length (void) const { return pfx_.num_; }
+ bool is_empty (void) const { return pfx_.num_ == 0; }
+ T *address (void) { return data_; }
+ const T *address (void) const { return data_; }
+ const T &operator[] (unsigned) const;
+ T &operator[] (unsigned);
+ T &last (void);
+ bool space (unsigned) const;
+ bool iterate (unsigned, T *) const;
+ bool iterate (unsigned, T **) const;
+ vec *copy (ALONE_MEM_STAT_DECL) const;
+ void splice (vec &);
+ void splice (vec *src);
+ T *quick_push (const T &);
+ T &pop (void);
+ void truncate (unsigned);
+ void quick_insert (unsigned, const T &);
+ void ordered_remove (unsigned);
+ void unordered_remove (unsigned);
+ void block_remove (unsigned, unsigned);
+ void qsort (int (*) (const void *, const void *));
+ unsigned lower_bound (T, bool (*)(const T &, const T &)) const;
+ static size_t embedded_size (unsigned);
+ void embedded_init (unsigned, unsigned = 0);
+ void quick_grow (unsigned len);
+ void quick_grow_cleared (unsigned len);
+
+ /* vec class can access our internal data and functions. */
+ template <typename, typename, typename> friend class vec;
+
+ /* The allocator types also need access to our internals. */
+ friend struct va_gc;
+ friend struct va_gc_atomic;
+ friend struct va_heap;
+ friend struct va_stack;
+
+private:
+ vec_prefix pfx_;
+ T data_[1];
+};
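
   For illustration, a hypothetical structure ending in an embedded
   vector, sized and initialized by hand as the comments above describe
   (my_node and the element count of 8 are made up for the sketch):

     struct my_node
     {
       int kind;
       vec<int, va_heap, vl_embed> elts;  /* must be the final field */
     };

     size_t sz = offsetof (my_node, elts)
                 + vec<int, va_heap, vl_embed>::embedded_size (8);
     my_node *n = static_cast<my_node *> (xmalloc (sz));
     n->elts.embedded_init (8);  /* room for 8 elements, 0 active */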
-#define VEC_quick_insert(T,V,I,O) \
- ((V)->quick_insert (I,O VEC_CHECK_INFO))
-#define VEC_safe_insert(T,A,V,I,O) \
- (vec_t<T>::safe_insert<A> (&(V), I, O VEC_CHECK_INFO MEM_STAT_INFO))
+/* Convenience wrapper functions to use when dealing with pointers to
+ embedded vectors. Some functionality for these vectors must be
+ provided via free functions for these reasons:
-#define VEC_ordered_remove(T,V,I) \
- ((V)->ordered_remove (I VEC_CHECK_INFO))
+ 1- The pointer may be NULL (e.g., before initial allocation).
-#define VEC_unordered_remove(T,V,I) \
- ((V)->unordered_remove (I VEC_CHECK_INFO))
+ 2- When the vector needs to grow, it must be reallocated, so
+ the pointer will change its value.
-#define VEC_block_remove(T,V,I,L) \
- ((V)->block_remove (I, L VEC_CHECK_INFO))
+ Because of limitations with the current GC machinery, all vectors
+ in GC memory *must* be pointers. */
-#define VEC_lower_bound(T,V,O,LT) \
- ((V)->lower_bound (O, LT))
+/* Return true if V has space for NELEMS more elements, false
+ otherwise. A NULL vector has room only when NELEMS is zero. */
+template<typename T, typename A>
+inline bool
+vec_safe_space (const vec<T, A, vl_embed> *v, unsigned nelems)
+{
+ return v ? v->space (nelems) : nelems == 0;
+}
-/* Return the number of active elements in this vector. */
-template<typename T>
+/* If V is NULL, return 0. Otherwise, return V->length(). */
+template<typename T, typename A>
inline unsigned
-vec_t<T>::length (void) const
+vec_safe_length (const vec<T, A, vl_embed> *v)
{
- return prefix_.num_;
+ return v ? v->length () : 0;
}
-/* Return true if this vector has no active elements. */
+/* If V is NULL, return NULL. Otherwise, return V->address(). */
+template<typename T, typename A>
+inline T *
+vec_safe_address (vec<T, A, vl_embed> *v)
+{
+ return v ? v->address () : NULL;
+}
+
-template<typename T>
+/* If V is NULL, return true. Otherwise, return V->is_empty(). */
+template<typename T, typename A>
inline bool
-vec_t<T>::empty (void) const
+vec_safe_is_empty (vec<T, A, vl_embed> *v)
{
- return length () == 0;
+ return v ? v->is_empty () : true;
}
-/* Return the address of the array of elements. If you need to
- directly manipulate the array (for instance, you want to feed it
- to qsort), use this accessor. */
+/* If V does not have space for NELEMS elements, call
+ V->reserve(NELEMS, EXACT). */
+template<typename T, typename A>
+inline bool
+vec_safe_reserve (vec<T, A, vl_embed> *&v, unsigned nelems, bool exact = false
+ MEM_STAT_DECL)
+{
+ bool extend = nelems ? !vec_safe_space (v, nelems) : false;
+ if (extend)
+ A::reserve (v, nelems, exact PASS_MEM_STAT);
+ return extend;
+}
-template<typename T>
-inline T *
-vec_t<T>::address (void)
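+/* As vec_safe_reserve, but grow exactly, with no extra headroom. */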
+template<typename T, typename A>
+inline bool
+vec_safe_reserve_exact (vec<T, A, vl_embed> *&v, unsigned nelems MEM_STAT_DECL)
{
- return vec_;
+ return vec_safe_reserve (v, nelems, true PASS_MEM_STAT);
}
-/* Get the final element of the vector, which must not be empty. */
+/* Allocate GC memory for V with space for NELEMS slots. If NELEMS
+ is 0, V is initialized to NULL. */
-template<typename T>
-T &
-vec_t<T>::last (ALONE_VEC_CHECK_DECL)
+template<typename T, typename A>
+inline void
+vec_alloc (vec<T, A, vl_embed> *&v, unsigned nelems MEM_STAT_DECL)
{
- VEC_ASSERT (prefix_.num_, "last", T, base);
- return (*this)[prefix_.num_ - 1];
+ v = NULL;
+ vec_safe_reserve (v, nelems, false PASS_MEM_STAT);
}
-/* Index into vector. Return the IX'th element. IX must be in the
- domain of the vector. */
+/* Free the GC memory allocated by vector V and set it to NULL. */
-template<typename T>
-const T &
-vec_t<T>::operator[] (unsigned ix) const
+template<typename T, typename A>
+inline void
+vec_free (vec<T, A, vl_embed> *&v)
{
- gcc_assert (ix < prefix_.num_);
- return vec_[ix];
+ A::release (v);
}
-template<typename T>
-T &
-vec_t<T>::operator[] (unsigned ix)
+
+/* Grow V to length LEN. Allocate it, if necessary. */
+template<typename T, typename A>
+inline void
+vec_safe_grow (vec<T, A, vl_embed> *&v, unsigned len MEM_STAT_DECL)
{
- gcc_assert (ix < prefix_.num_);
- return vec_[ix];
+ unsigned oldlen = vec_safe_length (v);
+ gcc_checking_assert (len >= oldlen);
+ vec_safe_reserve_exact (v, len - oldlen PASS_MEM_STAT);
+ v->quick_grow (len);
}
-/* Return iteration condition and update PTR to point to the IX'th
- element of VEC. Use this to iterate over the elements of a vector
- as follows,
+/* Grow V to length LEN, allocating it if necessary. The new
+ elements are initialized to zero. */
+template<typename T, typename A>
+inline void
+vec_safe_grow_cleared (vec<T, A, vl_embed> *&v, unsigned len MEM_STAT_DECL)
+{
+ unsigned oldlen = vec_safe_length (v);
+ vec_safe_grow (v, len PASS_MEM_STAT);
+ memset (&(v->address()[oldlen]), 0, sizeof (T) * (len - oldlen));
+}
- for (ix = 0; vec_t<T>::iterate(v, ix, &ptr); ix++)
- continue;
-
- FIXME. This is a static member function because if VEC is NULL,
- PTR should be initialized to NULL. This will become a regular
- member function of the handler class. */
-template<typename T>
-bool
-vec_t<T>::iterate (const vec_t<T> *vec, unsigned ix, T *ptr)
+/* If V is NULL return false, otherwise return V->iterate(IX, PTR). */
+template<typename T, typename A>
+inline bool
+vec_safe_iterate (const vec<T, A, vl_embed> *v, unsigned ix, T **ptr)
{
- if (vec && ix < vec->prefix_.num_)
- {
- *ptr = vec->vec_[ix];
- return true;
- }
+ if (v)
+ return v->iterate (ix, ptr);
else
{
*ptr = 0;
return false;
}
}
-
-/* Return iteration condition and update *PTR to point to the
- IX'th element of VEC. Use this to iterate over the elements of a
- vector as follows,
-
- for (ix = 0; v->iterate(ix, &ptr); ix++)
- continue;
-
- This variant is for vectors of objects. FIXME, to be removed
- once the distinction between vec_t<T> and vec_t<T *> disappears. */
-
-template<typename T>
-bool
-vec_t<T>::iterate (const vec_t<T> *vec, unsigned ix, T **ptr)
+template<typename T, typename A>
+inline bool
+vec_safe_iterate (const vec<T, A, vl_embed> *v, unsigned ix, T *ptr)
{
- if (vec && ix < vec->prefix_.num_)
- {
- *ptr = CONST_CAST (T *, &vec->vec_[ix]);
- return true;
- }
+ if (v)
+ return v->iterate (ix, ptr);
else
{
*ptr = 0;
return false;
}
}
-/* Convenience macro for forward iteration. */
+/* If V has no room for one more element, reallocate it. Then call
+ V->quick_push(OBJ). */
+template<typename T, typename A>
+inline T *
+vec_safe_push (vec<T, A, vl_embed> *&v, const T &obj MEM_STAT_DECL)
+{
+ vec_safe_reserve (v, 1, false PASS_MEM_STAT);
+ return v->quick_push (obj);
+}
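
   A minimal usage sketch for the NULL-safe wrappers, assuming a heap
   vector: the pointer starts out NULL, vec_safe_push allocates on
   first use, and V is updated in place across reallocations.

     vec<int, va_heap, vl_embed> *v = NULL;
     for (int i = 0; i < 100; i++)
       vec_safe_push (v, i);
     gcc_assert (vec_safe_length (v) == 100);
     vec_free (v);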
-#define FOR_EACH_VEC_ELT(T, V, I, P) \
- for (I = 0; VEC_iterate (T, (V), (I), (P)); ++(I))
-/* Likewise, but start from FROM rather than 0. */
+/* If V has no room for one more element, reallocate it. Then call
+ V->quick_insert(IX, OBJ). */
+template<typename T, typename A>
+inline void
+vec_safe_insert (vec<T, A, vl_embed> *&v, unsigned ix, const T &obj
+ MEM_STAT_DECL)
+{
+ vec_safe_reserve (v, 1, false PASS_MEM_STAT);
+ v->quick_insert (ix, obj);
+}
-#define FOR_EACH_VEC_ELT_FROM(T, V, I, P, FROM) \
- for (I = (FROM); VEC_iterate (T, (V), (I), (P)); ++(I))
-/* Convenience macro for reverse iteration. */
+/* If V is NULL, do nothing. Otherwise, call V->truncate(SIZE). */
+template<typename T, typename A>
+inline void
+vec_safe_truncate (vec<T, A, vl_embed> *v, unsigned size)
+{
+ if (v)
+ v->truncate (size);
+}
-#define FOR_EACH_VEC_ELT_REVERSE(T, V, I, P) \
- for (I = VEC_length (T, (V)) - 1; \
- VEC_iterate (T, (V), (I), (P)); \
- (I)--)
+/* If SRC is not NULL, return a pointer to a copy of it. */
+template<typename T, typename A>
+inline vec<T, A, vl_embed> *
+vec_safe_copy (vec<T, A, vl_embed> *src)
+{
+ return src ? src->copy () : NULL;
+}
-/* Return the number of bytes needed to embed an instance of vec_t inside
- another data structure.
+/* Copy the elements from SRC to the end of DST as if by memcpy.
+ Reallocate DST, if necessary. */
+template<typename T, typename A>
+inline void
+vec_safe_splice (vec<T, A, vl_embed> *&dst, vec<T, A, vl_embed> *src
+ MEM_STAT_DECL)
+{
+ unsigned src_len = vec_safe_length (src);
+ if (src_len)
+ {
+ vec_safe_reserve_exact (dst, vec_safe_length (dst) + src_len
+ PASS_MEM_STAT);
+ dst->splice (*src);
+ }
+}
- Use these methods to determine the required size and initialization
- of a vector V of type T embedded within another structure (as the
- final member):
- size_t vec_t<T>::embedded_size<T> (int reserve);
- void v->embedded_init(int reserve, int active);
+/* Index into vector. Return the IX'th element. IX must be in the
+ domain of the vector. */
- These allow the caller to perform the memory allocation. */
+template<typename T, typename A>
+inline const T &
+vec<T, A, vl_embed>::operator[] (unsigned ix) const
+{
+ gcc_checking_assert (ix < pfx_.num_);
+ return data_[ix];
+}
-template<typename T>
-size_t
-vec_t<T>::embedded_size (int nelems)
+template<typename T, typename A>
+inline T &
+vec<T, A, vl_embed>::operator[] (unsigned ix)
{
- return offsetof (vec_t<T>, vec_) + nelems * sizeof (T);
+ gcc_checking_assert (ix < pfx_.num_);
+ return data_[ix];
}
-/* Initialize the vector to contain room for NELEMS elements and
- ACTIVE active elements. */
+/* Get the final element of the vector, which must not be empty. */
-template<typename T>
-void
-vec_t<T>::embedded_init (int nelems, int active)
+template<typename T, typename A>
+inline T &
+vec<T, A, vl_embed>::last (void)
{
- prefix_.num_ = active;
- prefix_.alloc_ = nelems;
+ gcc_checking_assert (pfx_.num_ > 0);
+ return (*this)[pfx_.num_ - 1];
}
-/* Allocate a new vector with space for RESERVE objects. If RESERVE
- is zero, NO vector is created.
-
- Note that this allocator must always be a macro:
-
- We support a vector which starts out with space on the stack and
- switches to heap space when forced to reallocate. This works a
- little differently. In the case of stack vectors, vec_alloc will
- expand to a call to vec_alloc_1 that calls XALLOCAVAR to request the
- initial allocation. This uses alloca to get the initial space.
- Since alloca can not be usefully called in an inline function,
- vec_alloc must always be a macro.
+/* If this vector has space for NELEMS additional entries, return
+ true. You usually only need to use this if you are doing your
+ own vector reallocation, for instance on an embedded vector. This
+ returns true in exactly the same circumstances that vec::reserve
+ will. */
- Important limitations of stack vectors:
+template<typename T, typename A>
+inline bool
+vec<T, A, vl_embed>::space (unsigned nelems) const
+{
+ return pfx_.alloc_ - pfx_.num_ >= nelems;
+}
- - Only the initial allocation will be made using alloca, so pass a
- reasonable estimate that doesn't use too much stack space; don't
- pass zero.
- - Don't return a stack-allocated vector from the function which
- allocated it. */
+/* Return iteration condition and update PTR to point to the IX'th
+ element of this vector. Use this to iterate over the elements of a
+ vector as follows,
-#define VEC_alloc(T,A,N) \
- ((A == stack) \
- ? vec_t<T>::alloc (N, XALLOCAVAR (vec_t<T>, vec_t<T>::embedded_size (N)))\
- : vec_t<T>::alloc<A> (N MEM_STAT_INFO))
+ for (ix = 0; vec<T, A>::iterate(v, ix, &ptr); ix++)
+ continue; */
-template<typename T>
-template<enum vec_allocation_t A>
-vec_t<T> *
-vec_t<T>::alloc (int nelems MEM_STAT_DECL)
+template<typename T, typename A>
+inline bool
+vec<T, A, vl_embed>::iterate (unsigned ix, T *ptr) const
{
- return reserve_exact<A> ((vec_t<T> *) NULL, nelems PASS_MEM_STAT);
+ if (ix < pfx_.num_)
+ {
+ *ptr = data_[ix];
+ return true;
+ }
+ else
+ {
+ *ptr = 0;
+ return false;
+ }
}
-template<typename T>
-vec_t<T> *
-vec_t<T>::alloc (int nelems, vec_t<T> *space)
-{
- return static_cast <vec_t<T> *> (vec_stack_p_reserve_exact_1 (nelems, space));
-}
+/* Return iteration condition and update *PTR to point to the
+ IX'th element of this vector. Use this to iterate over the
+ elements of a vector as follows,
-/* Free vector *V and set it to NULL. */
+ for (ix = 0; v->iterate(ix, &ptr); ix++)
+ continue;
-template<typename T>
-template<enum vec_allocation_t A>
-void
-vec_t<T>::free (vec_t<T> **v)
+ This variant is for vectors of objects. */
+
+template<typename T, typename A>
+inline bool
+vec<T, A, vl_embed>::iterate (unsigned ix, T **ptr) const
{
- if (*v)
+ if (ix < pfx_.num_)
+ {
+ *ptr = CONST_CAST (T *, &data_[ix]);
+ return true;
+ }
+ else
{
- if (A == heap)
- vec_heap_free (*v);
- else if (A == gc)
- ggc_free (*v);
- else if (A == stack)
- vec_stack_free (*v);
+ *ptr = 0;
+ return false;
}
- *v = NULL;
}
-/* Return a copy of this vector. The new and old vectors need not be
- allocated by the same mechanism. */
+/* Return a pointer to a copy of this vector. */
-template<typename T>
-template<enum vec_allocation_t A>
-vec_t<T> *
-vec_t<T>::copy (ALONE_MEM_STAT_DECL)
+template<typename T, typename A>
+inline vec<T, A, vl_embed> *
+vec<T, A, vl_embed>::copy (ALONE_MEM_STAT_DECL) const
{
- unsigned len = VEC_length (T, this);
- vec_t<T> *new_vec = NULL;
-
+ vec<T, A, vl_embed> *new_vec = NULL;
+ unsigned len = length ();
if (len)
{
- new_vec = reserve_exact<A> (static_cast<vec_t<T> *> (NULL),
- len PASS_MEM_STAT);
+ vec_alloc (new_vec, len PASS_MEM_STAT);
new_vec->embedded_init (len, len);
- memcpy (new_vec->address (), vec_, sizeof (T) * len);
+ memcpy (new_vec->address(), data_, sizeof (T) * len);
}
-
return new_vec;
}
-/* If this vector has space for RESERVE additional entries, return
- true. You usually only need to use this if you are doing your
- own vector reallocation, for instance on an embedded vector. This
- returns true in exactly the same circumstances that vec_reserve
- will. */
+/* Copy the elements from SRC to the end of this vector as if by memcpy.
+ The vector must have sufficient headroom available. */
-template<typename T>
-bool
-vec_t<T>::space (int nelems VEC_CHECK_DECL)
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::splice (vec<T, A, vl_embed> &src)
+{
+ unsigned len = src.length();
+ if (len)
+ {
+ gcc_checking_assert (space (len));
+ memcpy (address() + length(), src.address(), len * sizeof (T));
+ pfx_.num_ += len;
+ }
+}
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::splice (vec<T, A, vl_embed> *src)
{
- VEC_ASSERT (nelems >= 0, "space", T, base);
- return prefix_.alloc_ - prefix_.num_ >= static_cast <unsigned> (nelems);
+ if (src)
+ splice (*src);
}
-/* Ensure that the vector **VEC has at least RESERVE slots available. This
- will create additional headroom. Note this can cause **VEC to
- be reallocated. Returns true iff reallocation actually occurred. */
+/* Push OBJ (a new element) onto the end of the vector. There must be
+ sufficient space in the vector. Return a pointer to the slot
+ where OBJ was inserted. */
-template<typename T>
-template<enum vec_allocation_t A>
-bool
-vec_t<T>::reserve (vec_t<T> **vec, int nelems VEC_CHECK_DECL MEM_STAT_DECL)
+template<typename T, typename A>
+inline T *
+vec<T, A, vl_embed>::quick_push (const T &obj)
{
- bool extend = (*vec) ? !(*vec)->space (nelems VEC_CHECK_PASS) : nelems != 0;
-
- if (extend)
- *vec = reserve<A> (*vec, nelems PASS_MEM_STAT);
-
- return extend;
+ gcc_checking_assert (space (1));
+ T *slot = &data_[pfx_.num_++];
+ *slot = obj;
+ return slot;
}
-/* Ensure that **VEC has at least NELEMS slots available. This will not
- create additional headroom. Note this can cause VEC to be
- reallocated. Returns true iff reallocation actually occurred. */
+/* Pop and return the last element off the end of the vector. */
-template<typename T>
-template<enum vec_allocation_t A>
-bool
-vec_t<T>::reserve_exact (vec_t<T> **vec, int nelems VEC_CHECK_DECL
- MEM_STAT_DECL)
+template<typename T, typename A>
+inline T &
+vec<T, A, vl_embed>::pop (void)
{
- bool extend = (*vec) ? !(*vec)->space (nelems VEC_CHECK_PASS) : nelems != 0;
+ gcc_checking_assert (length () > 0);
+ return data_[--pfx_.num_];
+}
- if (extend)
- *vec = reserve_exact<A> (*vec, nelems PASS_MEM_STAT);
- return extend;
+/* Set the length of the vector to SIZE. The new length must be less
+ than or equal to the current length. This is an O(1) operation. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::truncate (unsigned size)
+{
+ gcc_checking_assert (length () >= size);
+ pfx_.num_ = size;
}
-/* Copy the elements from SRC to the end of this vector as if by memcpy.
- SRC and this vector need not be allocated with the same mechanism,
- although they most often will be. This vector is assumed to have
- sufficient headroom available. */
+/* Insert an element, OBJ, at the IXth position of this vector. There
+ must be sufficient space. */
-template<typename T>
-void
-vec_t<T>::splice (vec_t<T> *src VEC_CHECK_DECL)
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::quick_insert (unsigned ix, const T &obj)
{
- if (src)
- {
- unsigned len = VEC_length (T, src);
- VEC_ASSERT (VEC_length (T, this) + len <= prefix_.alloc_, "splice", T,
- base);
- memcpy (address () + VEC_length (T, this),
- src->address (),
- len * sizeof (T));
- prefix_.num_ += len;
- }
+ gcc_checking_assert (length () < allocated ());
+ gcc_checking_assert (ix <= length ());
+ T *slot = &data_[ix];
+ memmove (slot + 1, slot, (pfx_.num_++ - ix) * sizeof (T));
+ *slot = obj;
}
-/* Copy the elements in SRC to the end of DST as if by memcpy. DST and
- SRC need not be allocated with the same mechanism, although they most
- often will be. DST need not have sufficient headroom and will be
- reallocated if needed. */
+/* Remove an element from the IXth position of this vector. Ordering of
+ remaining elements is preserved. This is an O(N) operation due to
+ memmove. */
-template<typename T>
-template<enum vec_allocation_t A>
-void
-vec_t<T>::safe_splice (vec_t<T> **dst, vec_t<T> *src VEC_CHECK_DECL
- MEM_STAT_DECL)
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::ordered_remove (unsigned ix)
{
- if (src)
+ gcc_checking_assert (ix < length());
+ T *slot = &data_[ix];
+ memmove (slot, slot + 1, (--pfx_.num_ - ix) * sizeof (T));
+}
+
+
+/* Remove an element from the IXth position of this vector. Ordering of
+ remaining elements is destroyed. This is an O(1) operation. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::unordered_remove (unsigned ix)
+{
+ gcc_checking_assert (ix < length());
+ data_[ix] = data_[--pfx_.num_];
+}
+
+
+/* Remove LEN elements starting at the IXth. Ordering is retained.
+ This is an O(N) operation due to memmove. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::block_remove (unsigned ix, unsigned len)
+{
+ gcc_checking_assert (ix + len <= length());
+ T *slot = &data_[ix];
+ pfx_.num_ -= len;
+ memmove (slot, slot + len, (pfx_.num_ - ix) * sizeof (T));
+}
+
+
+/* Sort the contents of this vector with qsort. CMP is the comparison
+ function to pass to qsort. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::qsort (int (*cmp) (const void *, const void *))
+{
+ ::qsort (address(), length(), sizeof (T), cmp);
+}
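
   For illustration, a comparator of the form ::qsort expects, written
   to avoid signed overflow (cmp_int is an illustrative name):

     static int
     cmp_int (const void *pa, const void *pb)
     {
       const int a = *(const int *) pa;
       const int b = *(const int *) pb;
       return (a > b) - (a < b);
     }

     /* ... then, for any vector V of ints: v->qsort (cmp_int);  */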
+
+
+/* Find and return the first position in which OBJ could be inserted
+ without changing the ordering of this vector. LESSTHAN is a
+ function that returns true if the first argument is strictly less
+ than the second. */
+
+template<typename T, typename A>
+unsigned
+vec<T, A, vl_embed>::lower_bound (T obj, bool (*lessthan)(const T &, const T &))
+ const
+{
+ unsigned int len = length ();
+ unsigned int half, middle;
+ unsigned int first = 0;
+ while (len > 0)
{
- reserve_exact<A> (dst, VEC_length (T, src) VEC_CHECK_PASS MEM_STAT_INFO);
- (*dst)->splice (src VEC_CHECK_PASS);
+ half = len / 2;
+ middle = first;
+ middle += half;
+ T middle_elem = (*this)[middle];
+ if (lessthan (middle_elem, obj))
+ {
+ first = middle;
+ ++first;
+ len = len - half - 1;
+ }
+ else
+ len = half;
}
+ return first;
}
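
   A hedged sketch pairing lower_bound with quick_insert to keep a
   vector sorted; lt_int, V and X are illustrative, and the vector must
   already have headroom for the insertion:

     static bool
     lt_int (const int &a, const int &b)
     {
       return a < b;
     }

     /* For an embedded vector V with headroom and a new element X:  */
     unsigned pos = v->lower_bound (x, lt_int);
     v->quick_insert (pos, x);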
-
-/* Push OBJ (a new element) onto the end of the vector. There must be
- sufficient space in the vector. Return a pointer to the slot
- where OBJ was inserted. */
+/* Return the number of bytes needed to embed an instance of an
+ embeddable vec inside another data structure.
-template<typename T>
-T *
-vec_t<T>::quick_push (const T &obj VEC_CHECK_DECL)
+ Use these methods to determine the required size and initialization
+ of a vector V of type T embedded within another structure (as the
+ final member):
+
+ size_t vec<T, A, vl_embed>::embedded_size (unsigned alloc);
+ void v->embedded_init(unsigned alloc, unsigned num);
+
+ These allow the caller to perform the memory allocation. */
+
+template<typename T, typename A>
+inline size_t
+vec<T, A, vl_embed>::embedded_size (unsigned alloc)
{
- VEC_ASSERT (prefix_.num_ < prefix_.alloc_, "push", T, base);
- T *slot = &vec_[prefix_.num_++];
- *slot = obj;
- return slot;
+ typedef vec<T, A, vl_embed> vec_embedded;
+ return offsetof (vec_embedded, data_) + alloc * sizeof (T);
}
-/* Push a new element OBJ onto the end of VEC. Reallocates VEC, if
- needed. Return a pointer to the slot where OBJ was inserted. */
+/* Initialize the vector to contain room for ALLOC elements and
+ NUM active elements. */
-template<typename T>
-template<enum vec_allocation_t A>
-T *
-vec_t<T>::safe_push (vec_t<T> **vec, const T &obj VEC_CHECK_DECL MEM_STAT_DECL)
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::embedded_init (unsigned alloc, unsigned num)
{
- reserve<A> (vec, 1 VEC_CHECK_PASS PASS_MEM_STAT);
- return (*vec)->quick_push (obj VEC_CHECK_PASS);
+ pfx_.alloc_ = alloc;
+ pfx_.num_ = num;
}
-/* Pop and return the last element off the end of the vector. */
-
+/* Grow the vector to a specific length. LEN must be as long or longer than
+ the current length. The new elements are uninitialized. */
-template<typename T>
-T &
-vec_t<T>::pop (ALONE_VEC_CHECK_DECL)
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::quick_grow (unsigned len)
{
- VEC_ASSERT (prefix_.num_, "pop", T, base);
- return vec_[--prefix_.num_];
+ gcc_checking_assert (length () <= len && len <= pfx_.alloc_);
+ pfx_.num_ = len;
}
-/* Set the length of the vector to LEN. The new length must be less
- than or equal to the current length. This is an O(1) operation. */
+/* Grow the vector to a specific length. LEN must be as long or longer than
+ the current length. The new elements are initialized to zero. */
-template<typename T>
-void
-vec_t<T>::truncate (unsigned size VEC_CHECK_DECL)
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::quick_grow_cleared (unsigned len)
{
- VEC_ASSERT (prefix_.num_ >= size, "truncate", T, base);
- prefix_.num_ = size;
+ unsigned oldlen = length ();
+ quick_grow (len);
+ memset (&(address()[oldlen]), 0, sizeof (T) * (len - oldlen));
}
-/* Grow the vector VEC to a specific length. The LEN must be as
- long or longer than the current length. The new elements are
- uninitialized. */
+/* Garbage collection support for vec<T, A, vl_embed>. */
template<typename T>
-template<enum vec_allocation_t A>
void
-vec_t<T>::safe_grow (vec_t<T> **vec, int size VEC_CHECK_DECL MEM_STAT_DECL)
+gt_ggc_mx (vec<T, va_gc> *v)
{
- VEC_ASSERT (size >= 0 && VEC_length (T, *vec) <= (unsigned)size,
- "grow", T, A);
- reserve_exact<A> (vec, size - (int)VEC_length (T, *vec)
- VEC_CHECK_PASS PASS_MEM_STAT);
- (*vec)->prefix_.num_ = size;
+ extern void gt_ggc_mx (T &);
+ for (unsigned i = 0; i < v->length (); i++)
+ gt_ggc_mx ((*v)[i]);
}
-
-/* Grow the vector *VEC to a specific length. The LEN must be as
- long or longer than the current length. The new elements are
- initialized to zero. */
-
template<typename T>
-template<enum vec_allocation_t A>
void
-vec_t<T>::safe_grow_cleared (vec_t<T> **vec, int size VEC_CHECK_DECL
- MEM_STAT_DECL)
+gt_ggc_mx (vec<T, va_gc_atomic, vl_embed> *v ATTRIBUTE_UNUSED)
{
- int oldsize = VEC_length (T, *vec);
- safe_grow<A> (vec, size VEC_CHECK_PASS PASS_MEM_STAT);
- memset (&((*vec)->address ()[oldsize]), 0, sizeof (T) * (size - oldsize));
+ /* Nothing to do. Vectors of atomic types wrt GC do not need to
+ be traversed. */
}
-/* Replace the IXth element of this vector with a new value, VAL. */
+/* PCH support for vec<T, A, vl_embed>. */
-template<typename T>
+template<typename T, typename A>
void
-vec_t<T>::replace (unsigned ix, const T &obj VEC_CHECK_DECL)
+gt_pch_nx (vec<T, A, vl_embed> *v)
{
- VEC_ASSERT (ix < prefix_.num_, "replace", T, base);
- vec_[ix] = obj;
+ extern void gt_pch_nx (T &);
+ for (unsigned i = 0; i < v->length (); i++)
+ gt_pch_nx ((*v)[i]);
}
+template<typename T, typename A>
+void
+gt_pch_nx (vec<T *, A, vl_embed> *v, gt_pointer_operator op, void *cookie)
+{
+ for (unsigned i = 0; i < v->length (); i++)
+ op (&((*v)[i]), cookie);
+}
-/* Insert an element, OBJ, at the IXth position of VEC. There must be
- sufficient space. */
-
-template<typename T>
+template<typename T, typename A>
void
-vec_t<T>::quick_insert (unsigned ix, const T &obj VEC_CHECK_DECL)
+gt_pch_nx (vec<T, A, vl_embed> *v, gt_pointer_operator op, void *cookie)
{
- VEC_ASSERT (prefix_.num_ < prefix_.alloc_, "insert", T, base);
- VEC_ASSERT (ix <= prefix_.num_, "insert", T, base);
- T *slot = &vec_[ix];
- memmove (slot + 1, slot, (prefix_.num_++ - ix) * sizeof (T));
- *slot = obj;
+ extern void gt_pch_nx (T *, gt_pointer_operator, void *);
+ for (unsigned i = 0; i < v->length (); i++)
+ gt_pch_nx (&((*v)[i]), op, cookie);
}
-/* Insert an element, OBJ, at the IXth position of VEC. Reallocate
- VEC, if necessary. */
+/* Space efficient vector. These vectors can grow dynamically and are
+ allocated together with their control data. They are suited to be
+ included in data structures. Prior to initial allocation, they
+ only take a single word of storage.
+
+ These vectors are implemented as a pointer to an embeddable vector.
+ The semantics allow for this pointer to be NULL to represent empty
+ vectors. This way, empty vectors occupy minimal space in the
+ structure containing them.
+
+ Properties:
+
+ - The whole vector and control data are allocated in a single
+ contiguous block.
+ - The whole vector may be re-allocated.
+ - Vector data may grow and shrink.
+ - Access and manipulation requires a pointer test and
+ indirection.
+ - It requires 1 word of storage (prior to vector allocation).
+
+
+ Limitations:
+
+ These vectors must be PODs because they are stored in unions
+ (http://en.wikipedia.org/wiki/Plain_old_data_structures).
+ As long as we use C++03, we cannot have constructors nor
+ destructors in classes that are stored in unions. */
+
+template<typename T, typename A>
+class vec<T, A, vl_ptr>
+{
+public:
+ /* Memory allocation and deallocation for the embedded vector.
+ Needed because we cannot have proper ctors/dtors defined. */
+ void create (unsigned nelems CXX_MEM_STAT_INFO);
+ void release (void);
+
+ /* Vector operations. */
+ bool exists (void) const
+ { return vec_ != NULL; }
+
+ bool is_empty (void) const
+ { return vec_ ? vec_->is_empty() : true; }
+
+ unsigned length (void) const
+ { return vec_ ? vec_->length() : 0; }
+
+ T *address (void)
+ { return vec_ ? vec_->data_ : NULL; }
+
+ const T *address (void) const
+ { return vec_ ? vec_->data_ : NULL; }
+
+ const T &operator[] (unsigned ix) const
+ { return (*vec_)[ix]; }
+
+ bool operator!=(const vec &other) const
+ { return !(*this == other); }
+
+ bool operator==(const vec &other) const
+ { return address() == other.address(); }
+
+ T &operator[] (unsigned ix)
+ { return (*vec_)[ix]; }
+
+ T &last (void)
+ { return vec_->last(); }
+
+ bool space (unsigned nelems) const
+ { return vec_ ? vec_->space (nelems) : nelems == 0; }
+
+ bool iterate (unsigned ix, T *p) const;
+ bool iterate (unsigned ix, T **p) const;
+ vec copy (ALONE_CXX_MEM_STAT_INFO) const;
+ bool reserve (unsigned, bool = false CXX_MEM_STAT_INFO);
+ bool reserve_exact (unsigned CXX_MEM_STAT_INFO);
+ void splice (vec &);
+ void safe_splice (vec & CXX_MEM_STAT_INFO);
+ T *quick_push (const T &);
+ T *safe_push (const T &CXX_MEM_STAT_INFO);
+ T &pop (void);
+ void truncate (unsigned);
+ void safe_grow (unsigned CXX_MEM_STAT_INFO);
+ void safe_grow_cleared (unsigned CXX_MEM_STAT_INFO);
+ void quick_grow (unsigned);
+ void quick_grow_cleared (unsigned);
+ void quick_insert (unsigned, const T &);
+ void safe_insert (unsigned, const T & CXX_MEM_STAT_INFO);
+ void ordered_remove (unsigned);
+ void unordered_remove (unsigned);
+ void block_remove (unsigned, unsigned);
+ void qsort (int (*) (const void *, const void *));
+ unsigned lower_bound (T, bool (*)(const T &, const T &)) const;
+
+ template<typename T1>
+ friend void va_stack::alloc(vec<T1, va_stack, vl_ptr>&, unsigned,
+ vec<T1, va_stack, vl_embed> *);
+
+private:
+ vec<T, A, vl_embed> *vec_;
+};
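
   A minimal sketch of this usage model with the default heap
   allocator (handle is an illustrative consumer, not part of the API):

     extern void handle (int);

     vec<int> work = vec<int>();  /* PODs have no ctor; value-initialize */
     work.create (16);            /* allocate the embedded vector */
     work.safe_push (1);
     work.safe_push (2);
     while (!work.is_empty ())
       handle (work.pop ());
     work.release ();             /* no dtor either; free explicitly */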
+
+
+/* Empty specialization for GC allocation. This will prevent GC
+ vectors from using the vl_ptr layout. FIXME: This is needed to
+ circumvent limitations in the GTY machinery. */
template<typename T>
-template<enum vec_allocation_t A>
-void
-vec_t<T>::safe_insert (vec_t<T> **vec, unsigned ix, const T &obj VEC_CHECK_DECL
- MEM_STAT_DECL)
+class vec<T, va_gc, vl_ptr>
{
- reserve<A> (vec, 1 VEC_CHECK_PASS PASS_MEM_STAT);
- (*vec)->quick_insert (ix, obj VEC_CHECK_PASS);
-}
+};
-/* Remove an element from the IXth position of this vector. Ordering of
- remaining elements is preserved. This is an O(N) operation due to
- a memmove. */
+/* Allocate heap memory for pointer V and create the internal vector
+ with space for NELEMS elements. If NELEMS is 0, the internal
+ vector is initialized to empty. */
template<typename T>
-void
-vec_t<T>::ordered_remove (unsigned ix VEC_CHECK_DECL)
+inline void
+vec_alloc (vec<T> *&v, unsigned nelems MEM_STAT_DECL)
{
- VEC_ASSERT (ix < prefix_.num_, "remove", T, base);
- T *slot = &vec_[ix];
- memmove (slot, slot + 1, (--prefix_.num_ - ix) * sizeof (T));
+ v = new vec<T>;
+ v->create (nelems PASS_MEM_STAT);
}
-/* Remove an element from the IXth position of VEC. Ordering of
- remaining elements is destroyed. This is an O(1) operation. */
+/* Conditionally allocate heap memory for VEC and its internal vector. */
template<typename T>
-void
-vec_t<T>::unordered_remove (unsigned ix VEC_CHECK_DECL)
+inline void
+vec_check_alloc (vec<T, va_heap> *&vec, unsigned nelems MEM_STAT_DECL)
{
- VEC_ASSERT (ix < prefix_.num_, "remove", T, base);
- vec_[ix] = vec_[--prefix_.num_];
+ if (!vec)
+ vec_alloc (vec, nelems PASS_MEM_STAT);
}
-/* Remove LEN elements starting at the IXth. Ordering is retained.
- This is an O(N) operation due to memmove. */
+/* Free the heap memory allocated by vector V and set it to NULL. */
template<typename T>
-void
-vec_t<T>::block_remove (unsigned ix, unsigned len VEC_CHECK_DECL)
+inline void
+vec_free (vec<T> *&v)
{
- VEC_ASSERT (ix + len <= prefix_.num_, "block_remove", T, base);
- T *slot = &vec_[ix];
- prefix_.num_ -= len;
- memmove (slot, slot + len, (prefix_.num_ - ix) * sizeof (T));
+ if (v == NULL)
+ return;
+
+ v->release ();
+ delete v;
+ v = NULL;
}
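
   Usage sketch for these heap helpers:

     vec<int> *pv = NULL;
     vec_check_alloc (pv, 8);  /* allocates, since PV is NULL */
     pv->safe_push (7);
     vec_free (pv);            /* releases the contents, deletes PV,
                                  and sets it back to NULL */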
-/* Sort the contents of V with qsort. Use CMP as the comparison function. */
-#define VEC_qsort(T,V,CMP) \
- qsort (VEC_address (T, V), VEC_length (T, V), sizeof (T), CMP)
+/* Allocate a new stack vector with space for exactly NELEMS objects.
+ If NELEMS is zero, NO vector is created.
-/* Find and return the first position in which OBJ could be inserted
- without changing the ordering of this vector. LESSTHAN is a
- function that returns true if the first argument is strictly less
- than the second. */
+ For the stack allocator, no memory is really allocated. The vector
+ is initialized to be at address SPACE and contain NELEMS slots.
+ Memory allocation actually occurs in the expansion of VEC_alloc.
-template<typename T>
-unsigned
-vec_t<T>::lower_bound (T obj, bool (*lessthan)(const T &, const T &)) const
+ Usage notes:
+
+ * This does not allocate an instance of vec<T, A>. It allocates the
+ actual vector of elements (i.e., vec<T, A, vl_embed>) inside a
+ vec<T, A> instance.
+
+ * This allocator must always be a macro:
+
+ We support a vector which starts out with space on the stack and
+ switches to heap space when forced to reallocate. This works a
+ little differently. In the case of stack vectors, vec_alloc will
+ expand to a call to vec_alloc_1 that calls XALLOCAVAR to request
+ the initial allocation. This uses alloca to get the initial
+ space. Since alloca can not be usefully called in an inline
+ function, vec_alloc must always be a macro.
+
+ Important limitations of stack vectors:
+
+ - Only the initial allocation will be made using alloca, so pass
+ a reasonable estimate that doesn't use too much stack space;
+ don't pass zero.
+
+ - Don't return a stack-allocated vector from the function which
+ allocated it. */
+
+#define vec_stack_alloc(T,V,N) \
+ do { \
+ typedef vec<T, va_stack, vl_embed> stackv; \
+ va_stack::alloc (V, N, XALLOCAVAR (stackv, stackv::embedded_size (N)));\
+ } while (0)
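
   A usage sketch under the limitations listed above: the initial 16
   slots come from alloca, and growth past them silently moves the
   vector to the heap via va_stack::reserve.

     vec<int, va_stack> scratch;
     vec_stack_alloc (int, scratch, 16);
     for (int i = 0; i < 40; i++)  /* growth past 16 moves to the heap */
       scratch.safe_push (i);
     scratch.release ();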
+
+
+/* Return iteration condition and update PTR to point to the IX'th
+ element of this vector. Use this to iterate over the elements of a
+ vector as follows,
+
+ for (ix = 0; v.iterate(ix, &ptr); ix++)
+ continue; */
+
+template<typename T, typename A>
+inline bool
+vec<T, A, vl_ptr>::iterate (unsigned ix, T *ptr) const
{
- unsigned int len = VEC_length (T, this);
- unsigned int half, middle;
- unsigned int first = 0;
- while (len > 0)
+ if (vec_)
+ return vec_->iterate (ix, ptr);
+ else
{
- half = len / 2;
- middle = first;
- middle += half;
- T middle_elem = (*this)[middle];
- if (lessthan (middle_elem, obj))
- {
- first = middle;
- ++first;
- len = len - half - 1;
- }
- else
- len = half;
+ *ptr = 0;
+ return false;
}
- return first;
}
-void *vec_heap_o_reserve_1 (void *, int, size_t, size_t, bool MEM_STAT_DECL);
-void *vec_gc_o_reserve_1 (void *, int, size_t, size_t, bool MEM_STAT_DECL);
+/* Return iteration condition and update *PTR to point to the
+ IX'th element of this vector. Use this to iterate over the
+ elements of a vector as follows,
+
+ for (ix = 0; v->iterate(ix, &ptr); ix++)
+ continue;
-/* Ensure there are at least RESERVE free slots in VEC_, growing
- exponentially. If RESERVE < 0 grow exactly, else grow
- exponentially. As a special case, if VEC_ is NULL, and RESERVE is
- 0, no vector will be created. */
+ This variant is for vectors of objects. */
-template<typename T>
-template<enum vec_allocation_t A>
-vec_t<T> *
-vec_t<T>::reserve (vec_t<T> *vec, int reserve MEM_STAT_DECL)
+template<typename T, typename A>
+inline bool
+vec<T, A, vl_ptr>::iterate (unsigned ix, T **ptr) const
{
- void *res = NULL;
- size_t off = offsetof (vec_t<T>, vec_);
- size_t sz = sizeof (T);
-
- switch (A)
+ if (vec_)
+ return vec_->iterate (ix, ptr);
+ else
{
- case gc:
- res = vec_gc_o_reserve_1 (vec, reserve, off, sz, false PASS_MEM_STAT);
- break;
- case heap:
- res = vec_heap_o_reserve_1 (vec, reserve, off, sz, false PASS_MEM_STAT);
- break;
- case stack:
- res = vec_stack_o_reserve (vec, reserve, off, sz PASS_MEM_STAT);
- break;
+ *ptr = 0;
+ return false;
}
+}
+
+
+/* Convenience macro for forward iteration. */
+#define FOR_EACH_VEC_ELT(V, I, P) \
+ for (I = 0; (V).iterate ((I), &(P)); ++(I))
+
+#define FOR_EACH_VEC_SAFE_ELT(V, I, P) \
+ for (I = 0; vec_safe_iterate ((V), (I), &(P)); ++(I))
+
+/* Likewise, but start from FROM rather than 0. */
+#define FOR_EACH_VEC_ELT_FROM(V, I, P, FROM) \
+ for (I = (FROM); (V).iterate ((I), &(P)); ++(I))
- return static_cast <vec_t<T> *> (res);
+/* Convenience macro for reverse iteration. */
+#define FOR_EACH_VEC_ELT_REVERSE(V, I, P) \
+ for (I = (V).length () - 1; \
+ (V).iterate ((I), &(P)); \
+ (I)--)
+
+#define FOR_EACH_VEC_SAFE_ELT_REVERSE(V, I, P) \
+ for (I = vec_safe_length (V) - 1; \
+ vec_safe_iterate ((V), (I), &(P)); \
+ (I)--)
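
   Iteration sketch using the macros above; WORK is a regular vl_ptr
   instance and PV a possibly-NULL pointer to an embedded vector:

     vec<int> work = vec<int>();
     vec<int, va_heap, vl_embed> *pv = NULL;
     unsigned ix;
     int elt, total = 0;
     FOR_EACH_VEC_ELT (work, ix, elt)
       total += elt;
     FOR_EACH_VEC_SAFE_ELT (pv, ix, elt)  /* tolerates NULL PV */
       total += elt;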
+
+
+/* Return a copy of this vector. */
+
+template<typename T, typename A>
+inline vec<T, A, vl_ptr>
+vec<T, A, vl_ptr>::copy (ALONE_MEM_STAT_DECL) const
+{
+ vec<T, A, vl_ptr> new_vec = vec<T, A, vl_ptr>();
+ if (length ())
+ new_vec.vec_ = vec_->copy ();
+ return new_vec;
}
-/* Ensure there are at least RESERVE free slots in VEC, growing
- exactly. If RESERVE < 0 grow exactly, else grow exponentially. As
- a special case, if VEC is NULL, and RESERVE is 0, no vector will be
- created. */
+/* Ensure that the vector has at least RESERVE slots available (if
+ EXACT is false), or exactly RESERVE slots available (if EXACT is
+ true).
-template<typename T>
-template<enum vec_allocation_t A>
-vec_t<T> *
-vec_t<T>::reserve_exact (vec_t<T> *vec, int reserve MEM_STAT_DECL)
+ This may create additional headroom if EXACT is false.
+
+ Note that this can cause the embedded vector to be reallocated.
+ Returns true iff reallocation actually occurred. */
+
+template<typename T, typename A>
+inline bool
+vec<T, A, vl_ptr>::reserve (unsigned nelems, bool exact MEM_STAT_DECL)
{
- void *res = NULL;
- size_t off = sizeof (struct vec_prefix);
- size_t sz = sizeof (T);
+ bool extend = nelems ? !space (nelems) : false;
+ if (extend)
+ A::reserve (vec_, nelems, exact PASS_MEM_STAT);
+ return extend;
+}
+
- gcc_assert (offsetof (vec_t<T>, vec_) == sizeof (struct vec_prefix));
+/* Ensure that this vector has exactly NELEMS slots available. This
+ will not create additional headroom. Note this can cause the
+ embedded vector to be reallocated. Returns true iff reallocation
+ actually occurred. */
- switch (A)
+template<typename T, typename A>
+inline bool
+vec<T, A, vl_ptr>::reserve_exact (unsigned nelems MEM_STAT_DECL)
+{
+ return reserve (nelems, true PASS_MEM_STAT);
+}
+
+
+/* Create the internal vector and reserve NELEMS for it. This is
+ exactly like vec::reserve, but the internal vector is
+ unconditionally allocated from scratch. The old one, if it
+ existed, is lost. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::create (unsigned nelems MEM_STAT_DECL)
+{
+ vec_ = NULL;
+ if (nelems > 0)
+ reserve_exact (nelems PASS_MEM_STAT);
+}
+
+
+/* Free the memory occupied by the embedded vector. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::release (void)
+{
+ if (vec_)
+ A::release (vec_);
+}
+
+
+/* Copy the elements from SRC to the end of this vector as if by memcpy.
+ SRC and this vector must be allocated with the same memory
+ allocation mechanism. This vector is assumed to have sufficient
+ headroom available. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::splice (vec<T, A, vl_ptr> &src)
+{
+ if (src.vec_)
+ vec_->splice (*(src.vec_));
+}
+
+
+/* Copy the elements in SRC to the end of this vector as if by memcpy.
+ SRC and this vector must be allocated with the same mechanism.
+ If there is not enough headroom in this vector, it will be reallocated
+ as needed. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::safe_splice (vec<T, A, vl_ptr> &src MEM_STAT_DECL)
+{
+ if (src.length())
{
- case gc:
- res = vec_gc_o_reserve_1 (vec, reserve, off, sz, true PASS_MEM_STAT);
- break;
- case heap:
- res = vec_heap_o_reserve_1 (vec, reserve, off, sz, true PASS_MEM_STAT);
- break;
- case stack:
- res = vec_stack_o_reserve_exact (vec, reserve, off, sz PASS_MEM_STAT);
- break;
+ reserve_exact (src.length () PASS_MEM_STAT);
+ splice (src);
}
+}
+
+
+/* Push OBJ (a new element) onto the end of the vector. There must be
+ sufficient space in the vector. Return a pointer to the slot
+ where OBJ was inserted. */
+
+template<typename T, typename A>
+inline T *
+vec<T, A, vl_ptr>::quick_push (const T &obj)
+{
+ return vec_->quick_push (obj);
+}
+
+
+/* Push a new element OBJ onto the end of this vector. Reallocates
+ the embedded vector, if needed. Return a pointer to the slot where
+ OBJ was inserted. */
+
+template<typename T, typename A>
+inline T *
+vec<T, A, vl_ptr>::safe_push (const T &obj MEM_STAT_DECL)
+{
+ reserve (1, false PASS_MEM_STAT);
+ return quick_push (obj);
+}
+
- return static_cast <vec_t<T> *> (res);
+/* Pop and return the last element off the end of the vector. */
+
+template<typename T, typename A>
+inline T &
+vec<T, A, vl_ptr>::pop (void)
+{
+ return vec_->pop ();
+}
+
+
+/* Set the length of the vector to SIZE. The new length must be less
+ than or equal to the current length. This is an O(1) operation. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::truncate (unsigned size)
+{
+ if (vec_)
+ vec_->truncate (size);
+ else
+ gcc_checking_assert (size == 0);
+}
+
+
+/* Grow the vector to a specific length. LEN must be as long or
+ longer than the current length. The new elements are
+ uninitialized. Reallocate the internal vector, if needed. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::safe_grow (unsigned len MEM_STAT_DECL)
+{
+ unsigned oldlen = length ();
+ gcc_checking_assert (oldlen <= len);
+ reserve_exact (len - oldlen PASS_MEM_STAT);
+ vec_->quick_grow (len);
+}
+
+
+/* Grow the embedded vector to a specific length. LEN must be as
+ long or longer than the current length. The new elements are
+ initialized to zero. Reallocate the internal vector, if needed. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::safe_grow_cleared (unsigned len MEM_STAT_DECL)
+{
+ unsigned oldlen = length ();
+ safe_grow (len PASS_MEM_STAT);
+  memset (&(address ()[oldlen]), 0, sizeof (T) * (len - oldlen));
+}
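+
+/* For example, building a zero-filled counter table (illustrative;
+   NUM_SLOTS is hypothetical):
+
+     static vec<int> counts;
+     counts.safe_grow_cleared (num_slots);   // new slots read as 0
+     counts[3]++;                            // valid once grown  */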
+
+
+/* Same as vec::safe_grow but without reallocation of the internal vector.
+ If the vector cannot be extended, a runtime assertion will be triggered. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::quick_grow (unsigned len)
+{
+ gcc_checking_assert (vec_);
+ vec_->quick_grow (len);
+}
+
+
+/* Same as vec::safe_grow_cleared but without reallocation of the
+   internal vector.  If the vector cannot be extended, a runtime
+   assertion will be triggered.  */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::quick_grow_cleared (unsigned len)
+{
+ gcc_checking_assert (vec_);
+ vec_->quick_grow_cleared (len);
+}
+
+
+/* Insert an element, OBJ, at the IXth position of this vector. There
+ must be sufficient space. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::quick_insert (unsigned ix, const T &obj)
+{
+ vec_->quick_insert (ix, obj);
+}
+
+
+/* Insert an element, OBJ, at the IXth position of the vector.
+ Reallocate the embedded vector, if necessary. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::safe_insert (unsigned ix, const T &obj MEM_STAT_DECL)
+{
+ reserve (1, false PASS_MEM_STAT);
+ quick_insert (ix, obj);
+}
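+
+/* For example (illustrative):
+
+     v.safe_insert (0, x);   // prepend X, shifting the rest right  */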
+
+
+/* Remove an element from the IXth position of this vector. Ordering of
+ remaining elements is preserved. This is an O(N) operation due to
+ a memmove. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::ordered_remove (unsigned ix)
+{
+ vec_->ordered_remove (ix);
+}
+
+
+/* Remove an element from the IXth position of this vector. Ordering
+ of remaining elements is destroyed. This is an O(1) operation. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::unordered_remove (unsigned ix)
+{
+ vec_->unordered_remove (ix);
+}
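+
+/* The two removal forms trade ordering for speed (illustrative):
+
+     v.ordered_remove (ix);     // O(N): memmove preserves order
+     v.unordered_remove (ix);   // O(1): last element fills slot IX  */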
+
+
+/* Remove LEN elements starting at the IXth. Ordering is retained.
+ This is an O(N) operation due to memmove. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::block_remove (unsigned ix, unsigned len)
+{
+ vec_->block_remove (ix, len);
+}
+
+
+/* Sort the contents of this vector with qsort. CMP is the comparison
+ function to pass to qsort. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_ptr>::qsort (int (*cmp) (const void *, const void *))
+{
+ if (vec_)
+ vec_->qsort (cmp);
+}
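+
+/* CMP has the C qsort signature, so it receives pointers to the
+   elements; an illustrative comparator for a vec<int>:
+
+     static int
+     cmp_ints (const void *pa, const void *pb)
+     {
+       int a = *(const int *) pa;
+       int b = *(const int *) pb;
+       return (a > b) - (a < b);   // avoids the overflow of A - B
+     }
+
+     v.qsort (cmp_ints);  */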
+
+
+/* Find and return the first position in which OBJ could be inserted
+ without changing the ordering of this vector. LESSTHAN is a
+ function that returns true if the first argument is strictly less
+ than the second. */
+
+template<typename T, typename A>
+inline unsigned
+vec<T, A, vl_ptr>::lower_bound (T obj,
+                                bool (*lessthan)(const T &, const T &)) const
+{
+ return vec_ ? vec_->lower_bound (obj, lessthan) : 0;
}
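+
+/* An illustrative way to keep a vector sorted with it (INT_LT is a
+   hypothetical strict less-than predicate):
+
+     static bool
+     int_lt (const int &a, const int &b)
+     {
+       return a < b;
+     }
+
+     unsigned pos = v.lower_bound (x, int_lt);
+     v.safe_insert (pos, x);  */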
-#endif /* GCC_VEC_H */
+#endif // GCC_VEC_H
+++ /dev/null
-/* VEC types for basic types of the intermediate representations.
- Copyright (C) 2010 Free Software Foundation, Inc.
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 3, or (at your option) any later
-version.
-
-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>. */
-
-#ifndef GCC_VECIR_H
-#define GCC_VECIR_H
-
-#ifndef GCC_CORETYPES_H
-#error "vecir.h must be included after coretypes.h"
-#endif
-
-/* A varray of trees. */
-DEF_VEC_P(tree);
-DEF_VEC_ALLOC_P(tree,gc);
-DEF_VEC_ALLOC_P(tree,heap);
-
-/* A varray of gimple statements. */
-DEF_VEC_P(gimple);
-DEF_VEC_ALLOC_P(gimple,heap);
-DEF_VEC_ALLOC_P(gimple,gc);
-
-/* A varray of pointers to gimple statements. */
-typedef gimple *gimple_p;
-DEF_VEC_P(gimple_p);
-DEF_VEC_ALLOC_P(gimple_p,heap);
-
-/* A varray gimple statement sequences. */
-DEF_VEC_P(gimple_seq);
-DEF_VEC_ALLOC_P(gimple_seq,gc);
-DEF_VEC_ALLOC_P(gimple_seq,heap);
-
-/* A varray of RTX objects. */
-DEF_VEC_P(rtx);
-DEF_VEC_ALLOC_P(rtx,heap);
-DEF_VEC_ALLOC_P(rtx,gc);
-
-/* A varray of call graph nodes. */
-typedef struct cgraph_node *cgraph_node_p;
-DEF_VEC_P (cgraph_node_p);
-DEF_VEC_ALLOC_P (cgraph_node_p, heap);
-
-#endif /* GCC_VECIR_H */
+++ /dev/null
-/* VEC types for primitive types
- Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 3, or (at your option) any later
-version.
-
-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>. */
-
-#ifndef GCC_VECPRIM_H
-#define GCC_VECPRIM_H
-
-DEF_VEC_I(char);
-DEF_VEC_ALLOC_I(char,heap);
-
-typedef unsigned char uchar;
-DEF_VEC_I(uchar);
-DEF_VEC_ALLOC_I(uchar,heap);
-DEF_VEC_ALLOC_I(uchar,gc);
-
-DEF_VEC_I(int);
-DEF_VEC_ALLOC_I(int,heap);
-
-DEF_VEC_I(unsigned);
-DEF_VEC_ALLOC_I(unsigned,heap);
-
-#endif /* GCC_VECPRIM_H */
#define FILE_TABLE_INCREMENT 64
typedef char *char_p;
-DEF_VEC_P(char_p);
-DEF_VEC_ALLOC_P(char_p,heap);
-static VEC(char_p,heap) *funcnam_table;
-static VEC(unsigned,heap) *funcnum_table;
+static vec<char_p> funcnam_table;
+static vec<unsigned> funcnum_table;
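+/* vec<T> is effectively plain data, so these file-scope vectors start
+   out zero-initialized, i.e. empty; the create calls below merely
+   pre-reserve FUNC_TABLE_INITIAL slots.  */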
#define FUNC_TABLE_INITIAL 256
/* Local pointer to the name of the main input file. Initialized in
DST_ROUTINE_BEGIN rtnbeg;
DST_PROLOG prolog;
- rtnname = VEC_index (char_p, funcnam_table, rtnnum);
+ rtnname = funcnam_table[rtnnum];
rtnnamelen = strlen (rtnname);
rtnentryname = concat (rtnname, "..en", NULL);
ASM_GENERATE_INTERNAL_LABEL
(label, FUNC_PROLOG_LABEL,
- VEC_index (unsigned, funcnum_table, rtnnum));
+ funcnum_table[rtnnum]);
totsize += write_debug_addr (label, "prolog breakpoint addr",
dosizeonly);
}
ASM_GENERATE_INTERNAL_LABEL
(label1, FUNC_BEGIN_LABEL,
- VEC_index (unsigned, funcnum_table, rtnnum));
+ funcnum_table[rtnnum]);
ASM_GENERATE_INTERNAL_LABEL
(label2, FUNC_END_LABEL,
- VEC_index (unsigned, funcnum_table, rtnnum));
+ funcnum_table[rtnnum]);
totsize += write_debug_delta4 (label2, label1, "routine size", dosizeonly);
return totsize;
(*dwarf2_debug_hooks.begin_function) (decl);
/* Add the new entry to the end of the function name table. */
- VEC_safe_push (char_p, heap, funcnam_table, xstrdup (name));
- VEC_safe_push (unsigned, heap, funcnum_table,
- current_function_funcdef_no);
+ funcnam_table.safe_push (xstrdup (name));
+ funcnum_table.safe_push (current_function_funcdef_no);
}
static char fullname_buff [4096];
/* Skip the first entry - file numbers begin at 1. */
file_info_table_in_use = 1;
- funcnam_table = VEC_alloc (char_p, heap, FUNC_TABLE_INITIAL);
- funcnum_table = VEC_alloc (unsigned, heap, FUNC_TABLE_INITIAL);
+ funcnam_table.create (FUNC_TABLE_INITIAL);
+ funcnum_table.create (FUNC_TABLE_INITIAL);
/* Allocate the initial hunk of the line_info_table. */
line_info_table = XCNEWVEC (dst_line_info_entry, LINE_INFO_TABLE_INCREMENT);
ASM_OUTPUT_ALIGN (asm_out_file, 0);
totsize = write_modbeg (1);
- FOR_EACH_VEC_ELT (unsigned, funcnum_table, i, ifunc)
+ FOR_EACH_VEC_ELT (funcnum_table, i, ifunc)
{
totsize += write_rtnbeg (i, 1);
totsize += write_rtnend (i, 1);
totsize += write_pclines (1);
write_modbeg (0);
- FOR_EACH_VEC_ELT (unsigned, funcnum_table, i, ifunc)
+ FOR_EACH_VEC_ELT (funcnum_table, i, ifunc)
{
write_rtnbeg (i, 0);
write_rtnend (i, 0);