+2013-11-19  David Malcolm  <dmalcolm@redhat.com>
+
+ * basic-block.h (n_basic_blocks_for_function): Rename macro to...
+ (n_basic_blocks_for_fn): ...this.
+
+ (n_basic_blocks): Eliminate macro as a step towards making uses of
+ cfun explicit.
+
+ * cfgloop.c (init_loops_structure): Update for renaming of
+ "n_basic_blocks_for_function" to "n_basic_blocks_for_fn".
+ * graph.c (draw_cfg_nodes_no_loops): Likewise.
+ * ipa-utils.c (ipa_merge_profiles): Likewise.
+ * lto-streamer-in.c (make_new_block): Likewise.
+ * tree-cfg.c (init_empty_tree_cfg_for_function): Likewise.
+ (dump_function_to_file): Likewise.
+
+ * alias.c (init_alias_analysis): Replace usage of "n_basic_blocks"
+ macro with "n_basic_blocks_for_fn (cfun)".
+ * bb-reorder.c (partition_hot_cold_basic_blocks): Likewise.
+ (duplicate_computed_gotos): Likewise.
+ (reorder_basic_blocks): Likewise.
+ * bt-load.c (augment_live_range): Likewise.
+ * cfg.c (expunge_block): Likewise.
+ (compact_blocks): Likewise.
+ * cfganal.c (single_pred_before_succ_order): Likewise.
+ (compute_idf): Likewise.
+ (flow_dfs_compute_reverse_init): Likewise.
+ (pre_and_rev_post_order_compute): Likewise.
+ (pre_and_rev_post_order_compute_fn): Likewise.
+ (inverted_post_order_compute): Likewise.
+ (post_order_compute): Likewise.
+ (print_edge_list): Likewise.
+ (find_unreachable_blocks): Likewise.
+ (mark_dfs_back_edges): Likewise.
+ * cfgcleanup.c (try_optimize_cfg): Likewise.
+ (try_forward_edges): Likewise.
+ * cfghooks.c (dump_flow_info): Likewise.
+ * cfgloop.c (verify_loop_structure): Likewise.
+ (get_loop_body): Likewise.
+ (flow_loops_find): Likewise.
+ * cfgloopmanip.c (add_loop): Likewise.
+ (remove_path): Likewise.
+ (find_path): Likewise.
+ * cfgrtl.c (rtl_flow_call_edges_add): Likewise.
+ (rtl_verify_bb_layout): Likewise.
+ (entry_of_function): Likewise.
+ (rtl_create_basic_block): Likewise.
+ * coverage.c (coverage_compute_cfg_checksum): Likewise.
+ * cprop.c (one_cprop_pass): Likewise.
+ (is_too_expensive): Likewise.
+ * df-core.c (df_compute_cfg_image): Likewise.
+ (df_compact_blocks): Likewise.
+ (df_worklist_dataflow_doublequeue): Likewise.
+ * dominance.c (calculate_dominance_info): Likewise.
+ (calc_dfs_tree): Likewise.
+ (calc_dfs_tree_nonrec): Likewise.
+ (init_dom_info): Likewise.
+ * domwalk.c (dom_walker::walk): Likewise.
+ * function.c (thread_prologue_and_epilogue_insns): Likewise.
+ (generate_setjmp_warnings): Likewise.
+ * fwprop.c (build_single_def_use_links): Likewise.
+ * gcse.c (is_too_expensive): Likewise.
+ (one_code_hoisting_pass): Likewise.
+ (one_pre_gcse_pass): Likewise.
+ * graphite.c (graphite_initialize): Likewise.
+ * haifa-sched.c (haifa_sched_init): Likewise.
+ * ipa-inline-analysis.c (estimate_function_body_sizes): Likewise.
+ * ira.c (split_live_ranges_for_shrink_wrap): Likewise.
+ * ira-build.c (ira_build): Likewise.
+ * lcm.c (compute_nearerout): Likewise.
+ (compute_available): Likewise.
+ (compute_laterin): Likewise.
+ (compute_antinout_edge): Likewise.
+ * lra-lives.c (lra_create_live_ranges): Likewise.
+ * lra.c (has_nonexceptional_receiver): Likewise.
+ * mcf.c (create_fixup_graph): Likewise.
+ * profile.c (branch_prob): Likewise.
+ * reg-stack.c (convert_regs_2): Likewise.
+ * regrename.c (regrename_analyze): Likewise.
+ * reload1.c (has_nonexceptional_receiver): Likewise.
+ * reorg.c (dbr_schedule): Likewise.
+ * sched-deps.c (sched_deps_init): Likewise.
+ * sched-ebb.c (schedule_ebbs): Likewise.
+ * sched-rgn.c (extend_regions): Likewise.
+ (schedule_insns): Likewise.
+ (sched_rgn_init): Likewise.
+ (extend_rgns): Likewise.
+ (haifa_find_rgns): Likewise.
+ * sel-sched-ir.c (recompute_rev_top_order): Likewise.
+ (sel_recompute_toporder): Likewise.
+ * sel-sched.c (run_selective_scheduling): Likewise.
+ * store-motion.c (one_store_motion_pass): Likewise.
+ (remove_reachable_equiv_notes): Likewise.
+ * tracer.c (tracer): Likewise.
+ (tail_duplicate): Likewise.
+ * tree-cfg.c (gimple_flow_call_edges_add): Likewise.
+ (dump_cfg_stats): Likewise.
+ (gimple_dump_cfg): Likewise.
+ (create_bb): Likewise.
+ (build_gimple_cfg): Likewise.
+ * tree-cfgcleanup.c (merge_phi_nodes): Likewise.
+ * tree-inline.c (optimize_inline_calls): Likewise.
+ (fold_marked_statements): Likewise.
+ * tree-ssa-ifcombine.c (tree_ssa_ifcombine): Likewise.
+ * tree-ssa-loop-ch.c (copy_loop_headers): Likewise.
+ * tree-ssa-loop-im.c (analyze_memory_references): Likewise.
+ * tree-ssa-loop-manip.c (compute_live_loop_exits): Likewise.
+ * tree-ssa-math-opts.c (execute_cse_reciprocals): Likewise.
+ * tree-ssa-phiopt.c (tree_ssa_phiopt_worker): Likewise.
+ * tree-ssa-pre.c (do_pre): Likewise.
+ (init_pre): Likewise.
+ (compute_avail): Likewise.
+ * tree-ssa-reassoc.c (init_reassoc): Likewise.
+ * tree-ssa-sccvn.c (init_scc_vn): Likewise.
+ * tree-ssa-tail-merge.c (alloc_cluster_vectors): Likewise.
+ (init_worklist): Likewise.
+ * tree-ssa-uncprop.c (associate_equivalences_with_edges): Likewise.
+ * var-tracking.c (variable_tracking_main_1): Likewise.
+ (vt_find_locations): Likewise.
+ (vt_stack_adjustments): Likewise.
+ * config/s390/s390.c (s390_optimize_nonescaping_tx): Likewise.
+ * config/spu/spu.c (spu_machine_dependent_reorg): Likewise.
+
2013-11-18  Jan Hubicka  <jh@suse.cz>

	* profile.c (compute_branch_probabilities): Do not sanity check run_max.
The state of the arrays for the set chain in question does not matter
since the program has undefined behavior. */
- rpo = XNEWVEC (int, n_basic_blocks);
+ rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
pass = 0;
#define ENTRY_BLOCK_PTR_FOR_FUNCTION(FN) ((FN)->cfg->x_entry_block_ptr)
#define EXIT_BLOCK_PTR_FOR_FUNCTION(FN) ((FN)->cfg->x_exit_block_ptr)
#define basic_block_info_for_function(FN) ((FN)->cfg->x_basic_block_info)
-#define n_basic_blocks_for_function(FN) ((FN)->cfg->x_n_basic_blocks)
+#define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks)
#define n_edges_for_function(FN) ((FN)->cfg->x_n_edges)
#define last_basic_block_for_function(FN) ((FN)->cfg->x_last_basic_block)
#define label_to_block_map_for_function(FN) ((FN)->cfg->x_label_to_block_map)
#define ENTRY_BLOCK_PTR (cfun->cfg->x_entry_block_ptr)
#define EXIT_BLOCK_PTR (cfun->cfg->x_exit_block_ptr)
#define basic_block_info (cfun->cfg->x_basic_block_info)
-#define n_basic_blocks (cfun->cfg->x_n_basic_blocks)
#define n_edges (cfun->cfg->x_n_edges)
#define last_basic_block (cfun->cfg->x_last_basic_block)
#define label_to_block_map (cfun->cfg->x_label_to_block_map)
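(Illustrative sketch, not part of the diff: with the n_basic_blocks macro removed, every reader of the block count must pass a struct function explicitly, so each call site shows whether it operates on cfun or on some other function's CFG. The helper count_real_blocks below is hypothetical and only demonstrates the intended pattern, using n_basic_blocks_for_fn and NUM_FIXED_BLOCKS as defined above.)

/* Hypothetical example only; not part of this patch.  */
static int
count_real_blocks (struct function *fn)
{
  /* NUM_FIXED_BLOCKS covers the ENTRY and EXIT blocks that are always
     present in fn->cfg, so this yields the number of "real" blocks.  */
  return n_basic_blocks_for_fn (fn) - NUM_FIXED_BLOCKS;
}

/* Before:  n = n_basic_blocks - NUM_FIXED_BLOCKS;              (implicit cfun)
   After:   n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
            or, via the hypothetical helper:  n = count_real_blocks (cfun);  */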
gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
return;
set_edge_can_fallthru_flag ();
bbd[i].node = NULL;
}
- traces = XNEWVEC (struct trace, n_basic_blocks);
+ traces = XNEWVEC (struct trace, n_basic_blocks_for_fn (cfun));
n_traces = 0;
find_traces (&n_traces, traces);
connect_traces (n_traces, traces);
bitmap candidates;
int max_size;
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
return 0;
clear_bb_flags ();
{
vec<edge> crossing_edges;
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
return 0;
df_set_flags (DF_DEFER_INSN_RESCAN);
{
basic_block *worklist, *tos;
- tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
if (dominated_by_p (CDI_DOMINATORS, new_bb, head_bb))
{
bb->index = i;
i++;
}
- gcc_assert (i == n_basic_blocks);
+ gcc_assert (i == n_basic_blocks_for_fn (cfun));
for (; i < last_basic_block; i++)
SET_BASIC_BLOCK (i, NULL);
}
- last_basic_block = n_basic_blocks;
+ last_basic_block = n_basic_blocks_for_fn (cfun);
}
/* Remove block B from the basic block array. */
{
unlink_block (b);
SET_BASIC_BLOCK (b->index, NULL);
- n_basic_blocks--;
+ n_basic_blocks_for_fn (cfun)--;
/* We should be able to ggc_free here, but we are not.
The dead SSA_NAMES are left pointing to dead statements that are pointing
to dead basic blocks, making the garbage collector die.
post = XCNEWVEC (int, last_basic_block);
/* Allocate stack for back-tracking up CFG. */
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
edge_iterator ei;
basic_block *tos, *worklist, bb;
- tos = worklist = XNEWVEC (basic_block, n_basic_blocks);
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
/* Clear all the reachability flags. */
int x;
fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n",
- n_basic_blocks, elist->num_edges);
+ n_basic_blocks_for_fn (cfun), elist->num_edges);
for (x = 0; x < elist->num_edges; x++)
{
post_order[post_order_num++] = EXIT_BLOCK;
/* Allocate stack for back-tracking up CFG. */
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
/* Delete the unreachable blocks if some were found and we are
supposed to do it. */
- if (delete_unreachable && (count != n_basic_blocks))
+ if (delete_unreachable && (count != n_basic_blocks_for_fn (cfun)))
{
basic_block b;
basic_block next_bb;
sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
edge_iterator *stack;
int sp;
int pre_order_num = 0;
- int rev_post_order_num = n_basic_blocks - 1;
+ int rev_post_order_num = n_basic_blocks_for_fn (cfun) - 1;
sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
if (include_entry_exit)
include_entry_exit);
if (include_entry_exit)
/* The number of nodes visited should be the number of blocks. */
- gcc_assert (pre_order_num == n_basic_blocks);
+ gcc_assert (pre_order_num == n_basic_blocks_for_fn (cfun));
else
/* The number of nodes visited should be the number of blocks minus
the entry and exit blocks which are not visited here. */
- gcc_assert (pre_order_num == n_basic_blocks - NUM_FIXED_BLOCKS);
+ gcc_assert (pre_order_num
+ == (n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS));
return pre_order_num;
}
flow_dfs_compute_reverse_init (depth_first_search_ds data)
{
/* Allocate stack for back-tracking up CFG. */
- data->stack = XNEWVEC (basic_block, n_basic_blocks);
+ data->stack = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
data->sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
bitmap phi_insertion_points;
/* Each block can appear at most twice on the work-stack. */
- work_stack.create (2 * n_basic_blocks);
+ work_stack.create (2 * n_basic_blocks_for_fn (cfun));
phi_insertion_points = BITMAP_ALLOC (NULL);
/* Seed the work list with all the blocks in DEF_BLOCKS. We use
single_pred_before_succ_order (void)
{
basic_block x, y;
- basic_block *order = XNEWVEC (basic_block, n_basic_blocks);
- unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS;
+ basic_block *order = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+ unsigned n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
unsigned np, i;
sbitmap visited = sbitmap_alloc (last_basic_block);
&& find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
return changed;
- while (counter < n_basic_blocks)
+ while (counter < n_basic_blocks_for_fn (cfun))
{
basic_block new_target = NULL;
bool new_target_threaded = false;
/* Bypass trivial infinite loops. */
new_target = single_succ (target);
if (target == new_target)
- counter = n_basic_blocks;
+ counter = n_basic_blocks_for_fn (cfun);
else if (!optimize)
{
/* When not optimizing, ensure that edges or forwarder
if (t)
{
if (!threaded_edges)
- threaded_edges = XNEWVEC (edge, n_basic_blocks);
+ threaded_edges = XNEWVEC (edge,
+ n_basic_blocks_for_fn (cfun));
else
{
int i;
break;
if (i < nthreaded_edges)
{
- counter = n_basic_blocks;
+ counter = n_basic_blocks_for_fn (cfun);
break;
}
}
if (t->dest == b)
break;
- gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
+ gcc_assert (nthreaded_edges
+ < (n_basic_blocks_for_fn (cfun)
+ - NUM_FIXED_BLOCKS));
threaded_edges[nthreaded_edges++] = t;
new_target = t->dest;
threaded |= new_target_threaded;
}
- if (counter >= n_basic_blocks)
+ if (counter >= n_basic_blocks_for_fn (cfun))
{
if (dump_file)
fprintf (dump_file, "Infinite loop in BB %i.\n",
/* Note that forwarder_block_p true ensures that
there is a successor for this block. */
&& (single_succ_edge (b)->flags & EDGE_FALLTHRU)
- && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
+ && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
{
if (dump_file)
fprintf (dump_file,
{
basic_block bb;
- fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks, n_edges);
+ fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks_for_fn (cfun),
+ n_edges);
FOR_ALL_BB (bb)
dump_bb (file, bb, 0, flags);
/* Dummy loop containing whole function. */
root = alloc_loop ();
- root->num_nodes = n_basic_blocks_for_function (fn);
+ root->num_nodes = n_basic_blocks_for_fn (fn);
root->latch = EXIT_BLOCK_PTR_FOR_FUNCTION (fn);
root->header = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return loops;
/* The root loop node contains all basic-blocks. */
- loops->tree_root->num_nodes = n_basic_blocks;
+ loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);
/* Compute depth first search order of the CFG so that outer
natural loops will be found before inner natural loops. */
- rc_order = XNEWVEC (int, n_basic_blocks);
+ rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
pre_and_rev_post_order_compute (NULL, rc_order, false);
/* Gather all loop headers in reverse completion order and allocate
loop structures for loops that are not already present. */
larray.create (loops->larray->length ());
- for (b = 0; b < n_basic_blocks - NUM_FIXED_BLOCKS; b++)
+ for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
{
basic_block header = BASIC_BLOCK (rc_order[b]);
if (bb_loop_header_p (header))
{
/* There may be blocks unreachable from EXIT_BLOCK, hence we need to
special-case the fake loop that contains the whole function. */
- gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks);
+ gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
body[tv++] = loop->header;
body[tv++] = EXIT_BLOCK_PTR;
FOR_EACH_BB (bb)
/* Check the recorded loop father and sizes of loops. */
visited = sbitmap_alloc (last_basic_block);
bitmap_clear (visited);
- bbs = XNEWVEC (basic_block, n_basic_blocks);
+ bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
{
unsigned n;
continue;
}
- n = get_loop_body_with_size (loop, bbs, n_basic_blocks);
+ n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
if (loop->num_nodes != n)
{
error ("size of loop %d should be %d, not %d",
gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);
/* Find bbs in the path. */
- *bbs = XNEWVEC (basic_block, n_basic_blocks);
+ *bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
- n_basic_blocks, e->dest);
+ n_basic_blocks_for_fn (cfun), e->dest);
}
/* Fix placement of basic block BB inside loop hierarchy --
nrem = find_path (e, &rem_bbs);
n_bord_bbs = 0;
- bord_bbs = XNEWVEC (basic_block, n_basic_blocks);
+ bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
seen = sbitmap_alloc (last_basic_block);
bitmap_clear (seen);
flow_loop_tree_node_add (outer, loop);
/* Find its nodes. */
- bbs = XNEWVEC (basic_block, n_basic_blocks);
- n = get_loop_body_with_size (loop, bbs, n_basic_blocks);
+ bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+ n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
for (i = 0; i < n; i++)
{
vec_safe_grow_cleared (basic_block_info, new_size);
}
- n_basic_blocks++;
+ n_basic_blocks_for_fn (cfun)++;
bb = create_basic_block_structure (head, end, NULL, after);
bb->aux = NULL;
rtx
entry_of_function (void)
{
- return (n_basic_blocks > NUM_FIXED_BLOCKS ?
+ return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ());
}
curr_bb = NULL;
}
- if (num_bb_notes != n_basic_blocks - NUM_FIXED_BLOCKS)
+ if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
internal_error
("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
- num_bb_notes, n_basic_blocks);
+ num_bb_notes, n_basic_blocks_for_fn (cfun));
return err;
}
int last_bb = last_basic_block;
bool check_last_block = false;
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return 0;
if (! blocks)
if (!cfun->machine->tbegin_p)
return;
- for (bb_index = 0; bb_index < n_basic_blocks; bb_index++)
+ for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
{
bb = BASIC_BLOCK (bb_index);
compact_blocks ();
spu_bb_info =
- (struct spu_bb_info *) xcalloc (n_basic_blocks,
+ (struct spu_bb_info *) xcalloc (n_basic_blocks_for_fn (cfun),
sizeof (struct spu_bb_info));
/* We need exact insn addresses and lengths. */
shorten_branches (get_insns ());
- for (i = n_basic_blocks - 1; i >= 0; i--)
+ for (i = n_basic_blocks_for_fn (cfun) - 1; i >= 0; i--)
{
bb = BASIC_BLOCK (i);
branch = 0;
coverage_compute_cfg_checksum (void)
{
basic_block bb;
- unsigned chksum = n_basic_blocks;
+ unsigned chksum = n_basic_blocks_for_fn (cfun);
FOR_EACH_BB (bb)
{
which have a couple switch statements. Rather than simply
threshold the number of blocks, use something with a more
graceful degradation. */
- if (n_edges > 20000 + n_basic_blocks * 4)
+ if (n_edges > 20000 + n_basic_blocks_for_fn (cfun) * 4)
{
warning (OPT_Wdisabled_optimization,
"%s: %d basic blocks and %d edges/basic block",
- pass, n_basic_blocks, n_edges / n_basic_blocks);
+ pass, n_basic_blocks_for_fn (cfun),
+ n_edges / n_basic_blocks_for_fn (cfun));
return true;
}
/* If allocating memory for the cprop bitmap would take up too much
storage it's better just to disable the optimization. */
- if ((n_basic_blocks
+ if ((n_basic_blocks_for_fn (cfun)
* SBITMAP_SET_SIZE (max_reg_num ())
* sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
{
warning (OPT_Wdisabled_optimization,
"%s: %d basic blocks and %d registers",
- pass, n_basic_blocks, max_reg_num ());
+ pass, n_basic_blocks_for_fn (cfun), max_reg_num ());
return true;
}
int changed = 0;
/* Return if there's nothing to do, or it is too expensive. */
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
|| is_too_expensive (_ ("const/copy propagation disabled")))
return 0;
if (dump_file)
{
fprintf (dump_file, "CPROP of %s, %d basic blocks, %d bytes needed, ",
- current_function_name (), n_basic_blocks, bytes_used);
+ current_function_name (), n_basic_blocks_for_fn (cfun),
+ bytes_used);
fprintf (dump_file, "%d local const props, %d local copy props, ",
local_const_prop_count, local_copy_prop_count);
fprintf (dump_file, "%d global const props, %d global copy props\n\n",
fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
"n_basic_blocks %d n_edges %d"
" count %d (%5.2g)\n",
- n_basic_blocks, n_edges,
- dcount, dcount / (float)n_basic_blocks);
+ n_basic_blocks_for_fn (cfun), n_edges,
+ dcount, dcount / (float)n_basic_blocks_for_fn (cfun));
}
/* Worklist-based dataflow solver. It uses sbitmap as a worklist,
i++;
}
- gcc_assert (i == n_basic_blocks);
+ gcc_assert (i == n_basic_blocks_for_fn (cfun));
for (; i < last_basic_block; i++)
SET_BASIC_BLOCK (i, NULL);
df_compute_cfg_image (void)
{
basic_block bb;
- int size = 2 + (2 * n_basic_blocks);
+ int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
int i;
int * map;
init_dom_info (struct dom_info *di, enum cdi_direction dir)
{
/* We need memory for n_basic_blocks nodes. */
- unsigned int num = n_basic_blocks;
+ unsigned int num = n_basic_blocks_for_fn (cfun);
init_ar (di->dfs_parent, TBB, num, 0);
init_ar (di->path_min, TBB, num, i);
init_ar (di->key, TBB, num, i);
/* Ending block. */
basic_block ex_block;
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Initialize our border blocks, and the first edge. */
di->nodes = di->dfsnum - 1;
/* This aborts e.g. when there is _no_ path from ENTRY to EXIT at all. */
- gcc_assert (di->nodes == (unsigned int) n_basic_blocks - 1);
+ gcc_assert (di->nodes == (unsigned int) n_basic_blocks_for_fn (cfun) - 1);
}
/* Compress the path from V to the root of its set and update path_min at the
{
b->dom[dir_index] = et_new_tree (b);
}
- n_bbs_in_dom_tree[dir_index] = n_basic_blocks;
+ n_bbs_in_dom_tree[dir_index] = n_basic_blocks_for_fn (cfun);
init_dom_info (&di, dir);
calc_dfs_tree (&di, reverse);
dom_walker::walk (basic_block bb)
{
basic_block dest;
- basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks * 2);
+ basic_block *worklist = XNEWVEC (basic_block,
+ n_basic_blocks_for_fn (cfun) * 2);
int sp = 0;
int *postorder, postorder_num;
if (m_dom_direction == CDI_DOMINATORS)
{
- postorder = XNEWVEC (int, n_basic_blocks);
+ postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
postorder_num = inverted_post_order_compute (postorder);
bb_postorder = XNEWVEC (int, last_basic_block);
for (int i = 0; i < postorder_num; ++i)
{
bitmap setjmp_crosses = regstat_get_setjmp_crosses ();
- if (n_basic_blocks == NUM_FIXED_BLOCKS
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS
|| bitmap_empty_p (setjmp_crosses))
return;
/* Find the set of basic blocks that require a stack frame,
and blocks that are too big to be duplicated. */
- vec.create (n_basic_blocks);
+ vec.create (n_basic_blocks_for_fn (cfun));
CLEAR_HARD_REG_SET (set_up_by_prologue.set);
add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
reg_defs.create (max_reg_num ());
reg_defs.safe_grow_cleared (max_reg_num ());
- reg_defs_stack.create (n_basic_blocks * 10);
+ reg_defs_stack.create (n_basic_blocks_for_fn (cfun) * 10);
local_md = BITMAP_ALLOC (NULL);
local_lr = BITMAP_ALLOC (NULL);
gcse_create_count = 0;
/* Return if there's nothing to do, or it is too expensive. */
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
|| is_too_expensive (_("PRE disabled")))
return 0;
if (dump_file)
{
fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
- current_function_name (), n_basic_blocks, bytes_used);
+ current_function_name (), n_basic_blocks_for_fn (cfun),
+ bytes_used);
fprintf (dump_file, "%d substs, %d insns created\n",
gcse_subst_count, gcse_create_count);
}
gcse_create_count = 0;
/* Return if there's nothing to do, or it is too expensive. */
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
|| is_too_expensive (_("GCSE disabled")))
return 0;
if (dump_file)
{
fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
- current_function_name (), n_basic_blocks, bytes_used);
+ current_function_name (), n_basic_blocks_for_fn (cfun),
+ bytes_used);
fprintf (dump_file, "%d substs, %d insns created\n",
gcse_subst_count, gcse_create_count);
}
which have a couple switch statements. Rather than simply
threshold the number of blocks, use something with a more
graceful degradation. */
- if (n_edges > 20000 + n_basic_blocks * 4)
+ if (n_edges > 20000 + n_basic_blocks_for_fn (cfun) * 4)
{
warning (OPT_Wdisabled_optimization,
"%s: %d basic blocks and %d edges/basic block",
- pass, n_basic_blocks, n_edges / n_basic_blocks);
+ pass, n_basic_blocks_for_fn (cfun),
+ n_edges / n_basic_blocks_for_fn (cfun));
return true;
}
/* If allocating memory for the dataflow bitmaps would take up too much
storage it's better just to disable the optimization. */
- if ((n_basic_blocks
+ if ((n_basic_blocks_for_fn (cfun)
* SBITMAP_SET_SIZE (max_reg_num ())
* sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
{
warning (OPT_Wdisabled_optimization,
"%s: %d basic blocks and %d registers",
- pass, n_basic_blocks, max_reg_num ());
+ pass, n_basic_blocks_for_fn (cfun), max_reg_num ());
return true;
}
static void
draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
{
- int *rpo = XNEWVEC (int, n_basic_blocks_for_function (fun));
+ int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fun));
int i, n;
sbitmap visited;
bitmap_clear (visited);
n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, true);
- for (i = n_basic_blocks_for_function (fun) - n;
- i < n_basic_blocks_for_function (fun); i++)
+ for (i = n_basic_blocks_for_fn (fun) - n;
+ i < n_basic_blocks_for_fn (fun); i++)
{
basic_block bb = BASIC_BLOCK (rpo[i]);
draw_cfg_node (pp, fun->funcdef_no, bb);
}
free (rpo);
- if (n != n_basic_blocks_for_function (fun))
+ if (n != n_basic_blocks_for_fn (fun))
{
/* Some blocks are unreachable. We still want to dump them. */
basic_block bb;
if (number_of_loops (cfun) <= 1
/* FIXME: This limit on the number of basic blocks of a function
should be removed when the SCOP detection is faster. */
- || n_basic_blocks > PARAM_VALUE (PARAM_GRAPHITE_MAX_BBS_PER_FUNCTION))
+ || (n_basic_blocks_for_fn (cfun)
+ > PARAM_VALUE (PARAM_GRAPHITE_MAX_BBS_PER_FUNCTION)))
{
if (dump_file && (dump_flags & TDF_DETAILS))
print_global_statistics (dump_file);
whole function. */
{
bb_vec_t bbs;
- bbs.create (n_basic_blocks);
+ bbs.create (n_basic_blocks_for_fn (cfun));
basic_block bb;
sched_init_bbs ();
if (parms_info)
compute_bb_predicates (node, parms_info, info);
gcc_assert (cfun == my_function);
- order = XNEWVEC (int, n_basic_blocks);
+ order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
nblocks = pre_and_rev_post_order_compute (NULL, order, false);
for (n = 0; n < nblocks; n++)
{
cgraph_get_body (dst);
srccfun = DECL_STRUCT_FUNCTION (src->decl);
dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
- if (n_basic_blocks_for_function (srccfun)
- != n_basic_blocks_for_function (dstcfun))
+ if (n_basic_blocks_for_fn (srccfun)
+ != n_basic_blocks_for_fn (dstcfun))
{
if (cgraph_dump_file)
fprintf (cgraph_dump_file,
}
fprintf (ira_dump_file, " regions=%d, blocks=%d, points=%d\n",
current_loops == NULL ? 1 : number_of_loops (cfun),
- n_basic_blocks, ira_max_point);
+ n_basic_blocks_for_fn (cfun), ira_max_point);
fprintf (ira_dump_file,
" allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
ira_allocnos_num, nr_big, ira_copies_num, n, nr);
bitmap_initialize (&need_new, 0);
bitmap_initialize (&reachable, 0);
- queue.create (n_basic_blocks);
+ queue.create (n_basic_blocks_for_fn (cfun));
FOR_EACH_BB (bb)
FOR_BB_INSNS (bb, insn)
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- qin = qout = worklist = XNEWVEC (basic_block, n_basic_blocks);
+ qin = qout = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
/* We want a maximal solution, so make an optimistic initialization of
ANTIN. */
}
qin = worklist;
- qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
- qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
+ qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
/* Mark blocks which are predecessors of the exit block so that we
can easily identify them below. */
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
- = XNEWVEC (basic_block, n_basic_blocks);
+ = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
/* Initialize a mapping from each edge to its index. */
for (i = 0; i < num_edges; i++)
/* Note that we do not use the last allocated element for our queue,
as EXIT_BLOCK is never inserted into it. */
qin = worklist;
- qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
- qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
+ qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
/* Iterate until the worklist is empty. */
while (qlen)
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist =
- XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
+ XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
/* We want a maximal solution. */
bitmap_vector_ones (avout, last_basic_block);
}
qin = worklist;
- qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
- qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
+ qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
/* Mark blocks which are successors of the entry block so that we
can easily identify them below. */
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
/* Initialize NEARER for each edge and build a mapping from an edge to
its index. */
lra_point_freq = point_freq_vec.address ();
int *post_order_rev_cfg = XNEWVEC (int, last_basic_block);
int n_blocks_inverted = inverted_post_order_compute (post_order_rev_cfg);
- lra_assert (n_blocks_inverted == n_basic_blocks);
+ lra_assert (n_blocks_inverted == n_basic_blocks_for_fn (cfun));
for (i = n_blocks_inverted - 1; i >= 0; --i)
{
bb = BASIC_BLOCK (post_order_rev_cfg[i]);
return true;
/* First determine which blocks can reach exit via normal paths. */
- tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
FOR_EACH_BB (bb)
bb->flags &= ~BB_REACHABLE;
basic_block bb = alloc_block ();
bb->index = index;
SET_BASIC_BLOCK_FOR_FUNCTION (fn, index, bb);
- n_basic_blocks_for_function (fn)++;
+ n_basic_blocks_for_fn (fn)++;
return bb;
}
int fnum_edges;
/* Each basic_block will be split into 2 during vertex transformation. */
- int fnum_vertices_after_transform = 2 * n_basic_blocks;
- int fnum_edges_after_transform = n_edges + n_basic_blocks;
+ int fnum_vertices_after_transform = 2 * n_basic_blocks_for_fn (cfun);
+ int fnum_edges_after_transform = n_edges + n_basic_blocks_for_fn (cfun);
/* Count the new SOURCE and EXIT vertices to be added. */
int fmax_num_vertices =
- fnum_vertices_after_transform + n_edges + n_basic_blocks + 2;
+ fnum_vertices_after_transform + n_edges + n_basic_blocks_for_fn (cfun) + 2;
/* In create_fixup_graph: Each basic block and edge can be split into 3
edges. Number of balance edges = n_basic_blocks. So after
max_edges = 2 * (4 * n_basic_blocks + 3 * n_edges)
= 8 * n_basic_blocks + 6 * n_edges
< 8 * n_basic_blocks + 8 * n_edges. */
- int fmax_num_edges = 8 * (n_basic_blocks + n_edges);
+ int fmax_num_edges = 8 * (n_basic_blocks_for_fn (cfun) + n_edges);
/* Initial num of vertices in the fixup graph. */
- fixup_graph->num_vertices = n_basic_blocks;
+ fixup_graph->num_vertices = n_basic_blocks_for_fn (cfun);
/* Fixup graph vertex list. */
fixup_graph->vertex_list =
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
total_vertex_weight += bb->count;
- sqrt_avg_vertex_weight = mcf_sqrt (total_vertex_weight / n_basic_blocks);
+ sqrt_avg_vertex_weight = mcf_sqrt (total_vertex_weight
+ / n_basic_blocks_for_fn (cfun));
k_pos = K_POS (sqrt_avg_vertex_weight);
k_neg = K_NEG (sqrt_avg_vertex_weight);
num_instrumented++;
}
- total_num_blocks += n_basic_blocks;
+ total_num_blocks += n_basic_blocks_for_fn (cfun);
if (dump_file)
- fprintf (dump_file, "%d basic blocks\n", n_basic_blocks);
+ fprintf (dump_file, "%d basic blocks\n", n_basic_blocks_for_fn (cfun));
total_num_edges += num_edges;
if (dump_file)
/* Basic block flags */
offset = gcov_write_tag (GCOV_TAG_BLOCKS);
- for (i = 0; i != (unsigned) (n_basic_blocks); i++)
+ for (i = 0; i != (unsigned) (n_basic_blocks_for_fn (cfun)); i++)
gcov_write_unsigned (0);
gcov_write_length (offset);
is only processed after all its predecessors. The number of predecessors
of every block has already been computed. */
- stack = XNEWVEC (basic_block, n_basic_blocks);
+ stack = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
sp = stack;
*sp++ = block;
n_bbs = pre_and_rev_post_order_compute (NULL, inverse_postorder, false);
/* Gather some information about the blocks in this function. */
- rename_info = XCNEWVEC (struct bb_rename_info, n_basic_blocks);
+ rename_info = XCNEWVEC (struct bb_rename_info, n_basic_blocks_for_fn (cfun));
i = 0;
FOR_EACH_BB (bb)
{
return true;
/* First determine which blocks can reach exit via normal paths. */
- tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
FOR_EACH_BB (bb)
bb->flags &= ~BB_REACHABLE;
/* If the current function has no insns other than the prologue and
epilogue, then do not try to fill any delay slots. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return;
/* Find the highest INSN_UID and allocate and initialize our map from
{
/* Average number of insns in the basic block.
'+ 1' is used to make it nonzero. */
- int insns_in_block = sched_max_luid / n_basic_blocks + 1;
+ int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
init_deps_data_vector ();
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return;
if (profile_info && flag_branch_probabilities)
/* Second traversal:find reducible inner loops and topologically sort
block of each region. */
- queue = XNEWVEC (int, n_basic_blocks);
+ queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
if (extend_regions_p)
extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
- int nblocks = n_basic_blocks - NUM_FIXED_BLOCKS;
+ int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
/* Compute regions for scheduling. */
if (single_blocks_p
- || n_basic_blocks == NUM_FIXED_BLOCKS + 1
+ || n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS + 1
|| !flag_schedule_interblock
|| is_cfg_nonregular ())
{
free_dominance_info (CDI_DOMINATORS);
}
- gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks);
+ gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks_for_fn (cfun));
RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
RGN_NR_BLOCKS (nr_regions - 1));
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return;
rgn_setup_common_sched_info ();
void
extend_regions (void)
{
- rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks);
- rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks);
+ rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun));
+ rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks_for_fn (cfun));
block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
}
int i, n, rgn;
int *postorder, n_blocks;
- postorder = XALLOCAVEC (int, n_basic_blocks);
+ postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun));
n_blocks = post_order_compute (postorder, false, false);
rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
rev_top_order_index_len);
}
- postorder = XNEWVEC (int, n_basic_blocks);
+ postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
n_blocks = post_order_compute (postorder, true, false);
- gcc_assert (n_basic_blocks == n_blocks);
+ gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks);
/* Build reverse function: for each basic block with BB->INDEX == K
rev_top_order_index[K] is it's reverse topological sort number. */
{
int rgn;
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return;
sel_global_init ();
rtx last, insn, note;
rtx mem = smexpr->pattern;
- stack = XNEWVEC (edge_iterator, n_basic_blocks);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun));
sp = 0;
ei = ei_start (bb->succs);
if (dump_file)
{
fprintf (dump_file, "STORE_MOTION of %s, %d basic blocks, ",
- current_function_name (), n_basic_blocks);
+ current_function_name (), n_basic_blocks_for_fn (cfun));
fprintf (dump_file, "%d insns deleted, %d insns created\n",
n_stores_deleted, n_stores_created);
}
tail_duplicate (void)
{
fibnode_t *blocks = XCNEWVEC (fibnode_t, last_basic_block);
- basic_block *trace = XNEWVEC (basic_block, n_basic_blocks);
+ basic_block *trace = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
int *counts = XNEWVEC (int, last_basic_block);
int ninsns = 0, nduplicated = 0;
gcov_type weighted_insns = 0, traced_insns = 0;
{
bool changed;
- if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+ if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
return 0;
mark_dfs_back_edges ();
/* Initialize the basic block array. */
init_flow (fn);
profile_status_for_function (fn) = PROFILE_ABSENT;
- n_basic_blocks_for_function (fn) = NUM_FIXED_BLOCKS;
+ n_basic_blocks_for_fn (fn) = NUM_FIXED_BLOCKS;
last_basic_block_for_function (fn) = NUM_FIXED_BLOCKS;
vec_alloc (basic_block_info_for_function (fn), initial_cfg_capacity);
vec_safe_grow_cleared (basic_block_info_for_function (fn),
factor_computed_gotos ();
/* Make sure there is always at least one block, even if it's empty. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
create_empty_bb (ENTRY_BLOCK_PTR);
/* Adjust the size of the array. */
- if (basic_block_info->length () < (size_t) n_basic_blocks)
- vec_safe_grow_cleared (basic_block_info, n_basic_blocks);
+ if (basic_block_info->length () < (size_t) n_basic_blocks_for_fn (cfun))
+ vec_safe_grow_cleared (basic_block_info, n_basic_blocks_for_fn (cfun));
/* To speed up statement iterator walks, we first purge dead labels. */
cleanup_dead_labels ();
/* Add the newly created block to the array. */
SET_BASIC_BLOCK (last_basic_block, bb);
- n_basic_blocks++;
+ n_basic_blocks_for_fn (cfun)++;
last_basic_block++;
return bb;
{
dump_function_header (file, current_function_decl, flags);
fprintf (file, ";; \n%d basic blocks, %d edges, last basic block %d.\n\n",
- n_basic_blocks, n_edges, last_basic_block);
+ n_basic_blocks_for_fn (cfun), n_edges, last_basic_block);
brief_dump_cfg (file, flags | TDF_COMMENT);
fprintf (file, "\n");
fprintf (file, fmt_str, "", " instances ", "used ");
fprintf (file, "---------------------------------------------------------\n");
- size = n_basic_blocks * sizeof (struct basic_block_def);
+ size = n_basic_blocks_for_fn (cfun) * sizeof (struct basic_block_def);
total += size;
- fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks,
+ fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks_for_fn (cfun),
SCALE (size), LABEL (size));
num_edges = 0;
if (!ignore_topmost_bind)
fprintf (file, "{\n");
- if (any_var && n_basic_blocks_for_function (fun))
+ if (any_var && n_basic_blocks_for_fn (fun))
fprintf (file, "\n");
FOR_EACH_BB_FN (bb, fun)
int last_bb = last_basic_block;
bool check_last_block = false;
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return 0;
if (! blocks)
static unsigned int
merge_phi_nodes (void)
{
- basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
+ basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
basic_block *current = worklist;
basic_block bb;
static void
fold_marked_statements (int first, struct pointer_set_t *statements)
{
- for (; first < n_basic_blocks; first++)
+ for (; first < n_basic_blocks_for_fn (cfun); first++)
if (BASIC_BLOCK (first))
{
gimple_stmt_iterator gsi;
{
copy_body_data id;
basic_block bb;
- int last = n_basic_blocks;
+ int last = n_basic_blocks_for_fn (cfun);
struct gimplify_ctx gctx;
bool inlined_p = false;
inner ones, and also that we do not try to visit a removed
block. This is opposite of PHI-OPT, because we cascade the
combining rather than cascading PHIs. */
- for (i = n_basic_blocks - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
+ for (i = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
{
basic_block bb = bbs[i];
gimple stmt = last_stmt (bb);
return 0;
}
- bbs = XNEWVEC (basic_block, n_basic_blocks);
- copied_bbs = XNEWVEC (basic_block, n_basic_blocks);
- bbs_size = n_basic_blocks;
+ bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+ copied_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+ bbs_size = n_basic_blocks_for_fn (cfun);
FOR_EACH_LOOP (li, loop, 0)
{
/* Collect all basic-blocks in loops and sort them after their
loops postorder. */
i = 0;
- bbs = XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
+ bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
FOR_EACH_BB (bb)
if (bb->loop_father != current_loops->tree_root)
bbs[i++] = bb;
/* Normally the work list size is bounded by the number of basic
blocks in the largest loop. We don't know this number, but we
can be fairly sure that it will be relatively small. */
- worklist.create (MAX (8, n_basic_blocks / 128));
+ worklist.create (MAX (8, n_basic_blocks_for_fn (cfun) / 128));
EXECUTE_IF_SET_IN_BITMAP (use_blocks, 0, i, bi)
{
occ_pool = create_alloc_pool ("dominators for recip",
sizeof (struct occurrence),
- n_basic_blocks / 3 + 1);
+ n_basic_blocks_for_fn (cfun) / 3 + 1);
memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
calculate_dominance_info (CDI_DOMINATORS);
outer ones, and also that we do not try to visit a removed
block. */
bb_order = single_pred_before_succ_order ();
- n = n_basic_blocks - NUM_FIXED_BLOCKS;
+ n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
for (i = 0; i < n; i++)
{
}
/* Allocate the worklist. */
- worklist = XNEWVEC (basic_block, n_basic_blocks);
+ worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
/* Seed the algorithm by putting the dominator children of the entry
block on the worklist. */
connect_infinite_loops_to_exit ();
memset (&pre_stats, 0, sizeof (pre_stats));
- postorder = XNEWVEC (int, n_basic_blocks);
+ postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
postorder_num = inverted_post_order_compute (postorder);
alloc_aux_for_blocks (sizeof (struct bb_bitmap_sets));
fixed, don't run it when we have an incredibly large number of
bb's. If we aren't going to run insert, there is no point in
computing ANTIC, either, even though it's plenty fast. */
- if (n_basic_blocks < 4000)
+ if (n_basic_blocks_for_fn (cfun) < 4000)
{
compute_antic ();
insert ();
{
int i;
long rank = 2;
- int *bbs = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
+ int *bbs = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
/* Find the loops, so that we can prevent moving calculations in
them. */
}
/* Set up rank for each BB */
- for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
+ for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
bb_rank[bbs[i]] = ++rank << 16;
free (bbs);
shared_lookup_phiargs.create (0);
shared_lookup_references.create (0);
rpo_numbers = XNEWVEC (int, last_basic_block);
- rpo_numbers_temp = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
+ rpo_numbers_temp =
+ XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
pre_and_rev_post_order_compute (NULL, rpo_numbers_temp, false);
/* RPO numbers is an array of rpo ordering, rpo[i] = bb means that
the i'th block in RPO order is bb. We want to map bb's to RPO
numbers, so we need to rearrange this array. */
- for (j = 0; j < n_basic_blocks - NUM_FIXED_BLOCKS; j++)
+ for (j = 0; j < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; j++)
rpo_numbers[rpo_numbers_temp[j]] = j;
XDELETE (rpo_numbers_temp);
init_worklist (void)
{
alloc_aux_for_blocks (sizeof (struct aux_bb_info));
- same_succ_htab.create (n_basic_blocks);
+ same_succ_htab.create (n_basic_blocks_for_fn (cfun));
same_succ_edge_flags = XCNEWVEC (int, last_basic_block);
deleted_bbs = BITMAP_ALLOC (NULL);
deleted_bb_preds = BITMAP_ALLOC (NULL);
- worklist.create (n_basic_blocks);
+ worklist.create (n_basic_blocks_for_fn (cfun));
find_same_succ ();
if (dump_file && (dump_flags & TDF_DETAILS))
static void
alloc_cluster_vectors (void)
{
- all_clusters.create (n_basic_blocks);
+ all_clusters.create (n_basic_blocks_for_fn (cfun));
}
/* Reset all cluster vectors. */
/* Now walk over the blocks to determine which ones were
marked as being reached by a useful case label. */
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = 0; i < n_basic_blocks_for_fn (cfun); i++)
{
tree node = info[i];
VTI (ENTRY_BLOCK_PTR)->out.stack_adjust = INCOMING_FRAME_SP_OFFSET;
/* Allocate stack for back-tracking up CFG. */
- stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Push the first edge on to the stack. */
timevar_push (TV_VAR_TRACKING_DATAFLOW);
/* Compute reverse completion order of depth first search of the CFG
so that the data-flow runs faster. */
- rc_order = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
+ rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
bb_order = XNEWVEC (int, last_basic_block);
pre_and_rev_post_order_compute (NULL, rc_order, false);
- for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
+ for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
bb_order[rc_order[i]] = i;
free (rc_order);
return 0;
}
- if (n_basic_blocks > 500 && n_edges / n_basic_blocks >= 20)
+ if (n_basic_blocks_for_fn (cfun) > 500
+ && n_edges / n_basic_blocks_for_fn (cfun) >= 20)
{
vt_debug_insns_local (true);
return 0;