git.ipfire.org Git - thirdparty/gcc.git/commitdiff
Eliminate n_basic_blocks macro
author: David Malcolm <dmalcolm@redhat.com>
Tue, 19 Nov 2013 01:13:23 +0000 (01:13 +0000)
committer: David Malcolm <dmalcolm@gcc.gnu.org>
Tue, 19 Nov 2013 01:13:23 +0000 (01:13 +0000)
gcc/
* basic-block.h (n_basic_blocks_for_function): Rename macro to...
(n_basic_blocks_for_fn): ...this.

(n_basic_blocks): Eliminate macro as work towards making uses of
cfun be explicit.

* cfgloop.c (init_loops_structure): Update for renaming of
"n_basic_blocks_for_function" to "n_basic_blocks_for_fn".
* graph.c (draw_cfg_nodes_no_loops): Likewise.
* ipa-utils.c (ipa_merge_profiles): Likewise.
* lto-streamer-in.c (make_new_block): Likewise.
* tree-cfg.c (init_empty_tree_cfg_for_function): Likewise.
(dump_function_to_file): Likewise.

* alias.c (init_alias_analysis): Replace usage of "n_basic_blocks"
macro with "n_basic_blocks_for_fn (cfun)".
* bb-reorder.c (partition_hot_cold_basic_blocks): Likewise.
(duplicate_computed_gotos): Likewise.
(reorder_basic_blocks): Likewise.
* bt-load.c (augment_live_range): Likewise.
* cfg.c (expunge_block): Likewise.
(compact_blocks): Likewise.
* cfganal.c (single_pred_before_succ_order): Likewise.
(compute_idf): Likewise.
(flow_dfs_compute_reverse_init): Likewise.
(pre_and_rev_post_order_compute): Likewise.
(pre_and_rev_post_order_compute_fn): Likewise.
(inverted_post_order_compute): Likewise.
(post_order_compute): Likewise.
(print_edge_list): Likewise.
(find_unreachable_blocks): Likewise.
(mark_dfs_back_edges): Likewise.
* cfgcleanup.c (try_optimize_cfg): Likewise.
(try_forward_edges): Likewise.
* cfghooks.c (dump_flow_info): Likewise.
* cfgloop.c (verify_loop_structure): Likewise.
(get_loop_body): Likewise.
(flow_loops_find): Likewise.
* cfgloopmanip.c (add_loop): Likewise.
(remove_path): Likewise.
(find_path): Likewise.
* cfgrtl.c (rtl_flow_call_edges_add): Likewise.
(rtl_verify_bb_layout): Likewise.
(entry_of_function): Likewise.
(rtl_create_basic_block): Likewise.
* coverage.c (coverage_compute_cfg_checksum): Likewise.
* cprop.c (one_cprop_pass): Likewise.
(is_too_expensive): Likewise.
* df-core.c (df_compute_cfg_image): Likewise.
(df_compact_blocks): Likewise.
(df_worklist_dataflow_doublequeue): Likewise.
* dominance.c (calculate_dominance_info): Likewise.
(calc_dfs_tree): Likewise.
(calc_dfs_tree_nonrec): Likewise.
(init_dom_info): Likewise.
* domwalk.c (cmp_bb_postorder): Likewise.
* function.c (thread_prologue_and_epilogue_insns): Likewise.
(generate_setjmp_warnings): Likewise.
* fwprop.c (build_single_def_use_links): Likewise.
* gcse.c (is_too_expensive): Likewise.
(one_code_hoisting_pass): Likewise.
(one_pre_gcse_pass): Likewise.
* graphite.c (graphite_initialize): Likewise.
* haifa-sched.c (haifa_sched_init): Likewise.
* ipa-inline-analysis.c (estimate_function_body_sizes): Likewise.
* ira.c (split_live_ranges_for_shrink_wrap): Likewise.
* ira-build.c (ira_build): Likewise.
* lcm.c (compute_nearerout): Likewise.
(compute_available): Likewise.
(compute_laterin): Likewise.
(compute_antinout_edge): Likewise.
* lra-lives.c (lra_create_live_ranges): Likewise.
* lra.c (has_nonexceptional_receiver): Likewise.
* mcf.c (create_fixup_graph): Likewise.
* profile.c (branch_prob): Likewise.
* reg-stack.c (convert_regs_2): Likewise.
* regrename.c (regrename_analyze): Likewise.
* reload1.c (has_nonexceptional_receiver): Likewise.
* reorg.c (dbr_schedule): Likewise.
* sched-deps.c (sched_deps_init): Likewise.
* sched-ebb.c (schedule_ebbs): Likewise.
* sched-rgn.c (extend_regions): Likewise.
(schedule_insns): Likewise.
(sched_rgn_init): Likewise.
(extend_rgns): Likewise.
(haifa_find_rgns): Likewise.
* sel-sched-ir.c (recompute_rev_top_order): Likewise.
(sel_recompute_toporder): Likewise.
* sel-sched.c (run_selective_scheduling): Likewise.
* store-motion.c (one_store_motion_pass): Likewise.
(remove_reachable_equiv_notes): Likewise.
* tracer.c (tracer): Likewise.
(tail_duplicate): Likewise.
* tree-cfg.c (gimple_flow_call_edges_add): Likewise.
(dump_cfg_stats): Likewise.
(gimple_dump_cfg): Likewise.
(create_bb): Likewise.
(build_gimple_cfg): Likewise.
* tree-cfgcleanup.c (merge_phi_nodes): Likewise.
* tree-inline.c (optimize_inline_calls): Likewise.
(fold_marked_statements): Likewise.
* tree-ssa-ifcombine.c (tree_ssa_ifcombine): Likewise.
* tree-ssa-loop-ch.c (copy_loop_headers): Likewise.
* tree-ssa-loop-im.c (analyze_memory_references): Likewise.
* tree-ssa-loop-manip.c (compute_live_loop_exits): Likewise.
* tree-ssa-math-opts.c (execute_cse_reciprocals): Likewise.
* tree-ssa-phiopt.c (tree_ssa_phiopt_worker): Likewise.
* tree-ssa-pre.c (do_pre): Likewise.
(init_pre): Likewise.
(compute_avail): Likewise.
* tree-ssa-reassoc.c (init_reassoc): Likewise.
* tree-ssa-sccvn.c (init_scc_vn): Likewise.
* tree-ssa-tail-merge.c (alloc_cluster_vectors): Likewise.
(init_worklist): Likewise.
* tree-ssa-uncprop.c (associate_equivalences_with_edges): Likewise.
* var-tracking.c (variable_tracking_main_1): Likewise.
(vt_find_locations): Likewise.
(vt_stack_adjustments): Likewise.
* config/s390/s390.c (s390_optimize_nonescaping_tx): Likewise.
* config/spu/spu.c (spu_machine_dependent_reorg): Likewise.

From-SVN: r204995

61 files changed:
gcc/ChangeLog
gcc/alias.c
gcc/basic-block.h
gcc/bb-reorder.c
gcc/bt-load.c
gcc/cfg.c
gcc/cfganal.c
gcc/cfgcleanup.c
gcc/cfghooks.c
gcc/cfgloop.c
gcc/cfgloopmanip.c
gcc/cfgrtl.c
gcc/config/s390/s390.c
gcc/config/spu/spu.c
gcc/coverage.c
gcc/cprop.c
gcc/df-core.c
gcc/dominance.c
gcc/domwalk.c
gcc/function.c
gcc/fwprop.c
gcc/gcse.c
gcc/graph.c
gcc/graphite.c
gcc/haifa-sched.c
gcc/ipa-inline-analysis.c
gcc/ipa-utils.c
gcc/ira-build.c
gcc/ira.c
gcc/lcm.c
gcc/lra-lives.c
gcc/lra.c
gcc/lto-streamer-in.c
gcc/mcf.c
gcc/profile.c
gcc/reg-stack.c
gcc/regrename.c
gcc/reload1.c
gcc/reorg.c
gcc/sched-deps.c
gcc/sched-ebb.c
gcc/sched-rgn.c
gcc/sel-sched-ir.c
gcc/sel-sched.c
gcc/store-motion.c
gcc/tracer.c
gcc/tree-cfg.c
gcc/tree-cfgcleanup.c
gcc/tree-inline.c
gcc/tree-ssa-ifcombine.c
gcc/tree-ssa-loop-ch.c
gcc/tree-ssa-loop-im.c
gcc/tree-ssa-loop-manip.c
gcc/tree-ssa-math-opts.c
gcc/tree-ssa-phiopt.c
gcc/tree-ssa-pre.c
gcc/tree-ssa-reassoc.c
gcc/tree-ssa-sccvn.c
gcc/tree-ssa-tail-merge.c
gcc/tree-ssa-uncprop.c
gcc/var-tracking.c

index 6acc4e8aef01c49cc6de335a45f84df2f8302001..9d560ed0a7a94b395a6a597012aa55d38a3271d8 100644 (file)
@@ -1,3 +1,126 @@
+2013-11-19  David Malcolm  <dmalcolm@redhat.com>
+
+       * basic-block.h (n_basic_blocks_for_function): Rename macro to...
+       (n_basic_blocks_for_fn): ...this.
+
+       (n_basic_blocks): Eliminate macro as work towards making uses of
+       cfun be explicit.
+
+       * cfgloop.c (init_loops_structure): Update for renaming of
+       "n_basic_blocks_for_function" to "n_basic_blocks_for_fn".
+       * graph.c (draw_cfg_nodes_no_loops): Likewise.
+       * ipa-utils.c (ipa_merge_profiles): Likewise.
+       * lto-streamer-in.c (make_new_block): Likewise.
+       * tree-cfg.c (init_empty_tree_cfg_for_function): Likewise.
+       (dump_function_to_file): Likewise.
+
+       * alias.c (init_alias_analysis): Replace usage of "n_basic_blocks"
+       macro with "n_basic_blocks_for_fn (cfun)".
+       * bb-reorder.c (partition_hot_cold_basic_blocks): Likewise.
+       (duplicate_computed_gotos): Likewise.
+       (reorder_basic_blocks): Likewise.
+       * bt-load.c (augment_live_range): Likewise.
+       * cfg.c (expunge_block): Likewise.
+       (compact_blocks): Likewise.
+       * cfganal.c (single_pred_before_succ_order): Likewise.
+       (compute_idf): Likewise.
+       (flow_dfs_compute_reverse_init): Likewise.
+       (pre_and_rev_post_order_compute): Likewise.
+       (pre_and_rev_post_order_compute_fn): Likewise.
+       (inverted_post_order_compute): Likewise.
+       (post_order_compute): Likewise.
+       (print_edge_list): Likewise.
+       (find_unreachable_blocks): Likewise.
+       (mark_dfs_back_edges): Likewise.
+       * cfgcleanup.c (try_optimize_cfg): Likewise.
+       (try_forward_edges): Likewise.
+       * cfghooks.c (dump_flow_info): Likewise.
+       * cfgloop.c (verify_loop_structure): Likewise.
+       (get_loop_body): Likewise.
+       (flow_loops_find): Likewise.
+       * cfgloopmanip.c (add_loop): Likewise.
+       (remove_path): Likewise.
+       (find_path): Likewise.
+       * cfgrtl.c (rtl_flow_call_edges_add): Likewise.
+       (rtl_verify_bb_layout): Likewise.
+       (entry_of_function): Likewise.
+       (rtl_create_basic_block): Likewise.
+       * coverage.c (coverage_compute_cfg_checksum): Likewise.
+       * cprop.c (one_cprop_pass): Likewise.
+       (is_too_expensive): Likewise.
+       * df-core.c (df_compute_cfg_image): Likewise.
+       (df_compact_blocks): Likewise.
+       (df_worklist_dataflow_doublequeue): Likewise.
+       * dominance.c (calculate_dominance_info): Likewise.
+       (calc_dfs_tree): Likewise.
+       (calc_dfs_tree_nonrec): Likewise.
+       (init_dom_info): Likewise.
+       * domwalk.c (cmp_bb_postorder): Likewise.
+       * function.c (thread_prologue_and_epilogue_insns): Likewise.
+       (generate_setjmp_warnings): Likewise.
+       * fwprop.c (build_single_def_use_links): Likewise.
+       * gcse.c (is_too_expensive): Likewise.
+       (one_code_hoisting_pass): Likewise.
+       (one_pre_gcse_pass): Likewise.
+       * graphite.c (graphite_initialize): Likewise.
+       * haifa-sched.c (haifa_sched_init): Likewise.
+       * ipa-inline-analysis.c (estimate_function_body_sizes): Likewise.
+       * ira.c (split_live_ranges_for_shrink_wrap): Likewise.
+       * ira-build.c (ira_build): Likewise.
+       * lcm.c (compute_nearerout): Likewise.
+       (compute_available): Likewise.
+       (compute_laterin): Likewise.
+       (compute_antinout_edge): Likewise.
+       * lra-lives.c (lra_create_live_ranges): Likewise.
+       * lra.c (has_nonexceptional_receiver): Likewise.
+       * mcf.c (create_fixup_graph): Likewise.
+       * profile.c (branch_prob): Likewise.
+       * reg-stack.c (convert_regs_2): Likewise.
+       * regrename.c (regrename_analyze): Likewise.
+       * reload1.c (has_nonexceptional_receiver): Likewise.
+       * reorg.c (dbr_schedule): Likewise.
+       * sched-deps.c (sched_deps_init): Likewise.
+       * sched-ebb.c (schedule_ebbs): Likewise.
+       * sched-rgn.c (extend_regions): Likewise.
+       (schedule_insns): Likewise.
+       (sched_rgn_init): Likewise.
+       (extend_rgns): Likewise.
+       (haifa_find_rgns): Likewise.
+       * sel-sched-ir.c (recompute_rev_top_order): Likewise.
+       (sel_recompute_toporder): Likewise.
+       * sel-sched.c (run_selective_scheduling): Likewise.
+       * store-motion.c (one_store_motion_pass): Likewise.
+       (remove_reachable_equiv_notes): Likewise.
+       * tracer.c (tracer): Likewise.
+       (tail_duplicate): Likewise.
+       * tree-cfg.c (gimple_flow_call_edges_add): Likewise.
+       (dump_cfg_stats): Likewise.
+       (gimple_dump_cfg): Likewise.
+       (create_bb): Likewise.
+       (build_gimple_cfg): Likewise.
+       * tree-cfgcleanup.c (merge_phi_nodes): Likewise.
+       * tree-inline.c (optimize_inline_calls): Likewise.
+       (fold_marked_statements): Likewise.
+       * tree-ssa-ifcombine.c (tree_ssa_ifcombine): Likewise.
+       * tree-ssa-loop-ch.c (copy_loop_headers): Likewise.
+       * tree-ssa-loop-im.c (analyze_memory_references): Likewise.
+       * tree-ssa-loop-manip.c (compute_live_loop_exits): Likewise.
+       * tree-ssa-math-opts.c (execute_cse_reciprocals): Likewise.
+       * tree-ssa-phiopt.c (tree_ssa_phiopt_worker): Likewise.
+       * tree-ssa-pre.c (do_pre): Likewise.
+       (init_pre): Likewise.
+       (compute_avail): Likewise.
+       * tree-ssa-reassoc.c (init_reassoc): Likewise.
+       * tree-ssa-sccvn.c (init_scc_vn): Likewise.
+       * tree-ssa-tail-merge.c (alloc_cluster_vectors): Likewise.
+       (init_worklist): Likewise.
+       * tree-ssa-uncprop.c (associate_equivalences_with_edges): Likewise.
+       * var-tracking.c (variable_tracking_main_1): Likewise.
+       (vt_find_locations): Likewise.
+       (vt_stack_adjustments): Likewise.
+       * config/s390/s390.c (s390_optimize_nonescaping_tx): Likewise.
+       * config/spu/spu.c (spu_machine_dependent_reorg): Likewise.
+
 2013-11-18  Jan Hubicka  <jh@suse.cz>
 
        * profile.c (compute_branch_probabilities): Do not sanity check run_max.
index e53cb8033c5378753fcf7d4eb7cbbd687191800b..f0bb32bf32849b580a2d42a0e8de90315b2de4ce 100644 (file)
@@ -2952,7 +2952,7 @@ init_alias_analysis (void)
      The state of the arrays for the set chain in question does not matter
      since the program has undefined behavior.  */
 
-  rpo = XNEWVEC (int, n_basic_blocks);
+  rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
 
   pass = 0;
index fd1681209fbbaf3fc449f617443c4c30c65372bc..d247d4f4509018fee4416d357d72deec98723747 100644 (file)
@@ -315,7 +315,7 @@ struct GTY(()) control_flow_graph {
 #define ENTRY_BLOCK_PTR_FOR_FUNCTION(FN)     ((FN)->cfg->x_entry_block_ptr)
 #define EXIT_BLOCK_PTR_FOR_FUNCTION(FN)             ((FN)->cfg->x_exit_block_ptr)
 #define basic_block_info_for_function(FN)    ((FN)->cfg->x_basic_block_info)
-#define n_basic_blocks_for_function(FN)             ((FN)->cfg->x_n_basic_blocks)
+#define n_basic_blocks_for_fn(FN)           ((FN)->cfg->x_n_basic_blocks)
 #define n_edges_for_function(FN)            ((FN)->cfg->x_n_edges)
 #define last_basic_block_for_function(FN)    ((FN)->cfg->x_last_basic_block)
 #define label_to_block_map_for_function(FN)  ((FN)->cfg->x_label_to_block_map)
@@ -330,7 +330,6 @@ struct GTY(()) control_flow_graph {
 #define ENTRY_BLOCK_PTR                (cfun->cfg->x_entry_block_ptr)
 #define EXIT_BLOCK_PTR         (cfun->cfg->x_exit_block_ptr)
 #define basic_block_info       (cfun->cfg->x_basic_block_info)
-#define n_basic_blocks         (cfun->cfg->x_n_basic_blocks)
 #define n_edges                        (cfun->cfg->x_n_edges)
 #define last_basic_block       (cfun->cfg->x_last_basic_block)
 #define label_to_block_map     (cfun->cfg->x_label_to_block_map)
index 8e2348f476c56110f40cc6a9f5c9ec968af34cf6..45bf1289ec0772c9b8455a3e5487701cb2e88ca5 100644 (file)
@@ -2220,7 +2220,7 @@ reorder_basic_blocks (void)
 
   gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);
 
-  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
     return;
 
   set_edge_can_fallthru_flag ();
@@ -2244,7 +2244,7 @@ reorder_basic_blocks (void)
       bbd[i].node = NULL;
     }
 
-  traces = XNEWVEC (struct trace, n_basic_blocks);
+  traces = XNEWVEC (struct trace, n_basic_blocks_for_fn (cfun));
   n_traces = 0;
   find_traces (&n_traces, traces);
   connect_traces (n_traces, traces);
@@ -2388,7 +2388,7 @@ duplicate_computed_gotos (void)
   bitmap candidates;
   int max_size;
 
-  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
     return 0;
 
   clear_bb_flags ();
@@ -2640,7 +2640,7 @@ partition_hot_cold_basic_blocks (void)
 {
   vec<edge> crossing_edges;
 
-  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
     return 0;
 
   df_set_flags (DF_DEFER_INSN_RESCAN);
index 5384d01d5256fd8a6bb2f7927736fb4ff792f671..348e40bdb46dc3cd1d399b21c3163f915c0a2d5a 100644 (file)
@@ -900,7 +900,7 @@ augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range,
 {
   basic_block *worklist, *tos;
 
-  tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+  tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
 
   if (dominated_by_p (CDI_DOMINATORS, new_bb, head_bb))
     {
index cfada7395dbf9c79020ed07edea53cc93ef8f19d..10791a778a25cb3c0d287f0fd59e5268f7c2a60a 100644 (file)
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -169,12 +169,12 @@ compact_blocks (void)
          bb->index = i;
          i++;
        }
-      gcc_assert (i == n_basic_blocks);
+      gcc_assert (i == n_basic_blocks_for_fn (cfun));
 
       for (; i < last_basic_block; i++)
        SET_BASIC_BLOCK (i, NULL);
     }
-  last_basic_block = n_basic_blocks;
+  last_basic_block = n_basic_blocks_for_fn (cfun);
 }
 
 /* Remove block B from the basic block array.  */
@@ -184,7 +184,7 @@ expunge_block (basic_block b)
 {
   unlink_block (b);
   SET_BASIC_BLOCK (b->index, NULL);
-  n_basic_blocks--;
+  n_basic_blocks_for_fn (cfun)--;
   /* We should be able to ggc_free here, but we are not.
      The dead SSA_NAMES are left pointing to dead statements that are pointing
      to dead basic blocks making garbage collector to die.
index b22161172271254be399bce55153f665931e0fc9..1c90f8c5b7e7023adc3befb9c00a3eac71121caa 100644 (file)
@@ -76,7 +76,7 @@ mark_dfs_back_edges (void)
   post = XCNEWVEC (int, last_basic_block);
 
   /* Allocate stack for back-tracking up CFG.  */
-  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+  stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
   sp = 0;
 
   /* Allocate bitmap to track nodes that have been visited.  */
@@ -152,7 +152,7 @@ find_unreachable_blocks (void)
   edge_iterator ei;
   basic_block *tos, *worklist, bb;
 
-  tos = worklist = XNEWVEC (basic_block, n_basic_blocks);
+  tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
 
   /* Clear all the reachability flags.  */
 
@@ -256,7 +256,7 @@ print_edge_list (FILE *f, struct edge_list *elist)
   int x;
 
   fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n",
-          n_basic_blocks, elist->num_edges);
+          n_basic_blocks_for_fn (cfun), elist->num_edges);
 
   for (x = 0; x < elist->num_edges; x++)
     {
@@ -609,7 +609,7 @@ post_order_compute (int *post_order, bool include_entry_exit,
     post_order[post_order_num++] = EXIT_BLOCK;
 
   /* Allocate stack for back-tracking up CFG.  */
-  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+  stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
   sp = 0;
 
   /* Allocate bitmap to track nodes that have been visited.  */
@@ -667,7 +667,7 @@ post_order_compute (int *post_order, bool include_entry_exit,
 
   /* Delete the unreachable blocks if some were found and we are
      supposed to do it.  */
-  if (delete_unreachable && (count != n_basic_blocks))
+  if (delete_unreachable && (count != n_basic_blocks_for_fn (cfun)))
     {
       basic_block b;
       basic_block next_bb;
@@ -762,7 +762,7 @@ inverted_post_order_compute (int *post_order)
   sbitmap visited;
 
   /* Allocate stack for back-tracking up CFG.  */
-  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+  stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
   sp = 0;
 
   /* Allocate bitmap to track nodes that have been visited.  */
@@ -898,11 +898,11 @@ pre_and_rev_post_order_compute_fn (struct function *fn,
   edge_iterator *stack;
   int sp;
   int pre_order_num = 0;
-  int rev_post_order_num = n_basic_blocks - 1;
+  int rev_post_order_num = n_basic_blocks_for_fn (cfun) - 1;
   sbitmap visited;
 
   /* Allocate stack for back-tracking up CFG.  */
-  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+  stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
   sp = 0;
 
   if (include_entry_exit)
@@ -1000,11 +1000,12 @@ pre_and_rev_post_order_compute (int *pre_order, int *rev_post_order,
                                         include_entry_exit);
   if (include_entry_exit)
     /* The number of nodes visited should be the number of blocks.  */
-    gcc_assert (pre_order_num == n_basic_blocks);
+    gcc_assert (pre_order_num == n_basic_blocks_for_fn (cfun));
   else
     /* The number of nodes visited should be the number of blocks minus
        the entry and exit blocks which are not visited here.  */
-    gcc_assert (pre_order_num == n_basic_blocks - NUM_FIXED_BLOCKS);
+    gcc_assert (pre_order_num
+               == (n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS));
 
   return pre_order_num;
 }
@@ -1043,7 +1044,7 @@ static void
 flow_dfs_compute_reverse_init (depth_first_search_ds data)
 {
   /* Allocate stack for back-tracking up CFG.  */
-  data->stack = XNEWVEC (basic_block, n_basic_blocks);
+  data->stack = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
   data->sp = 0;
 
   /* Allocate bitmap to track nodes that have been visited.  */
@@ -1275,7 +1276,7 @@ compute_idf (bitmap def_blocks, bitmap_head *dfs)
   bitmap phi_insertion_points;
 
   /* Each block can appear at most twice on the work-stack.  */
-  work_stack.create (2 * n_basic_blocks);
+  work_stack.create (2 * n_basic_blocks_for_fn (cfun));
   phi_insertion_points = BITMAP_ALLOC (NULL);
 
   /* Seed the work list with all the blocks in DEF_BLOCKS.  We use
@@ -1493,8 +1494,8 @@ basic_block *
 single_pred_before_succ_order (void)
 {
   basic_block x, y;
-  basic_block *order = XNEWVEC (basic_block, n_basic_blocks);
-  unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS;
+  basic_block *order = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+  unsigned n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
   unsigned np, i;
   sbitmap visited = sbitmap_alloc (last_basic_block);
 
index 5161190736867f88f62a124e629f26551dbecae7..a2192cbaf75fc0e10ef5e89db0b607d4e972c128 100644 (file)
@@ -459,7 +459,7 @@ try_forward_edges (int mode, basic_block b)
          && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
        return changed;
 
-      while (counter < n_basic_blocks)
+      while (counter < n_basic_blocks_for_fn (cfun))
        {
          basic_block new_target = NULL;
          bool new_target_threaded = false;
@@ -472,7 +472,7 @@ try_forward_edges (int mode, basic_block b)
              /* Bypass trivial infinite loops.  */
              new_target = single_succ (target);
              if (target == new_target)
-               counter = n_basic_blocks;
+               counter = n_basic_blocks_for_fn (cfun);
              else if (!optimize)
                {
                  /* When not optimizing, ensure that edges or forwarder
@@ -521,7 +521,8 @@ try_forward_edges (int mode, basic_block b)
              if (t)
                {
                  if (!threaded_edges)
-                   threaded_edges = XNEWVEC (edge, n_basic_blocks);
+                   threaded_edges = XNEWVEC (edge,
+                                             n_basic_blocks_for_fn (cfun));
                  else
                    {
                      int i;
@@ -533,7 +534,7 @@ try_forward_edges (int mode, basic_block b)
                          break;
                      if (i < nthreaded_edges)
                        {
-                         counter = n_basic_blocks;
+                         counter = n_basic_blocks_for_fn (cfun);
                          break;
                        }
                    }
@@ -542,7 +543,9 @@ try_forward_edges (int mode, basic_block b)
                  if (t->dest == b)
                    break;
 
-                 gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
+                 gcc_assert (nthreaded_edges
+                             < (n_basic_blocks_for_fn (cfun)
+                                - NUM_FIXED_BLOCKS));
                  threaded_edges[nthreaded_edges++] = t;
 
                  new_target = t->dest;
@@ -558,7 +561,7 @@ try_forward_edges (int mode, basic_block b)
          threaded |= new_target_threaded;
        }
 
-      if (counter >= n_basic_blocks)
+      if (counter >= n_basic_blocks_for_fn (cfun))
        {
          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
@@ -2713,7 +2716,7 @@ try_optimize_cfg (int mode)
                  /* Note that forwarder_block_p true ensures that
                     there is a successor for this block.  */
                  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
-                 && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
+                 && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
                {
                  if (dump_file)
                    fprintf (dump_file,
index c12a62fca500b3be42c4daf76818d24a592d0126..3016c54a9b10b7fa565d4a984b182a699a4daec0 100644 (file)
@@ -323,7 +323,8 @@ dump_flow_info (FILE *file, int flags)
 {
   basic_block bb;
 
-  fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks, n_edges);
+  fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks_for_fn (cfun),
+          n_edges);
   FOR_ALL_BB (bb)
     dump_bb (file, bb, 0, flags);
 
index 01f250a1519d3ee5b27a66234194022745c74347..20c58e4fa69995e1c225a710d4e499d87f8f0633 100644 (file)
@@ -352,7 +352,7 @@ init_loops_structure (struct function *fn,
 
   /* Dummy loop containing whole function.  */
   root = alloc_loop ();
-  root->num_nodes = n_basic_blocks_for_function (fn);
+  root->num_nodes = n_basic_blocks_for_fn (fn);
   root->latch = EXIT_BLOCK_PTR_FOR_FUNCTION (fn);
   root->header = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
   ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;
@@ -422,21 +422,21 @@ flow_loops_find (struct loops *loops)
 
   /* Taking care of this degenerate case makes the rest of
      this code simpler.  */
-  if (n_basic_blocks == NUM_FIXED_BLOCKS)
+  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
     return loops;
 
   /* The root loop node contains all basic-blocks.  */
-  loops->tree_root->num_nodes = n_basic_blocks;
+  loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);
 
   /* Compute depth first search order of the CFG so that outer
      natural loops will be found before inner natural loops.  */
-  rc_order = XNEWVEC (int, n_basic_blocks);
+  rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
   pre_and_rev_post_order_compute (NULL, rc_order, false);
 
   /* Gather all loop headers in reverse completion order and allocate
      loop structures for loops that are not already present.  */
   larray.create (loops->larray->length ());
-  for (b = 0; b < n_basic_blocks - NUM_FIXED_BLOCKS; b++)
+  for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
     {
       basic_block header = BASIC_BLOCK (rc_order[b]);
       if (bb_loop_header_p (header))
@@ -832,7 +832,7 @@ get_loop_body (const struct loop *loop)
     {
       /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
         special-case the fake loop that contains the whole function.  */
-      gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks);
+      gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
       body[tv++] = loop->header;
       body[tv++] = EXIT_BLOCK_PTR;
       FOR_EACH_BB (bb)
@@ -1368,7 +1368,7 @@ verify_loop_structure (void)
   /* Check the recorded loop father and sizes of loops.  */
   visited = sbitmap_alloc (last_basic_block);
   bitmap_clear (visited);
-  bbs = XNEWVEC (basic_block, n_basic_blocks);
+  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
   FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
     {
       unsigned n;
@@ -1380,7 +1380,7 @@ verify_loop_structure (void)
          continue;
        }
 
-      n = get_loop_body_with_size (loop, bbs, n_basic_blocks);
+      n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
       if (loop->num_nodes != n)
        {
          error ("size of loop %d should be %d, not %d",
index 0fc6552746bf9601d7ee0c6cc18f509b22dfda5c..c8f923b5fcaca3a376bcd08a912e22f559e09576 100644 (file)
@@ -71,9 +71,9 @@ find_path (edge e, basic_block **bbs)
   gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);
 
   /* Find bbs in the path.  */
-  *bbs = XNEWVEC (basic_block, n_basic_blocks);
+  *bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
   return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
-                            n_basic_blocks, e->dest);
+                            n_basic_blocks_for_fn (cfun), e->dest);
 }
 
 /* Fix placement of basic block BB inside loop hierarchy --
@@ -343,7 +343,7 @@ remove_path (edge e)
   nrem = find_path (e, &rem_bbs);
 
   n_bord_bbs = 0;
-  bord_bbs = XNEWVEC (basic_block, n_basic_blocks);
+  bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
   seen = sbitmap_alloc (last_basic_block);
   bitmap_clear (seen);
 
@@ -450,8 +450,8 @@ add_loop (struct loop *loop, struct loop *outer)
   flow_loop_tree_node_add (outer, loop);
 
   /* Find its nodes.  */
-  bbs = XNEWVEC (basic_block, n_basic_blocks);
-  n = get_loop_body_with_size (loop, bbs, n_basic_blocks);
+  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+  n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
 
   for (i = 0; i < n; i++)
     {
index 1f99aa1dfff800fbf9ad3232eed9f07238353f60..c81d3a5503c08139d0ebde60c3c787fddbaa8ca9 100644 (file)
@@ -361,7 +361,7 @@ rtl_create_basic_block (void *headp, void *endp, basic_block after)
       vec_safe_grow_cleared (basic_block_info, new_size);
     }
 
-  n_basic_blocks++;
+  n_basic_blocks_for_fn (cfun)++;
 
   bb = create_basic_block_structure (head, end, NULL, after);
   bb->aux = NULL;
@@ -500,7 +500,7 @@ make_pass_free_cfg (gcc::context *ctxt)
 rtx
 entry_of_function (void)
 {
-  return (n_basic_blocks > NUM_FIXED_BLOCKS ?
+  return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
          BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ());
 }
 
@@ -2921,10 +2921,10 @@ rtl_verify_bb_layout (void)
        curr_bb = NULL;
     }
 
-  if (num_bb_notes != n_basic_blocks - NUM_FIXED_BLOCKS)
+  if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
     internal_error
       ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
-       num_bb_notes, n_basic_blocks);
+       num_bb_notes, n_basic_blocks_for_fn (cfun));
 
    return err;
 }
@@ -4764,7 +4764,7 @@ rtl_flow_call_edges_add (sbitmap blocks)
   int last_bb = last_basic_block;
   bool check_last_block = false;
 
-  if (n_basic_blocks == NUM_FIXED_BLOCKS)
+  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
     return 0;
 
   if (! blocks)
index 39453038fe7d430d0ec15635760632d1fc69ef58..8519cd0452d4d8c45b9e148d91be17c21fc2e7c7 100644 (file)
@@ -7964,7 +7964,7 @@ s390_optimize_nonescaping_tx (void)
   if (!cfun->machine->tbegin_p)
     return;
 
-  for (bb_index = 0; bb_index < n_basic_blocks; bb_index++)
+  for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
     {
       bb = BASIC_BLOCK (bb_index);
 
index e344b73fce68f60c84f667570223471b4f4861b1..55b934b6a5b67f4f0512746431565cde0cb92e5e 100644 (file)
@@ -2470,13 +2470,13 @@ spu_machine_dependent_reorg (void)
   compact_blocks ();
 
   spu_bb_info =
-    (struct spu_bb_info *) xcalloc (n_basic_blocks,
+    (struct spu_bb_info *) xcalloc (n_basic_blocks_for_fn (cfun),
                                    sizeof (struct spu_bb_info));
 
   /* We need exact insn addresses and lengths.  */
   shorten_branches (get_insns ());
 
-  for (i = n_basic_blocks - 1; i >= 0; i--)
+  for (i = n_basic_blocks_for_fn (cfun) - 1; i >= 0; i--)
     {
       bb = BASIC_BLOCK (i);
       branch = 0;
index 43f9c0cb6f13fa11022b10292ca6e85bfd149061..3f4e334f078d20c9cb2a52e8c8d853291b297a89 100644 (file)
@@ -584,7 +584,7 @@ unsigned
 coverage_compute_cfg_checksum (void)
 {
   basic_block bb;
-  unsigned chksum = n_basic_blocks;
+  unsigned chksum = n_basic_blocks_for_fn (cfun);
 
   FOR_EACH_BB (bb)
     {
index 358fca9171a6ac51c3c39e9ebc939b37b8afbcb3..78cfeba6fdd3a68d8923be5a83c4a5a6ce1d5732 100644 (file)
@@ -1729,24 +1729,25 @@ is_too_expensive (const char *pass)
      which have a couple switch statements.  Rather than simply
      threshold the number of blocks, uses something with a more
      graceful degradation.  */
-  if (n_edges > 20000 + n_basic_blocks * 4)
+  if (n_edges > 20000 + n_basic_blocks_for_fn (cfun) * 4)
     {
       warning (OPT_Wdisabled_optimization,
               "%s: %d basic blocks and %d edges/basic block",
-              pass, n_basic_blocks, n_edges / n_basic_blocks);
+              pass, n_basic_blocks_for_fn (cfun),
+              n_edges / n_basic_blocks_for_fn (cfun));
 
       return true;
     }
 
   /* If allocating memory for the cprop bitmap would take up too much
      storage it's better just to disable the optimization.  */
-  if ((n_basic_blocks
+  if ((n_basic_blocks_for_fn (cfun)
        * SBITMAP_SET_SIZE (max_reg_num ())
        * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
     {
       warning (OPT_Wdisabled_optimization,
               "%s: %d basic blocks and %d registers",
-              pass, n_basic_blocks, max_reg_num ());
+              pass, n_basic_blocks_for_fn (cfun), max_reg_num ());
 
       return true;
     }
@@ -1763,7 +1764,7 @@ one_cprop_pass (void)
   int changed = 0;
 
   /* Return if there's nothing to do, or it is too expensive.  */
-  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
+  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
       || is_too_expensive (_ ("const/copy propagation disabled")))
     return 0;
 
@@ -1873,7 +1874,8 @@ one_cprop_pass (void)
   if (dump_file)
     {
       fprintf (dump_file, "CPROP of %s, %d basic blocks, %d bytes needed, ",
-              current_function_name (), n_basic_blocks, bytes_used);
+              current_function_name (), n_basic_blocks_for_fn (cfun),
+              bytes_used);
       fprintf (dump_file, "%d local const props, %d local copy props, ",
               local_const_prop_count, local_copy_prop_count);
       fprintf (dump_file, "%d global const props, %d global copy props\n\n",
index deea7551053471b7158eedcd4ab6d451af0c555a..20d6c4e4fb5b38571783f091a639932fa9c9ab20 100644 (file)
@@ -1097,8 +1097,8 @@ df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
     fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
             "n_basic_blocks %d n_edges %d"
             " count %d (%5.2g)\n",
-            n_basic_blocks, n_edges,
-            dcount, dcount / (float)n_basic_blocks);
+            n_basic_blocks_for_fn (cfun), n_edges,
+            dcount, dcount / (float)n_basic_blocks_for_fn (cfun));
 }
 
 /* Worklist-based dataflow solver. It uses sbitmap as a worklist,
@@ -1606,7 +1606,7 @@ df_compact_blocks (void)
       i++;
     }
 
-  gcc_assert (i == n_basic_blocks);
+  gcc_assert (i == n_basic_blocks_for_fn (cfun));
 
   for (; i < last_basic_block; i++)
     SET_BASIC_BLOCK (i, NULL);
@@ -1714,7 +1714,7 @@ static int *
 df_compute_cfg_image (void)
 {
   basic_block bb;
-  int size = 2 + (2 * n_basic_blocks);
+  int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
   int i;
   int * map;
 
index 569f1f43b21ab4bc33592b528974567b67eb2290..6530109485455f3c680bd58f5a3ec5929dd88dd9 100644 (file)
@@ -146,7 +146,7 @@ static void
 init_dom_info (struct dom_info *di, enum cdi_direction dir)
 {
   /* We need memory for n_basic_blocks nodes.  */
-  unsigned int num = n_basic_blocks;
+  unsigned int num = n_basic_blocks_for_fn (cfun);
   init_ar (di->dfs_parent, TBB, num, 0);
   init_ar (di->path_min, TBB, num, i);
   init_ar (di->key, TBB, num, i);
@@ -233,7 +233,7 @@ calc_dfs_tree_nonrec (struct dom_info *di, basic_block bb, bool reverse)
   /* Ending block.  */
   basic_block ex_block;
 
-  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+  stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
   sp = 0;
 
   /* Initialize our border blocks, and the first edge.  */
@@ -394,7 +394,7 @@ calc_dfs_tree (struct dom_info *di, bool reverse)
   di->nodes = di->dfsnum - 1;
 
   /* This aborts e.g. when there is _no_ path from ENTRY to EXIT at all.  */
-  gcc_assert (di->nodes == (unsigned int) n_basic_blocks - 1);
+  gcc_assert (di->nodes == (unsigned int) n_basic_blocks_for_fn (cfun) - 1);
 }
 
 /* Compress the path from V to the root of its set and update path_min at the
@@ -652,7 +652,7 @@ calculate_dominance_info (enum cdi_direction dir)
        {
          b->dom[dir_index] = et_new_tree (b);
        }
-      n_bbs_in_dom_tree[dir_index] = n_basic_blocks;
+      n_bbs_in_dom_tree[dir_index] = n_basic_blocks_for_fn (cfun);
 
       init_dom_info (&di, dir);
       calc_dfs_tree (&di, reverse);
index 4816b4c8d85f0204df4c96e3e433a771f5639de5..4c7354ecf92d791710d7d7e344808ffd7a47995c 100644 (file)
@@ -150,13 +150,14 @@ void
 dom_walker::walk (basic_block bb)
 {
   basic_block dest;
-  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks * 2);
+  basic_block *worklist = XNEWVEC (basic_block,
+                                  n_basic_blocks_for_fn (cfun) * 2);
   int sp = 0;
   int *postorder, postorder_num;
 
   if (m_dom_direction == CDI_DOMINATORS)
     {
-      postorder = XNEWVEC (int, n_basic_blocks);
+      postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
       postorder_num = inverted_post_order_compute (postorder);
       bb_postorder = XNEWVEC (int, last_basic_block);
       for (int i = 0; i < postorder_num; ++i)
index 41382310e04de75c594f78989bbb8c8aa79adc42..724b969491729b2a1fb9b036cb034a931067675f 100644 (file)
@@ -4028,7 +4028,7 @@ generate_setjmp_warnings (void)
 {
   bitmap setjmp_crosses = regstat_get_setjmp_crosses ();
 
-  if (n_basic_blocks == NUM_FIXED_BLOCKS
+  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS
       || bitmap_empty_p (setjmp_crosses))
     return;
 
@@ -6015,7 +6015,7 @@ thread_prologue_and_epilogue_insns (void)
       /* Find the set of basic blocks that require a stack frame,
         and blocks that are too big to be duplicated.  */
 
-      vec.create (n_basic_blocks);
+      vec.create (n_basic_blocks_for_fn (cfun));
 
       CLEAR_HARD_REG_SET (set_up_by_prologue.set);
       add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
index d08710c9614f0c7615dd6d2fd243803e22708947..da40a6775593427e1f958238128b8b72cb11d7a0 100644 (file)
@@ -289,7 +289,7 @@ build_single_def_use_links (void)
   reg_defs.create (max_reg_num ());
   reg_defs.safe_grow_cleared (max_reg_num ());
 
-  reg_defs_stack.create (n_basic_blocks * 10);
+  reg_defs_stack.create (n_basic_blocks_for_fn (cfun) * 10);
   local_md = BITMAP_ALLOC (NULL);
   local_lr = BITMAP_ALLOC (NULL);
 
index 571e8788c83db469d06876e2b9c1e2ea94e9ea36..5ed99bdf169712af54edb5e4bbd5289a5c9cce9e 100644 (file)
@@ -2662,7 +2662,7 @@ one_pre_gcse_pass (void)
   gcse_create_count = 0;
 
   /* Return if there's nothing to do, or it is too expensive.  */
-  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
+  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
       || is_too_expensive (_("PRE disabled")))
     return 0;
 
@@ -2708,7 +2708,8 @@ one_pre_gcse_pass (void)
   if (dump_file)
     {
       fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
-              current_function_name (), n_basic_blocks, bytes_used);
+              current_function_name (), n_basic_blocks_for_fn (cfun),
+              bytes_used);
       fprintf (dump_file, "%d substs, %d insns created\n",
               gcse_subst_count, gcse_create_count);
     }
@@ -3591,7 +3592,7 @@ one_code_hoisting_pass (void)
   gcse_create_count = 0;
 
   /* Return if there's nothing to do, or it is too expensive.  */
-  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
+  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
       || is_too_expensive (_("GCSE disabled")))
     return 0;
 
@@ -3642,7 +3643,8 @@ one_code_hoisting_pass (void)
   if (dump_file)
     {
       fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
-              current_function_name (), n_basic_blocks, bytes_used);
+              current_function_name (), n_basic_blocks_for_fn (cfun),
+              bytes_used);
       fprintf (dump_file, "%d substs, %d insns created\n",
               gcse_subst_count, gcse_create_count);
     }
@@ -4067,24 +4069,25 @@ is_too_expensive (const char *pass)
      which have a couple switch statements.  Rather than simply
      threshold the number of blocks, uses something with a more
      graceful degradation.  */
-  if (n_edges > 20000 + n_basic_blocks * 4)
+  if (n_edges > 20000 + n_basic_blocks_for_fn (cfun) * 4)
     {
       warning (OPT_Wdisabled_optimization,
               "%s: %d basic blocks and %d edges/basic block",
-              pass, n_basic_blocks, n_edges / n_basic_blocks);
+              pass, n_basic_blocks_for_fn (cfun),
+              n_edges / n_basic_blocks_for_fn (cfun));
 
       return true;
     }
 
   /* If allocating memory for the dataflow bitmaps would take up too much
      storage it's better just to disable the optimization.  */
-  if ((n_basic_blocks
+  if ((n_basic_blocks_for_fn (cfun)
        * SBITMAP_SET_SIZE (max_reg_num ())
        * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
     {
       warning (OPT_Wdisabled_optimization,
               "%s: %d basic blocks and %d registers",
-              pass, n_basic_blocks, max_reg_num ());
+              pass, n_basic_blocks_for_fn (cfun), max_reg_num ());
 
       return true;
     }
index 5c890e5956c5ceac0a48061cc8c5782f780282b2..1dc9dbc7734d0c4e91f3f7cd8c0ba8b6e8829f52 100644 (file)
@@ -153,7 +153,7 @@ draw_cfg_node_succ_edges (pretty_printer *pp, int funcdef_no, basic_block bb)
 static void
 draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
 {
-  int *rpo = XNEWVEC (int, n_basic_blocks_for_function (fun));
+  int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fun));
   int i, n;
   sbitmap visited;
 
@@ -161,8 +161,8 @@ draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
   bitmap_clear (visited);
 
   n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, true);
-  for (i = n_basic_blocks_for_function (fun) - n;
-       i < n_basic_blocks_for_function (fun); i++)
+  for (i = n_basic_blocks_for_fn (fun) - n;
+       i < n_basic_blocks_for_fn (fun); i++)
     {
       basic_block bb = BASIC_BLOCK (rpo[i]);
       draw_cfg_node (pp, fun->funcdef_no, bb);
@@ -170,7 +170,7 @@ draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
     }
   free (rpo);
 
-  if (n != n_basic_blocks_for_function (fun))
+  if (n != n_basic_blocks_for_fn (fun))
     {
       /* Some blocks are unreachable.  We still want to dump them.  */
       basic_block bb;
index 5223de959d54de67c842d29eb0df6315ca13ea18..f87aede8420637937125a750542a810a6f9b5856 100644 (file)
@@ -208,7 +208,8 @@ graphite_initialize (isl_ctx *ctx)
   if (number_of_loops (cfun) <= 1
       /* FIXME: This limit on the number of basic blocks of a function
         should be removed when the SCOP detection is faster.  */
-      || n_basic_blocks > PARAM_VALUE (PARAM_GRAPHITE_MAX_BBS_PER_FUNCTION))
+      || (n_basic_blocks_for_fn (cfun) >
+         PARAM_VALUE (PARAM_GRAPHITE_MAX_BBS_PER_FUNCTION)))
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
        print_global_statistics (dump_file);
index aa3ffe3be30c5ab1bf91cfc3956643d035b81988..beddc116ffb9d82983b64153286c7984554e0162 100644 (file)
@@ -6754,7 +6754,7 @@ haifa_sched_init (void)
      whole function.  */
   {
     bb_vec_t bbs;
-    bbs.create (n_basic_blocks);
+    bbs.create (n_basic_blocks_for_fn (cfun));
     basic_block bb;
 
     sched_init_bbs ();
index fb05caec6571d411423fdf2dce729c70de09e212..a558dbc75ff0d3b0ac68e2566ac061cffcfb8885 100644 (file)
@@ -2397,7 +2397,7 @@ estimate_function_body_sizes (struct cgraph_node *node, bool early)
   if (parms_info)
     compute_bb_predicates (node, parms_info, info);
   gcc_assert (cfun == my_function);
-  order = XNEWVEC (int, n_basic_blocks);
+  order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
   nblocks = pre_and_rev_post_order_compute (NULL, order, false);
   for (n = 0; n < nblocks; n++)
     {
index db775f4cdcc5967659c8cd48abceccee1ca75cf3..1aa239d3b7d258cc643a677776151152b4a4c930 100644 (file)
@@ -700,8 +700,8 @@ ipa_merge_profiles (struct cgraph_node *dst,
   cgraph_get_body (dst);
   srccfun = DECL_STRUCT_FUNCTION (src->decl);
   dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
-  if (n_basic_blocks_for_function (srccfun)
-      != n_basic_blocks_for_function (dstcfun))
+  if (n_basic_blocks_for_fn (srccfun)
+      != n_basic_blocks_for_fn (dstcfun))
     {
       if (cgraph_dump_file)
        fprintf (cgraph_dump_file,
index ed513767f3c7c4d7c5f996650f2473f5b2488c97..ca6f64d0637e2b3e5c9363f6df57217776122f44 100644 (file)
@@ -3496,7 +3496,7 @@ ira_build (void)
        }
       fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
               current_loops == NULL ? 1 : number_of_loops (cfun),
-              n_basic_blocks, ira_max_point);
+              n_basic_blocks_for_fn (cfun), ira_max_point);
       fprintf (ira_dump_file,
               "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
               ira_allocnos_num, nr_big, ira_copies_num, n, nr);
index dbc5a0ad9977ff511475fb4d479528440632f1b3..a813b02bef9c3a24fe9a9c01c841bf5688fff17d 100644 (file)
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -4875,7 +4875,7 @@ split_live_ranges_for_shrink_wrap (void)
 
   bitmap_initialize (&need_new, 0);
   bitmap_initialize (&reachable, 0);
-  queue.create (n_basic_blocks);
+  queue.create (n_basic_blocks_for_fn (cfun));
 
   FOR_EACH_BB (bb)
     FOR_BB_INSNS (bb, insn)
index c13d2a6aa5127c089737d21bf708fe028e11cafb..6266d48500b3c27bf9ce98767b013f859050043f 100644 (file)
--- a/gcc/lcm.c
+++ b/gcc/lcm.c
@@ -101,7 +101,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
   /* Allocate a worklist array/queue.  Entries are only added to the
      list if they were not already on the list.  So the size is
      bounded by the number of basic blocks.  */
-  qin = qout = worklist = XNEWVEC (basic_block, n_basic_blocks);
+  qin = qout = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
 
   /* We want a maximal solution, so make an optimistic initialization of
      ANTIN.  */
@@ -116,8 +116,8 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
     }
 
   qin = worklist;
-  qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
-  qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
+  qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
+  qlen = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
 
   /* Mark blocks which are predecessors of the exit block so that we
      can easily identify them below.  */
@@ -254,7 +254,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
      list if they were not already on the list.  So the size is
      bounded by the number of basic blocks.  */
   qin = qout = worklist
-    = XNEWVEC (basic_block, n_basic_blocks);
+    = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
 
   /* Initialize a mapping from each edge to its index.  */
   for (i = 0; i < num_edges; i++)
@@ -290,8 +290,8 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
   /* Note that we do not use the last allocated element for our queue,
      as EXIT_BLOCK is never inserted into it. */
   qin = worklist;
-  qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
-  qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
+  qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
+  qlen = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
 
   /* Iterate until the worklist is empty.  */
   while (qlen)
@@ -481,7 +481,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
      list if they were not already on the list.  So the size is
      bounded by the number of basic blocks.  */
   qin = qout = worklist =
-    XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
+    XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
 
   /* We want a maximal solution.  */
   bitmap_vector_ones (avout, last_basic_block);
@@ -495,8 +495,8 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
     }
 
   qin = worklist;
-  qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
-  qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
+  qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
+  qlen = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
 
   /* Mark blocks which are successors of the entry block so that we
      can easily identify them below.  */
@@ -610,7 +610,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
   /* Allocate a worklist array/queue.  Entries are only added to the
      list if they were not already on the list.  So the size is
      bounded by the number of basic blocks.  */
-  tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+  tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
 
   /* Initialize NEARER for each edge and build a mapping from an edge to
      its index.  */
index f3bad974a87bb76f643ccbb09be42738d44af7ff..2839c5cf9401dd32133dfc0e6a4d6a64ef863bc3 100644 (file)
@@ -998,7 +998,7 @@ lra_create_live_ranges (bool all_p)
   lra_point_freq = point_freq_vec.address ();
   int *post_order_rev_cfg = XNEWVEC (int, last_basic_block);
   int n_blocks_inverted = inverted_post_order_compute (post_order_rev_cfg);
-  lra_assert (n_blocks_inverted == n_basic_blocks);
+  lra_assert (n_blocks_inverted == n_basic_blocks_for_fn (cfun));
   for (i = n_blocks_inverted - 1; i >= 0; --i)
     {
       bb = BASIC_BLOCK (post_order_rev_cfg[i]);
index 1aea599a2e550824f0b1e88ebc3700d4376a7c26..3c8b71d949d4f834821f75be84c6900de09cdc86 100644 (file)
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -2059,7 +2059,7 @@ has_nonexceptional_receiver (void)
     return true;
 
   /* First determine which blocks can reach exit via normal paths.  */
-  tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+  tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
 
   FOR_EACH_BB (bb)
     bb->flags &= ~BB_REACHABLE;
index 4a31b05d52ec5603839da337a26c6af0875b8763..98cf28ff91758b57a6861699a69727748385c4c7 100644 (file)
@@ -587,7 +587,7 @@ make_new_block (struct function *fn, unsigned int index)
   basic_block bb = alloc_block ();
   bb->index = index;
   SET_BASIC_BLOCK_FOR_FUNCTION (fn, index, bb);
-  n_basic_blocks_for_function (fn)++;
+  n_basic_blocks_for_fn (fn)++;
   return bb;
 }
 
index 52020b8c2f813e94989a117d5003fe87b6853652..e0e40d812766befc5e5470a5ab11336233574352 100644 (file)
--- a/gcc/mcf.c
+++ b/gcc/mcf.c
@@ -471,12 +471,12 @@ create_fixup_graph (fixup_graph_type *fixup_graph)
   int fnum_edges;
 
   /* Each basic_block will be split into 2 during vertex transformation.  */
-  int fnum_vertices_after_transform =  2 * n_basic_blocks;
-  int fnum_edges_after_transform = n_edges + n_basic_blocks;
+  int fnum_vertices_after_transform =  2 * n_basic_blocks_for_fn (cfun);
+  int fnum_edges_after_transform = n_edges + n_basic_blocks_for_fn (cfun);
 
   /* Count the new SOURCE and EXIT vertices to be added.  */
   int fmax_num_vertices =
-    fnum_vertices_after_transform + n_edges + n_basic_blocks + 2;
+    fnum_vertices_after_transform + n_edges + n_basic_blocks_for_fn (cfun) + 2;
 
   /* In create_fixup_graph: Each basic block and edge can be split into 3
      edges. Number of balance edges = n_basic_blocks. So after
@@ -486,10 +486,10 @@ create_fixup_graph (fixup_graph_type *fixup_graph)
      max_edges = 2 * (4 * n_basic_blocks + 3 * n_edges)
      = 8 * n_basic_blocks + 6 * n_edges
      < 8 * n_basic_blocks + 8 * n_edges.  */
-  int fmax_num_edges = 8 * (n_basic_blocks + n_edges);
+  int fmax_num_edges = 8 * (n_basic_blocks_for_fn (cfun) + n_edges);
 
   /* Initial num of vertices in the fixup graph.  */
-  fixup_graph->num_vertices = n_basic_blocks;
+  fixup_graph->num_vertices = n_basic_blocks_for_fn (cfun);
 
   /* Fixup graph vertex list.  */
   fixup_graph->vertex_list =
@@ -508,7 +508,8 @@ create_fixup_graph (fixup_graph_type *fixup_graph)
   FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
     total_vertex_weight += bb->count;
 
-  sqrt_avg_vertex_weight = mcf_sqrt (total_vertex_weight / n_basic_blocks);
+  sqrt_avg_vertex_weight = mcf_sqrt (total_vertex_weight /
+                                    n_basic_blocks_for_fn (cfun));
 
   k_pos = K_POS (sqrt_avg_vertex_weight);
   k_neg = K_NEG (sqrt_avg_vertex_weight);
index 098a4be25df7e26973e61931502ff5272ec8a8d6..1f1c2652b55776f67522ffc44982e7628bbab022 100644 (file)
@@ -1177,9 +1177,9 @@ branch_prob (void)
        num_instrumented++;
     }
 
-  total_num_blocks += n_basic_blocks;
+  total_num_blocks += n_basic_blocks_for_fn (cfun);
   if (dump_file)
-    fprintf (dump_file, "%d basic blocks\n", n_basic_blocks);
+    fprintf (dump_file, "%d basic blocks\n", n_basic_blocks_for_fn (cfun));
 
   total_num_edges += num_edges;
   if (dump_file)
@@ -1208,7 +1208,7 @@ branch_prob (void)
 
       /* Basic block flags */
       offset = gcov_write_tag (GCOV_TAG_BLOCKS);
-      for (i = 0; i != (unsigned) (n_basic_blocks); i++)
+      for (i = 0; i != (unsigned) (n_basic_blocks_for_fn (cfun)); i++)
        gcov_write_unsigned (0);
       gcov_write_length (offset);
 
index 1917c46fe961adcd0f00880ae4140dbda8b699f8..3740934b826919e3afd6d03a2d6acdefee6621da 100644 (file)
@@ -3080,7 +3080,7 @@ convert_regs_2 (basic_block block)
      is only processed after all its predecessors.  The number of predecessors
      of every block has already been computed.  */
 
-  stack = XNEWVEC (basic_block, n_basic_blocks);
+  stack = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
   sp = stack;
 
   *sp++ = block;
index 5b2c85799bb3cb6dc698c79871bdbc46e16ed024..5e86fa5a61ab4d5f1815638c0599bfbd5c31b975 100644 (file)
@@ -672,7 +672,7 @@ regrename_analyze (bitmap bb_mask)
   n_bbs = pre_and_rev_post_order_compute (NULL, inverse_postorder, false);
 
   /* Gather some information about the blocks in this function.  */
-  rename_info = XCNEWVEC (struct bb_rename_info, n_basic_blocks);
+  rename_info = XCNEWVEC (struct bb_rename_info, n_basic_blocks_for_fn (cfun));
   i = 0;
   FOR_EACH_BB (bb)
     {
index a40e16b12c328c0c8f8495253b7f9e4c33971ca4..66b5ff16b22cf3dc79cad7d9dce70c0a6788c578 100644 (file)
@@ -611,7 +611,7 @@ has_nonexceptional_receiver (void)
     return true;
 
   /* First determine which blocks can reach exit via normal paths.  */
-  tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
+  tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
 
   FOR_EACH_BB (bb)
     bb->flags &= ~BB_REACHABLE;
index a87979db2934899ccda2b8b57f005baaa563f8a4..dc20de46bee8c84088a473c97eba093495b6fca9 100644 (file)
@@ -3643,7 +3643,7 @@ dbr_schedule (rtx first)
 
   /* If the current function has no insns other than the prologue and
      epilogue, then do not try to fill any delay slots.  */
-  if (n_basic_blocks == NUM_FIXED_BLOCKS)
+  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
     return;
 
   /* Find the highest INSN_UID and allocate and initialize our map from
index 8496014a72b8b4f8b93b16d2a6aeab4df73aa622..287b826cfc6d00fa43923f4362d15ee7c6b4ae2c 100644 (file)
@@ -3963,7 +3963,7 @@ sched_deps_init (bool global_p)
 {
   /* Average number of insns in the basic block.
      '+ 1' is used to make it nonzero.  */
-  int insns_in_block = sched_max_luid / n_basic_blocks + 1;
+  int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
 
   init_deps_data_vector ();
 
index b70e071a7f132e0bb3238b36161786d65ef0e20d..8d23e33f89e4a4323b0dade90c8d42bd6bb49e5e 100644 (file)
@@ -625,7 +625,7 @@ schedule_ebbs (void)
 
   /* Taking care of this degenerate case makes the rest of
      this code simpler.  */
-  if (n_basic_blocks == NUM_FIXED_BLOCKS)
+  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
     return;
 
   if (profile_info && flag_branch_probabilities)
index b2a7dbd4a944b5f4928e85efcf441ea3a329b7cf..20c29c50fc9a419e04d6c936ef5bb71d38173193 100644 (file)
@@ -793,7 +793,7 @@ haifa_find_rgns (void)
       /* Second traversal:find reducible inner loops and topologically sort
         block of each region.  */
 
-      queue = XNEWVEC (int, n_basic_blocks);
+      queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
 
       extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
       if (extend_regions_p)
@@ -1153,7 +1153,7 @@ void
 extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
 {
   int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
-  int nblocks = n_basic_blocks - NUM_FIXED_BLOCKS;
+  int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
 
   max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
 
@@ -3115,7 +3115,7 @@ sched_rgn_init (bool single_blocks_p)
 
   /* Compute regions for scheduling.  */
   if (single_blocks_p
-      || n_basic_blocks == NUM_FIXED_BLOCKS + 1
+      || n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS + 1
       || !flag_schedule_interblock
       || is_cfg_nonregular ())
     {
@@ -3139,7 +3139,7 @@ sched_rgn_init (bool single_blocks_p)
        free_dominance_info (CDI_DOMINATORS);
     }
 
-  gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks);
+  gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks_for_fn (cfun));
 
   RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
                             RGN_NR_BLOCKS (nr_regions - 1));
@@ -3375,7 +3375,7 @@ schedule_insns (void)
 
   /* Taking care of this degenerate case makes the rest of
      this code simpler.  */
-  if (n_basic_blocks == NUM_FIXED_BLOCKS)
+  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
     return;
 
   rgn_setup_common_sched_info ();
@@ -3421,8 +3421,8 @@ rgn_add_remove_insn (rtx insn, int remove_p)
 void
 extend_regions (void)
 {
-  rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks);
-  rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks);
+  rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun));
+  rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks_for_fn (cfun));
   block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
   containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
 }
index 4eb27c5da5dea4e90414af0d24cb2f3c4c9efdec..90bf1e2da1bbcc44429ef303a525fed328bff712 100644 (file)
@@ -3649,7 +3649,7 @@ sel_recompute_toporder (void)
   int i, n, rgn;
   int *postorder, n_blocks;
 
-  postorder = XALLOCAVEC (int, n_basic_blocks);
+  postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun));
   n_blocks = post_order_compute (postorder, false, false);
 
   rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
@@ -4912,10 +4912,10 @@ recompute_rev_top_order (void)
                                         rev_top_order_index_len);
     }
 
-  postorder = XNEWVEC (int, n_basic_blocks);
+  postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
 
   n_blocks = post_order_compute (postorder, true, false);
-  gcc_assert (n_basic_blocks == n_blocks);
+  gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks);
 
   /* Build reverse function: for each basic block with BB->INDEX == K
      rev_top_order_index[K] is it's reverse topological sort number.  */
index 08fdc772292589dbe8ffa41b27f406df694f7ee3..c2d4185d6eca6e6487181e29f86001ddad15ca5c 100644 (file)
@@ -7764,7 +7764,7 @@ run_selective_scheduling (void)
 {
   int rgn;
 
-  if (n_basic_blocks == NUM_FIXED_BLOCKS)
+  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
     return;
 
   sel_global_init ();
index 68f293c6252189e1474d8dc9bd19e2f8d55bf656..ffbeed2a07171eb01f67a21bbecabd52be97dad7 100644 (file)
@@ -848,7 +848,7 @@ remove_reachable_equiv_notes (basic_block bb, struct st_expr *smexpr)
   rtx last, insn, note;
   rtx mem = smexpr->pattern;
 
-  stack = XNEWVEC (edge_iterator, n_basic_blocks);
+  stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun));
   sp = 0;
   ei = ei_start (bb->succs);
 
@@ -1208,7 +1208,7 @@ one_store_motion_pass (void)
   if (dump_file)
     {
       fprintf (dump_file, "STORE_MOTION of %s, %d basic blocks, ",
-              current_function_name (), n_basic_blocks);
+              current_function_name (), n_basic_blocks_for_fn (cfun));
       fprintf (dump_file, "%d insns deleted, %d insns created\n",
               n_stores_deleted, n_stores_created);
     }
index 71a9201fd0941ba392138f381376ee9357e9685b..1ff89c56b75871e87510c1c177877ee8b53c55a3 100644 (file)
@@ -227,7 +227,7 @@ static bool
 tail_duplicate (void)
 {
   fibnode_t *blocks = XCNEWVEC (fibnode_t, last_basic_block);
-  basic_block *trace = XNEWVEC (basic_block, n_basic_blocks);
+  basic_block *trace = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
   int *counts = XNEWVEC (int, last_basic_block);
   int ninsns = 0, nduplicated = 0;
   gcov_type weighted_insns = 0, traced_insns = 0;
@@ -371,7 +371,7 @@ tracer (void)
 {
   bool changed;
 
-  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
+  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
     return 0;
 
   mark_dfs_back_edges ();
index dbcf20ca2b8f987c0c97fd8e98d322bbd52c9208..751db306aabf162309a3128296c5a9d4430b2680 100644 (file)
@@ -173,7 +173,7 @@ init_empty_tree_cfg_for_function (struct function *fn)
   /* Initialize the basic block array.  */
   init_flow (fn);
   profile_status_for_function (fn) = PROFILE_ABSENT;
-  n_basic_blocks_for_function (fn) = NUM_FIXED_BLOCKS;
+  n_basic_blocks_for_fn (fn) = NUM_FIXED_BLOCKS;
   last_basic_block_for_function (fn) = NUM_FIXED_BLOCKS;
   vec_alloc (basic_block_info_for_function (fn), initial_cfg_capacity);
   vec_safe_grow_cleared (basic_block_info_for_function (fn),
@@ -230,12 +230,12 @@ build_gimple_cfg (gimple_seq seq)
     factor_computed_gotos ();
 
   /* Make sure there is always at least one block, even if it's empty.  */
-  if (n_basic_blocks == NUM_FIXED_BLOCKS)
+  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
     create_empty_bb (ENTRY_BLOCK_PTR);
 
   /* Adjust the size of the array.  */
-  if (basic_block_info->length () < (size_t) n_basic_blocks)
-    vec_safe_grow_cleared (basic_block_info, n_basic_blocks);
+  if (basic_block_info->length () < (size_t) n_basic_blocks_for_fn (cfun))
+    vec_safe_grow_cleared (basic_block_info, n_basic_blocks_for_fn (cfun));
 
   /* To speed up statement iterator walks, we first purge dead labels.  */
   cleanup_dead_labels ();
@@ -605,7 +605,7 @@ create_bb (void *h, void *e, basic_block after)
   /* Add the newly created block to the array.  */
   SET_BASIC_BLOCK (last_basic_block, bb);
 
-  n_basic_blocks++;
+  n_basic_blocks_for_fn (cfun)++;
   last_basic_block++;
 
   return bb;
@@ -2103,7 +2103,7 @@ gimple_dump_cfg (FILE *file, int flags)
     {
       dump_function_header (file, current_function_decl, flags);
       fprintf (file, ";; \n%d basic blocks, %d edges, last basic block %d.\n\n",
-              n_basic_blocks, n_edges, last_basic_block);
+              n_basic_blocks_for_fn (cfun), n_edges, last_basic_block);
 
       brief_dump_cfg (file, flags | TDF_COMMENT);
       fprintf (file, "\n");
@@ -2138,9 +2138,9 @@ dump_cfg_stats (FILE *file)
   fprintf (file, fmt_str, "", "  instances  ", "used ");
   fprintf (file, "---------------------------------------------------------\n");
 
-  size = n_basic_blocks * sizeof (struct basic_block_def);
+  size = n_basic_blocks_for_fn (cfun) * sizeof (struct basic_block_def);
   total += size;
-  fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks,
+  fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks_for_fn (cfun),
           SCALE (size), LABEL (size));
 
   num_edges = 0;
@@ -7028,7 +7028,7 @@ dump_function_to_file (tree fndecl, FILE *file, int flags)
       if (!ignore_topmost_bind)
        fprintf (file, "{\n");
 
-      if (any_var && n_basic_blocks_for_function (fun))
+      if (any_var && n_basic_blocks_for_fn (fun))
        fprintf (file, "\n");
 
       FOR_EACH_BB_FN (bb, fun)
@@ -7406,7 +7406,7 @@ gimple_flow_call_edges_add (sbitmap blocks)
   int last_bb = last_basic_block;
   bool check_last_block = false;
 
-  if (n_basic_blocks == NUM_FIXED_BLOCKS)
+  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
     return 0;
 
   if (! blocks)
index e864eed94f8bfef52851a7fec3f88d082e06859e..0d4c63d425968725415cfa6330f0684136267f21 100644 (file)
@@ -905,7 +905,7 @@ remove_forwarder_block_with_phi (basic_block bb)
 static unsigned int
 merge_phi_nodes (void)
 {
-  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
+  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
   basic_block *current = worklist;
   basic_block bb;
 
index caecbf27ee71e91f2a518fbc1fe2af1bc2cfc83b..d0eb27ba12c3a2466ad17007ca2c6472ca326711 100644 (file)
@@ -4425,7 +4425,7 @@ gimple_expand_calls_inline (basic_block bb, copy_body_data *id)
 static void
 fold_marked_statements (int first, struct pointer_set_t *statements)
 {
-  for (; first < n_basic_blocks; first++)
+  for (; first < n_basic_blocks_for_fn (cfun); first++)
     if (BASIC_BLOCK (first))
       {
         gimple_stmt_iterator gsi;
@@ -4513,7 +4513,7 @@ optimize_inline_calls (tree fn)
 {
   copy_body_data id;
   basic_block bb;
-  int last = n_basic_blocks;
+  int last = n_basic_blocks_for_fn (cfun);
   struct gimplify_ctx gctx;
   bool inlined_p = false;
 
index d3bb5b246cdefd4298af0068b44cedfdae09b615..8ce4871e025d0af42ae7dcf519d7a1ebb082c63c 100644 (file)
@@ -679,7 +679,7 @@ tree_ssa_ifcombine (void)
      inner ones, and also that we do not try to visit a removed
      block.  This is opposite of PHI-OPT, because we cascade the
      combining rather than cascading PHIs. */
-  for (i = n_basic_blocks - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
+  for (i = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
     {
       basic_block bb = bbs[i];
       gimple stmt = last_stmt (bb);
index 57c1555d28c7e51aa849a1957761a257443c3af5..138c0a7a0fb2ad78eba6a3f5784aad2206c5c0c7 100644 (file)
@@ -146,9 +146,9 @@ copy_loop_headers (void)
       return 0;
     }
 
-  bbs = XNEWVEC (basic_block, n_basic_blocks);
-  copied_bbs = XNEWVEC (basic_block, n_basic_blocks);
-  bbs_size = n_basic_blocks;
+  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+  copied_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+  bbs_size = n_basic_blocks_for_fn (cfun);
 
   FOR_EACH_LOOP (li, loop, 0)
     {
index 1c6113e6c638331d371bf280bd7afbbf8f99bd9e..7f29ea2053c064c0d113fa52678b1d2dae45c5cb 100644 (file)
@@ -1595,7 +1595,7 @@ analyze_memory_references (void)
   /* Collect all basic-blocks in loops and sort them after their
      loops postorder.  */
   i = 0;
-  bbs = XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
+  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
   FOR_EACH_BB (bb)
     if (bb->loop_father != current_loops->tree_root)
       bbs[i++] = bb;
index ae51ee66f07b3864d5cc70ddb4c91ba7b13197a7..31db43a2c050019777c102bba27ccbd1d6435925 100644 (file)
@@ -194,7 +194,7 @@ compute_live_loop_exits (bitmap live_exits, bitmap use_blocks,
   /* Normally the work list size is bounded by the number of basic
      blocks in the largest loop.  We don't know this number, but we
      can be fairly sure that it will be relatively small.  */
-  worklist.create (MAX (8, n_basic_blocks / 128));
+  worklist.create (MAX (8, n_basic_blocks_for_fn (cfun) / 128));
 
   EXECUTE_IF_SET_IN_BITMAP (use_blocks, 0, i, bi)
     {
index fb4ce58378d18179e27f28d61c1796924a2e3ea1..0c5dbb17aa5fba437e397ce1f3ab137d83665a8e 100644 (file)
@@ -512,7 +512,7 @@ execute_cse_reciprocals (void)
 
   occ_pool = create_alloc_pool ("dominators for recip",
                                sizeof (struct occurrence),
-                               n_basic_blocks / 3 + 1);
+                               n_basic_blocks_for_fn (cfun) / 3 + 1);
 
   memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
   calculate_dominance_info (CDI_DOMINATORS);
index dd0ebc170eabbae27d58f5d6bf70a6c954ed4580..a0d739881d61345fcd1617787104e88e9673976d 100644 (file)
@@ -338,7 +338,7 @@ tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
      outer ones, and also that we do not try to visit a removed
      block.  */
   bb_order = single_pred_before_succ_order ();
-  n = n_basic_blocks - NUM_FIXED_BLOCKS;
+  n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
 
   for (i = 0; i < n; i++)
     {
index 7052d94e49a46878d32c01f0f6d3a6c29061c074..6ab1b10d330f9f6fb2f8c43348c9c60359247b6b 100644 (file)
@@ -3724,7 +3724,7 @@ compute_avail (void)
     }
 
   /* Allocate the worklist.  */
-  worklist = XNEWVEC (basic_block, n_basic_blocks);
+  worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
 
   /* Seed the algorithm by putting the dominator children of the entry
      block on the worklist.  */
@@ -4655,7 +4655,7 @@ init_pre (void)
   connect_infinite_loops_to_exit ();
   memset (&pre_stats, 0, sizeof (pre_stats));
 
-  postorder = XNEWVEC (int, n_basic_blocks);
+  postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
   postorder_num = inverted_post_order_compute (postorder);
 
   alloc_aux_for_blocks (sizeof (struct bb_bitmap_sets));
@@ -4731,7 +4731,7 @@ do_pre (void)
      fixed, don't run it when he have an incredibly large number of
      bb's.  If we aren't going to run insert, there is no point in
      computing ANTIC, either, even though it's plenty fast.  */
-  if (n_basic_blocks < 4000)
+  if (n_basic_blocks_for_fn (cfun) < 4000)
     {
       compute_antic ();
       insert ();
index ec5ca02b5b4074f3a441baf007ec2ebe27378d3e..709b1c1ba635643489319c97cc891305fb8637d8 100644 (file)
@@ -4537,7 +4537,7 @@ init_reassoc (void)
 {
   int i;
   long rank = 2;
-  int *bbs = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
+  int *bbs = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
 
   /* Find the loops, so that we can prevent moving calculations in
      them.  */
@@ -4567,7 +4567,7 @@ init_reassoc (void)
     }
 
   /* Set up rank for each BB  */
-  for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
+  for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
     bb_rank[bbs[i]] = ++rank  << 16;
 
   free (bbs);
index 26bb190503debd390595ec4f9236d9395ab39984..786cfaa0988c3e072e6e9961129c08e95f30953a 100644 (file)
@@ -3981,13 +3981,14 @@ init_scc_vn (void)
   shared_lookup_phiargs.create (0);
   shared_lookup_references.create (0);
   rpo_numbers = XNEWVEC (int, last_basic_block);
-  rpo_numbers_temp = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
+  rpo_numbers_temp =
+    XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
   pre_and_rev_post_order_compute (NULL, rpo_numbers_temp, false);
 
   /* RPO numbers is an array of rpo ordering, rpo[i] = bb means that
      the i'th block in RPO order is bb.  We want to map bb's to RPO
      numbers, so we need to rearrange this array.  */
-  for (j = 0; j < n_basic_blocks - NUM_FIXED_BLOCKS; j++)
+  for (j = 0; j < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; j++)
     rpo_numbers[rpo_numbers_temp[j]] = j;
 
   XDELETE (rpo_numbers_temp);
index 8d81f57daf54cd5c6f1b07f146482dff47efb043..79be216569346123cdc68229e0e17eecaa626ff4 100644 (file)
@@ -763,11 +763,11 @@ static void
 init_worklist (void)
 {
   alloc_aux_for_blocks (sizeof (struct aux_bb_info));
-  same_succ_htab.create (n_basic_blocks);
+  same_succ_htab.create (n_basic_blocks_for_fn (cfun));
   same_succ_edge_flags = XCNEWVEC (int, last_basic_block);
   deleted_bbs = BITMAP_ALLOC (NULL);
   deleted_bb_preds = BITMAP_ALLOC (NULL);
-  worklist.create (n_basic_blocks);
+  worklist.create (n_basic_blocks_for_fn (cfun));
   find_same_succ ();
 
   if (dump_file && (dump_flags & TDF_DETAILS))
@@ -995,7 +995,7 @@ static vec<bb_cluster> all_clusters;
 static void
 alloc_cluster_vectors (void)
 {
-  all_clusters.create (n_basic_blocks);
+  all_clusters.create (n_basic_blocks_for_fn (cfun));
 }
 
 /* Reset all cluster vectors.  */
index 25f9f45b285f77e0fcbd64aa17c944b8a4bcfa37..e4b39986363505d717207dc7e7f9489d9dd01691 100644 (file)
@@ -193,7 +193,7 @@ associate_equivalences_with_edges (void)
 
              /* Now walk over the blocks to determine which ones were
                 marked as being reached by a useful case label.  */
-             for (i = 0; i < n_basic_blocks; i++)
+             for (i = 0; i < n_basic_blocks_for_fn (cfun); i++)
                {
                  tree node = info[i];
 
index edffd8b024cd25a5b926aac7920a8ea2580112c6..fc3fc93ca5b4b1310105400ee67dcc3433f98b16 100644 (file)
@@ -838,7 +838,7 @@ vt_stack_adjustments (void)
   VTI (ENTRY_BLOCK_PTR)->out.stack_adjust = INCOMING_FRAME_SP_OFFSET;
 
   /* Allocate stack for back-tracking up CFG.  */
-  stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
+  stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
   sp = 0;
 
   /* Push the first edge on to the stack.  */
@@ -6904,10 +6904,10 @@ vt_find_locations (void)
   timevar_push (TV_VAR_TRACKING_DATAFLOW);
   /* Compute reverse completion order of depth first search of the CFG
      so that the data-flow runs faster.  */
-  rc_order = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
+  rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
   bb_order = XNEWVEC (int, last_basic_block);
   pre_and_rev_post_order_compute (NULL, rc_order, false);
-  for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
+  for (i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; i++)
     bb_order[rc_order[i]] = i;
   free (rc_order);
 
@@ -10157,7 +10157,8 @@ variable_tracking_main_1 (void)
       return 0;
     }
 
-  if (n_basic_blocks > 500 && n_edges / n_basic_blocks >= 20)
+  if (n_basic_blocks_for_fn (cfun) > 500 &&
+      n_edges / n_basic_blocks_for_fn (cfun) >= 20)
     {
       vt_debug_insns_local (true);
       return 0;