+2013-11-19  David Malcolm  <dmalcolm@redhat.com>
+
+ * basic-block.h (ENTRY_BLOCK_PTR_FOR_FUNCTION): Rename macro to...
+ (ENTRY_BLOCK_PTR_FOR_FN): ...this.
+ (EXIT_BLOCK_PTR_FOR_FUNCTION): Rename macro to...
+ (EXIT_BLOCK_PTR_FOR_FN): ...this.
+ (ENTRY_BLOCK_PTR): Eliminate macro as work towards making uses of
+ cfun be explicit.
+ (EXIT_BLOCK_PTR): Likewise.
+ (FOR_ALL_BB): Rework for now to eliminate use of "ENTRY_BLOCK_PTR".
+ (FOR_ALL_BB_FN): Update for renaming of
+ "ENTRY_BLOCK_PTR_FOR_FUNCTION" to "ENTRY_BLOCK_PTR_FOR_FN".
+
+ * cfg.c (init_flow): Likewise.
+ (check_bb_profile): Likewise.
+ * cfganal.c (pre_and_rev_post_order_compute_fn): Likewise.
+ * cfgcleanup.c (walk_to_nondebug_insn): Likewise.
+ * cfghooks.c (account_profile_record): Likewise.
+ * cfgloop.c (init_loops_structure): Likewise.
+ * cgraphbuild.c (record_eh_tables): Likewise.
+ (compute_call_stmt_bb_frequency): Likewise.
+ * ipa-inline-analysis.c (compute_bb_predicates): Likewise.
+ * lto-streamer-in.c (input_cfg): Likewise.
+ * predict.c (maybe_hot_frequency_p): Likewise.
+ * tree-cfg.c (init_empty_tree_cfg_for_function): Likewise.
+ * tree-inline.c (initialize_cfun): Likewise.
+ (copy_cfg_body): Likewise.
+ (copy_body): Likewise.
+ (tree_function_versioning): Likewise.
+
+ * bb-reorder.c (add_labels_and_missing_jumps): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (duplicate_computed_gotos): Remove usage of EXIT_BLOCK_PTR macro.
+ (find_rarely_executed_basic_blocks_and_crossing_edges): Remove uses of
+ macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (connect_traces): Likewise.
+ (rest_of_handle_reorder_blocks): Remove usage of EXIT_BLOCK_PTR macro.
+ (bb_to_key): Remove usage of ENTRY_BLOCK_PTR macro.
+ (fix_crossing_conditional_branches): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ (find_traces_1_round): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (fix_up_fall_thru_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (find_traces): Remove usage of ENTRY_BLOCK_PTR macro.
+ (fix_up_crossing_landing_pad): Remove usage of EXIT_BLOCK_PTR macro.
+ (rotate_loop): Likewise.
+ * bt-load.c (migrate_btr_def): Remove usage of ENTRY_BLOCK_PTR macro.
+ * cfg.c (clear_aux_for_edges): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (alloc_aux_for_edges): Likewise.
+ (clear_bb_flags): Remove usage of ENTRY_BLOCK_PTR macro.
+ (cached_make_edge): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (compact_blocks): Likewise.
+ (clear_edges): Likewise.
+ * cfganal.c (single_pred_before_succ_order): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (bitmap_union_of_succs): Remove usage of EXIT_BLOCK_PTR macro.
+ (bitmap_union_of_preds): Remove usage of ENTRY_BLOCK_PTR macro.
+ (bitmap_intersection_of_succs): Remove usage of EXIT_BLOCK_PTR macro.
+ (bitmap_intersection_of_preds): Remove usage of ENTRY_BLOCK_PTR macro.
+ (inverted_post_order_compute): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (compute_dominance_frontiers_1): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (post_order_compute): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (connect_infinite_loops_to_exit): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ (remove_fake_edges): Remove usage of ENTRY_BLOCK_PTR macro.
+ (add_noreturn_fake_exit_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (find_pdom): Remove uses of macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (remove_fake_exit_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (verify_edge_list): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (print_edge_list): Likewise.
+ (create_edge_list): Likewise.
+ (find_unreachable_blocks): Remove usage of ENTRY_BLOCK_PTR macro.
+ (mark_dfs_back_edges): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ * cfgbuild.c (find_bb_boundaries): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (find_many_sub_basic_blocks): Remove usage of EXIT_BLOCK_PTR macro.
+ (make_edges): Remove uses of macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * cfgcleanup.c (delete_unreachable_blocks): Likewise.
+ (try_optimize_cfg): Likewise.
+ (try_head_merge_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (try_crossjump_to_edge): Remove usage of ENTRY_BLOCK_PTR macro.
+ (try_crossjump_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (merge_blocks_move): Remove usage of ENTRY_BLOCK_PTR macro.
+ (outgoing_edges_match): Remove usage of EXIT_BLOCK_PTR macro.
+ (try_forward_edges): Likewise.
+ (try_simplify_condjump): Likewise.
+ * cfgexpand.c (gimple_expand_cfg): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (construct_exit_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (construct_init_block): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (expand_gimple_basic_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (expand_gimple_tailcall): Likewise.
+ * cfghooks.c (can_duplicate_block_p): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (tidy_fallthru_edges): Likewise.
+ (verify_flow_info): Likewise.
+ * cfgloop.c (flow_bb_inside_loop_p): Likewise.
+ (num_loop_branches): Remove usage of EXIT_BLOCK_PTR macro.
+ (disambiguate_multiple_latches): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (get_loop_exit_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (bb_loop_header_p): Remove usage of ENTRY_BLOCK_PTR macro.
+ (get_loop_body_in_bfs_order): Remove usage of EXIT_BLOCK_PTR macro.
+ (get_loop_body_in_dom_order): Likewise.
+ (get_loop_body): Likewise.
+ * cfgloopanal.c (mark_irreducible_loops): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * cfgloopmanip.c (create_preheader): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (remove_path): Remove usage of EXIT_BLOCK_PTR macro.
+ (fix_bb_placement): Likewise.
+ * cfgrtl.c (rtl_block_empty_p): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (rtl_can_remove_branch_p): Remove usage of EXIT_BLOCK_PTR macro.
+ (cfg_layout_split_edge): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (rtl_flow_call_edges_add): Remove usage of EXIT_BLOCK_PTR macro.
+ (cfg_layout_can_merge_blocks_p): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (cfg_layout_redirect_edge_and_branch): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (fixup_fallthru_exit_predecessor): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (fixup_reorder_chain): Likewise.
+ (relink_block_chain): Likewise.
+ (cfg_layout_delete_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (rtl_verify_bb_layout): Remove usage of ENTRY_BLOCK_PTR macro.
+ (cfg_layout_duplicate_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (force_one_exit_fallthru): Likewise.
+ (rtl_verify_fallthru): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (rtl_verify_edges): Likewise.
+ (commit_edge_insertions): Likewise.
+ (commit_one_edge_insertion): Likewise.
+ (rtl_split_edge): Likewise.
+ (force_nonfallthru_and_redirect): Likewise.
+ (outof_cfg_layout_mode): Remove usage of EXIT_BLOCK_PTR macro.
+ (skip_insns_after_block): Likewise.
+ (fixup_partition_crossing): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (purge_dead_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (rtl_can_merge_blocks): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (contains_no_active_insn_p): Likewise.
+ (emit_insn_at_entry): Remove usage of ENTRY_BLOCK_PTR macro.
+ (entry_of_function): Likewise.
+ (last_bb_in_partition): Remove usage of EXIT_BLOCK_PTR macro.
+ (fixup_new_cold_bb): Likewise.
+ (patch_jump_insn): Likewise.
+ (try_redirect_by_replacing_jump): Likewise.
+ (block_label): Likewise.
+ (could_fall_through): Likewise.
+ (can_fallthru): Likewise.
+ * cgraphbuild.c (cgraph_rebuild_references): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (rebuild_cgraph_edges): Likewise.
+ * cgraphunit.c (init_lowered_empty_function): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (expand_thunk): Remove usage of EXIT_BLOCK_PTR macro.
+ * combine.c (get_last_value): Remove usage of ENTRY_BLOCK_PTR macro.
+ (distribute_links): Remove usage of EXIT_BLOCK_PTR macro.
+ (get_last_value_validate): Remove usage of ENTRY_BLOCK_PTR macro.
+ (try_combine): Remove usage of EXIT_BLOCK_PTR macro.
+ (reg_num_sign_bit_copies_for_combine): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (reg_nonzero_bits_for_combine): Likewise.
+ (set_nonzero_bits_and_sign_copies): Likewise.
+ (combine_instructions): Likewise.
+ * cprop.c (one_cprop_pass): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (bypass_conditional_jumps): Likewise.
+ (bypass_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (find_implicit_sets): Likewise.
+ (cprop_jump): Likewise.
+ * cse.c (cse_cc_succs): Likewise.
+ (cse_find_path): Likewise.
+ * df-problems.c (df_lr_confluence_0): Likewise.
+ * df-scan.c (df_entry_block_defs_collect): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (df_exit_block_uses_collect): Remove usage of EXIT_BLOCK_PTR macro.
+ * dominance.c (iterate_fix_dominators): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (calc_idoms): Remove uses of macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (determine_dominators_for_sons): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (calc_dfs_tree): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (prune_bbs_to_update_dominators): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (calc_dfs_tree_nonrec): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ * domwalk.c (cmp_bb_postorder): Likewise.
+ * dse.c (dse_step1): Remove usage of EXIT_BLOCK_PTR macro.
+ * except.c (finish_eh_generation): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (sjlj_emit_function_enter): Likewise.
+ * final.c (compute_alignments): Likewise.
+ * function.c (thread_prologue_and_epilogue_insns): Remove uses of
+ macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (reposition_prologue_and_epilogue_notes): Remove usage of
+ EXIT_BLOCK_PTR macro.
+ (convert_jumps_to_returns): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (regno_clobbered_at_setjmp): Remove usage of ENTRY_BLOCK_PTR macro.
+ (next_block_for_reg): Remove usage of EXIT_BLOCK_PTR macro.
+ * gcse.c (hoist_code): Remove usage of ENTRY_BLOCK_PTR macro.
+ (update_bb_reg_pressure): Remove usage of EXIT_BLOCK_PTR macro.
+ (compute_code_hoist_vbeinout): Likewise.
+ (should_hoist_expr_to_dom): Remove usage of ENTRY_BLOCK_PTR macro.
+ (pre_expr_reaches_here_p_work): Likewise.
+ * gimple-iterator.c (gsi_commit_edge_inserts): Likewise.
+ (gimple_find_edge_insert_loc): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ * gimple-ssa-strength-reduction.c (slsr_process_phi): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * graph.c (draw_cfg_nodes_for_loop): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ * graphite-clast-to-gimple.c (translate_clast_user): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * graphite-scop-detection.c (build_scops): Likewise.
+ (create_sese_edges): Remove usage of EXIT_BLOCK_PTR macro.
+ (scopdet_basic_block_info): Remove usage of ENTRY_BLOCK_PTR macro.
+ * haifa-sched.c (restore_bb_notes): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ (unlink_bb_notes): Likewise.
+ (create_check_block_twin): Likewise.
+ (init_before_recovery): Likewise.
+ (sched_extend_bb): Likewise.
+ (priority): Likewise.
+ * hw-doloop.c (reorder_loops): Likewise.
+ (discover_loop): Likewise.
+ * ifcvt.c (dead_or_predicable): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (find_if_case_1): Remove usage of EXIT_BLOCK_PTR macro.
+ (block_has_only_trap): Likewise.
+ (cond_exec_find_if_block): Likewise.
+ (merge_if_block): Likewise.
+ * ipa-inline-analysis.c (param_change_prob): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (record_modified): Likewise.
+ * ipa-pure-const.c (execute_warn_function_noreturn): Remove usage of
+ EXIT_BLOCK_PTR macro.
+ (local_pure_const): Likewise.
+ * ipa-split.c (split_function): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (find_split_points): Likewise.
+ (consider_split): Likewise.
+ (find_return_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (verify_non_ssa_vars): Remove usage of ENTRY_BLOCK_PTR macro.
+ * ira-build.c (ira_loop_tree_body_rev_postorder): Likewise.
+ * ira-color.c (print_loop_title): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ * ira-emit.c (entered_from_non_parent_p): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (ira_emit): Remove usage of EXIT_BLOCK_PTR macro.
+ * ira-int.h (ira_assert): Remove usage of ENTRY_BLOCK_PTR macro.
+ * ira.c (split_live_ranges_for_shrink_wrap): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * lcm.c (compute_rev_insert_delete): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (compute_nearerout): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (compute_farthest): Likewise.
+ (compute_available): Likewise.
+ (compute_insert_delete): Remove usage of EXIT_BLOCK_PTR macro.
+ (compute_laterin): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (compute_earliest): Likewise.
+ (compute_antinout_edge): Likewise.
+ * loop-iv.c (simplify_using_initial_values): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * loop-unswitch.c (unswitch_loop): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ * lra-assigns.c (find_hard_regno_for): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ * lra-constraints.c (lra_inheritance): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ * lra-lives.c (lra_create_live_ranges): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * lra.c (has_nonexceptional_receiver): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ * lto-streamer-in.c (input_function): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ * lto-streamer-out.c (output_cfg): Likewise.
+ * mcf.c (adjust_cfg_counts): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (create_fixup_graph): Remove usage of ENTRY_BLOCK_PTR macro.
+ * mode-switching.c (optimize_mode_switching): Likewise.
+ (create_pre_exit): Remove usage of EXIT_BLOCK_PTR macro.
+ * modulo-sched.c (rest_of_handle_sms): Likewise.
+ (canon_loop): Likewise.
+ * omp-low.c (build_omp_regions): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ * postreload-gcse.c (eliminate_partially_redundant_loads): Remove uses
+ of macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * predict.c (rebuild_frequencies): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (propagate_freq): Remove usage of EXIT_BLOCK_PTR macro.
+ (estimate_bb_frequencies): Remove usage of ENTRY_BLOCK_PTR macro.
+ (tree_estimate_probability_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (expensive_function_p): Remove usage of ENTRY_BLOCK_PTR macro.
+ (tree_bb_level_predictions): Remove usage of EXIT_BLOCK_PTR macro.
+ (counts_to_freqs): Remove usage of ENTRY_BLOCK_PTR macro.
+ (apply_return_prediction): Remove usage of EXIT_BLOCK_PTR macro.
+ (estimate_loops): Remove usage of ENTRY_BLOCK_PTR macro.
+ (gimple_predict_edge): Likewise.
+ (probably_never_executed): Likewise.
+ * profile.c (find_spanning_tree): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (branch_prob): Likewise.
+ (compute_branch_probabilities): Likewise.
+ (compute_frequency_overlap): Remove usage of ENTRY_BLOCK_PTR macro.
+ (is_inconsistent): Remove usage of EXIT_BLOCK_PTR macro.
+ (read_profile_edge_counts): Remove usage of ENTRY_BLOCK_PTR macro.
+ (set_bb_counts): Likewise.
+ (correct_negative_edge_counts): Likewise.
+ (get_exec_counts): Likewise.
+ (instrument_values): Likewise.
+ (instrument_edges): Likewise.
+ * reg-stack.c (convert_regs): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (compensate_edges): Remove usage of ENTRY_BLOCK_PTR macro.
+ (convert_regs_exit): Remove usage of EXIT_BLOCK_PTR macro.
+ (convert_regs_entry): Remove usage of ENTRY_BLOCK_PTR macro.
+ (reg_to_stack): Likewise.
+ * regs.h (REG_N_SETS): Likewise.
+ * reload.c (find_dummy_reload): Likewise.
+ (combine_reloads): Likewise.
+ (push_reload): Likewise.
+ * reload1.c (has_nonexceptional_receiver): Remove usage of
+ EXIT_BLOCK_PTR macro.
+ * resource.c (mark_target_live_regs): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (find_basic_block): Likewise.
+ * sched-ebb.c (ebb_add_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (schedule_ebbs): Likewise.
+ * sched-int.h (sel_sched_p): Likewise.
+ * sched-rgn.c (compute_dom_prob_ps): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (rgn_add_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (haifa_find_rgns): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (propagate_deps): Remove usage of EXIT_BLOCK_PTR macro.
+ (extend_rgns): Likewise.
+ (find_single_block_region): Likewise.
+ * sel-sched-ir.c (sel_remove_loop_preheader): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (setup_nop_and_exit_insns): Remove usage of EXIT_BLOCK_PTR macro.
+ (sel_create_recovery_block): Likewise.
+ (bb_ends_ebb_p): Likewise.
+ (sel_bb_end): Likewise.
+ (sel_bb_head): Likewise.
+ (free_lv_sets): Likewise.
+ (init_lv_sets): Likewise.
+ (tidy_control_flow): Likewise.
+ (maybe_tidy_empty_bb): Likewise.
+ * sel-sched-ir.h (_succ_iter_cond): Likewise.
+ (_succ_iter_start): Likewise.
+ (sel_bb_empty_or_nop_p): Likewise.
+ (get_loop_exit_edges_unique_dests): Likewise.
+ (inner_loop_header_p): Likewise.
+ * sel-sched.c (create_block_for_bookkeeping): Likewise.
+ (find_block_for_bookkeeping): Likewise.
+ * store-motion.c (remove_reachable_equiv_notes): Likewise.
+ (insert_store): Likewise.
+ * trans-mem.c (ipa_tm_transform_clone): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (tm_memopt_compute_available): Remove usage of EXIT_BLOCK_PTR macro.
+ (ipa_tm_scan_irr_function): Remove usage of ENTRY_BLOCK_PTR macro.
+ (gate_tm_init): Likewise.
+ (tm_region_init): Likewise.
+ * tree-cfg.c (execute_fixup_cfg): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (execute_warn_function_return): Remove usage of EXIT_BLOCK_PTR macro.
+ (split_critical_edges): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (print_loops): Remove usage of ENTRY_BLOCK_PTR macro.
+ (move_sese_region_to_fn): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (gimple_redirect_edge_and_branch): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (gimple_verify_flow_info): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (remove_edge_and_dominated_blocks): Remove usage of EXIT_BLOCK_PTR
+ macro.
+ (make_edges): Remove uses of macros: ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (gimple_flow_call_edges_add): Remove usage of EXIT_BLOCK_PTR macro.
+ (make_blocks): Remove usage of ENTRY_BLOCK_PTR macro.
+ (build_gimple_cfg): Likewise.
+ (gimple_duplicate_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (gimple_can_merge_blocks_p): Likewise.
+ * tree-cfgcleanup.c (tree_forwarder_block_p): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * tree-complex.c (update_parameter_components): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * tree-if-conv.c (get_loop_body_in_if_conv_order): Remove usage of
+ EXIT_BLOCK_PTR macro.
+ * tree-inline.c (tree_function_versioning): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (delete_unreachable_blocks_update_callgraph): Likewise.
+ (initialize_cfun): Likewise.
+ (copy_cfg_body): Remove usage of ENTRY_BLOCK_PTR macro.
+ (copy_edges_for_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (remap_ssa_name): Remove usage of ENTRY_BLOCK_PTR macro.
+ * tree-into-ssa.c (update_ssa): Likewise.
+ (maybe_register_def): Remove usage of EXIT_BLOCK_PTR macro.
+ (insert_updated_phi_nodes_for): Remove usage of ENTRY_BLOCK_PTR macro.
+ (rewrite_into_ssa): Likewise.
+ (rewrite_debug_stmt_uses): Likewise.
+ * tree-outof-ssa.c (expand_phi_nodes): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * tree-profile.c (gimple_gen_ic_func_profiler): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * tree-scalar-evolution.h (block_before_loop): Likewise.
+ * tree-sra.c (sra_ipa_reset_debug_stmts): Likewise.
+ (dump_dereferences_table): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (analyze_caller_dereference_legality): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (propagate_dereference_distances): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (initialize_parameter_reductions): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ * tree-ssa-ccp.c (gsi_prev_dom_bb_nondebug): Likewise.
+ (optimize_stack_restore): Remove usage of EXIT_BLOCK_PTR macro.
+ * tree-ssa-coalesce.c (create_outofssa_var_map): Likewise.
+ * tree-ssa-dce.c (eliminate_unnecessary_stmts): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (remove_dead_stmt): Remove usage of EXIT_BLOCK_PTR macro.
+ (propagate_necessity): Remove usage of ENTRY_BLOCK_PTR macro.
+ (mark_control_dependent_edges_necessary): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ * tree-ssa-dom.c (eliminate_degenerate_phis): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (tree_ssa_dominator_optimize): Remove usage of EXIT_BLOCK_PTR macro.
+ * tree-ssa-live.c (verify_live_on_entry): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (calculate_live_on_exit): Likewise.
+ (set_var_live_on_entry): Remove usage of ENTRY_BLOCK_PTR macro.
+ (loe_visit_block): Likewise.
+ * tree-ssa-live.h (live_on_exit): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (live_on_entry): Likewise.
+ * tree-ssa-loop-ivopts.c (find_interesting_uses): Remove usage of
+ EXIT_BLOCK_PTR macro.
+ * tree-ssa-loop-manip.c (compute_live_loop_exits): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * tree-ssa-loop-niter.c (simplify_using_initial_conditions): Likewise.
+ (bound_difference): Likewise.
+ * tree-ssa-loop-prefetch.c (may_use_storent_in_loop_p): Remove usage
+ of EXIT_BLOCK_PTR macro.
+ * tree-ssa-loop-unswitch.c (simplify_using_entry_checks): Remove usage
+ of ENTRY_BLOCK_PTR macro.
+ * tree-ssa-math-opts.c (register_division_in): Likewise.
+ * tree-ssa-phiprop.c (tree_ssa_phiprop): Likewise.
+ * tree-ssa-pre.c (compute_avail): Likewise.
+ (compute_antic): Remove usage of EXIT_BLOCK_PTR macro.
+ (insert): Remove usage of ENTRY_BLOCK_PTR macro.
+ * tree-ssa-propagate.c (ssa_prop_init): Likewise.
+ (simulate_block): Remove usage of EXIT_BLOCK_PTR macro.
+ (cfg_blocks_add): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ (add_control_edge): Remove usage of EXIT_BLOCK_PTR macro.
+ * tree-ssa-reassoc.c (do_reassoc): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (build_and_add_sum): Remove usage of ENTRY_BLOCK_PTR macro.
+ * tree-ssa-sink.c (nearest_common_dominator_of_uses): Likewise.
+ (execute_sink_code): Remove usage of EXIT_BLOCK_PTR macro.
+ * tree-ssa-uninit.c (find_dom): Remove usage of ENTRY_BLOCK_PTR macro.
+ (compute_control_dep_chain): Remove usage of EXIT_BLOCK_PTR macro.
+ (find_pdom): Likewise.
+ (warn_uninitialized_vars): Remove usage of ENTRY_BLOCK_PTR macro.
+ * tree-stdarg.c (reachable_at_most_once): Likewise.
+ * tree-tailcall.c (tree_optimize_tail_calls_1): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (eliminate_tail_call): Likewise.
+ * tsan.c (instrument_func_entry): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ (instrument_func_exit): Remove usage of EXIT_BLOCK_PTR macro.
+ * var-tracking.c (vt_initialize): Remove uses of macros:
+ ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR.
+ (vt_add_function_parameter): Remove usage of ENTRY_BLOCK_PTR macro.
+ (vt_find_locations): Remove usage of EXIT_BLOCK_PTR macro.
+ (vt_stack_adjustments): Remove uses of macros: ENTRY_BLOCK_PTR,
+ EXIT_BLOCK_PTR.
+ * varasm.c (assemble_start_function): Remove usage of ENTRY_BLOCK_PTR
+ macro.
+ * config/bfin/bfin.c (hwloop_optimize): Likewise.
+ * config/nds32/nds32.c (nds32_fp_as_gp_check_available): Remove usage
+ of EXIT_BLOCK_PTR macro.
+ * config/arm/arm.c (require_pic_register): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ (arm_r3_live_at_start_p): Likewise.
+ (any_sibcall_could_use_r3): Remove usage of EXIT_BLOCK_PTR macro.
+ * config/rs6000/rs6000.c (rs6000_emit_prologue): Likewise.
+ * config/frv/frv.c (frv_optimize_membar_global): Likewise.
+ * config/alpha/alpha.c (alpha_gp_save_rtx): Remove usage of
+ ENTRY_BLOCK_PTR macro.
+ * config/i386/i386.c (ix86_count_insn): Likewise.
+ (ix86_seh_fixup_eh_fallthru): Remove usage of EXIT_BLOCK_PTR macro.
+ (ix86_pad_short_function): Likewise.
+ (ix86_compute_frame_layout): Remove usage of ENTRY_BLOCK_PTR macro.
+ (ix86_pad_returns): Remove usage of EXIT_BLOCK_PTR macro.
+ (ix86_eax_live_at_start_p): Remove usage of ENTRY_BLOCK_PTR macro.
+ (add_condition_to_bb): Remove usage of EXIT_BLOCK_PTR macro.
+ (ix86_expand_epilogue): Likewise.
+ * config/ia64/ia64.c (ia64_asm_unwind_emit): Likewise.
+ (ia64_expand_prologue): Likewise.
+
2013-11-19  Catherine Moore  <clm@codesourcery.com>

 * doc/invoke.texi (mfix-rm7000, mno-fix-rm7000): Document.
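The rename recorded at the head of the ChangeLog above comes down to the
two accessor macros whose new definitions open the basic-block.h hunk
below. As a minimal, self-contained sketch of the pattern (the struct
bodies here are simplified stand-ins, not GCC's real declarations):

    #include <stdio.h>

    /* Simplified stand-ins for GCC's CFG types; only the shape of the
       accessor macros matters for this illustration.  */
    typedef struct basic_block_def { int index; } *basic_block;

    struct control_flow_graph
    {
      basic_block x_entry_block_ptr;
      basic_block x_exit_block_ptr;
    };

    struct function { struct control_flow_graph *cfg; };

    /* The renamed accessors: the function is an explicit argument.  */
    #define ENTRY_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_entry_block_ptr)
    #define EXIT_BLOCK_PTR_FOR_FN(FN)  ((FN)->cfg->x_exit_block_ptr)

    int
    main (void)
    {
      struct basic_block_def entry = { 0 };
      struct basic_block_def exit_blk = { 1 };
      struct control_flow_graph cfg = { &entry, &exit_blk };
      struct function fn = { &cfg };
      struct function *cfun = &fn;  /* stand-in for GCC's global "cfun" */

      /* Old spelling hid the global:   bb == ENTRY_BLOCK_PTR
         New spelling makes it visible: bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) */
      printf ("entry %d, exit %d\n",
              ENTRY_BLOCK_PTR_FOR_FN (cfun)->index,
              EXIT_BLOCK_PTR_FOR_FN (cfun)->index);
      return 0;
    }

Writing the cfun argument out at each call site is what makes the bulk of
the patch mechanical, and it is groundwork for eventually removing the
implicit dependence on the cfun global.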
};
/* Defines for accessing the fields of the CFG structure for function FN. */
-#define ENTRY_BLOCK_PTR_FOR_FUNCTION(FN) ((FN)->cfg->x_entry_block_ptr)
-#define EXIT_BLOCK_PTR_FOR_FUNCTION(FN) ((FN)->cfg->x_exit_block_ptr)
+#define ENTRY_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_entry_block_ptr)
+#define EXIT_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_exit_block_ptr)
#define basic_block_info_for_function(FN) ((FN)->cfg->x_basic_block_info)
#define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks)
#define n_edges_for_fn(FN) ((FN)->cfg->x_n_edges)
((*basic_block_info_for_function (FN))[(N)] = (BB))
/* Defines for textual backward source compatibility. */
-#define ENTRY_BLOCK_PTR (cfun->cfg->x_entry_block_ptr)
-#define EXIT_BLOCK_PTR (cfun->cfg->x_exit_block_ptr)
#define basic_block_info (cfun->cfg->x_basic_block_info)
#define last_basic_block (cfun->cfg->x_last_basic_block)
#define label_to_block_map (cfun->cfg->x_label_to_block_map)
exit block). */
#define FOR_ALL_BB(BB) \
- for (BB = ENTRY_BLOCK_PTR; BB; BB = BB->next_bb)
+ for (BB = ENTRY_BLOCK_PTR_FOR_FN (cfun); BB; BB = BB->next_bb)
#define FOR_ALL_BB_FN(BB, FN) \
- for (BB = ENTRY_BLOCK_PTR_FOR_FUNCTION (FN); BB; BB = BB->next_bb)
+ for (BB = ENTRY_BLOCK_PTR_FOR_FN (FN); BB; BB = BB->next_bb)
\f
/* Stuff for recording basic block info. */
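The reworked FOR_ALL_BB / FOR_ALL_BB_FN iterators above visit every block,
including the entry and exit blocks, by following the next_bb chain from
the entry block until it runs out. A sketch of that walk, reusing the same
kind of simplified stand-in types as the example earlier (an illustration,
not GCC's real definitions):

    #include <stdio.h>

    /* Stand-in types: blocks are chained through next_bb, entry block
       first, exit block last (its next_bb is NULL).  */
    typedef struct basic_block_def
    {
      int index;
      struct basic_block_def *next_bb;
    } *basic_block;

    struct control_flow_graph { basic_block x_entry_block_ptr; };
    struct function { struct control_flow_graph *cfg; };

    #define ENTRY_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_entry_block_ptr)

    #define FOR_ALL_BB_FN(BB, FN) \
      for (BB = ENTRY_BLOCK_PTR_FOR_FN (FN); BB; BB = BB->next_bb)

    int
    main (void)
    {
      /* Entry and exit blocks use GCC's reserved indices ENTRY_BLOCK (0)
         and EXIT_BLOCK (1); ordinary blocks start at 2.  */
      struct basic_block_def exit_blk = { 1, NULL };
      struct basic_block_def body = { 2, &exit_blk };
      struct basic_block_def entry = { 0, &body };
      struct control_flow_graph cfg = { &entry };
      struct function fn = { &cfg };
      basic_block bb;

      FOR_ALL_BB_FN (bb, &fn)
        printf ("visiting block %d\n", bb->index);
      return 0;
    }

With FOR_ALL_BB now rewritten as the cfun case of the same walk, the two
macros share a single definition of the iteration order.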
heap = fibheap_new ();
max_entry_frequency = 0;
max_entry_count = 0;
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
{
bbd[e->dest->index].heap = heap;
bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bb_visited_trace (e->dest) != trace_n
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX))
{
gcc_assert (!(e->flags & EDGE_FAKE));
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
if (bb_visited_trace (e->dest)
FOR_EACH_EDGE (e, ei, bb->succs)
{
if (e == best_edge
- || e->dest == EXIT_BLOCK_PTR
+ || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| bb_visited_trace (e->dest))
continue;
header is not the first block of the function
we can rotate the loop. */
- if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb)
+ if (best_edge->dest
+ != ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
{
if (dump_file)
{
is an end of the trace). */
FOR_EACH_EDGE (e, ei, bb->succs)
{
- if (e->dest == EXIT_BLOCK_PTR
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| bb_visited_trace (e->dest))
continue;
or whose predecessor edge is EDGE_DFS_BACK. */
FOR_EACH_EDGE (e, ei, bb->preds)
{
- if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0)
+ if ((e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && bbd[e->src->index].end_of_trace >= 0)
|| (e->flags & EDGE_DFS_BACK))
{
int edge_freq = EDGE_FREQUENCY (e);
{
int si = e->src->index;
- if (e->src != ENTRY_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX)
&& bbd[si].end_of_trace >= 0
{
int di = e->dest->index;
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX)
&& bbd[di].start_of_trace >= 0
bool try_copy = false;
FOR_EACH_EDGE (e, ei, traces[t].last->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX)
&& (!best || e->probability > best->probability))
{
int di = e2->dest->index;
- if (e2->dest == EXIT_BLOCK_PTR
+ if (e2->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| ((e2->flags & EDGE_CAN_FALLTHRU)
&& !(e2->flags & EDGE_COMPLEX)
&& bbd[di].start_of_trace >= 0
{
best = e;
best2 = e2;
- if (e2->dest != EXIT_BLOCK_PTR)
+ if (e2->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
best2_len = traces[bbd[di].start_of_trace].length;
else
best2_len = INT_MAX;
traces[t].last->index, best->dest->index);
if (!next_bb)
fputc ('\n', dump_file);
- else if (next_bb == EXIT_BLOCK_PTR)
+ else if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
fprintf (dump_file, "exit\n");
else
fprintf (dump_file, "%d\n", next_bb->index);
new_bb = copy_bb (best->dest, best, traces[t].last, t);
traces[t].last = new_bb;
- if (next_bb && next_bb != EXIT_BLOCK_PTR)
+ if (next_bb && next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
t = bbd[next_bb->index].start_of_trace;
traces[last_trace].last->aux = traces[t].first;
JUMP_LABEL (jump) = post_label;
/* Create new basic block to be dest for lp. */
- last_bb = EXIT_BLOCK_PTR->prev_bb;
+ last_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
new_bb = create_basic_block (new_label, jump, last_bb);
new_bb->aux = last_bb->aux;
last_bb->aux = new_bb;
/* We should never have EDGE_CROSSING set yet. */
gcc_checking_assert ((flags & EDGE_CROSSING) == 0);
- if (e->src != ENTRY_BLOCK_PTR
- && e->dest != EXIT_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& BB_PARTITION (e->src) != BB_PARTITION (e->dest))
{
crossing_edges.safe_push (e);
basic_block dest = e->dest;
rtx label, new_jump;
- if (dest == EXIT_BLOCK_PTR)
+ if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
/* Make sure dest has a label. */
label = block_label (dest);
/* Nothing to do for non-fallthru edges. */
- if (src == ENTRY_BLOCK_PTR)
+ if (src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
if ((e->flags & EDGE_FALLTHRU) == 0)
continue;
}
}
- if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
+ if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)))
{
/* Check to see if the fall-thru edge is a crossing edge. */
new_jump = emit_jump_insn (gen_jump (old_label));
JUMP_LABEL (new_jump) = old_label;
- last_bb = EXIT_BLOCK_PTR->prev_bb;
+ last_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
new_bb = create_basic_block (new_label, new_jump, last_bb);
new_bb->aux = last_bb->aux;
last_bb->aux = new_bb;
cleanup_cfg (CLEANUP_EXPENSIVE);
FOR_EACH_BB (bb)
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
cfg_layout_finalize ();
int size, all_flags;
/* Build the reorder chain for the original order of blocks. */
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
/* Obviously the block has to end in a computed jump. */
the exit block or the next block.
The destination must have more than one predecessor. */
if (!single_succ_p (bb)
- || single_succ (bb) == EXIT_BLOCK_PTR
+ || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| single_succ (bb) == bb->next_bb
|| single_pred_p (single_succ (bb)))
continue;
def_basic_block_freq = basic_block_freq (def->bb);
for (attempt = get_immediate_dominator (CDI_DOMINATORS, def->bb);
- !give_up && attempt && attempt != ENTRY_BLOCK_PTR && def->cost >= min_cost;
+ !give_up && attempt && attempt != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && def->cost >= min_cost;
attempt = get_immediate_dominator (CDI_DOMINATORS, attempt))
{
/* Try to move the instruction that sets the target register into
if (!the_fun->cfg)
the_fun->cfg = ggc_alloc_cleared_control_flow_graph ();
n_edges_for_fn (the_fun) = 0;
- ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun)
+ ENTRY_BLOCK_PTR_FOR_FN (the_fun)
= ggc_alloc_cleared_basic_block_def ();
- ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun)->index = ENTRY_BLOCK;
- EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)
+ ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
+ EXIT_BLOCK_PTR_FOR_FN (the_fun)
= ggc_alloc_cleared_basic_block_def ();
- EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)->index = EXIT_BLOCK;
- ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun)->next_bb
- = EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun);
- EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)->prev_bb
- = ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun);
+ EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
+ ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
+ = EXIT_BLOCK_PTR_FOR_FN (the_fun);
+ EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
+ = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
}
\f
/* Helper function for remove_edge and clear_edges. Frees edge structure
vec_safe_truncate (bb->preds, 0);
}
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
free_edge (e);
- vec_safe_truncate (EXIT_BLOCK_PTR->preds, 0);
- vec_safe_truncate (ENTRY_BLOCK_PTR->succs, 0);
+ vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, 0);
+ vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs, 0);
gcc_assert (!n_edges_for_fn (cfun));
}
{
int i;
- SET_BASIC_BLOCK (ENTRY_BLOCK, ENTRY_BLOCK_PTR);
- SET_BASIC_BLOCK (EXIT_BLOCK, EXIT_BLOCK_PTR);
+ SET_BASIC_BLOCK (ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ SET_BASIC_BLOCK (EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));
if (df)
df_compact_blocks ();
cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
{
if (edge_cache == NULL
- || src == ENTRY_BLOCK_PTR
- || dst == EXIT_BLOCK_PTR)
+ || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
return make_edge (src, dst, flags);
/* Does the requested edge already exist? */
{
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
bb->flags &= BB_FLAGS_TO_PRESERVE;
}
\f
if (profile_status_for_function (fun) == PROFILE_ABSENT)
return;
- if (bb != EXIT_BLOCK_PTR_FOR_FUNCTION (fun))
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
{
FOR_EACH_EDGE (e, ei, bb->succs)
sum += e->probability;
(flags & TDF_COMMENT) ? ";; " : "", s_indent,
(int) lsum, (int) bb->count);
}
- if (bb != ENTRY_BLOCK_PTR_FOR_FUNCTION (fun))
+ if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
{
sum = 0;
FOR_EACH_EDGE (e, ei, bb->preds)
{
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge e;
edge_iterator ei;
basic_block bb;
edge e;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
bitmap_clear (visited);
/* Push the first edge on to the stack. */
- stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);
+ stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
while (sp)
{
ei_edge (ei)->flags &= ~EDGE_DFS_BACK;
/* Check if the edge destination has been visited yet. */
- if (dest != EXIT_BLOCK_PTR && ! bitmap_bit_p (visited, dest->index))
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && ! bitmap_bit_p (visited, dest->index))
{
/* Mark that we have visited the destination. */
bitmap_set_bit (visited, dest->index);
}
else
{
- if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& pre[src->index] >= pre[dest->index]
&& post[dest->index] == 0)
ei_edge (ei)->flags |= EDGE_DFS_BACK, found = true;
- if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR)
+ if (ei_one_before_end_p (ei)
+ && src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
post[src->index] = postnum++;
if (!ei_one_before_end_p (ei))
be only one. It isn't inconceivable that we might one day directly
support Fortran alternate entry points. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
{
*tos++ = e->dest;
/* Determine the number of edges in the flow graph by counting successor
edges on each basic block. */
num_edges = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
num_edges += EDGE_COUNT (bb->succs);
}
num_edges = 0;
/* Follow successors of blocks, and register these edges. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
FOR_EACH_EDGE (e, ei, bb->succs)
elist->index_to_edge[num_edges++] = e;
for (x = 0; x < elist->num_edges; x++)
{
fprintf (f, " %-4d - edge(", x);
- if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR)
+ if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
fprintf (f, "entry,");
else
fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->index);
- if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR)
+ if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR_FOR_FN (cfun))
fprintf (f, "exit)\n");
else
fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->index);
basic_block bb, p, s;
edge_iterator ei;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
FOR_EACH_EDGE (e, ei, bb->succs)
{
/* We've verified that all the edges are in the list, now lets make sure
there are no spurious edges in the list. This is an expensive check! */
- FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
- FOR_BB_BETWEEN (s, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
+ FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
+ FOR_BB_BETWEEN (s, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, NULL, next_bb)
{
int found_edge = 0;
control_dependences::set_control_dependence_map_bit (basic_block bb,
int edge_index)
{
- if (bb == ENTRY_BLOCK_PTR)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return;
- gcc_assert (bb != EXIT_BLOCK_PTR);
+ gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
bitmap_set_bit (control_dependence_map[bb->index], edge_index);
}
static inline basic_block
find_pdom (basic_block block)
{
- gcc_assert (block != ENTRY_BLOCK_PTR);
+ gcc_assert (block != ENTRY_BLOCK_PTR_FOR_FN (cfun));
- if (block == EXIT_BLOCK_PTR)
- return EXIT_BLOCK_PTR;
+ if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ return EXIT_BLOCK_PTR_FOR_FN (cfun);
else
{
basic_block bb = get_immediate_dominator (CDI_POST_DOMINATORS, block);
if (! bb)
- return EXIT_BLOCK_PTR;
+ return EXIT_BLOCK_PTR_FOR_FN (cfun);
return bb;
}
}
basic_block current_block;
basic_block ending_block;
- gcc_assert (INDEX_EDGE_PRED_BB (m_el, edge_index) != EXIT_BLOCK_PTR);
+ gcc_assert (INDEX_EDGE_PRED_BB (m_el, edge_index)
+ != EXIT_BLOCK_PTR_FOR_FN (cfun));
- if (INDEX_EDGE_PRED_BB (m_el, edge_index) == ENTRY_BLOCK_PTR)
- ending_block = single_succ (ENTRY_BLOCK_PTR);
+ if (INDEX_EDGE_PRED_BB (m_el, edge_index) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ ending_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
else
ending_block = find_pdom (INDEX_EDGE_PRED_BB (m_el, edge_index));
for (current_block = INDEX_EDGE_SUCC_BB (m_el, edge_index);
- current_block != ending_block && current_block != EXIT_BLOCK_PTR;
+ current_block != ending_block
+ && current_block != EXIT_BLOCK_PTR_FOR_FN (cfun);
current_block = find_pdom (current_block))
{
edge e = INDEX_EDGE (m_el, edge_index);
{
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, NULL, next_bb)
remove_fake_predecessors (bb);
}
void
remove_fake_exit_edges (void)
{
- remove_fake_predecessors (EXIT_BLOCK_PTR);
+ remove_fake_predecessors (EXIT_BLOCK_PTR_FOR_FN (cfun));
}
FOR_EACH_BB (bb)
if (EDGE_COUNT (bb->succs) == 0)
- make_single_succ_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+ make_single_succ_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}
/* This function adds a fake edge between any infinite loops to the
void
connect_infinite_loops_to_exit (void)
{
- basic_block unvisited_block = EXIT_BLOCK_PTR;
+ basic_block unvisited_block = EXIT_BLOCK_PTR_FOR_FN (cfun);
basic_block deadend_block;
struct depth_first_search_dsS dfs_ds;
/* Perform depth-first search in the reverse graph to find nodes
reachable from the exit block. */
flow_dfs_compute_reverse_init (&dfs_ds);
- flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR);
+ flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR_FOR_FN (cfun));
/* Repeatedly add fake edges, updating the unreachable nodes. */
while (1)
break;
deadend_block = dfs_find_deadend (unvisited_block);
- make_edge (deadend_block, EXIT_BLOCK_PTR, EDGE_FAKE);
+ make_edge (deadend_block, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
flow_dfs_compute_reverse_add_bb (&dfs_ds, deadend_block);
}
bitmap_clear (visited);
/* Push the first edge on to the stack. */
- stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);
+ stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
while (sp)
{
dest = ei_edge (ei)->dest;
/* Check if the edge destination has been visited yet. */
- if (dest != EXIT_BLOCK_PTR && ! bitmap_bit_p (visited, dest->index))
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && ! bitmap_bit_p (visited, dest->index))
{
/* Mark that we have visited the destination. */
bitmap_set_bit (visited, dest->index);
}
else
{
- if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR)
+ if (ei_one_before_end_p (ei)
+ && src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
post_order[post_order_num++] = src->index;
if (!ei_one_before_end_p (ei))
{
basic_block b;
basic_block next_bb;
- for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
+ for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
+ b != EXIT_BLOCK_PTR_FOR_FN (cfun); b = next_bb)
{
next_bb = b->next_bb;
}
else
{
- if (bb != EXIT_BLOCK_PTR && ei_one_before_end_p (ei))
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && ei_one_before_end_p (ei))
post_order[post_order_num++] = bb->index;
if (!ei_one_before_end_p (ei))
/* Detect any infinite loop and activate the kludge.
Note that this doesn't check EXIT_BLOCK itself
since EXIT_BLOCK is always added after the outer do-while loop. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
if (!bitmap_bit_p (visited, bb->index))
{
has_unvisited_bb = true;
{
/* No blocks are reachable from EXIT at all.
Find a dead-end from the ENTRY, and restart the iteration. */
- basic_block be = dfs_find_deadend (ENTRY_BLOCK_PTR);
+ basic_block be = dfs_find_deadend (ENTRY_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (be != NULL);
bitmap_set_bit (visited, be->index);
stack[sp++] = ei_start (be->preds);
bitmap_clear (visited);
/* Push the first edge on to the stack. */
- stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->succs);
+ stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (fn)->succs);
while (sp)
{
dest = ei_edge (ei)->dest;
/* Check if the edge destination has been visited yet. */
- if (dest != EXIT_BLOCK_PTR_FOR_FUNCTION (fn)
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (fn)
&& ! bitmap_bit_p (visited, dest->index))
{
/* Mark that we have visited the destination. */
else
{
if (ei_one_before_end_p (ei)
- && src != ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)
+ && src != ENTRY_BLOCK_PTR_FOR_FN (fn)
&& rev_post_order)
/* There are no more successors for the SRC node
so assign its reverse completion number. */
{
basic_block runner = p->src;
basic_block domsb;
- if (runner == ENTRY_BLOCK_PTR)
+ if (runner == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
domsb = get_immediate_dominator (CDI_DOMINATORS, b);
for (e = NULL, ix = 0; ix < EDGE_COUNT (b->succs); ix++)
{
e = EDGE_SUCC (b, ix);
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
bitmap_copy (dst, src[e->dest->index]);
SBITMAP_ELT_TYPE *p, *r;
e = EDGE_SUCC (b, ix);
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
p = src[e->dest->index]->elms;
for (e = NULL, ix = 0; ix < EDGE_COUNT (b->preds); ix++)
{
e = EDGE_PRED (b, ix);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
bitmap_copy (dst, src[e->src->index]);
SBITMAP_ELT_TYPE *p, *r;
e = EDGE_PRED (b, ix);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
p = src[e->src->index]->elms;
for (ix = 0; ix < EDGE_COUNT (b->succs); ix++)
{
e = EDGE_SUCC (b, ix);
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
bitmap_copy (dst, src[e->dest->index]);
SBITMAP_ELT_TYPE *p, *r;
e = EDGE_SUCC (b, ix);
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
p = src[e->dest->index]->elms;
for (ix = 0; ix < EDGE_COUNT (b->preds); ix++)
{
e = EDGE_PRED (b, ix);
- if (e->src== ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
bitmap_copy (dst, src[e->src->index]);
SBITMAP_ELT_TYPE *p, *r;
e = EDGE_PRED (b, ix);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
p = src[e->src->index]->elms;
bitmap_clear (visited);
- MARK_VISITED (ENTRY_BLOCK_PTR);
+ MARK_VISITED (ENTRY_BLOCK_PTR_FOR_FN (cfun));
FOR_EACH_BB (x)
{
if (VISITED_P (x))
/* By nature of the way these get numbered, ENTRY_BLOCK_PTR->next_bb block
is always the entry. */
- if (min == ENTRY_BLOCK_PTR->next_bb)
- make_edge (ENTRY_BLOCK_PTR, min, EDGE_FALLTHRU);
+ if (min == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), min, EDGE_FALLTHRU);
FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb)
{
if (update_p)
{
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_set_bit (edge_cache, e->dest->index);
}
}
if (LABEL_P (BB_HEAD (bb))
&& LABEL_ALT_ENTRY_P (BB_HEAD (bb)))
- cached_make_edge (NULL, ENTRY_BLOCK_PTR, bb, 0);
+ cached_make_edge (NULL, ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, 0);
/* Examine the last instruction of the block, and discover the
ways we can leave the block. */
/* Returns create an exit out. */
else if (returnjump_p (insn))
- cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR, 0);
+ cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
/* Recognize asm goto and do the right thing. */
else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
worry about EH edges, since we wouldn't have created the sibling call
in the first place. */
if (code == CALL_INSN && SIBLING_CALL_P (insn))
- cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR,
+ cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
EDGE_SIBCALL | EDGE_ABNORMAL);
/* If this is a CALL_INSN, then mark it as reaching the active EH
/* Find out if we can drop through to the next block. */
insn = NEXT_INSN (insn);
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
if (e && e->flags & EDGE_FALLTHRU)
insn = NULL;
insn = NEXT_INSN (insn);
if (!insn)
- cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
- else if (bb->next_bb != EXIT_BLOCK_PTR)
+ cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
+ EDGE_FALLTHRU);
+ else if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
if (insn == BB_HEAD (bb->next_bb))
cached_make_edge (edge_cache, bb, bb->next_bb, EDGE_FALLTHRU);
remove_edge (fallthru);
flow_transfer_insn = NULL_RTX;
if (code == CODE_LABEL && LABEL_ALT_ENTRY_P (insn))
- make_edge (ENTRY_BLOCK_PTR, bb, 0);
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, 0);
}
else if (code == BARRIER)
{
break;
min = max = bb;
- for (; bb != EXIT_BLOCK_PTR; bb = bb->next_bb)
+ for (; bb != EXIT_BLOCK_PTR_FOR_FN (cfun); bb = bb->next_bb)
if (STATE (bb) != BLOCK_ORIGINAL)
max = bb;
unconditional jump. */
jump_block = cbranch_fallthru_edge->dest;
if (!single_pred_p (jump_block)
- || jump_block->next_bb == EXIT_BLOCK_PTR
+ || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| !FORWARDER_BLOCK_P (jump_block))
return false;
jump_dest_block = single_succ (jump_block);
unconditional branch. */
cbranch_dest_block = cbranch_jump_edge->dest;
- if (cbranch_dest_block == EXIT_BLOCK_PTR
+ if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| !can_fallthru (jump_block, cbranch_dest_block))
return false;
bb-reorder.c:partition_hot_cold_basic_blocks for complete
details. */
- if (first != EXIT_BLOCK_PTR
+ if (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
return changed;
if (FORWARDER_BLOCK_P (target)
&& !(single_succ_edge (target)->flags & EDGE_CROSSING)
- && single_succ (target) != EXIT_BLOCK_PTR)
+ && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* Bypass trivial infinite loops. */
new_target = single_succ (target);
e->goto_locus = goto_locus;
/* Don't force if target is exit block. */
- if (threaded && target != EXIT_BLOCK_PTR)
+ if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
notice_new_block (redirect_edge_and_branch_force (e, target));
if (dump_file)
fprintf (dump_file, "Merged %d and %d without moving.\n",
b_index, c_index);
- return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
+ return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
}
/* Otherwise we will need to move code around. Do that only if expensive
if (! c_has_outgoing_fallthru)
{
merge_blocks_move_successor_nojumps (b, c);
- return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
+ return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
}
/* If B does not have an incoming fallthru, then it can be moved
{
basic_block bb;
- if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
+ if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return NULL;
bb = force_nonfallthru (b_fallthru_edge);
if (bb)
}
merge_blocks_move_predecessor_nojumps (b, c);
- return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
+ return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
}
return NULL;
return;
fallthru = find_fallthru_edge ((*bb1)->preds);
- if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
+ if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
|| !single_succ_p (fallthru->src))
return;
whether they went through the prologue. Sibcalls are fine, we know
that we either didn't need or inserted an epilogue before them. */
if (crtl->shrink_wrapped
- && single_succ_p (bb1) && single_succ (bb1) == EXIT_BLOCK_PTR
+ && single_succ_p (bb1)
+ && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& !JUMP_P (BB_END (bb1))
&& !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
return false;
e2 = single_pred_edge (src2), src2 = e2->src;
/* Nothing to do if we reach ENTRY, or a common source block. */
- if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
+ if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || src2 == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return false;
if (src1 == src2)
return false;
/* Don't crossjump if this block ends in a computed jump,
unless we are optimizing for size. */
if (optimize_bb_for_size_p (bb)
- && bb != EXIT_BLOCK_PTR
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& computed_jump_p (BB_END (bb)))
return false;
/* Don't crossjump if this block ends in a computed jump,
unless we are optimizing for size. */
if (optimize_bb_for_size_p (bb)
- && bb != EXIT_BLOCK_PTR
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& computed_jump_p (BB_END (bb)))
return false;
}
for (ix = 0; ix < nedges; ix++)
- if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR)
+ if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
for (ix = 0; ix < nedges; ix++)
"\n\ntry_optimize_cfg iteration %i\n\n",
iterations);
- for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
+ for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
+ b != EXIT_BLOCK_PTR_FOR_FN (cfun);)
{
basic_block c;
edge s;
if (EDGE_COUNT (b->preds) == 0
|| (EDGE_COUNT (b->succs) == 0
&& trivially_empty_bb_p (b)
- && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
+ && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
+ != b))
{
c = b->prev_bb;
if (EDGE_COUNT (b->preds) > 0)
delete_basic_block (b);
changed = true;
/* Avoid trying to remove ENTRY_BLOCK_PTR. */
- b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
+ b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
continue;
}
if CASE_DROPS_THRU, this can be a tablejump with
some element going to the same place as the
default (fallthru). */
- && (single_pred (b) == ENTRY_BLOCK_PTR
+ && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
|| !JUMP_P (BB_END (single_pred (b)))
|| ! label_is_jump_target_p (BB_HEAD (b),
BB_END (single_pred (b)))))
"Deleting fallthru block %i.\n",
b->index);
- c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
+ c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ ? b->next_bb : b->prev_bb);
redirect_edge_succ_nodup (single_pred_edge (b),
single_succ (b));
delete_basic_block (b);
if (single_succ_p (b)
&& (s = single_succ_edge (b))
&& !(s->flags & EDGE_COMPLEX)
- && (c = s->dest) != EXIT_BLOCK_PTR
+ && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& single_pred_p (c)
&& b != c)
{
can either delete the jump entirely, or replace it
with a simple unconditional jump. */
if (single_succ_p (b)
- && single_succ (b) != EXIT_BLOCK_PTR
+ && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& onlyjump_p (BB_END (b))
&& !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
&& try_redirect_by_replacing_jump (single_succ_edge (b),
}
if ((mode & CLEANUP_CROSSJUMP)
- && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
+ && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
changed = true;
if (block_was_dirty)
if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
&& dom_info_available_p (CDI_DOMINATORS))
{
- for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
+ for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
+ b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
{
prev_bb = b->prev_bb;
}
else
{
- for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
+ for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
+ b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
{
prev_bb = b->prev_bb;
{
if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH)))
{
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
e->dest->count -= e->count;
e->dest->frequency -= EDGE_FREQUENCY (e);
delete_insn (NEXT_INSN (last));
}
- e = make_edge (bb, EXIT_BLOCK_PTR, EDGE_ABNORMAL | EDGE_SIBCALL);
+ e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
+ EDGE_ABNORMAL | EDGE_SIBCALL);
e->probability += probability;
e->count += count;
BB_END (bb) = last;
gimple ret_stmt = gsi_stmt (gsi);
gcc_assert (single_succ_p (bb));
- gcc_assert (single_succ (bb) == EXIT_BLOCK_PTR);
+ gcc_assert (single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun));
- if (bb->next_bb == EXIT_BLOCK_PTR
+ if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& !gimple_return_retval (ret_stmt))
{
gsi_remove (&gsi, false);
int flags;
/* Multiple entry points not supported yet. */
- gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1);
- init_rtl_bb_info (ENTRY_BLOCK_PTR);
- init_rtl_bb_info (EXIT_BLOCK_PTR);
- ENTRY_BLOCK_PTR->flags |= BB_RTL;
- EXIT_BLOCK_PTR->flags |= BB_RTL;
+ gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1);
+ init_rtl_bb_info (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ init_rtl_bb_info (EXIT_BLOCK_PTR_FOR_FN (cfun));
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_RTL;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_RTL;
- e = EDGE_SUCC (ENTRY_BLOCK_PTR, 0);
+ e = EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0);
/* When entry edge points to first basic block, we don't need jump,
otherwise we have to jump into proper target. */
- if (e && e->dest != ENTRY_BLOCK_PTR->next_bb)
+ if (e && e->dest != ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
{
tree label = gimple_block_label (e->dest);
init_block = create_basic_block (NEXT_INSN (get_insns ()),
get_last_insn (),
- ENTRY_BLOCK_PTR);
- init_block->frequency = ENTRY_BLOCK_PTR->frequency;
- init_block->count = ENTRY_BLOCK_PTR->count;
- if (current_loops && ENTRY_BLOCK_PTR->loop_father)
- add_bb_to_loop (init_block, ENTRY_BLOCK_PTR->loop_father);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ init_block->frequency = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
+ init_block->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
+ if (current_loops && ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father)
+ add_bb_to_loop (init_block, ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father);
if (e)
{
first_block = e->dest;
e = make_edge (init_block, first_block, flags);
}
else
- e = make_edge (init_block, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
+ e = make_edge (init_block, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FALLTHRU);
e->probability = REG_BR_PROB_BASE;
- e->count = ENTRY_BLOCK_PTR->count;
+ e->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
update_bb_for_insn (init_block);
return init_block;
edge e, e2;
unsigned ix;
edge_iterator ei;
- rtx orig_end = BB_END (EXIT_BLOCK_PTR->prev_bb);
+ rtx orig_end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
- rtl_profile_for_bb (EXIT_BLOCK_PTR);
+ rtl_profile_for_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
/* Make sure the locus is set to the end of the function, so that
epilogue line numbers and warnings are set properly. */
return;
/* While emitting the function end we could move end of the last basic
block.  */
- BB_END (EXIT_BLOCK_PTR->prev_bb) = orig_end;
+ BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = orig_end;
while (NEXT_INSN (head) && NOTE_P (NEXT_INSN (head)))
head = NEXT_INSN (head);
exit_block = create_basic_block (NEXT_INSN (head), end,
- EXIT_BLOCK_PTR->prev_bb);
- exit_block->frequency = EXIT_BLOCK_PTR->frequency;
- exit_block->count = EXIT_BLOCK_PTR->count;
- if (current_loops && EXIT_BLOCK_PTR->loop_father)
- add_bb_to_loop (exit_block, EXIT_BLOCK_PTR->loop_father);
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
+ exit_block->frequency = EXIT_BLOCK_PTR_FOR_FN (cfun)->frequency;
+ exit_block->count = EXIT_BLOCK_PTR_FOR_FN (cfun)->count;
+ if (current_loops && EXIT_BLOCK_PTR_FOR_FN (cfun)->loop_father)
+ add_bb_to_loop (exit_block, EXIT_BLOCK_PTR_FOR_FN (cfun)->loop_father);
ix = 0;
- while (ix < EDGE_COUNT (EXIT_BLOCK_PTR->preds))
+ while (ix < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
{
- e = EDGE_PRED (EXIT_BLOCK_PTR, ix);
+ e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), ix);
if (!(e->flags & EDGE_ABNORMAL))
redirect_edge_succ (e, exit_block);
else
ix++;
}
- e = make_edge (exit_block, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
+ e = make_edge (exit_block, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FALLTHRU);
e->probability = REG_BR_PROB_BASE;
- e->count = EXIT_BLOCK_PTR->count;
- FOR_EACH_EDGE (e2, ei, EXIT_BLOCK_PTR->preds)
+ e->count = EXIT_BLOCK_PTR_FOR_FN (cfun)->count;
+ FOR_EACH_EDGE (e2, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (e2 != e)
{
e->count -= e2->count;
/* Dominators are not kept up-to-date as we may create new basic-blocks. */
free_dominance_info (CDI_DOMINATORS);
- rtl_profile_for_bb (ENTRY_BLOCK_PTR);
+ rtl_profile_for_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
insn_locations_init ();
if (!DECL_IS_BUILTIN (current_function_decl))
/* Clear EDGE_EXECUTABLE on the entry edge(s). It is cleaned from the
remaining edges later. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
e->flags &= ~EDGE_EXECUTABLE;
lab_rtx_for_bb = pointer_map_create ();
- FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
+ next_bb)
bb = expand_gimple_basic_block (bb, var_ret_seq != NULL_RTX);
if (MAY_HAVE_DEBUG_INSNS)
split edges which edge insertions might do. */
rebuild_jump_labels (get_insns ());
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge e;
edge_iterator ei;
rebuild_jump_labels_chain (e->insns.r);
/* Put insns after parm birth, but before
NOTE_INSNS_FUNCTION_BEG. */
- if (e->src == ENTRY_BLOCK_PTR
- && single_succ_p (ENTRY_BLOCK_PTR))
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
{
rtx insns = e->insns.r;
e->insns.r = NULL_RTX;
/* We're done expanding trees to RTL. */
currently_expanding_to_rtl = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge e;
edge_iterator ei;
edge_checksum = XCNEWVEC (size_t, last_basic_block);
/* Check bb chain & numbers. */
- last_bb_seen = ENTRY_BLOCK_PTR;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
+ last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, NULL, next_bb)
{
- if (bb != EXIT_BLOCK_PTR
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bb != BASIC_BLOCK (bb->index))
{
error ("bb %d on wrong place", bb->index);
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
edge_checksum[e->dest->index] += (size_t) e;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
edge_checksum[e->dest->index] -= (size_t) e;
}
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
if (edge_checksum[bb->index])
{
error ("basic block %i edge lists are corrupted", bb->index);
err = 1;
}
- last_bb_seen = ENTRY_BLOCK_PTR;
+ last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
/* Clean up. */
free (last_visited);
if (!cfg_hooks->tidy_fallthru_edge)
return;
- if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
- FOR_BB_BETWEEN (b, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR->prev_bb, next_bb)
+ FOR_BB_BETWEEN (b, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb, next_bb)
{
edge s;
internal_error ("%s does not support can_duplicate_block_p",
cfg_hooks->name);
- if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return false;
return cfg_hooks->can_duplicate_block_p (bb);
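[ Many of the loop hunks only re-wrap FOR_BB_BETWEEN bounds.  Its
  definition (paraphrased from basic-block.h) is a half-open walk:

    #define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
      for (BB = FROM; BB != TO; BB = BB->DIR)

  so the exit block pointer can be passed directly as the TO bound and
  is itself never visited. ]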
FOR_ALL_BB (bb)
{
- if (bb != EXIT_BLOCK_PTR_FOR_FUNCTION (cfun)
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& profile_status != PROFILE_ABSENT)
{
sum = 0;
&& (lsum - bb->count > 100 || lsum - bb->count < -100))
record->num_mismatched_count_out[after_pass]++;
}
- if (bb != ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
+ if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& profile_status != PROFILE_ABSENT)
{
sum = 0;
if (lsum - bb->count > 100 || lsum - bb->count < -100)
record->num_mismatched_count_in[after_pass]++;
}
- if (bb == ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
- || bb == EXIT_BLOCK_PTR_FOR_FUNCTION (cfun))
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
gcc_assert (cfg_hooks->account_profile_record);
cfg_hooks->account_profile_record (bb, after_pass, record);
/* Dummy loop containing whole function. */
root = alloc_loop ();
root->num_nodes = n_basic_blocks_for_fn (fn);
- root->latch = EXIT_BLOCK_PTR_FOR_FUNCTION (fn);
- root->header = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
- ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;
- EXIT_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;
+ root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
+ root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
+ ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
+ EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
loops->larray->quick_push (root);
loops->tree_root = root;
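[ The dummy root loop set up above deliberately aliases the two fake
  blocks.  A hypothetical helper -- illustration only, not in the
  patch -- naming the resulting invariant:

    static inline bool
    loop_is_fake_root_p (struct function *fn, const struct loop *l)
    {
      return l->header == ENTRY_BLOCK_PTR_FOR_FN (fn)
             && l->latch == EXIT_BLOCK_PTR_FOR_FN (fn);
    }
  ]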
FOR_EACH_EDGE (e, ei, header->preds)
{
basic_block latch = e->src;
- if (latch != ENTRY_BLOCK_PTR
+ if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& dominated_by_p (CDI_DOMINATORS, latch, header))
return true;
}
block. This would cause problems if the entry edge was the one from the
entry block. To avoid having to handle this case specially, split
such entry edge. */
- e = find_edge (ENTRY_BLOCK_PTR, loop->header);
+ e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
if (e)
split_edge (e);
{
struct loop *source_loop;
- if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return 0;
source_loop = bb->loop_father;
body = XNEWVEC (basic_block, loop->num_nodes);
- if (loop->latch == EXIT_BLOCK_PTR)
+ if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* There may be blocks unreachable from EXIT_BLOCK, hence we need to
special-case the fake loop that contains the whole function. */
gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
body[tv++] = loop->header;
- body[tv++] = EXIT_BLOCK_PTR;
+ body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
FOR_EACH_BB (bb)
body[tv++] = bb;
}
tovisit = XNEWVEC (basic_block, loop->num_nodes);
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
tv = 0;
fill_sons_in_loop (loop, loop->header, tovisit, &tv);
unsigned int vc = 1;
gcc_assert (loop->num_nodes);
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
blocks = XNEWVEC (basic_block, loop->num_nodes);
visited = BITMAP_ALLOC (NULL);
edge_iterator ei;
struct loop_exit *exit;
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
/* If we maintain the lists of exits, use them. Otherwise we must
scan the body of the loop. */
unsigned i, n;
basic_block * body;
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
body = get_loop_body (loop);
n = 0;
gcc_assert (current_loops != NULL);
/* Reset the flags. */
- FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
act->flags &= ~BB_IRREDUCIBLE_LOOP;
FOR_EACH_EDGE (e, ei, act->succs)
/* Create the edge lists. */
g = new_graph (last_basic_block + num);
- FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
FOR_EACH_EDGE (e, ei, act->succs)
{
/* Ignore edges to exit. */
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
src = BB_REPR (act);
FOR_EACH_EDGE (e, ei, bb->succs)
{
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
act = e->dest->loop_father;
bitmap_set_bit (seen, rem_bbs[i]->index);
if (!irred_invalidated)
FOR_EACH_EDGE (ae, ei, e->src->succs)
- if (ae != e && ae->dest != EXIT_BLOCK_PTR && !bitmap_bit_p (seen, ae->dest->index)
+ if (ae != e && ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && !bitmap_bit_p (seen, ae->dest->index)
&& ae->flags & EDGE_IRREDUCIBLE_LOOP)
{
irred_invalidated = true;
{
bb = rem_bbs[i];
FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
- if (ae->dest != EXIT_BLOCK_PTR && !bitmap_bit_p (seen, ae->dest->index))
+ if (ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && !bitmap_bit_p (seen, ae->dest->index))
{
bitmap_set_bit (seen, ae->dest->index);
bord_bbs[n_bord_bbs++] = ae->dest;
/* We do not allow entry block to be the loop preheader, since we
cannot emit code there. */
- if (single_entry->src == ENTRY_BLOCK_PTR)
+ if (single_entry->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
need_forwarder_block = true;
else
{
entry_of_function (void)
{
return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
- BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ());
+ BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
}
/* Emit INSN at the entry point of the function, ensuring that it is only
void
emit_insn_at_entry (rtx insn)
{
- edge_iterator ei = ei_start (ENTRY_BLOCK_PTR->succs);
+ edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
edge e = ei_safe_edge (ei);
gcc_assert (e->flags & EDGE_FALLTHRU);
{
rtx insn;
- if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
|| !single_succ_p (bb))
return false;
edge e;
edge_iterator ei;
- if (target == EXIT_BLOCK_PTR)
+ if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
return true;
if (src->next_bb != target)
return false;
return false;
FOR_EACH_EDGE (e, ei, src->succs)
- if (e->dest == EXIT_BLOCK_PTR
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& e->flags & EDGE_FALLTHRU)
return false;
edge e;
edge_iterator ei;
- if (target == EXIT_BLOCK_PTR)
+ if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
return true;
FOR_EACH_EDGE (e, ei, src->succs)
- if (e->dest == EXIT_BLOCK_PTR
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& e->flags & EDGE_FALLTHRU)
return 0;
return true;
/* Must be simple edge. */
&& !(single_succ_edge (a)->flags & EDGE_COMPLEX)
&& a->next_bb == b
- && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
+ && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
/* If the jump insn has side effects,
we can't kill the edge. */
&& (!JUMP_P (BB_END (a))
rtx
block_label (basic_block block)
{
- if (block == EXIT_BLOCK_PTR)
+ if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
return NULL_RTX;
if (!LABEL_P (BB_HEAD (block)))
INSN_UID (insn), e->dest->index, target->index);
if (!redirect_jump (insn, block_label (target), 0))
{
- gcc_assert (target == EXIT_BLOCK_PTR);
+ gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
return NULL;
}
}
/* Cannot do anything for target exit block. */
- else if (target == EXIT_BLOCK_PTR)
+ else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
return NULL;
/* Or replace possibly complicated jump insn by simple jump insn. */
int j;
rtx new_label = block_label (new_bb);
- if (new_bb == EXIT_BLOCK_PTR)
+ if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
if (GET_CODE (PATTERN (tmp)) == ADDR_VEC)
vec = XVEC (PATTERN (tmp), 0);
int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
rtx new_label, note;
- if (new_bb == EXIT_BLOCK_PTR)
+ if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
new_label = block_label (new_bb);
target is exit block on some arches. */
if (!redirect_jump (insn, block_label (new_bb), 0))
{
- gcc_assert (new_bb == EXIT_BLOCK_PTR);
+ gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun));
return false;
}
}
{
rtx note;
- if (e->src == ENTRY_BLOCK_PTR || e->dest == EXIT_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
/* If we redirected an existing edge, it may already be marked
crossing, even though the new src is missing a reg crossing note.
boundary fixup by calling fixup_partition_crossing itself. */
if ((e->flags & EDGE_FALLTHRU)
&& BB_PARTITION (bb) != BB_PARTITION (e->dest)
- && e->dest != EXIT_BLOCK_PTR)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
force_nonfallthru (e);
else
fixup_partition_crossing (e);
/* In the case the last instruction is conditional jump to the next
instruction, first redirect the jump itself and then continue
by creating a basic block afterwards to redirect fallthru edge. */
- if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& any_condjump_p (BB_END (e->src))
&& JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
{
else
{
gcc_assert (e->flags & EDGE_FALLTHRU);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
/* We can't redirect the entry block. Create an empty block
at the start of the function which we use to add the new
edge_iterator ei;
bool found = false;
- basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR);
+ basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* Change the existing edge's source to be the new block, and add
a new edge from the entry block to the new block. */
e->src = bb;
- for (ei = ei_start (ENTRY_BLOCK_PTR->succs); (tmp = ei_safe_edge (ei)); )
+ for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
+ (tmp = ei_safe_edge (ei)); )
{
if (tmp == e)
{
- ENTRY_BLOCK_PTR->succs->unordered_remove (ei.index);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
found = true;
break;
}
gcc_assert (found);
vec_safe_push (bb->succs, e);
- make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
+ make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
+ EDGE_FALLTHRU);
}
}
/* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
don't point to the target or fallthru label. */
if (JUMP_P (BB_END (e->src))
- && target != EXIT_BLOCK_PTR
+ && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& (e->flags & EDGE_FALLTHRU)
&& (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
{
loc = e->goto_locus;
e->flags &= ~EDGE_FALLTHRU;
- if (target == EXIT_BLOCK_PTR)
+ if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
if (jump_label == ret_rtx)
{
last_bb_in_partition (basic_block start_bb)
{
basic_block bb;
- FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
return bb;
}
/* Create the basic block note. */
- if (edge_in->dest != EXIT_BLOCK_PTR)
+ if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
before = BB_HEAD (edge_in->dest);
else
before = NULL_RTX;
/* If this is a fall through edge to the exit block, the blocks might be
not adjacent, and the right place is after the source. */
- if ((edge_in->flags & EDGE_FALLTHRU) && edge_in->dest == EXIT_BLOCK_PTR)
+ if ((edge_in->flags & EDGE_FALLTHRU)
+ && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
before = NEXT_INSN (BB_END (edge_in->src));
bb = create_basic_block (before, NULL, edge_in->src);
}
else
{
- if (edge_in->src == ENTRY_BLOCK_PTR)
+ if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
BB_COPY_PARTITION (bb, edge_in->dest);
/* Can't allow a region crossing edge to be fallthrough. */
if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
- && edge_in->dest != EXIT_BLOCK_PTR)
+ && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
new_bb = force_nonfallthru (single_succ_edge (bb));
gcc_assert (!new_bb);
}
else
{
- if (edge_in->src != ENTRY_BLOCK_PTR)
+ if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
/* For asm goto even splitting of fallthru edge might
need insn patching, as other labels might point to the
rtx last = BB_END (edge_in->src);
if (last
&& JUMP_P (last)
- && edge_in->dest != EXIT_BLOCK_PTR
+ && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& extract_asm_operands (PATTERN (last)) != NULL_RTX
&& patch_jump_insn (last, before, bb))
df_set_bb_dirty (edge_in->src);
/* Figure out where to put these insns. If the destination has
one predecessor, insert there. Except for the exit block. */
- if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR)
+ if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
bb = e->dest;
the basic block. */
else if ((e->flags & EDGE_ABNORMAL) == 0
&& single_succ_p (e->src)
- && e->src != ENTRY_BLOCK_PTR
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& (!JUMP_P (BB_END (e->src))
|| simplejump_p (BB_END (e->src))))
{
to EXIT. */
e = single_succ_edge (bb);
- gcc_assert (e->dest == EXIT_BLOCK_PTR
+ gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
e->flags &= ~EDGE_FALLTHRU;
verify_flow_info ();
#endif
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge e;
edge_iterator ei;
n_fallthru++, fallthru = e;
is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
- && e->src != ENTRY_BLOCK_PTR
- && e->dest != EXIT_BLOCK_PTR);
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
has_crossing_edge |= is_crossing;
if (e->flags & EDGE_CROSSING)
{
break;
}
}
- else if (e->src != ENTRY_BLOCK_PTR
- && e->dest != EXIT_BLOCK_PTR)
+ else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
rtx insn;
rtx x;
int num_bb_notes;
const rtx rtx_first = get_insns ();
- basic_block last_bb_seen = ENTRY_BLOCK_PTR, curr_bb = NULL;
+ basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
num_bb_notes = 0;
- last_bb_seen = ENTRY_BLOCK_PTR;
+ last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
for (x = rtx_first; x; x = NEXT_INSN (x))
{
ei_next (&ei);
continue;
}
- else if (e->dest != EXIT_BLOCK_PTR
+ else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& BB_HEAD (e->dest) == JUMP_LABEL (insn))
/* If the destination block is the target of the jump,
keep the edge. */
ei_next (&ei);
continue;
}
- else if (e->dest == EXIT_BLOCK_PTR && returnjump_p (insn))
+ else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && returnjump_p (insn))
/* If the destination block is the exit block, and this
instruction is a return, then keep the edge. */
{
rtx insn, last_insn, next_head, prev;
next_head = NULL_RTX;
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
next_head = BB_HEAD (bb->next_bb);
for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
basic_block bb;
FOR_EACH_BB (bb)
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
cfg_layout_finalize ();
if (dump_file)
{
fprintf (dump_file, "Reordered sequence:\n");
- for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
+ for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb,
+ index = NUM_FIXED_BLOCKS;
bb;
bb = (basic_block) bb->aux, index++)
{
}
/* Now reorder the blocks. */
- prev_bb = ENTRY_BLOCK_PTR;
- bb = ENTRY_BLOCK_PTR->next_bb;
+ prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
+ bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
{
bb->prev_bb = prev_bb;
prev_bb->next_bb = bb;
}
- prev_bb->next_bb = EXIT_BLOCK_PTR;
- EXIT_BLOCK_PTR->prev_bb = prev_bb;
+ prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
/* Then, clean up the aux fields. */
FOR_ALL_BB (bb)
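[ The two fake blocks never appear on the bb->aux chain (a NULL aux
  terminates it), which is why EXIT's prev_bb/next_bb links are
  stitched up by hand above.  The truncated FOR_ALL_BB clean-up
  presumably reduces to:

    FOR_ALL_BB (bb)
      bb->aux = NULL;
  ]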
/* First do the bulk reordering -- rechain the blocks without regard to
the needed changes to jumps and labels. */
- for (bb = ENTRY_BLOCK_PTR->next_bb; bb; bb = (basic_block) bb->aux)
+ for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb;
+ bb = (basic_block) bb->aux)
{
if (BB_HEADER (bb))
{
/* Now add jumps and labels as needed to match the blocks new
outgoing edges. */
- for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = (basic_block) bb->aux)
+ for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb ;
+ bb = (basic_block) bb->aux)
{
edge e_fall, e_taken, e;
rtx bb_end_insn;
/* If the old fallthru is still next, nothing to do. */
if (bb->aux == e_fall->dest
- || e_fall->dest == EXIT_BLOCK_PTR)
+ || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
/* The degenerated case of conditional jump jumping to the next
if (note
&& XINT (note, 0) < REG_BR_PROB_BASE / 2
&& invert_jump (bb_end_insn,
- (e_fall->dest == EXIT_BLOCK_PTR
+ (e_fall->dest
+ == EXIT_BLOCK_PTR_FOR_FN (cfun)
? NULL_RTX
: label_for_bb (e_fall->dest)), 0))
{
/* Otherwise we can try to invert the jump. This will
basically never fail, however, keep up the pretense. */
else if (invert_jump (bb_end_insn,
- (e_fall->dest == EXIT_BLOCK_PTR
+ (e_fall->dest
+ == EXIT_BLOCK_PTR_FOR_FN (cfun)
? NULL_RTX
: label_for_bb (e_fall->dest)), 0))
{
__builtin_unreachable ()), nothing to do. */
if (! e_fall
|| bb->aux == e_fall->dest
- || e_fall->dest == EXIT_BLOCK_PTR)
+ || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
/* Otherwise we'll have to use the fallthru fixup below. */
continue;
/* A fallthru to exit block. */
- if (e_fall->dest == EXIT_BLOCK_PTR)
+ if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
}
continue;
}
dest = e->dest;
- if (dest == EXIT_BLOCK_PTR)
+ if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* Non-fallthru edges to the exit block cannot be split. */
if (!(e->flags & EDGE_FALLTHRU))
value. */
gcc_assert (reload_completed);
- e = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
+ e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
if (e)
bb = e->src;
if (bb && bb->aux)
{
- basic_block c = ENTRY_BLOCK_PTR->next_bb;
+ basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
/* If the very first block is the one with the fall-through exit
edge, we have to split that block. */
edge_iterator ei;
basic_block forwarder, bb;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (e->flags & EDGE_FALLTHRU)
{
if (predecessor == NULL)
/* Exit has several fallthru predecessors. Create a forwarder block for
them. */
forwarder = split_edge (predecessor);
- for (ei = ei_start (EXIT_BLOCK_PTR->preds); (e = ei_safe_edge (ei)); )
+ for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
+ (e = ei_safe_edge (ei)); )
{
if (e->src == forwarder
|| !(e->flags & EDGE_FALLTHRU))
insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
new_bb = create_basic_block (insn,
insn ? get_last_insn () : NULL,
- EXIT_BLOCK_PTR->prev_bb);
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
BB_COPY_PARTITION (new_bb, bb);
if (BB_HEADER (bb))
if (e->dest == dest)
return e;
- if (e->src != ENTRY_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& (ret = try_redirect_by_replacing_jump (e, dest, true)))
{
df_set_bb_dirty (src);
return ret;
}
- if (e->src == ENTRY_BLOCK_PTR
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
{
if (dump_file)
set_last_insn (insn);
}
}
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
to = &BB_HEADER (bb->next_bb);
else
to = &cfg_layout_function_footer;
if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
{
edge e = find_fallthru_edge (b->succs);
- if (e && e->dest == EXIT_BLOCK_PTR)
+ if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
}
&& a != b
/* Must be simple edge. */
&& !(single_succ_edge (a)->flags & EDGE_COMPLEX)
- && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
+ && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
/* If the jump insn has side effects, we can't kill the edge.
When not optimizing, try_redirect_by_replacing_jump will
not allow us to redirect an edge by replacing a table jump. */
cfg_layout_split_edge (edge e)
{
basic_block new_bb =
- create_basic_block (e->src != ENTRY_BLOCK_PTR
+ create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
? NEXT_INSN (BB_END (e->src)) : get_insns (),
NULL_RTX, e->src);
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
BB_COPY_PARTITION (new_bb, e->src);
else
BB_COPY_PARTITION (new_bb, e->dest);
{
rtx insn;
- if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return true;
FOR_BB_INSNS (bb, insn)
if (! blocks)
check_last_block = true;
else
- check_last_block = bitmap_bit_p (blocks, EXIT_BLOCK_PTR->prev_bb->index);
+ check_last_block = bitmap_bit_p (blocks,
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
/* In the last basic block, before epilogue generation, there will be
a fallthru edge to EXIT. Special care is required if the last insn
Handle this by adding a dummy instruction in a new last basic block. */
if (check_last_block)
{
- basic_block bb = EXIT_BLOCK_PTR->prev_bb;
+ basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
rtx insn = BB_END (bb);
/* Back up past insns that must be kept in the same block as a call. */
{
edge e;
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
if (e)
{
insert_insn_on_edge (gen_use (const0_rtx), e);
#ifdef ENABLE_CHECKING
if (split_at_insn == BB_END (bb))
{
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (e == NULL);
}
#endif
blocks_split++;
}
- make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}
if (insn == BB_HEAD (bb))
const_rtx insn = BB_END (src), set;
/* The conditions are taken from try_redirect_by_replacing_jump. */
- if (target == EXIT_BLOCK_PTR)
+ if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
int
compute_call_stmt_bb_frequency (tree decl, basic_block bb)
{
- int entry_freq = ENTRY_BLOCK_PTR_FOR_FUNCTION
+ int entry_freq = ENTRY_BLOCK_PTR_FOR_FN
(DECL_STRUCT_FUNCTION (decl))->frequency;
int freq = bb->frequency;
cgraph_node_remove_callees (node);
ipa_remove_all_references (&node->ref_list);
- node->count = ENTRY_BLOCK_PTR->count;
+ node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
FOR_EACH_BB (bb)
{
else
i++;
- node->count = ENTRY_BLOCK_PTR->count;
+ node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
FOR_EACH_BB (bb)
{
loops_for_fn (cfun)->state |= LOOPS_MAY_HAVE_MULTIPLE_LATCHES;
/* Create BB for body of the function and connect it properly. */
- bb = create_basic_block (NULL, (void *) 0, ENTRY_BLOCK_PTR);
- make_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
- make_edge (bb, EXIT_BLOCK_PTR, 0);
- add_bb_to_loop (bb, ENTRY_BLOCK_PTR->loop_father);
+ bb = create_basic_block (NULL, (void *) 0, ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, EDGE_FALLTHRU);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
+ add_bb_to_loop (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father);
return bb;
}
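[ After this helper the CFG is minimal: one real block wired between
  the two fake ones.  Sketch of the invariant, using existing
  accessors:

    gcc_checking_assert (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)) == bb
                         && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun));
  ]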
gsi_insert_after (&bsi, stmt, GSI_NEW_STMT);
make_edge (bb, then_bb, EDGE_TRUE_VALUE);
make_edge (bb, else_bb, EDGE_FALSE_VALUE);
- make_edge (return_bb, EXIT_BLOCK_PTR, 0);
+ make_edge (return_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
make_edge (then_bb, return_bb, EDGE_FALLTHRU);
make_edge (else_bb, return_bb, EDGE_FALLTHRU);
bsi = gsi_last_bb (then_bb);
setup_incoming_promotions (first);
/* Allow the entry block and the first block to fall into the same EBB.
Conceptually the incoming promotions are assigned to the entry block. */
- last_bb = ENTRY_BLOCK_PTR;
+ last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
create_log_links ();
FOR_EACH_BB (this_basic_block)
label_tick = label_tick_ebb_start = 1;
init_reg_last ();
setup_incoming_promotions (first);
- last_bb = ENTRY_BLOCK_PTR;
+ last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
FOR_EACH_BB (this_basic_block)
{
/* If this register is undefined at the start of the file, we can't
say what its contents were. */
&& ! REGNO_REG_SET_P
- (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
+ (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
&& HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
{
reg_stat_type *rsp = &reg_stat[REGNO (x)];
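[ The recurring ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb in these
  combine.c hunks is the first real basic block; its dataflow live-in
  set stands in for "live on function entry".  Sketch:

    basic_block first_real_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
    bitmap live_on_entry = DF_LR_IN (first_real_bb);
  ]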
ni2dest = SET_DEST (newi2pat);
for (insn = NEXT_INSN (i3);
- insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
+ insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| insn != BB_HEAD (this_basic_block->next_bb));
insn = NEXT_INSN (insn))
{
&& ! find_reg_note (i2, REG_UNUSED,
SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
for (temp = NEXT_INSN (i2);
- temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
+ temp
+ && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| BB_HEAD (this_basic_block) != temp);
temp = NEXT_INSN (temp))
if (temp != i3 && INSN_P (temp))
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
&& !REGNO_REG_SET_P
- (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
+ (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
+ REGNO (x)))))
{
*nonzero &= rsp->last_set_nonzero_bits;
return NULL;
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
&& !REGNO_REG_SET_P
- (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
+ (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
+ REGNO (x)))))
{
*result = rsp->last_set_sign_bit_copies;
return NULL;
|| (! (regno >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (regno) == 1
&& (!REGNO_REG_SET_P
- (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno)))
+ (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
+ regno)))
&& rsp->last_set_label > tick))
{
if (replace)
&& (regno < FIRST_PSEUDO_REGISTER
|| REG_N_SETS (regno) != 1
|| REGNO_REG_SET_P
- (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno))))
+ (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
return 0;
/* If the value was set in a later insn than the ones we are processing,
since most links don't point very far away. */
for (insn = NEXT_INSN (link->insn);
- (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
+ (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| BB_HEAD (this_basic_block->next_bb) != insn));
insn = NEXT_INSN (insn))
if (DEBUG_INSN_P (insn))
label. Emit the sequence properly on the edge. We are only
invoked from dw2_build_landing_pads and finish_eh_generation
will call commit_edge_insertions thanks to a kludge. */
- insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
+ insert_insn_on_edge (seq,
+ single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
cfun->machine->gp_save_rtx = m;
}
we can't yet emit instructions directly in the final
insn stream. Queue the insns on the entry edge, they will
be committed after everything else is expanded. */
- insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
+ insert_insn_on_edge (seq,
+ single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
}
}
}
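[ Both target hunks above queue a sequence on the entry edge instead
  of emitting it directly; the insns are materialized later by
  commit_edge_insertions ().  Sketch of the pattern (dest_reg/src_reg
  are a hypothetical payload):

    rtx seq, dest_reg, src_reg;    /* hypothetical payload regs */
    start_sequence ();
    emit_move_insn (dest_reg, src_reg);
    seq = get_insns ();
    end_sequence ();
    insert_insn_on_edge (seq,
                         single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
    /* ...later, once the CFG is stable:  */
    commit_edge_insertions ();
  ]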
/* Just look at cfg info, which is still close enough to correct at this
point. This gives false positives for broken functions that might use
uninitialized data that happens to be allocated in r3, but who cares? */
- return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 3);
+ return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)), 3);
}
/* Compute the number of bytes used to store the static chain register on the
if (!crtl->tail_call_emit)
return false;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (e->flags & EDGE_SIBCALL)
{
rtx call = BB_END (e->src);
if (single_pred_p (bb)
&& single_pred_edge (bb)->flags & EDGE_FALLTHRU
- && single_pred (bb) != ENTRY_BLOCK_PTR)
+ && single_pred (bb) != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
bb = single_pred (bb);
last_insn = BB_END (bb);
/* We need to keep the membar if there is an edge to the exit block. */
FOR_EACH_EDGE (succ, ei, bb->succs)
/* for (succ = bb->succ; succ != 0; succ = succ->succ_next) */
- if (succ->dest == EXIT_BLOCK_PTR)
+ if (succ->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
/* Work out the union of all successor blocks. */
to correct at this point. This gives false positives for broken
functions that might use uninitialized data that happens to be
allocated in eax, but who cares? */
- return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
+ return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)), 0);
}
static bool
Recompute the value as needed. Do not recompute when amount of registers
didn't change as reload does multiple calls to the function and does not
expect the decision to change within single iteration. */
- else if (!optimize_bb_for_size_p (ENTRY_BLOCK_PTR)
+ else if (!optimize_bb_for_size_p (ENTRY_BLOCK_PTR_FOR_FN (cfun))
&& cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
{
int count = frame->nregs;
/* Leave results in shorter dependency chains on CPUs that are
able to grok it fast. */
else if (TARGET_USE_LEAVE
- || optimize_bb_for_size_p (EXIT_BLOCK_PTR)
+ || optimize_bb_for_size_p (EXIT_BLOCK_PTR_FOR_FN (cfun))
|| !cfun->machine->use_fast_prologue_epilogue)
ix86_emit_leave ();
else
make_edge (bb1, bb3, EDGE_FALSE_VALUE);
remove_edge (e23);
- make_edge (bb2, EXIT_BLOCK_PTR, 0);
+ make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
pop_cfun ();
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
basic_block bb = e->src;
rtx ret = BB_END (bb);
edge prev_e;
edge_iterator prev_ei;
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
min_prev_count = 0;
break;
}
FOR_EACH_EDGE (prev_e, prev_ei, e->src->preds)
{
- if (prev_e->src == ENTRY_BLOCK_PTR)
+ if (prev_e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
int count = ix86_count_insn_bb (e->src);
if (count < min_prev_count)
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
rtx ret = BB_END (e->src);
if (JUMP_P (ret) && ANY_RETURN_P (PATTERN (ret)))
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
rtx insn, next;
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if ((e->flags & EDGE_FAKE) == 0
&& (e->flags & EDGE_FALLTHRU) != 0)
break;
if (NOTE_INSN_BASIC_BLOCK_P (insn))
{
- last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
+ last_block = (NOTE_BASIC_BLOCK (insn)->next_bb
+ == EXIT_BLOCK_PTR_FOR_FN (cfun));
/* Restore unwind state from immediately before the epilogue. */
if (need_copy_state)
|| frame_pointer_needed
|| NDS32_REQUIRED_CALLEE_SAVED_P (FP_REGNUM)
|| (cfun->stdarg == 1)
- || (find_fallthru_edge (EXIT_BLOCK_PTR->preds) == NULL))
+ || (find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == NULL))
return 0;
/* Now we can check the possibility of using fp_as_gp optimization. */
&& DEFAULT_ABI == ABI_V4
&& flag_pic
&& ! info->lr_save_p
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
if (save_LR_around_toc_setup)
{
rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& BB_HEAD (e->dest) == JUMP_LABEL (jump))
{
e->flags |= EDGE_FALLTHRU;
? BRANCH_EDGE (bb)->dest : FALLTHRU_EDGE (bb)->dest;
/* If DEST doesn't go anywhere, ignore it. */
- if (! dest || dest == EXIT_BLOCK_PTR)
+ if (! dest || dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
/* We have found a suitable implicit set. Try to record it now as
old_dest = e->dest;
if (dest != NULL
&& dest != old_dest
- && dest != EXIT_BLOCK_PTR)
+ && dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
redirect_edge_and_branch_force (e, dest);
rtx dest;
/* Note we start at block 1. */
- if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return 0;
bypass_last_basic_block = last_basic_block;
mark_dfs_back_edges ();
changed = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
- EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
/* Check for more than one predecessor. */
if (!single_pred_p (bb))
/* Allocate vars to track sets of regs. */
reg_set_bitmap = ALLOC_REG_SET (NULL);
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR,
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun),
next_bb)
{
/* Reset tables used to keep track of what's still valid [since
&& e == BRANCH_EDGE (previous_bb_in_path))
{
bb = FALLTHRU_EDGE (previous_bb_in_path)->dest;
- if (bb != EXIT_BLOCK_PTR
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& single_pred_p (bb)
/* We used to assert here that we would only see blocks
that we have not visited yet. But we may end up
if (e
&& !((e->flags & EDGE_ABNORMAL_CALL) && cfun->has_nonlocal_label)
- && e->dest != EXIT_BLOCK_PTR
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& single_pred_p (e->dest)
/* Avoid visiting basic blocks twice. The large comment
above explains why this can happen. */
continue;
if (EDGE_COUNT (e->dest->preds) != 1
- || e->dest == EXIT_BLOCK_PTR
+ || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
/* Avoid endless recursion on unreachable blocks. */
|| e->dest == orig_bb)
continue;
df_lr_confluence_0 (basic_block bb)
{
bitmap op1 = &df_lr_get_bb_info (bb->index)->out;
- if (bb != EXIT_BLOCK_PTR)
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_copy (op1, &df->hardware_regs_used);
}
EXECUTE_IF_SET_IN_BITMAP (entry_block_defs, 0, i, bi)
{
df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[i], NULL,
- ENTRY_BLOCK_PTR, NULL, DF_REF_REG_DEF, 0);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, DF_REF_REG_DEF, 0);
}
df_canonize_collection_rec (collection_rec);
EXECUTE_IF_SET_IN_BITMAP (exit_block_uses, 0, i, bi)
df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[i], NULL,
- EXIT_BLOCK_PTR, NULL, DF_REF_REG_USE, 0);
+ EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, DF_REF_REG_USE, 0);
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
/* It is deliberate that this is not put in the exit block uses but
I do not know why. */
if (reload_completed
&& !bitmap_bit_p (exit_block_uses, ARG_POINTER_REGNUM)
- && bb_has_eh_pred (EXIT_BLOCK_PTR)
+ && bb_has_eh_pred (EXIT_BLOCK_PTR_FOR_FN (cfun))
&& fixed_regs[ARG_POINTER_REGNUM])
df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[ARG_POINTER_REGNUM], NULL,
- EXIT_BLOCK_PTR, NULL, DF_REF_REG_USE, 0);
+ EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, DF_REF_REG_USE, 0);
#endif
df_canonize_collection_rec (collection_rec);
if (reverse)
{
ei = ei_start (bb->preds);
- en_block = EXIT_BLOCK_PTR;
- ex_block = ENTRY_BLOCK_PTR;
+ en_block = EXIT_BLOCK_PTR_FOR_FN (cfun);
+ ex_block = ENTRY_BLOCK_PTR_FOR_FN (cfun);
}
else
{
ei = ei_start (bb->succs);
- en_block = ENTRY_BLOCK_PTR;
- ex_block = EXIT_BLOCK_PTR;
+ en_block = ENTRY_BLOCK_PTR_FOR_FN (cfun);
+ ex_block = EXIT_BLOCK_PTR_FOR_FN (cfun);
}
/* When the stack is empty we break out of this loop. */
calc_dfs_tree (struct dom_info *di, bool reverse)
{
/* The first block is the ENTRY_BLOCK (or EXIT_BLOCK if REVERSE). */
- basic_block begin = reverse ? EXIT_BLOCK_PTR : ENTRY_BLOCK_PTR;
+ basic_block begin = (reverse ? EXIT_BLOCK_PTR_FOR_FN (cfun)
+ : ENTRY_BLOCK_PTR_FOR_FN (cfun));
di->dfs_order[last_basic_block] = di->dfsnum;
di->dfs_to_bb[di->dfsnum] = begin;
di->dfsnum++;
edge_iterator ei, einext;
if (reverse)
- en_block = EXIT_BLOCK_PTR;
+ en_block = EXIT_BLOCK_PTR_FOR_FN (cfun);
else
- en_block = ENTRY_BLOCK_PTR;
+ en_block = ENTRY_BLOCK_PTR_FOR_FN (cfun);
/* Go backwards in DFS order, to first look at the leafs. */
v = di->nodes;
for (i = 0; bbs.iterate (i, &bb);)
{
- if (bb == ENTRY_BLOCK_PTR)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
goto succeed;
if (single_pred_p (bb))
if (son[y] == -1)
return;
if (y == (int) bbs.length ())
- ybb = ENTRY_BLOCK_PTR;
+ ybb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
else
ybb = bbs[y];
set_immediate_dominator (CDI_DOMINATORS, bb, NULL);
*map->insert (bb) = i;
}
- *map->insert (ENTRY_BLOCK_PTR) = n;
+ *map->insert (ENTRY_BLOCK_PTR_FOR_FN (cfun)) = n;
g = new_graph (n + 1);
for (y = 0; y < g->n_vertices; y++)
{
/* Don't worry about unreachable blocks. */
if (EDGE_COUNT (bb->preds) > 0
- || bb == ENTRY_BLOCK_PTR
- || bb == EXIT_BLOCK_PTR)
+ || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* Callback for subclasses to do custom things before we have walked
the dominator children, but before we walk statements. */
if (stores_off_frame_dead_at_return
&& (EDGE_COUNT (bb->succs) == 0
|| (single_succ_p (bb)
- && single_succ (bb) == EXIT_BLOCK_PTR
+ && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& ! crtl->calls_eh_return)))
{
insn_info_t i_ptr = active_local_stores;
}
if (fn_begin_outside_block)
- insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
+ insert_insn_on_edge (seq,
+ single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
else
emit_insn_after (seq, fn_begin);
}
if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ
/* Kludge for Alpha (see alpha_gp_save_rtx). */
- || single_succ_edge (ENTRY_BLOCK_PTR)->insns.r)
+ || single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->insns.r)
commit_edge_insertions ();
/* Redirect all EH edges from the post_landing_pad to the landing pad. */
&& (branch_frequency > freq_threshold
|| (bb->frequency > bb->prev_bb->frequency * 10
&& (bb->prev_bb->frequency
- <= ENTRY_BLOCK_PTR->frequency / 2))))
+ <= ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency / 2))))
{
log = JUMP_ALIGN (label);
if (dump_file)
return false;
return ((REG_N_SETS (regno) > 1
- || REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), regno))
+ || REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ regno))
&& REGNO_REG_SET_P (setjmp_crosses, regno));
}
/* We can sometimes encounter dead code. Don't try to move it
into the exit block. */
- if (!live_edge || live_edge->dest == EXIT_BLOCK_PTR)
+ if (!live_edge || live_edge->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
return NULL;
/* Reject targets of abnormal edges. This is needed for correctness
src_bbs.create (EDGE_COUNT (last_bb->preds));
FOR_EACH_EDGE (e, ei, last_bb->preds)
- if (e->src != ENTRY_BLOCK_PTR)
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
src_bbs.quick_push (e->src);
label = BB_HEAD (last_bb);
}
/* Fix up the CFG for the successful change we just made. */
- redirect_edge_succ (e, EXIT_BLOCK_PTR);
+ redirect_edge_succ (e, EXIT_BLOCK_PTR_FOR_FN (cfun));
e->flags &= ~EDGE_CROSSING;
}
src_bbs.release ();
df_analyze ();
- rtl_profile_for_bb (ENTRY_BLOCK_PTR);
+ rtl_profile_for_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
inserted = false;
seq = NULL_RTX;
/* Can't deal with multiple successors of the entry block at the
moment. Function should always have at least one entry
point. */
- gcc_assert (single_succ_p (ENTRY_BLOCK_PTR));
- entry_edge = single_succ_edge (ENTRY_BLOCK_PTR);
+ gcc_assert (single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+ entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
orig_entry_edge = entry_edge;
split_prologue_seq = NULL_RTX;
basic_block tmp_bb = vec.pop ();
FOR_EACH_EDGE (e, ei, tmp_bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bitmap_set_bit (&bb_flags, e->dest->index))
vec.quick_push (e->dest);
}
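[ The walk above exploits bitmap_set_bit's return value, which is true
  only if the bit was previously clear, so it doubles as a visited
  test:

    if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
        && bitmap_set_bit (&bb_flags, e->dest->index))
      vec.quick_push (e->dest);    /* pushed on first visit only */
  ]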
/* Find the set of basic blocks that need no prologue, have a
single successor, can be duplicated, meet a max size
requirement, and go to the exit via like blocks. */
- vec.quick_push (EXIT_BLOCK_PTR);
+ vec.quick_push (EXIT_BLOCK_PTR_FOR_FN (cfun));
while (!vec.is_empty ())
{
basic_block tmp_bb = vec.pop ();
{
/* Otherwise put the copy at the end of the function. */
copy_bb = create_basic_block (NULL_RTX, NULL_RTX,
- EXIT_BLOCK_PTR->prev_bb);
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
BB_COPY_PARTITION (copy_bb, bb);
}
dup_block_and_redirect (tbb, copy_bb, insert_point,
&bb_flags);
tbb = single_succ (tbb);
- if (tbb == EXIT_BLOCK_PTR)
+ if (tbb == EXIT_BLOCK_PTR_FOR_FN (cfun))
break;
e = split_block (copy_bb, PREV_INSN (insert_point));
copy_bb = e->dest;
if (CALL_P (PREV_INSN (insert_point))
&& SIBLING_CALL_P (PREV_INSN (insert_point)))
eflags = EDGE_SIBCALL | EDGE_ABNORMAL;
- make_single_succ_edge (copy_bb, EXIT_BLOCK_PTR, eflags);
+ make_single_succ_edge (copy_bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
+ eflags);
/* verify_flow_info doesn't like a note after a
sibling call. */
/* If the exit block has no non-fake predecessors, we don't need
an epilogue. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if ((e->flags & EDGE_FAKE) == 0)
break;
if (e == NULL)
goto epilogue_done;
- rtl_profile_for_bb (EXIT_BLOCK_PTR);
+ rtl_profile_for_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
- exit_fallthru_edge = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
+ exit_fallthru_edge
+ = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
/* If we're allowed to generate a simple return instruction, then by
definition we don't need a full epilogue. If the last basic
/* convert_jumps_to_returns may add to EXIT_BLOCK_PTR->preds
(but won't remove). Stop at end of current preds. */
- last = EDGE_COUNT (EXIT_BLOCK_PTR->preds);
+ last = EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
for (i = 0; i < last; i++)
{
- e = EDGE_I (EXIT_BLOCK_PTR->preds, i);
+ e = EDGE_I (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, i);
if (LABEL_P (BB_HEAD (e->src))
&& !bitmap_bit_p (&bb_flags, e->src->index)
&& !active_insn_between (BB_HEAD (e->src), BB_END (e->src)))
code. In order to be able to properly annotate these with unwind
info, try to split them now. If we get a valid split, drop an
EPILOGUE_BEG note and mark the insns as epilogue insns. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
rtx prev, last, trial;
/* The epilogue insns we inserted may cause the exit edge to no longer
be fallthru. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
if (((e->flags & EDGE_FALLTHRU) != 0)
&& returnjump_p (BB_END (e->src)))
}
/* Also check returns we might need to add to tail blocks. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (EDGE_COUNT (e->src->preds) != 0
&& (e->flags & EDGE_FAKE) != 0
&& !bitmap_bit_p (&bb_flags, e->src->index))
inserting new BBs at the end of the function. Do this
after the call to split_block above which may split
the original exit pred. */
- exit_pred = EXIT_BLOCK_PTR->prev_bb;
+ exit_pred = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
FOR_EACH_VEC_ELT (unconverted_simple_returns, i, e)
{
emit_barrier_after (start);
*pdest_bb = bb;
- make_edge (bb, EXIT_BLOCK_PTR, 0);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
}
redirect_edge_and_branch_force (e, *pdest_bb);
}
if (entry_edge != orig_entry_edge)
{
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (EDGE_COUNT (e->src->preds) != 0
&& (e->flags & EDGE_FAKE) != 0
&& !bitmap_bit_p (&bb_flags, e->src->index))
#ifdef HAVE_sibcall_epilogue
/* Emit sibling epilogues before any sibling call sites. */
- for (ei = ei_start (EXIT_BLOCK_PTR->preds); (e = ei_safe_edge (ei)); )
+ for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
+ (e = ei_safe_edge (ei)); )
{
basic_block bb = e->src;
rtx insn = BB_END (bb);
edge_iterator ei;
edge e;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
rtx insn, first = NULL, note = NULL;
basic_block bb = e->src;
{
basic_block pred_bb = pred->src;
- if (pred->src == ENTRY_BLOCK_PTR
+ if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
/* Has predecessor has already been visited? */
|| visited[pred_bb->index])
;/* Nothing to do. */
the convergence. */
FOR_EACH_BB_REVERSE (bb)
{
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
bitmap_intersection_of_succs (hoist_vbeout[bb->index],
hoist_vbein, bb);
FOR_EACH_EDGE (succ, ei, bb->succs)
{
succ_bb = succ->dest;
- if (succ_bb == EXIT_BLOCK_PTR)
+ if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
{
basic_block pred_bb = pred->src;
- if (pred->src == ENTRY_BLOCK_PTR)
+ if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
break;
else if (pred_bb == expr_bb)
continue;
bb_size[bb->index] = to_head;
}
- gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1
- && (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
- == ENTRY_BLOCK_PTR->next_bb));
+ gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
+ && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
+ == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));
from_bbs = BITMAP_ALLOC (NULL);
if (flag_ira_hoist_pressure)
hoisted_bbs = BITMAP_ALLOC (NULL);
dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
- ENTRY_BLOCK_PTR->next_bb);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);
/* Walk over each basic block looking for potentially hoistable
expressions, nothing gets hoisted from the entry block. */
restart:
if (single_pred_p (dest)
&& gimple_seq_empty_p (phi_nodes (dest))
- && dest != EXIT_BLOCK_PTR)
+ && dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
*gsi = gsi_start_bb (dest);
if (gsi_end_p (*gsi))
src = e->src;
if ((e->flags & EDGE_ABNORMAL) == 0
&& single_succ_p (src)
- && src != ENTRY_BLOCK_PTR)
+ && src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
*gsi = gsi_last_bb (src);
if (gsi_end_p (*gsi))
edge e;
edge_iterator ei;
- gsi_commit_one_edge_insert (single_succ_edge (ENTRY_BLOCK_PTR), NULL);
+ gsi_commit_one_edge_insert (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ NULL);
FOR_EACH_BB (bb)
FOR_EACH_EDGE (e, ei, bb->succs)
derived_base_name = arg;
if (SSA_NAME_IS_DEFAULT_DEF (arg))
- arg_bb = single_succ (ENTRY_BLOCK_PTR);
+ arg_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
else
arg_bb = gimple_bb (SSA_NAME_DEF_STMT (arg));
}
const char *fillcolors[3] = { "grey88", "grey77", "grey66" };
if (loop->header != NULL
- && loop->latch != EXIT_BLOCK_PTR)
+ && loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun))
pp_printf (pp,
"\tsubgraph cluster_%d_%d {\n"
"\tstyle=\"filled\";\n"
if (loop->header == NULL)
return;
- if (loop->latch == EXIT_BLOCK_PTR)
+ if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
body = get_loop_body (loop);
else
body = get_loop_body_in_bfs_order (loop);
free (body);
- if (loop->latch != EXIT_BLOCK_PTR)
+ if (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun))
pp_printf (pp, "\t}\n");
}
gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
vec<tree> iv_map;
- if (GBB_BB (gbb) == ENTRY_BLOCK_PTR)
+ if (GBB_BB (gbb) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return next_e;
nb_loops = number_of_loops (cfun);
gimple stmt;
/* XXX: ENTRY_BLOCK_PTR could be optimized in later steps. */
- basic_block entry_block = ENTRY_BLOCK_PTR;
+ basic_block entry_block = ENTRY_BLOCK_PTR_FOR_FN (cfun);
stmt = harmful_stmt_in_bb (entry_block, outermost_loop, bb);
result.difficult = (stmt != NULL);
result.exit = NULL;
FOR_EACH_VEC_ELT (regions, i, s)
/* Don't handle multiple edges exiting the function. */
if (!find_single_exit_edge (s)
- && s->exit != EXIT_BLOCK_PTR)
+ && s->exit != EXIT_BLOCK_PTR_FOR_FN (cfun))
create_single_exit_edge (s);
unmark_exit_edges (regions);
stack_vec<sd_region, 3> regions;
canonicalize_loop_closed_ssa_form ();
- build_scops_1 (single_succ (ENTRY_BLOCK_PTR), ENTRY_BLOCK_PTR->loop_father,
+ build_scops_1 (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father,
&regions, loop);
create_sese_edges (regions);
build_graphite_scops (regions, scops);
/* Selective scheduling does not define RECOVERY_BLOCK macro. */
rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
- if (!rec || rec == EXIT_BLOCK_PTR)
+ if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
prev_first = PREV_INSN (insn);
twin = insn;
sched_extend_bb (void)
{
/* The following is done to keep current_sched_info->next_tail non null. */
- rtx end = BB_END (EXIT_BLOCK_PTR->prev_bb);
+ rtx end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
rtx insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
if (NEXT_INSN (end) == 0
|| (!NOTE_P (insn)
rtx note = emit_note_after (NOTE_INSN_DELETED, end);
/* Make note appear outside BB. */
set_block_for_insn (note, NULL);
- BB_END (EXIT_BLOCK_PTR->prev_bb) = end;
+ BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
}
}
basic_block last;
edge e;
- last = EXIT_BLOCK_PTR->prev_bb;
+ last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
e = find_fallthru_edge_from (last);
if (e)
redirect_edge_succ (e, single);
make_single_succ_edge (single, empty, 0);
- make_single_succ_edge (empty, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
+ make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
+ EDGE_FALLTHRU);
label = block_label (empty);
x = emit_jump_insn_after (gen_jump (label), BB_END (single));
}
else
{
- rec = EXIT_BLOCK_PTR;
+ rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
label = NULL_RTX;
}
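[ In these haifa-sched.c hunks the exit block pointer doubles as a
  sentinel: rec == EXIT_BLOCK_PTR_FOR_FN (cfun) encodes a simple check
  with no recovery block.  A hypothetical helper naming the convention
  (illustration only, not in the patch):

    static inline bool
    recovery_block_p (basic_block rec)
    {
      return rec && rec != EXIT_BLOCK_PTR_FOR_FN (cfun);
    }
  ]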
/* Emit CHECK. */
check = targetm.sched.gen_spec_check (insn, label, todo_spec);
- if (rec != EXIT_BLOCK_PTR)
+ if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* To have mem_reg alive at the beginning of second_bb,
we emit check BEFORE insn, so insn after splitting
/* Initialize TWIN (twin is a duplicate of original instruction
in the recovery block). */
- if (rec != EXIT_BLOCK_PTR)
+ if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
sd_iterator_def sd_it;
dep_t dep;
provide correct value for INSN_TICK (TWIN). */
sd_copy_back_deps (twin, insn, true);
- if (rec != EXIT_BLOCK_PTR)
+ if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
/* In case of branchy check, fix CFG. */
{
basic_block first_bb, second_bb;
sched_create_recovery_edges (first_bb, rec, second_bb);
sched_init_only_bb (second_bb, first_bb);
- sched_init_only_bb (rec, EXIT_BLOCK_PTR);
+ sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));
jump = BB_END (rec);
haifa_init_insn (jump);
init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
sd_add_dep (new_dep, false);
- if (rec != EXIT_BLOCK_PTR)
+ if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
DEP_CON (new_dep) = twin;
sd_add_dep (new_dep, false);
/* Future speculations: call the helper. */
process_insn_forw_deps_be_in_spec (insn, twin, fs);
- if (rec != EXIT_BLOCK_PTR)
+ if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* Which types of dependencies should we use here is,
generally, machine-dependent question... But, for now,
bb_header = XNEWVEC (rtx, last_basic_block);
/* Make a sentinel. */
- if (last->next_bb != EXIT_BLOCK_PTR)
+ if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb_header[last->next_bb->index] = 0;
first = first->next_bb;
first = first->next_bb;
/* Remember: FIRST is actually a second basic block in the ebb. */
- while (first != EXIT_BLOCK_PTR
+ while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bb_header[first->index])
{
rtx prev, label, note, next;
{
edge e;
edge_iterator ei;
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* We've reached the exit block. The loop must be bad. */
if (dump_file)
FOR_EACH_BB (bb)
{
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
else
bb->aux = NULL;
/* There should still be something at the end of the THEN or ELSE
blocks taking us to our final destination. */
gcc_assert (JUMP_P (last)
- || (EDGE_SUCC (combo_bb, 0)->dest == EXIT_BLOCK_PTR
+ || (EDGE_SUCC (combo_bb, 0)->dest
+ == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& CALL_P (last)
&& SIBLING_CALL_P (last))
|| ((EDGE_SUCC (combo_bb, 0)->flags & EDGE_EH)
may be zero incoming edges if the THEN block didn't actually join
back up (as with a call to a non-return function). */
else if (EDGE_COUNT (join_bb->preds) < 2
- && join_bb != EXIT_BLOCK_PTR)
+ && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* We can merge the JOIN cleanly and update the dataflow, trying
again on this pass. */
&& single_succ (combo_bb) == join_bb);
/* Remove the jump and cruft from the end of the COMBO block. */
- if (join_bb != EXIT_BLOCK_PTR)
+ if (join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
tidy_fallthru_edge (single_succ_edge (combo_bb));
}
code processing. ??? we should fix this in the future. */
if (EDGE_COUNT (then_bb->succs) == 0)
{
- if (single_pred_p (else_bb) && else_bb != EXIT_BLOCK_PTR)
+ if (single_pred_p (else_bb) && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
rtx last_insn = BB_END (then_bb);
next = then_bb;
if (else_bb && (next = next->next_bb) != else_bb)
return FALSE;
- if ((next = next->next_bb) != join_bb && join_bb != EXIT_BLOCK_PTR)
+ if ((next = next->next_bb) != join_bb
+ && join_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
if (else_bb)
join_bb = NULL;
rtx trap;
/* We're not the exit block. */
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return NULL_RTX;
/* The block must have no successors. */
predictable_edge_p (then_edge)))))
return FALSE;
- if (else_bb == EXIT_BLOCK_PTR)
+ if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
rtx jump = BB_END (else_edge->src);
gcc_assert (JUMP_P (jump));
if (then_bb->next_bb == else_bb
&& then_bb->prev_bb == test_bb
- && else_bb != EXIT_BLOCK_PTR)
+ && else_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
redirect_edge_succ (FALLTHRU_EDGE (test_bb), else_bb);
new_bb = 0;
}
- else if (else_bb == EXIT_BLOCK_PTR)
+ else if (else_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
new_bb = force_nonfallthru_and_redirect (FALLTHRU_EDGE (test_bb),
else_bb, else_target);
else
saved in caller-saved regs. A caller-saved reg requires the
prologue, killing a shrink-wrap opportunity. */
if ((flag_shrink_wrap && HAVE_simple_return && !epilogue_completed)
- && ENTRY_BLOCK_PTR->next_bb == test_bb
+ && ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == test_bb
&& single_succ_p (new_dest)
- && single_succ (new_dest) == EXIT_BLOCK_PTR
+ && single_succ (new_dest) == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bitmap_intersect_p (df_get_live_in (new_dest), merge_set))
{
regset return_regs;
&& targetm.calls.function_value_regno_p (i))
bitmap_set_bit (return_regs, INCOMING_REGNO (i));
- bitmap_and_into (return_regs, df_get_live_out (ENTRY_BLOCK_PTR));
- bitmap_and_into (return_regs, df_get_live_in (EXIT_BLOCK_PTR));
+ bitmap_and_into (return_regs,
+ df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+ bitmap_and_into (return_regs,
+ df_get_live_in (EXIT_BLOCK_PTR_FOR_FN (cfun)));
if (!bitmap_empty_p (return_regs))
{
FOR_BB_INSNS_REVERSE (new_dest, insn)
{
if (JUMP_P (BB_END (dest_edge->src)))
new_dest_label = JUMP_LABEL (BB_END (dest_edge->src));
- else if (new_dest == EXIT_BLOCK_PTR)
+ else if (new_dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
new_dest_label = ret_rtx;
else
new_dest_label = block_label (new_dest);
}
/* Entry block is always executable. */
- ENTRY_BLOCK_PTR_FOR_FUNCTION (my_function)->aux
+ ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
= pool_alloc (edge_predicate_pool);
- *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FUNCTION (my_function)->aux
+ *(struct predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
= true_predicate ();
/* A simple dataflow propagation of predicates forward in the CFG.
return false;
bitmap_set_bit (info->bb_set,
SSA_NAME_IS_DEFAULT_DEF (vdef)
- ? ENTRY_BLOCK_PTR->index
+ ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
: gimple_bb (SSA_NAME_DEF_STMT (vdef))->index);
return false;
}
return REG_BR_PROB_BASE;
if (SSA_NAME_IS_DEFAULT_DEF (op))
- init_freq = ENTRY_BLOCK_PTR->frequency;
+ init_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
else
init_freq = gimple_bb (SSA_NAME_DEF_STMT (op))->frequency;
/* Assume that every memory is initialized at entry.
TODO: Can we easily determine if the value is always defined
and thus we may skip entry block? */
- if (ENTRY_BLOCK_PTR->frequency)
- max = ENTRY_BLOCK_PTR->frequency;
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
+ max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
else
max = 1;
/* Do NORETURN discovery. */
if (!skip && !TREE_THIS_VOLATILE (current_function_decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 0)
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 0)
{
warn_function_noreturn (cfun->decl);
if (dump_file)
execute_warn_function_noreturn (void)
{
if (!TREE_THIS_VOLATILE (current_function_decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 0)
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 0)
warn_function_noreturn (current_function_decl);
return 0;
}
bool ok = true;
FOR_EACH_EDGE (e, ei, current->entry_bb->preds)
- if (e->src != ENTRY_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& !bitmap_bit_p (current->split_bbs, e->src->index))
{
worklist.safe_push (e->src);
basic_block bb = worklist.pop ();
FOR_EACH_EDGE (e, ei, bb->preds)
- if (e->src != ENTRY_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& bitmap_set_bit (seen, e->src->index))
{
gcc_checking_assert (!bitmap_bit_p (current->split_bbs,
/* Do not split when we would end up calling function anyway. */
if (incoming_freq
- >= (ENTRY_BLOCK_PTR->frequency
+ >= (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency
* PARAM_VALUE (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY) / 100))
{
/* When profile is guessed, we can not expect it to give us
is likely noticeable win. */
if (back_edge
&& profile_status != PROFILE_READ
- && incoming_freq < ENTRY_BLOCK_PTR->frequency)
+ && incoming_freq < ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
" Split before loop, accepting despite low frequencies %i %i.\n",
incoming_freq,
- ENTRY_BLOCK_PTR->frequency);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency);
}
else
{
/* split_function fixes up at most one non-virtual PHI node in return_bb,
for the return value. If there are other PHIs, give up. */
- if (return_bb != EXIT_BLOCK_PTR)
+ if (return_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
gimple_stmt_iterator psi;
find_return_bb (void)
{
edge e;
- basic_block return_bb = EXIT_BLOCK_PTR;
+ basic_block return_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
gimple_stmt_iterator bsi;
bool found_return = false;
tree retval = NULL_TREE;
- if (!single_pred_p (EXIT_BLOCK_PTR))
+ if (!single_pred_p (EXIT_BLOCK_PTR_FOR_FN (cfun)))
return return_bb;
- e = single_pred_edge (EXIT_BLOCK_PTR);
+ e = single_pred_edge (EXIT_BLOCK_PTR_FOR_FN (cfun));
for (bsi = gsi_last_bb (e->src); !gsi_end_p (bsi); gsi_prev (&bsi))
{
gimple stmt = gsi_stmt (bsi);
current.split_size = 0;
current.ssa_names_to_pass = BITMAP_ALLOC (NULL);
- first.bb = ENTRY_BLOCK_PTR;
+ first.bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
first.edge_num = 0;
first.overall_time = 0;
first.overall_size = 0;
first.used_ssa_names = 0;
first.bbs_visited = 0;
stack.safe_push (first);
- ENTRY_BLOCK_PTR->aux = (void *)(intptr_t)-1;
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->aux = (void *)(intptr_t)-1;
while (!stack.is_empty ())
{
articulation, we want to have processed everything reachable
from articulation but nothing that reaches into it. */
if (entry->edge_num == EDGE_COUNT (entry->bb->succs)
- && entry->bb != ENTRY_BLOCK_PTR)
+ && entry->bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
int pos = stack.length ();
entry->can_split &= visit_bb (entry->bb, return_bb,
entry->edge_num++;
/* New BB to visit, push it to the stack. */
- if (dest != return_bb && dest != EXIT_BLOCK_PTR
+ if (dest != return_bb && dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& !dest->aux)
{
stack_entry new_entry;
}
/* We are done with examining the edges. Pop off the value from stack
and merge stuff we accumulate during the walk. */
- else if (entry->bb != ENTRY_BLOCK_PTR)
+ else if (entry->bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
stack_entry *prev = &stack[stack.length () - 2];
else
stack.pop ();
}
- ENTRY_BLOCK_PTR->aux = NULL;
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->aux = NULL;
FOR_EACH_BB (bb)
bb->aux = NULL;
stack.release ();
if (!split_part_return_p)
;
/* We have no return block, so nothing is needed. */
- else if (return_bb == EXIT_BLOCK_PTR)
+ else if (return_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
;
/* When we do not want to return value, we need to construct
new return block with empty return statement.
break;
}
}
- e = make_edge (new_return_bb, EXIT_BLOCK_PTR, 0);
+ e = make_edge (new_return_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
e->probability = REG_BR_PROB_BASE;
e->count = new_return_bb->count;
if (current_loops)
Note this can happen whether or not we have a return value. If we have
a return value, then RETURN_BB may have PHIs for real operands too. */
- if (return_bb != EXIT_BLOCK_PTR)
+ if (return_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
bool phi_p = false;
for (gsi = gsi_start_phis (return_bb); !gsi_end_p (gsi);)
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
var = BLOCK_VARS (DECL_INITIAL (node->decl));
i = vec_safe_length (*debug_args);
- cgsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ cgsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
do
{
i -= 2;
else
{
e = make_edge (call_bb, return_bb,
- return_bb == EXIT_BLOCK_PTR ? 0 : EDGE_FALLTHRU);
+ return_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ ? 0 : EDGE_FALLTHRU);
e->count = call_bb->count;
e->probability = REG_BR_PROB_BASE;
/* If there is return basic block, see what value we need to store
return value into and put call just before it. */
- if (return_bb != EXIT_BLOCK_PTR)
+ if (return_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
real_retval = retval = find_retval (return_bb);
ira_loop_tree_node_t pred_node;
basic_block pred_bb = e->src;
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
{
fprintf (ira_dump_file, " %d", subloop_node->bb->index);
FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
!= loop_tree_node))
fprintf (ira_dump_file, "(->%d:l%d)",
if (bb_node->bb != NULL)
{
FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
- if (e->src != ENTRY_BLOCK_PTR
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& (src_loop_node = IRA_BB_NODE (e->src)->parent) != loop_node)
{
for (parent = src_loop_node->parent;
at_bb_start[bb->index] = NULL;
at_bb_end[bb->index] = NULL;
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
generate_edge_moves (e);
}
allocno_last_set
executed, frequency is always equivalent. Otherwise rescale the
edge frequency. */
#define REG_FREQ_FROM_EDGE_FREQ(freq) \
- (optimize_size || (flag_branch_probabilities && !ENTRY_BLOCK_PTR->count) \
- ? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX) \
+ (optimize_size || (flag_branch_probabilities \
+ && !ENTRY_BLOCK_PTR_FOR_FN (cfun)->count) \
+ ? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX) \
? (freq * REG_FREQ_MAX / BB_FREQ_MAX) : 1)
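[Editorial note: the macro hunk above shows the shape of the change repeated throughout the patch. A minimal sketch of the before/after macro styles, using simplified stand-in types rather than GCC's real definitions, is:

  /* Illustrative sketch only -- simplified stand-ins, not GCC's types.  */
  struct basic_block_def { int count; };
  struct function { struct basic_block_def *entry_block_ptr; };

  extern struct function *cfun;  /* the function being compiled */

  /* New style: the function is an explicit parameter.  */
  #define ENTRY_BLOCK_PTR_FOR_FN(FN) ((FN)->entry_block_ptr)

  /* Old style (being removed): cfun was baked into the macro, hiding
     the dependency.  Callers now write ENTRY_BLOCK_PTR_FOR_FN (cfun).  */
  /* #define ENTRY_BLOCK_PTR  ENTRY_BLOCK_PTR_FOR_FN (cfun) */
]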
/* A modified value of flag `-fira-verbose' used internally. */
split_live_ranges_for_shrink_wrap (void)
{
basic_block bb, call_dom = NULL;
- basic_block first = single_succ (ENTRY_BLOCK_PTR);
+ basic_block first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
rtx insn, last_interesting_insn = NULL;
bitmap_head need_new, reachable;
vec<basic_block> queue;
bb = queue.pop ();
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bitmap_set_bit (&reachable, e->dest->index))
queue.quick_push (e->dest);
}
/* Mark blocks which are predecessors of the exit block so that we
can easily identify them below. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
- e->src->aux = EXIT_BLOCK_PTR;
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
+ e->src->aux = EXIT_BLOCK_PTR_FOR_FN (cfun);
/* Iterate until the worklist is empty. */
while (qlen)
if (qout >= qend)
qout = worklist;
- if (bb->aux == EXIT_BLOCK_PTR)
+ if (bb->aux == EXIT_BLOCK_PTR_FOR_FN (cfun))
/* Do not clear the aux field for blocks which are predecessors of
the EXIT block. That way we never add them to the worklist
again. */
to add the predecessors of this block to the worklist
if they are not already on the worklist. */
FOR_EACH_EDGE (e, ei, bb->preds)
- if (!e->src->aux && e->src != ENTRY_BLOCK_PTR)
+ if (!e->src->aux && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
*qin++ = e->src;
e->src->aux = e;
{
pred = INDEX_EDGE_PRED_BB (edge_list, x);
succ = INDEX_EDGE_SUCC_BB (edge_list, x);
- if (pred == ENTRY_BLOCK_PTR)
+ if (pred == ENTRY_BLOCK_PTR_FOR_FN (cfun))
bitmap_copy (earliest[x], antin[succ->index]);
else
{
- if (succ == EXIT_BLOCK_PTR)
+ if (succ == EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_clear (earliest[x]);
else
{
do not want to be overly optimistic. Consider an outgoing edge from
the entry block. That edge should always have a LATER value the
same as EARLIEST for that edge. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
bitmap_copy (later[(size_t) e->aux], earliest[(size_t) e->aux]);
/* Add all the blocks to the worklist. This prevents an early exit from
antloc[e->src->index])
/* If LATER for an outgoing edge was changed, then we need
to add the target of the outgoing edge to the worklist. */
- && e->dest != EXIT_BLOCK_PTR && e->dest->aux == 0)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && e->dest->aux == 0)
{
*qin++ = e->dest;
e->dest->aux = e;
for the EXIT block. We allocated an extra entry in the LATERIN array
for just this purpose. */
bitmap_ones (laterin[last_basic_block]);
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
bitmap_and (laterin[last_basic_block],
laterin[last_basic_block],
later[(size_t) e->aux]);
{
basic_block b = INDEX_EDGE_SUCC_BB (edge_list, x);
- if (b == EXIT_BLOCK_PTR)
+ if (b == EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_and_compl (insert[x], later[x], laterin[last_basic_block]);
else
bitmap_and_compl (insert[x], later[x], laterin[b->index]);
/* Mark blocks which are successors of the entry block so that we
can easily identify them below. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
- e->dest->aux = ENTRY_BLOCK_PTR;
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
+ e->dest->aux = ENTRY_BLOCK_PTR_FOR_FN (cfun);
/* Iterate until the worklist is empty. */
while (qlen)
/* If one of the predecessor blocks is the ENTRY block, then the
intersection of avouts is the null set. We can identify such blocks
by the special value in the AUX field in the block structure. */
- if (bb->aux == ENTRY_BLOCK_PTR)
+ if (bb->aux == ENTRY_BLOCK_PTR_FOR_FN (cfun))
/* Do not clear the aux field for blocks which are successors of the
ENTRY block. That way we never add them to the worklist again. */
bitmap_clear (avin[bb->index]);
to add the successors of this block to the worklist
if they are not already on the worklist. */
FOR_EACH_EDGE (e, ei, bb->succs)
- if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR)
+ if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
*qin++ = e->dest;
e->dest->aux = e;
{
pred = INDEX_EDGE_PRED_BB (edge_list, x);
succ = INDEX_EDGE_SUCC_BB (edge_list, x);
- if (succ == EXIT_BLOCK_PTR)
+ if (succ == EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_copy (farthest[x], st_avout[pred->index]);
else
{
- if (pred == ENTRY_BLOCK_PTR)
+ if (pred == ENTRY_BLOCK_PTR_FOR_FN (cfun))
bitmap_clear (farthest[x]);
else
{
do not want to be overly optimistic. Consider an incoming edge to
the exit block. That edge should always have a NEARER value the
same as FARTHEST for that edge. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
bitmap_copy (nearer[(size_t)e->aux], farthest[(size_t)e->aux]);
/* Add all the blocks to the worklist. This prevents an early exit
st_avloc[e->dest->index])
/* If NEARER for an incoming edge was changed, then we need
to add the source of the incoming edge to the worklist. */
- && e->src != ENTRY_BLOCK_PTR && e->src->aux == 0)
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun) && e->src->aux == 0)
{
*tos++ = e->src;
e->src->aux = e;
for the ENTRY block. We allocated an extra entry in the NEAREROUT array
for just this purpose. */
bitmap_ones (nearerout[last_basic_block]);
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
bitmap_and (nearerout[last_basic_block],
nearerout[last_basic_block],
nearer[(size_t) e->aux]);
for (x = 0; x < NUM_EDGES (edge_list); x++)
{
basic_block b = INDEX_EDGE_PRED_BB (edge_list, x);
- if (b == ENTRY_BLOCK_PTR)
+ if (b == ENTRY_BLOCK_PTR_FOR_FN (cfun))
bitmap_and_compl (insert[x], nearer[x], nearerout[last_basic_block]);
else
bitmap_and_compl (insert[x], nearer[x], nearerout[b->index]);
return;
e = loop_preheader_edge (loop);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return;
altered = ALLOC_REG_SET (&reg_obstack);
}
if (!single_pred_p (e->src)
- || single_pred (e->src) == ENTRY_BLOCK_PTR)
+ || single_pred (e->src) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
break;
e = single_pred_edge (e->src);
}
/* Create a block with the condition. */
prob = true_edge->probability;
- switch_bb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
+ switch_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
seq = compare_and_jump_seq (XEXP (cond, 0), XEXP (cond, 1), GET_CODE (cond),
block_label (true_edge->dest),
prob, cinsn);
&& ! df_regs_ever_live_p (hard_regno + j))
/* It needs save restore. */
hard_regno_costs[hard_regno]
- += 2 * ENTRY_BLOCK_PTR->next_bb->frequency + 1;
+ += 2 * ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->frequency + 1;
priority = targetm.register_priority (hard_regno);
if (best_hard_regno < 0 || hard_regno_costs[hard_regno] < best_cost
|| (hard_regno_costs[hard_regno] == best_cost
{
if (lra_dump_file != NULL)
fprintf (lra_dump_file, " %d", bb->index);
- if (bb->next_bb == EXIT_BLOCK_PTR || LABEL_P (BB_HEAD (bb->next_bb)))
+ if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || LABEL_P (BB_HEAD (bb->next_bb)))
break;
e = find_fallthru_edge (bb->succs);
if (! e)
for (i = n_blocks_inverted - 1; i >= 0; --i)
{
bb = BASIC_BLOCK (post_order_rev_cfg[i]);
- if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
process_bb_lives (bb, curr_point);
}
bb->flags &= ~BB_REACHABLE;
/* Place the exit block on our worklist. */
- EXIT_BLOCK_PTR->flags |= BB_REACHABLE;
- *tos++ = EXIT_BLOCK_PTR;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_REACHABLE;
+ *tos++ = EXIT_BLOCK_PTR_FOR_FN (cfun);
/* Iterate: find everything reachable from what we've already seen. */
while (tos != worklist)
index = streamer_read_hwi (ib);
}
- p_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
+ p_bb = ENTRY_BLOCK_PTR_FOR_FN (fn);
index = streamer_read_hwi (ib);
while (index != -1)
{
of a gimple body is used by the cgraph routines, but we should
really use the presence of the CFG. */
{
- edge_iterator ei = ei_start (ENTRY_BLOCK_PTR->succs);
+ edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
gimple_set_body (fn_decl, bb_seq (ei_edge (ei)->dest));
}
streamer_write_hwi (ob, -1);
- bb = ENTRY_BLOCK_PTR;
+ bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
while (bb->next_bb)
{
streamer_write_hwi (ob, bb->next_bb->index);
/* Compute constants b, k_pos, k_neg used in the cost function calculation.
b = sqrt(avg_vertex_weight(cfg)); k_pos = b; k_neg = 50b. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
total_vertex_weight += bb->count;
sqrt_avg_vertex_weight = mcf_sqrt (total_vertex_weight /
if (dump_file)
fprintf (dump_file, "\nVertex transformation:\n");
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
/* v'->v'': index1->(index1+1). */
i = 2 * bb->index;
if (dump_file)
fprintf (dump_file, "\nadjust_cfg_counts():\n");
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
i = 2 * bb->index;
}
}
- ENTRY_BLOCK_PTR->count = sum_edge_counts (ENTRY_BLOCK_PTR->succs);
- EXIT_BLOCK_PTR->count = sum_edge_counts (EXIT_BLOCK_PTR->preds);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count =
+ sum_edge_counts (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->count =
+ sum_edge_counts (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
/* Compute edge probabilities. */
FOR_ALL_BB (bb)
fallthrough edge; there can be at most one, but there could be
none at all, e.g. when exit is called. */
pre_exit = 0;
- FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (eg->flags & EDGE_FALLTHRU)
{
basic_block src_bb = eg->src;
/* If this function returns a value at the end, we have to
insert the final mode switch before the return value copy
to its hard register. */
- if (EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 1
+ if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
&& NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
&& GET_CODE (PATTERN (last_insn)) == USE
&& GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
/* Split the edge from the entry block, so that we can note that
there NORMAL_MODE is supplied. */
- post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
+ post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
#endif
/* Avoid annoying special cases of edges going to exit
block. */
- FOR_EACH_EDGE (e, i, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, i, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if ((e->flags & EDGE_FALLTHRU) && (EDGE_COUNT (e->src->succs) > 1))
split_edge (e);
/* Finalize layout changes. */
FOR_EACH_BB (bb)
- if (bb->next_bb != EXIT_BLOCK_PTR)
+ if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
free_dominance_info (CDI_DOMINATORS);
cfg_layout_finalize ();
{
gcc_assert (root_omp_region == NULL);
calculate_dominance_info (CDI_DOMINATORS);
- build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
+ build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
}
/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
/* Note we start at block 1. */
- if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
FOR_BB_BETWEEN (bb,
- ENTRY_BLOCK_PTR->next_bb->next_bb,
- EXIT_BLOCK_PTR,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun),
next_bb)
{
/* Don't try anything on basic blocks with strange predecessors. */
if (profile_status_for_function (fun) == PROFILE_ABSENT)
return true;
if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
- && freq < (ENTRY_BLOCK_PTR_FOR_FUNCTION (fun)->frequency * 2 / 3))
+ && freq < (ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency * 2 / 3))
return false;
if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0)
return false;
- if (freq < (ENTRY_BLOCK_PTR_FOR_FUNCTION (fun)->frequency
+ if (freq < (ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency
/ PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
return false;
return true;
return false;
if (!frequency)
return true;
- if (!ENTRY_BLOCK_PTR->frequency)
+ if (!ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
return false;
- if (ENTRY_BLOCK_PTR->count)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count)
{
gcov_type computed_count;
/* Check for possibility of overflow, in which case entry bb count
is large enough to do the division first without losing much
precision. */
- if (ENTRY_BLOCK_PTR->count < REG_BR_PROB_BASE * REG_BR_PROB_BASE)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count
+ < REG_BR_PROB_BASE * REG_BR_PROB_BASE)
{
gcov_type scaled_count
- = frequency * ENTRY_BLOCK_PTR->count * unlikely_count_fraction;
- computed_count = RDIV (scaled_count, ENTRY_BLOCK_PTR->frequency);
+ = frequency * ENTRY_BLOCK_PTR_FOR_FN (cfun)->count
+ * unlikely_count_fraction;
+ computed_count = RDIV (scaled_count,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency);
}
else
{
- computed_count = RDIV (ENTRY_BLOCK_PTR->count,
- ENTRY_BLOCK_PTR->frequency);
+ computed_count = RDIV (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency);
computed_count *= frequency * unlikely_count_fraction;
}
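[Editorial note: the two arms above exist to avoid gcov_type overflow: multiply first when the entry count is small enough to keep precision, divide first when it is not. A compact restatement, hedged as a sketch that uses plain long long instead of gcov_type and truncating division instead of GCC's rounding RDIV:

  /* Sketch only: same guard as above, simplified arithmetic.  */
  static long long
  scale_count (long long frequency, long long entry_count,
               long long entry_freq, long long fraction, long long base)
  {
    if (entry_count < base * base)
      /* Small enough: multiply first, keeping full precision.  */
      return frequency * entry_count * fraction / entry_freq;
    /* Huge count: divide first so the product cannot overflow.  */
    return entry_count / entry_freq * frequency * fraction;
  }
]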
if (computed_count >= profile_info->runs)
gimple_predict_edge (edge e, enum br_predictor predictor, int probability)
{
gcc_assert (profile_status != PROFILE_GUESSED);
- if ((e->src != ENTRY_BLOCK_PTR && EDGE_COUNT (e->src->succs) > 1)
+ if ((e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && EDGE_COUNT (e->src->succs) > 1)
&& flag_guess_branch_prob && optimize)
{
struct edge_prediction *i = XNEW (struct edge_prediction);
enum prediction direction;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
return_stmt = last_stmt (e->src);
if (return_stmt
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (!(e->flags & (EDGE_ABNORMAL | EDGE_FAKE | EDGE_EH)))
{
has_return_edges = true;
FOR_EACH_EDGE (e, ei, bb->succs)
{
/* Predict edges to user labels with attributes. */
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
gimple_stmt_iterator gi;
for (gi = gsi_start_bb (e->dest); !gsi_end_p (gi); gsi_next (&gi))
return_block:
return_stmt. */
if (e->dest != bb->next_bb
- && e->dest != EXIT_BLOCK_PTR
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& single_succ_p (e->dest)
- && single_succ_edge (e->dest)->dest == EXIT_BLOCK_PTR
+ && single_succ_edge (e->dest)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& (last = last_stmt (e->dest)) != NULL
&& gimple_code (last) == GIMPLE_RETURN)
{
/* Look for the block we are guarding (i.e. we dominate it,
but it doesn't postdominate us). */
- if (e->dest != EXIT_BLOCK_PTR && e->dest != bb
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && e->dest != bb
&& dominated_by_p (CDI_DOMINATORS, e->dest, e->src)
&& !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest))
{
}
BLOCK_INFO (bb)->npredecessors = count;
/* When function never returns, we will never process exit block. */
- if (!count && bb == EXIT_BLOCK_PTR)
+ if (!count && bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->count = bb->frequency = 0;
}
{
bitmap_set_bit (tovisit, bb->index);
}
- propagate_freq (ENTRY_BLOCK_PTR, tovisit);
+ propagate_freq (ENTRY_BLOCK_PTR_FOR_FN (cfun), tovisit);
BITMAP_FREE (tovisit);
}
/* Don't overwrite the estimated frequencies when the profile for
the function is missing. We may drop this function PROFILE_GUESSED
later in drop_profile (). */
- if (!ENTRY_BLOCK_PTR->count)
+ if (!ENTRY_BLOCK_PTR_FOR_FN (cfun)->count)
return 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
true_count_max = MAX (bb->count, true_count_max);
count_max = MAX (true_count_max, 1);
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max;
return true_count_max;
/* Frequencies are out of range. This either means that function contains
internal loop executing more than BB_FREQ_MAX times or profile feedback
is available and function has not been executed at all. */
- if (ENTRY_BLOCK_PTR->frequency == 0)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency == 0)
return true;
/* Maximally BB_FREQ_MAX^2 so overflow won't happen. */
- limit = ENTRY_BLOCK_PTR->frequency * threshold;
+ limit = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency * threshold;
FOR_EACH_BB (bb)
{
rtx insn;
mark_dfs_back_edges ();
- single_succ_edge (ENTRY_BLOCK_PTR)->probability = REG_BR_PROB_BASE;
+ single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->probability =
+ REG_BR_PROB_BASE;
/* Set up block info for each basic block. */
alloc_aux_for_blocks (sizeof (struct block_info_def));
alloc_aux_for_edges (sizeof (struct edge_info_def));
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
memcpy (&freq_max, &BLOCK_INFO (bb)->frequency, sizeof (freq_max));
sreal_div (&freq_max, &real_bb_freq_max, &freq_max);
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
sreal tmp;
max counts. */
gcov_type count_max = 0;
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
count_max = MAX (bb->count, count_max);
if (profile_status == PROFILE_GUESSED
int num_edges = NUM_EDGES (el);
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
case HIST_TYPE_TIME_PROFILE:
{
- basic_block bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
+ basic_block bb =
+ split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
gimple_stmt_iterator gsi = gsi_start_bb (bb);
gimple_gen_time_profiler (t, 0, gsi);
gcov_type *counts;
/* Count the edges to be (possibly) instrumented. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
edge e;
edge_iterator ei;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
FOR_EACH_EDGE (e, ei, bb->succs)
{
inconsistent = true;
}
if (bb->count != sum_edge_counts (bb->succs) &&
- ! (find_edge (bb, EXIT_BLOCK_PTR) != NULL && block_ends_with_call_p (bb)))
+ ! (find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun)) != NULL
+ && block_ends_with_call_p (bb)))
{
if (dump_file)
{
set_bb_counts (void)
{
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
bb->count = sum_edge_counts (bb->succs);
gcc_assert (bb->count >= 0);
/* The first count in the .da file is the number of times that the function
was entered. This is the exec_count for block zero. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
int overlap = 0;
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
count_total += bb->count;
freq_total += bb->frequency;
if (count_total == 0 || freq_total == 0)
return 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
overlap += MIN (bb->count * OVERLAP_BASE / count_total,
bb->frequency * OVERLAP_BASE / freq_total);
/* Attach extra info block to each bb. */
alloc_aux_for_blocks (sizeof (struct bb_info));
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
}
/* Avoid predicting entry on exit nodes. */
- BB_INFO (EXIT_BLOCK_PTR)->succ_count = 2;
- BB_INFO (ENTRY_BLOCK_PTR)->pred_count = 2;
+ BB_INFO (EXIT_BLOCK_PTR_FOR_FN (cfun))->succ_count = 2;
+ BB_INFO (ENTRY_BLOCK_PTR_FOR_FN (cfun))->pred_count = 2;
num_edges = read_profile_edge_counts (exec_counts);
{
passes++;
changes = 0;
- FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR, NULL, prev_bb)
+ FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, prev_bb)
{
struct bb_info *bi = BB_INFO (bb);
if (! bi->count_valid)
hist_br_prob[i] = 0;
num_branches = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
edge e;
edge_iterator ei;
already present. We get negative frequency from the entry
point. */
if ((e->count < 0
- && e->dest == EXIT_BLOCK_PTR)
+ && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
|| (e->count > bb->count
- && e->dest != EXIT_BLOCK_PTR))
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)))
{
if (block_ends_with_call_p (bb))
e->count = e->count < 0 ? 0 : bb->count;
ne->goto_locus = e->goto_locus;
}
if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
- && e->dest != EXIT_BLOCK_PTR)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
need_exit_edge = 1;
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
have_exit_edge = 1;
}
FOR_EACH_EDGE (e, ei, bb->preds)
{
if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
- && e->src != ENTRY_BLOCK_PTR)
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
need_entry_edge = 1;
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
have_entry_edge = 1;
}
if (dump_file)
fprintf (dump_file, "Adding fake exit edge to bb %i\n",
bb->index);
- make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}
if (need_entry_edge && !have_entry_edge)
{
if (dump_file)
fprintf (dump_file, "Adding fake entry edge to bb %i\n",
bb->index);
- make_edge (ENTRY_BLOCK_PTR, bb, EDGE_FAKE);
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, EDGE_FAKE);
/* Avoid bbs that have both fake entry edge and also some
exit edge. One of those edges wouldn't be added to the
spanning tree, but we can't instrument any of them. */
/* Mark edges we've replaced by fake edges above as ignored. */
if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
- && e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR)
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
EDGE_INFO (e)->ignore = 1;
ignored_edges++;
gcov_write_length (offset);
/* Arcs */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge e;
edge_iterator ei;
gimple_stmt_iterator gsi;
gcov_position_t offset = 0;
- if (bb == ENTRY_BLOCK_PTR->next_bb)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
{
expanded_location curr_location =
expand_location (DECL_SOURCE_LOCATION (current_function_decl));
basic_block bb;
/* We use aux field for standard union-find algorithm. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
bb->aux = bb;
/* Add fake edge exit to entry we can't instrument. */
- union_groups (EXIT_BLOCK_PTR, ENTRY_BLOCK_PTR);
+ union_groups (EXIT_BLOCK_PTR_FOR_FN (cfun), ENTRY_BLOCK_PTR_FOR_FN (cfun));
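[Editorial note: the "aux field for standard union-find" comment above admits a very small illustration. This is a toy version with path halving; GCC's real find_group/union_groups in profile.c differ in detail:

  struct bb { struct bb *aux; };  /* aux holds the union-find parent */

  /* Each block starts with bb->aux = bb, as in the loop above.  */
  static struct bb *
  find_group (struct bb *b)
  {
    while (b->aux != b)
      b = b->aux = b->aux->aux;   /* path halving */
    return b;
  }

  static void
  union_groups (struct bb *a, struct bb *b)
  {
    find_group (a)->aux = find_group (b);
  }
]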
/* First add all abnormal edges to the tree unless they form a cycle. Also
add all edges to EXIT_BLOCK_PTR to avoid inserting profiling code behind
{
edge e = INDEX_EDGE (el, i);
if (((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_FAKE))
- || e->dest == EXIT_BLOCK_PTR)
+ || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
&& !EDGE_INFO (e)->ignore
&& (find_group (e->src) != find_group (e->dest)))
{
Note that we are inserting converted code here. This code is
never seen by the convert_regs pass. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
{
basic_block block = e->dest;
block_info bi = BLOCK_INFO (block);
value_reg_high = END_HARD_REGNO (retvalue) - 1;
}
- output_stack = &BLOCK_INFO (EXIT_BLOCK_PTR)->stack_in;
+ output_stack = &BLOCK_INFO (EXIT_BLOCK_PTR_FOR_FN (cfun))->stack_in;
if (value_reg_low == -1)
output_stack->top = -1;
else
starting_stack_p = false;
FOR_EACH_BB (bb)
- if (bb != ENTRY_BLOCK_PTR)
+ if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
edge e;
edge_iterator ei;
/* Construct the desired stack for function exit. */
convert_regs_exit ();
- BLOCK_INFO (EXIT_BLOCK_PTR)->done = 1;
+ BLOCK_INFO (EXIT_BLOCK_PTR_FOR_FN (cfun))->done = 1;
/* ??? Future: process inner loops first, and give them arbitrary
initial stacks which emit_swap_insn can modify. This ought to
prevent double fxch that often appears at the head of a loop. */
/* Process all blocks reachable from all entry points. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
cfg_altered |= convert_regs_2 (e->dest);
/* ??? Process all unreachable blocks. Though there's no excuse
FOR_EACH_EDGE (e, ei, bb->preds)
if (!(e->flags & EDGE_DFS_BACK)
- && e->src != ENTRY_BLOCK_PTR)
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
bi->predecessors++;
/* Set current register status at last instruction `uninitialized'. */
frequency. */
#define REG_FREQ_FROM_BB(bb) (optimize_size \
|| (flag_branch_probabilities \
- && !ENTRY_BLOCK_PTR->count) \
+ && !ENTRY_BLOCK_PTR_FOR_FN (cfun)->count) \
? REG_FREQ_MAX \
: ((bb)->frequency * REG_FREQ_MAX / BB_FREQ_MAX)\
? ((bb)->frequency * REG_FREQ_MAX / BB_FREQ_MAX)\
&& reg_mentioned_p (XEXP (note, 0), in)
/* Check that a former pseudo is valid; see find_dummy_reload. */
&& (ORIGINAL_REGNO (XEXP (note, 0)) < FIRST_PSEUDO_REGISTER
- || (! bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR),
+ || (! bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
ORIGINAL_REGNO (XEXP (note, 0)))
&& hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] == 1))
&& ! refers_to_regno_for_reload_p (regno,
&& !fixed_regs[regno]
/* Check that a former pseudo is valid; see find_dummy_reload. */
&& (ORIGINAL_REGNO (XEXP (note, 0)) < FIRST_PSEUDO_REGISTER
- || (!bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR),
+ || (!bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
ORIGINAL_REGNO (XEXP (note, 0)))
&& hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] == 1)))
{
can ignore the conflict). We must never introduce writes
to such hardregs, as they would clobber the other live
pseudo. See PR 20973. */
- || (!bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR),
+ || (!bitmap_bit_p (DF_LR_OUT (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
ORIGINAL_REGNO (in))
/* Similarly, only do this if we can be sure that the death
note is still valid. global can assign some hardreg to
bb->flags &= ~BB_REACHABLE;
/* Place the exit block on our worklist. */
- EXIT_BLOCK_PTR->flags |= BB_REACHABLE;
- *tos++ = EXIT_BLOCK_PTR;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_REACHABLE;
+ *tos++ = EXIT_BLOCK_PTR_FOR_FN (cfun);
/* Iterate: find everything reachable from what we've already seen. */
while (tos != worklist)
/* The start of the function. */
else if (insn == 0)
- return ENTRY_BLOCK_PTR->next_bb->index;
+ return ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->index;
/* See if any of the upcoming CODE_LABELs start a basic block. If we reach
anything other than a CODE_LABEL or note, we can't find this code. */
/* Get starting and ending insn, handling the case where each might
be a SEQUENCE. */
- start_insn = (b == ENTRY_BLOCK_PTR->next_bb->index ?
+ start_insn = (b == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb->index ?
insns : BB_HEAD (BASIC_BLOCK (b)));
stop_insn = target;
{
edge e;
tail = BB_END (bb);
- if (bb->next_bb == EXIT_BLOCK_PTR
+ if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| LABEL_P (BB_HEAD (bb->next_bb)))
break;
e = find_fallthru_edge (bb->succs);
/* Recovery blocks are always bounded by BARRIERS,
therefore, they always form single block EBB,
therefore, we can use rec->index to identify such EBBs. */
- if (after == EXIT_BLOCK_PTR)
+ if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_set_bit (&dont_calc_deps, bb->index);
else if (after == last_bb)
last_bb = bb;
/* INSN is a speculation check that will simply reexecute the speculatively
scheduled instruction if the speculation fails. */
#define IS_SPECULATION_SIMPLE_CHECK_P(INSN) \
- (RECOVERY_BLOCK (INSN) == EXIT_BLOCK_PTR)
+ (RECOVERY_BLOCK (INSN) == EXIT_BLOCK_PTR_FOR_FN (cfun))
/* INSN is a speculation check that will branch to RECOVERY_BLOCK if the
speculation fails. Insns in that block will reexecute the speculatively
scheduled code and then will return immediately after INSN thus preserving
semantics of the program. */
#define IS_SPECULATION_BRANCHY_CHECK_P(INSN) \
- (RECOVERY_BLOCK (INSN) != NULL && RECOVERY_BLOCK (INSN) != EXIT_BLOCK_PTR)
+ (RECOVERY_BLOCK (INSN) != NULL \
+ && RECOVERY_BLOCK (INSN) != EXIT_BLOCK_PTR_FOR_FN (cfun))
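[Editorial note: a tiny standalone sketch of the classification these two predicates implement, with simplified types and assumed parameter names; not GCC code:

  struct bb;  /* opaque basic block */

  /* A simple check re-executes in place: its recovery block is the
     exit sentinel.  A branchy check has a real recovery block.  */
  static int
  is_simple_check (struct bb *recovery_block, struct bb *exit_bb)
  {
    return recovery_block == exit_bb;
  }

  static int
  is_branchy_check (struct bb *recovery_block, struct bb *exit_bb)
  {
    return recovery_block != 0 && recovery_block != exit_bb;
  }
]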
\f
/* Dep status (aka ds_t) of the link encapsulates all information for a given
BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
i++;
- if (bb->next_bb == EXIT_BLOCK_PTR
+ if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| LABEL_P (BB_HEAD (bb->next_bb)))
break;
/* DFS traversal to find inner loops in the cfg. */
- current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR)->succs);
+ current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->succs);
sp = -1;
while (1)
/* If we exited the loop early, then I is the header of
a non-reducible loop and we should quit processing it
now. */
- if (jbb != EXIT_BLOCK_PTR)
+ if (jbb != EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
/* I is a header of an inner loop, or block 0 in a subroutine
/* Decrease degree of all I's successors for topological
ordering. */
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
--degree[e->dest->index];
/* Estimate # insns, and count # blocks in the region. */
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
if (single_succ_p (jbb)
- && single_succ (jbb) == EXIT_BLOCK_PTR)
+ && single_succ (jbb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
queue[++tail] = jbb->index;
bitmap_set_bit (in_queue, jbb->index);
FOR_EACH_EDGE (e, ei, bb->preds)
{
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
node = e->src->index;
/* See discussion above about nodes not marked as in
this loop during the initial DFS traversal. */
- if (e->src == ENTRY_BLOCK_PTR
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
|| max_hdr[node] != loop_head)
{
tail = -1;
queue[head] = queue[tail--];
FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
--degree[e->dest->index];
}
else
This may provide several smaller regions instead
of one too_large region. */
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_set_bit (extended_rgn_header, e->dest->index);
}
}
BLOCK_TO_BB (bbn) = 0;
FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
degree[e->dest->index]--;
if (!large)
idx++;
FOR_EACH_EDGE (e, ei, BASIC_BLOCK (succn)->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
degree[e->dest->index]--;
}
}
edge out_edge;
edge_iterator out_ei;
- if (in_edge->src == ENTRY_BLOCK_PTR)
+ if (in_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
pred_bb = BLOCK_TO_BB (in_edge->src->index);
FOR_EACH_EDGE (e, ei, block->succs)
{
/* Only bbs "below" bb, in the same region, are interesting. */
- if (e->dest == EXIT_BLOCK_PTR
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
|| BLOCK_TO_BB (e->dest->index) <= bb)
continue;
extend_regions ();
bitmap_set_bit (¬_in_df, bb->index);
- if (after == 0 || after == EXIT_BLOCK_PTR)
+ if (after == 0 || after == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
rgn_make_new_region_out_of_new_block (bb);
- RGN_DONT_CALC_DEPS (nr_regions - 1) = (after == EXIT_BLOCK_PTR);
+ RGN_DONT_CALC_DEPS (nr_regions - 1)
+ = (after == EXIT_BLOCK_PTR_FOR_FN (cfun));
}
else
{
successors. Otherwise remove it. */
if (!sel_bb_empty_p (bb)
|| (single_succ_p (bb)
- && single_succ (bb) == EXIT_BLOCK_PTR
+ && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& (!single_pred_p (bb)
|| !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
|| EDGE_COUNT (bb->preds) == 0
&& EDGE_COUNT (xbb->succs) == 1
&& (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
/* When successor is an EXIT block, it may not be the next block. */
- && single_succ (xbb) != EXIT_BLOCK_PTR
+ && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun)
/* And unconditional jump in previous basic block leads to
next basic block of XBB and this jump can be safely removed. */
&& in_current_region_p (xbb->prev_bb)
init_lv_set (bb);
/* Don't forget EXIT_BLOCK. */
- init_lv_set (EXIT_BLOCK_PTR);
+ init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
}
/* Release lv set of HEAD. */
basic_block bb;
/* Don't forget EXIT_BLOCK. */
- free_lv_set (EXIT_BLOCK_PTR);
+ free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
/* Free LV sets. */
FOR_EACH_BB (bb)
{
insn_t head;
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
gcc_assert (exit_insn != NULL_RTX);
head = exit_insn;
if (sel_bb_empty_p (bb))
return NULL_RTX;
- gcc_assert (bb != EXIT_BLOCK_PTR);
+ gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
return BB_END (bb);
}
basic_block next_bb = bb_next_bb (bb);
edge e;
- if (next_bb == EXIT_BLOCK_PTR
+ if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| bitmap_bit_p (forced_ebb_heads, next_bb->index)
|| (LABEL_P (BB_HEAD (next_bb))
/* NB: LABEL_NUSES () is not maintained outside of jump.c.
recovery_block = sched_create_recovery_block (&before_recovery);
if (before_recovery)
- copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
+ copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (sel_bb_empty_p (recovery_block));
sched_create_recovery_edges (first_bb, recovery_block, second_bb);
emit_insn (nop_pattern);
exit_insn = get_insns ();
end_sequence ();
- set_block_for_insn (exit_insn, EXIT_BLOCK_PTR);
+ set_block_for_insn (exit_insn, EXIT_BLOCK_PTR_FOR_FN (cfun));
}
/* Free special insns used in the scheduler. */
If it is so - delete this jump and clear data sets of its
basic block if it becomes empty. */
if (next_bb->prev_bb == prev_bb
- && prev_bb != ENTRY_BLOCK_PTR
+ && prev_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& bb_has_removable_jump_to_p (prev_bb, next_bb))
{
redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
if (!current_loop_nest)
return false;
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
inner_loop = bb->loop_father;
vec<edge> edges = vNULL;
struct loop_exit *exit;
- gcc_assert (loop->latch != EXIT_BLOCK_PTR
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& current_loops->state & LOOPS_HAVE_RECORDED_EXITS);
for (exit = loop->exits->next; exit->e; exit = exit->next)
if (!INSN_NOP_P (first))
return false;
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
last = sel_bb_end (bb);
i.current_exit = -1;
i.loop_exits.create (0);
- if (bb != EXIT_BLOCK_PTR && BB_END (bb) != insn)
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun) && BB_END (bb) != insn)
{
i.bb_end = false;
{
basic_block bb = ip->e2->dest;
- if (bb == EXIT_BLOCK_PTR || bb == after_recovery)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb == after_recovery)
*succp = exit_insn;
else
{
edge e;
/* Loop over edges from E1 to E2, inclusive. */
- for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR; e = EDGE_SUCC (e->dest, 0))
+ for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun);
+ e = EDGE_SUCC (e->dest, 0))
{
if (EDGE_COUNT (e->dest->preds) == 2)
{
if (DEBUG_INSN_P (insn)
&& single_succ_p (new_bb)
&& (succ = single_succ (new_bb))
- && succ != EXIT_BLOCK_PTR
+ && succ != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& DEBUG_INSN_P ((last = sel_bb_end (new_bb))))
{
while (insn != last && (DEBUG_INSN_P (insn) || NOTE_P (insn)))
/* If tmp is NULL, we found an insertion on every edge, blank the
insertion vector for these edges, and insert at the start of the BB. */
- if (!tmp && bb != EXIT_BLOCK_PTR)
+ if (!tmp && bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
FOR_EACH_EDGE (tmp, ei, e->dest->preds)
{
}
bb = act->dest;
- if (bb == EXIT_BLOCK_PTR
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| bitmap_bit_p (visited, bb->index))
{
if (!ei_end_p (ei))
vec<tm_region_p> bb_regions = vNULL;
all_tm_regions = region;
- bb = single_succ (ENTRY_BLOCK_PTR);
+ bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* We could store this information in bb->aux, but we may get called
through get_all_tm_blocks() from another pass that may be already
struct tm_region *region = (struct tm_region *)
obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
memset (region, 0, sizeof (*region));
- region->entry_block = single_succ (ENTRY_BLOCK_PTR);
+ region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* For a clone, the entire function is the region. But even if
we don't need to record any exit blocks, we may need to
record irrevocable blocks. */
/* If the out state of this block changed, then we need to add
its successors to the worklist if they are not already in. */
FOR_EACH_EDGE (e, ei, bb->succs)
- if (!AVAIL_IN_WORKLIST_P (e->dest) && e->dest != EXIT_BLOCK_PTR)
+ if (!AVAIL_IN_WORKLIST_P (e->dest)
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
*qin++ = e->dest;
AVAIL_IN_WORKLIST_P (e->dest) = true;
if (for_clone)
{
old_irr = d->irrevocable_blocks_clone;
- queue.quick_push (single_succ (ENTRY_BLOCK_PTR));
+ queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
{
- ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR), new_irr,
+ ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ new_irr,
old_irr, NULL);
- ret = bitmap_bit_p (new_irr, single_succ (ENTRY_BLOCK_PTR)->index);
+ ret = bitmap_bit_p (new_irr,
+ single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
}
}
else
calculate_dominance_info (CDI_DOMINATORS);
need_ssa_rename =
- ipa_tm_transform_calls (d->clone, NULL, single_succ (ENTRY_BLOCK_PTR),
+ ipa_tm_transform_calls (d->clone, NULL,
+ single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
d->irrevocable_blocks_clone);
if (need_ssa_rename)
initial_cfg_capacity);
SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK,
- ENTRY_BLOCK_PTR_FOR_FUNCTION (fn));
+ ENTRY_BLOCK_PTR_FOR_FN (fn));
SET_BASIC_BLOCK_FOR_FUNCTION (fn, EXIT_BLOCK,
- EXIT_BLOCK_PTR_FOR_FUNCTION (fn));
+ EXIT_BLOCK_PTR_FOR_FN (fn));
- ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->next_bb
- = EXIT_BLOCK_PTR_FOR_FUNCTION (fn);
- EXIT_BLOCK_PTR_FOR_FUNCTION (fn)->prev_bb
- = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
+ ENTRY_BLOCK_PTR_FOR_FN (fn)->next_bb
+ = EXIT_BLOCK_PTR_FOR_FN (fn);
+ EXIT_BLOCK_PTR_FOR_FN (fn)->prev_bb
+ = ENTRY_BLOCK_PTR_FOR_FN (fn);
}
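[Editorial note: the two assignments above splice the sentinels into an empty doubly-linked block chain. A self-contained sketch of that invariant, with a toy struct rather than GCC's basic_block:

  #include <assert.h>
  #include <stddef.h>

  struct bb { struct bb *prev_bb, *next_bb; };

  /* Mirror of the initialization above: entry and exit are adjacent
     until real blocks are inserted between them.  */
  static void
  link_empty_cfg (struct bb *entry, struct bb *exit)
  {
    entry->prev_bb = NULL;
    entry->next_bb = exit;
    exit->prev_bb = entry;
    exit->next_bb = NULL;
  }

  int
  main (void)
  {
    struct bb entry, exit;
    link_empty_cfg (&entry, &exit);
    assert (entry.next_bb == &exit && exit.prev_bb == &entry);
    return 0;
  }
]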
void
/* Make sure there is always at least one block, even if it's empty. */
if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
- create_empty_bb (ENTRY_BLOCK_PTR);
+ create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* Adjust the size of the array. */
if (basic_block_info->length () < (size_t) n_basic_blocks_for_fn (cfun))
gimple stmt = NULL;
bool start_new_block = true;
bool first_stmt_of_seq = true;
- basic_block bb = ENTRY_BLOCK_PTR;
+ basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
while (!gsi_end_p (i))
{
/* Create an edge from entry to the first block with executable
statements in it. */
- make_edge (ENTRY_BLOCK_PTR, BASIC_BLOCK (NUM_FIXED_BLOCKS), EDGE_FALLTHRU);
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), BASIC_BLOCK (NUM_FIXED_BLOCKS),
+ EDGE_FALLTHRU);
/* Traverse the basic block array placing edges. */
FOR_EACH_BB (bb)
fallthru = false;
break;
case GIMPLE_RETURN:
- make_edge (bb, EXIT_BLOCK_PTR, 0);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
fallthru = false;
break;
case GIMPLE_COND:
/* BUILTIN_RETURN is really a return statement. */
if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
- make_edge (bb, EXIT_BLOCK_PTR, 0), fallthru = false;
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0),
+ fallthru = false;
/* Some calls are known not to return. */
else
fallthru = !(gimple_call_flags (last) & ECF_NORETURN);
if (!single_pred_p (b))
return false;
- if (b == EXIT_BLOCK_PTR)
+ if (b == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
/* If A ends by a statement causing exceptions or something similar, we
edge e;
edge_iterator ei;
- if (ENTRY_BLOCK_PTR->il.gimple.seq || ENTRY_BLOCK_PTR->il.gimple.phi_nodes)
+ if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
+ || ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
{
error ("ENTRY_BLOCK has IL associated with it");
err = 1;
}
- if (EXIT_BLOCK_PTR->il.gimple.seq || EXIT_BLOCK_PTR->il.gimple.phi_nodes)
+ if (EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
+ || EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
{
error ("EXIT_BLOCK has IL associated with it");
err = 1;
}
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
if (e->flags & EDGE_FALLTHRU)
{
error ("fallthru to exit from bb %d", e->src->index);
error ("wrong outgoing edge flags at end of bb %d", bb->index);
err = 1;
}
- if (single_succ (bb) != EXIT_BLOCK_PTR)
+ if (single_succ (bb) != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
error ("return edge does not point to exit in bb %d",
bb->index);
if (e->flags & EDGE_EH)
return redirect_eh_edge (e, dest);
- if (e->src != ENTRY_BLOCK_PTR)
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
ret = gimple_try_redirect_by_replacing_jump (e, dest);
if (ret)
gimple_seq phis = phi_nodes (bb);
gimple phi, stmt, copy;
- new_bb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
+ new_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
/* Copy the PHI nodes. We ignore PHI node arguments here because
the incoming edges have not been setup yet. */
FIXME, this is silly. The CFG ought to become a parameter to
these helpers. */
push_cfun (dest_cfun);
- make_edge (ENTRY_BLOCK_PTR, entry_bb, EDGE_FALLTHRU);
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), entry_bb, EDGE_FALLTHRU);
if (exit_bb)
- make_edge (exit_bb, EXIT_BLOCK_PTR, 0);
+ make_edge (exit_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
pop_cfun ();
/* Back in the original function, the SESE region has disappeared,
{
basic_block bb;
- bb = ENTRY_BLOCK_PTR;
+ bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
if (bb && bb->loop_father)
print_loop_and_siblings (file, bb->loop_father, 0, verbosity);
}
if (! blocks)
check_last_block = true;
else
- check_last_block = bitmap_bit_p (blocks, EXIT_BLOCK_PTR->prev_bb->index);
+ check_last_block = bitmap_bit_p (blocks,
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);
/* In the last basic block, before epilogue generation, there will be
a fallthru edge to EXIT. Special care is required if the last insn
Handle this by adding a dummy instruction in a new last basic block. */
if (check_last_block)
{
- basic_block bb = EXIT_BLOCK_PTR->prev_bb;
+ basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
gimple t = NULL;
{
edge e;
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
if (e)
{
gsi_insert_on_edge (e, gimple_build_nop ());
#ifdef ENABLE_CHECKING
if (stmt == last_stmt)
{
- e = find_edge (bb, EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (e == NULL);
}
#endif
if (e)
blocks_split++;
}
- make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}
gsi_prev (&gsi);
}
}
/* No updating is needed for edges to exit. */
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
if (cfgcleanup_altered_bbs)
bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
{
FOR_EACH_EDGE (f, ei, bb->succs)
{
- if (f->dest != EXIT_BLOCK_PTR)
+ if (f->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_set_bit (df, f->dest->index);
}
}
gimple_find_edge_insert_loc. */
else if ((!single_pred_p (e->dest)
|| !gimple_seq_empty_p (phi_nodes (e->dest))
- || e->dest == EXIT_BLOCK_PTR)
- && e->src != ENTRY_BLOCK_PTR
+ || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& !(e->flags & EDGE_ABNORMAL))
{
gimple_stmt_iterator gsi;
/* If we have a path to EXIT, then we do return. */
if (TREE_THIS_VOLATILE (cfun->decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0)
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0)
{
location = UNKNOWN_LOCATION;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
last = last_stmt (e->src);
if ((gimple_code (last) == GIMPLE_RETURN
without returning a value. */
else if (warn_return_type
&& !TREE_NO_WARNING (cfun->decl)
- && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0
+ && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0
&& !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (cfun->decl))))
{
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
gimple last = last_stmt (e->src);
if (gimple_code (last) == GIMPLE_RETURN
count_scale
= GCOV_COMPUTE_SCALE (cgraph_get_node (current_function_decl)->count,
- ENTRY_BLOCK_PTR->count);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count);
- ENTRY_BLOCK_PTR->count = cgraph_get_node (current_function_decl)->count;
- EXIT_BLOCK_PTR->count = apply_scale (EXIT_BLOCK_PTR->count,
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count =
+ cgraph_get_node (current_function_decl)->count;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->count =
+ apply_scale (EXIT_BLOCK_PTR_FOR_FN (cfun)->count,
count_scale);
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
e->count = apply_scale (e->count, count_scale);
FOR_EACH_BB (bb)
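A sketch of the fixed-point helpers this scaling hunk leans on (shapes as in
basic-block.h, shown only for orientation; the real definitions may differ in
detail):

  /* Scale factor, in units of REG_BR_PROB_BASE, mapping DEN-based
     counts onto NUM-based counts; RDIV is rounding division.  */
  #define GCOV_COMPUTE_SCALE(num, den) \
    ((den) ? RDIV ((num) * REG_BR_PROB_BASE, (den)) : REG_BR_PROB_BASE)

  /* apply_scale then cancels the fixed-point base:
     apply_scale (count, scale) == RDIV (count * scale, REG_BR_PROB_BASE).  */

After the assignments above, the entry count matches the cgraph node's count
and the remaining counts are rescaled proportionally.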
Otherwise, BB must have PHI nodes. */
|| gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
/* BB may not be a predecessor of EXIT_BLOCK_PTR. */
- || single_succ (bb) == EXIT_BLOCK_PTR
+ || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
/* Nor should this be an infinite loop. */
|| single_succ (bb) == bb
/* BB may not have an abnormal outgoing edge. */
|| (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
return false;
- gcc_checking_assert (bb != ENTRY_BLOCK_PTR);
+ gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));
locus = single_succ_edge (bb)->goto_locus;
edge e;
FOR_EACH_EDGE (e, ei, bb->preds)
- if (e->src == ENTRY_BLOCK_PTR || (e->flags & EDGE_EH))
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
return false;
/* If goto_locus of any of the edges differs, prevent removing
the forwarder block for -O0. */
static void
update_parameter_components (void)
{
- edge entry_edge = single_succ_edge (ENTRY_BLOCK_PTR);
+ edge entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
tree parm;
for (parm = DECL_ARGUMENTS (cfun->decl); parm ; parm = DECL_CHAIN (parm))
unsigned int visited_count = 0;
gcc_assert (loop->num_nodes);
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
blocks = XCNEWVEC (basic_block, loop->num_nodes);
visited = BITMAP_ALLOC (NULL);
if (SSA_NAME_IS_DEFAULT_DEF (name)
&& TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL
&& id->entry_bb == NULL
- && single_succ_p (ENTRY_BLOCK_PTR))
+ && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
{
tree vexpr = make_node (DEBUG_EXPR_DECL);
gimple def_temp;
DECL_ARTIFICIAL (vexpr) = 1;
TREE_TYPE (vexpr) = TREE_TYPE (name);
DECL_MODE (vexpr) = DECL_MODE (SSA_NAME_VAR (name));
- gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
return vexpr;
}
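This hunk and several below share one idiom: the ENTRY block has a single
successor edge to the first real basic block, so
single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)) names that block, and
gsi_after_labels positions past its leading labels. A minimal sketch, where
STMT is a hypothetical statement to place on function entry:

  /* Insert STMT (hypothetical) at the very start of the function body,
     assuming the entry block still has its single fallthru successor.  */
  gcc_assert (single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
  gimple_stmt_iterator gsi
    = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);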
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)
&& (!SSA_NAME_VAR (name)
|| TREE_CODE (SSA_NAME_VAR (name)) != PARM_DECL)
- && (id->entry_bb != EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
+ && (id->entry_bb != EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ 0)->dest
|| EDGE_COUNT (id->entry_bb->preds) != 1))
{
gimple_stmt_iterator gsi = gsi_last_bb (id->entry_bb);
/* Return edges do get a FALLTHRU flag when they get inlined. */
if (old_edge->dest->index == EXIT_BLOCK && !old_edge->flags
- && old_edge->dest->aux != EXIT_BLOCK_PTR)
+ && old_edge->dest->aux != EXIT_BLOCK_PTR_FOR_FN (cfun))
flags |= EDGE_FALLTHRU;
new_edge = make_edge (new_bb, (basic_block) old_edge->dest->aux, flags);
new_edge->count = apply_scale (old_edge->count, count_scale);
if (!DECL_RESULT (new_fndecl))
DECL_RESULT (new_fndecl) = DECL_RESULT (callee_fndecl);
- if (ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count)
+ if (ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count)
count_scale
= GCOV_COMPUTE_SCALE (count,
- ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count);
+ ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count);
else
count_scale = REG_BR_PROB_BASE;
init_empty_tree_cfg ();
profile_status_for_function (cfun) = profile_status_for_function (src_cfun);
- ENTRY_BLOCK_PTR->count =
- (ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count * count_scale /
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count =
+ (ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count * count_scale /
REG_BR_PROB_BASE);
- ENTRY_BLOCK_PTR->frequency
- = ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->frequency;
- EXIT_BLOCK_PTR->count =
- (EXIT_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count * count_scale /
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency
+ = ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->frequency;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->count =
+ (EXIT_BLOCK_PTR_FOR_FN (src_cfun)->count * count_scale /
REG_BR_PROB_BASE);
- EXIT_BLOCK_PTR->frequency =
- EXIT_BLOCK_PTR_FOR_FUNCTION (src_cfun)->frequency;
+ EXIT_BLOCK_PTR_FOR_FN (cfun)->frequency =
+ EXIT_BLOCK_PTR_FOR_FN (src_cfun)->frequency;
if (src_cfun->eh)
init_eh_for_function ();
before inlining, using the guessed edge frequencies, so that we don't
end up with a 0-count inline body which can confuse downstream
optimizations such as function splitting. */
- if (!ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count && count)
+ if (!ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count && count)
{
/* Apply the larger of the call bb count and the total incoming
call edge count to the callee. */
freqs_to_counts (id->src_node, count > in_count ? count : in_count);
}
- if (ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count)
+ if (ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count)
count_scale
= GCOV_COMPUTE_SCALE (count,
- ENTRY_BLOCK_PTR_FOR_FUNCTION (src_cfun)->count);
+ ENTRY_BLOCK_PTR_FOR_FN (src_cfun)->count);
else
count_scale = REG_BR_PROB_BASE;
incoming_count = apply_scale (incoming_count, count_scale);
incoming_frequency
= apply_scale ((gcov_type)incoming_frequency, frequency_scale);
- ENTRY_BLOCK_PTR->count = incoming_count;
- ENTRY_BLOCK_PTR->frequency = incoming_frequency;
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = incoming_count;
+ ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency = incoming_frequency;
}
/* Must have a CFG here at this point. */
- gcc_assert (ENTRY_BLOCK_PTR_FOR_FUNCTION
+ gcc_assert (ENTRY_BLOCK_PTR_FOR_FN
(DECL_STRUCT_FUNCTION (callee_fndecl)));
cfun_to_copy = id->src_cfun = DECL_STRUCT_FUNCTION (callee_fndecl);
- ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun_to_copy)->aux = entry_block_map;
- EXIT_BLOCK_PTR_FOR_FUNCTION (cfun_to_copy)->aux = exit_block_map;
- entry_block_map->aux = ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun_to_copy);
- exit_block_map->aux = EXIT_BLOCK_PTR_FOR_FUNCTION (cfun_to_copy);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun_to_copy)->aux = entry_block_map;
+ EXIT_BLOCK_PTR_FOR_FN (cfun_to_copy)->aux = exit_block_map;
+ entry_block_map->aux = ENTRY_BLOCK_PTR_FOR_FN (cfun_to_copy);
+ exit_block_map->aux = EXIT_BLOCK_PTR_FOR_FN (cfun_to_copy);
/* Duplicate any exception-handling regions. */
if (cfun->eh)
tree body;
/* If this body has a CFG, walk CFG and copy. */
- gcc_assert (ENTRY_BLOCK_PTR_FOR_FUNCTION (DECL_STRUCT_FUNCTION (fndecl)));
+ gcc_assert (ENTRY_BLOCK_PTR_FOR_FN (DECL_STRUCT_FUNCTION (fndecl)));
body = copy_cfg_body (id, count, frequency_scale, entry_block_map, exit_block_map,
new_entry);
copy_debug_stmts (id);
/* Delete all unreachable basic blocks. */
- for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
+ for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
+ b != EXIT_BLOCK_PTR_FOR_FN (cfun); b = next_bb)
{
next_bb = b->next_bb;
id.transform_parameter = false;
id.transform_lang_insert_block = NULL;
- old_entry_block = ENTRY_BLOCK_PTR_FOR_FUNCTION
+ old_entry_block = ENTRY_BLOCK_PTR_FOR_FN
(DECL_STRUCT_FUNCTION (old_decl));
DECL_RESULT (new_decl) = DECL_RESULT (old_decl);
DECL_ARGUMENTS (new_decl) = DECL_ARGUMENTS (old_decl);
/* Copy the Function's body. */
copy_body (&id, old_entry_block->count, REG_BR_PROB_BASE,
- ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, new_entry);
+ ENTRY_BLOCK_PTR_FOR_FN (cfun), EXIT_BLOCK_PTR_FOR_FN (cfun),
+ new_entry);
/* Renumber the lexical scoping (non-code) blocks consecutively. */
number_blocks (new_decl);
/* We want to create the BB unconditionally, so that the addition of
debug stmts doesn't affect BB count, which may in the end cause
codegen differences. */
- bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
+ bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
while (init_stmts.length ())
insert_init_stmt (&id, bb, init_stmts.pop ());
update_clone_info (&id);
struct cgraph_edge *e;
rebuild_frequencies ();
- new_version_node->count = ENTRY_BLOCK_PTR->count;
+ new_version_node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
for (e = new_version_node->callees; e; e = e->next_callee)
{
basic_block bb = gimple_bb (e->call_stmt);
def = info->current_def;
if (!def)
{
- if (TREE_CODE (var) == PARM_DECL && single_succ_p (ENTRY_BLOCK_PTR))
+ if (TREE_CODE (var) == PARM_DECL
+ && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
{
gimple_stmt_iterator gsi
- = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
int lim;
/* Search a few source bind stmts at the start of first bb to
see if a DEBUG_EXPR_DECL can't be reused. */
DECL_ARTIFICIAL (def) = 1;
TREE_TYPE (def) = TREE_TYPE (var);
DECL_MODE (def) = DECL_MODE (var);
- gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ gsi =
+ gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
}
update = true;
bind stmts, but there wouldn't be a PC to bind
them to either, so avoid diverging the CFG. */
if (ef && single_pred_p (ef->dest)
- && ef->dest != EXIT_BLOCK_PTR)
+ && ef->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* If there were PHI nodes in the node, we'd
have to make sure the value we're binding
insert_phi_nodes (dfs);
/* 4- Rename all the blocks. */
- rewrite_blocks (ENTRY_BLOCK_PTR, REWRITE_ALL);
+ rewrite_blocks (ENTRY_BLOCK_PTR_FOR_FN (cfun), REWRITE_ALL);
/* Free allocated memory. */
FOR_EACH_BB (bb)
common dominator of all the definition blocks. */
entry = nearest_common_dominator_for_set (CDI_DOMINATORS,
db->def_blocks);
- if (entry != ENTRY_BLOCK_PTR)
+ if (entry != ENTRY_BLOCK_PTR_FOR_FN (cfun))
EXECUTE_IF_SET_IN_BITMAP (idf, 0, i, bi)
if (BASIC_BLOCK (i) != entry
&& dominated_by_p (CDI_DOMINATORS, BASIC_BLOCK (i), entry))
be possible to determine the nearest block that had a
definition for each of the symbols that are marked for
updating. For now this seems more work than it's worth. */
- start_bb = ENTRY_BLOCK_PTR;
+ start_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
/* Traverse the CFG looking for existing definitions and uses of
symbols in SSA operands. Mark interesting blocks and
/* Insertion of PHI nodes may have added blocks to the region.
We need to re-compute START_BB to include the newly added
blocks. */
- if (start_bb != ENTRY_BLOCK_PTR)
+ if (start_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
start_bb = nearest_common_dominator_for_set (CDI_DOMINATORS,
blocks_to_update);
}
elim_graph g = new_elim_graph (sa->map->num_partitions);
g->map = sa->map;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb,
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
if (!gimple_seq_empty_p (phi_nodes (bb)))
{
edge e;
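FOR_BB_BETWEEN, as used in the new lines above, is a half-open walk over the
block chain (per its basic-block.h definition); roughly:

  /* Visits FROM up to, but not including, TO via the DIR link; here
     that means every real block, skipping the artificial exit block.  */
  for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
       bb != EXIT_BLOCK_PTR_FOR_FN (cfun);
       bb = bb->next_bb)
    /* ... body ... */;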
stmt1: __gcov_indirect_call_profiler_v2 (profile_id,
&current_function_decl)
*/
- gsi = gsi_after_labels (split_edge (single_succ_edge (ENTRY_BLOCK_PTR)));
+ gsi = gsi_after_labels (split_edge (single_succ_edge
+ (ENTRY_BLOCK_PTR_FOR_FN (cfun))));
cur_func = force_gimple_operand_gsi (&gsi,
build_addr (current_function_decl,
block_before_loop (loop_p loop)
{
edge preheader = loop_preheader_edge (loop);
- return (preheader ? preheader->src : ENTRY_BLOCK_PTR);
+ return (preheader ? preheader->src : ENTRY_BLOCK_PTR_FOR_FN (cfun));
}
/* Analyze all the parameters of the chrec that were left under a
seq = gsi_seq (gsi);
if (seq)
- gsi_insert_seq_on_edge_immediate (single_succ_edge (ENTRY_BLOCK_PTR), seq);
+ gsi_insert_seq_on_edge_immediate (single_succ_edge
+ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
}
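The _immediate variant used here commits the insertion right away instead of
queuing it for gsi_commit_edge_inserts; a usage sketch (SEQ assumed built by
the surrounding code):

  /* Splits the edge when a new block is required and returns that
     block, otherwise returns NULL.  */
  basic_block new_bb
    = gsi_insert_seq_on_edge_immediate (single_succ_edge
                                        (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
                                        seq);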
/* The "main" function of intraprocedural SRA passes. Runs the analysis and if
basic_block bb;
queue.create (last_basic_block_for_function (cfun));
- queue.quick_push (ENTRY_BLOCK_PTR);
+ queue.quick_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
FOR_EACH_BB (bb)
{
queue.quick_push (bb);
{
int succ_idx = e->dest->index * func_param_count + i;
- if (e->src == EXIT_BLOCK_PTR)
+ if (e->src == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
if (first)
basic_block bb;
fprintf (dump_file, str);
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
fprintf (f, "%4i %i ", bb->index, bitmap_bit_p (final_bbs, bb->index));
- if (bb != EXIT_BLOCK_PTR)
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
int i;
for (i = 0; i < func_param_count; i++)
for (i = 0; i < func_param_count; i++)
{
struct access *repr = representatives[i];
- int idx = ENTRY_BLOCK_PTR->index * func_param_count + i;
+ int idx = ENTRY_BLOCK_PTR_FOR_FN (cfun)->index * func_param_count + i;
if (!repr || no_accesses_p (repr))
continue;
int i, len;
gimple_stmt_iterator *gsip = NULL, gsi;
- if (MAY_HAVE_DEBUG_STMTS && single_succ_p (ENTRY_BLOCK_PTR))
+ if (MAY_HAVE_DEBUG_STMTS && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
{
- gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
gsip = &gsi;
}
len = adjustments.length ();
while (gsi_end_p (*i))
{
dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
- if (dom == NULL || dom == ENTRY_BLOCK_PTR)
+ if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return;
*i = gsi_last_bb (dom);
case 0:
break;
case 1:
- if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR)
+ if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
return NULL_TREE;
break;
default:
v2 = SSA_NAME_VERSION (var);
bitmap_set_bit (used_in_copy, v1);
bitmap_set_bit (used_in_copy, v2);
- cost = coalesce_cost_bb (EXIT_BLOCK_PTR);
+ cost = coalesce_cost_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
add_coalesce (cl, v1, v2, cost);
}
}
unsigned edge_number;
bool skipped = false;
- gcc_assert (bb != EXIT_BLOCK_PTR);
+ gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
- if (bb == ENTRY_BLOCK_PTR)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return;
EXECUTE_IF_SET_IN_BITMAP (cd->get_edges_dependent_on (bb->index),
containing STMT is control dependent, but only if we haven't
already done so. */
basic_block bb = gimple_bb (stmt);
- if (bb != ENTRY_BLOCK_PTR
+ if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& !bitmap_bit_p (visited_control_parents, bb->index))
mark_control_dependent_edges_necessary (bb, false);
}
if (!bitmap_bit_p (last_stmt_necessary, arg_bb->index))
mark_last_stmt_necessary (arg_bb);
}
- else if (arg_bb != ENTRY_BLOCK_PTR
+ else if (arg_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& !bitmap_bit_p (visited_control_parents,
arg_bb->index))
mark_control_dependent_edges_necessary (arg_bb, true);
fake edges in the dominator tree. */
if (e)
;
- else if (! post_dom_bb || post_dom_bb == EXIT_BLOCK_PTR)
+ else if (! post_dom_bb || post_dom_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
e = EDGE_SUCC (bb, 0);
else
e = forward_edge_to_pdom (EDGE_SUCC (bb, 0), post_dom_bb);
as desired. */
gcc_assert (dom_info_available_p (CDI_DOMINATORS));
- h = get_all_dominated_blocks (CDI_DOMINATORS, single_succ (ENTRY_BLOCK_PTR));
+ h = get_all_dominated_blocks (CDI_DOMINATORS,
+ single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
while (h.length ())
{
find_unreachable_blocks ();
/* Delete all unreachable basic blocks in reverse dominator order. */
- for (bb = EXIT_BLOCK_PTR->prev_bb; bb != ENTRY_BLOCK_PTR; bb = prev_bb)
+ for (bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
+ bb != ENTRY_BLOCK_PTR_FOR_FN (cfun); bb = prev_bb)
{
prev_bb = bb->prev_bb;
while (single_succ_p (bb)
&& (single_succ_edge (bb)->flags & EDGE_EH) == 0)
bb = single_succ (bb);
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
if ((unsigned) bb->index != i)
bitmap_set_bit (need_eh_cleanup, bb->index);
phase in dominator order. Presumably this is because walking
in dominator order leaves fewer PHIs for later examination
by the worklist phase. */
- eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);
+ eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ interesting_names);
/* Second phase. Eliminate second order degenerate PHIs as well
as trivial copies or constant initializations identified by
FOR_EACH_EDGE (e, ei, bb->preds)
{
pred_bb = e->src;
- if (pred_bb == ENTRY_BLOCK_PTR)
+ if (pred_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
continue;
/* TMP holds the variables live on entry to BB that aren't defined in the
predecessor block; these should be the live-on-entry vars to PRED.
bitmap_set_bit (&live->liveout[def_bb->index], p);
}
else
- def_bb = ENTRY_BLOCK_PTR;
+ def_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
/* Visit each use of SSA_NAME and if it isn't in the same block as the def,
add it to the list of live on entry blocks. */
defined in that block, or whether it's live on entry. */
int index = PHI_ARG_INDEX_FROM_USE (use);
edge e = gimple_phi_arg_edge (use_stmt, index);
- if (e->src != ENTRY_BLOCK_PTR)
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
if (e->src != def_bb)
add_block = e->src;
if (p == NO_PARTITION)
continue;
e = gimple_phi_arg_edge (phi, i);
- if (e->src != ENTRY_BLOCK_PTR)
+ if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
bitmap_set_bit (&liveinfo->liveout[e->src->index], p);
}
}
/* Add each successor's live on entry to this block's live on exit. */
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR)
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
bitmap_ior_into (&liveinfo->liveout[bb->index],
live_on_entry (liveinfo, e->dest));
}
/* Check for live on entry partitions and report those with a DEF in
the program. This will typically mean an optimization has done
something wrong. */
- bb = ENTRY_BLOCK_PTR;
+ bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
num = 0;
FOR_EACH_EDGE (e, ei, bb->succs)
{
int entry_block = e->dest->index;
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
for (i = 0; i < (unsigned)num_var_partitions (map); i++)
{
live_on_entry (tree_live_info_p live, basic_block bb)
{
gcc_checking_assert (live->livein
- && bb != ENTRY_BLOCK_PTR
- && bb != EXIT_BLOCK_PTR);
+ && bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
return &live->livein[bb->index];
}
live_on_exit (tree_live_info_p live, basic_block bb)
{
gcc_checking_assert (live->liveout
- && bb != ENTRY_BLOCK_PTR
- && bb != EXIT_BLOCK_PTR);
+ && bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
return &live->liveout[bb->index];
}
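Both accessors assert that BB is a real block, so callers are expected to
filter out the artificial blocks first; a hypothetical caller (LIVE assumed
to be the surrounding tree_live_info_p):

  /* Skip the artificial entry/exit blocks, then read BB's bitmap.  */
  if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      bitmap livein = live_on_entry (live, bb);
      /* ... use LIVEIN ... */
    }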
bb = body[i];
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest != EXIT_BLOCK_PTR
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& !flow_bb_inside_loop_p (data->current_loop, e->dest))
find_interesting_uses_outside (data, e);
bool pred_visited;
/* We should have met DEF_BB along the way. */
- gcc_assert (pred != ENTRY_BLOCK_PTR);
+ gcc_assert (pred != ENTRY_BLOCK_PTR_FOR_FN (cfun));
if (pred_loop_depth >= def_loop_depth)
{
/* Now walk the dominators of the loop header and use the entry
guards to refine the estimates. */
for (bb = loop->header;
- bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
+ bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
bb = get_immediate_dominator (CDI_DOMINATORS, bb))
{
if (!single_pred_p (bb))
the number of BBs times the number of loops in degenerate
cases. */
for (bb = loop->header;
- bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
+ bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
bb = get_immediate_dominator (CDI_DOMINATORS, bb))
{
if (!single_pred_p (bb))
FOR_EACH_VEC_ELT (exits, i, exit)
if ((exit->flags & EDGE_ABNORMAL)
- && exit->dest == EXIT_BLOCK_PTR)
+ && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
ret = false;
exits.release ();
return cond;
e = single_pred_edge (e->src);
- if (e->src == ENTRY_BLOCK_PTR)
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
return cond;
}
}
if (!occ)
{
occ = occ_new (bb, NULL);
- insert_bb (occ, ENTRY_BLOCK_PTR, &occ_head);
+ insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
}
occ->bb_has_division = true;
/* Walk the dominator tree in preorder. */
bbs = get_all_dominated_blocks (CDI_DOMINATORS,
- single_succ (ENTRY_BLOCK_PTR));
+ single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
FOR_EACH_VEC_ELT (bbs, i, bb)
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
did_something |= propagate_with_phi (bb, gsi_stmt (gsi), phivn, n);
}
/* At the exit block we anticipate nothing. */
- BB_VISITED (EXIT_BLOCK_PTR) = 1;
+ BB_VISITED (EXIT_BLOCK_PTR_FOR_FN (cfun)) = 1;
changed_blocks = sbitmap_alloc (last_basic_block + 1);
bitmap_ones (changed_blocks);
num_iterations++;
if (dump_file && dump_flags & TDF_DETAILS)
fprintf (dump_file, "Starting insert iteration %d\n", num_iterations);
- new_stuff = insert_aux (ENTRY_BLOCK_PTR);
+ new_stuff = insert_aux (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* Clear the NEW sets before the next iteration. We have already
fully propagated its contents. */
e = get_or_alloc_expr_for_name (name);
add_to_value (get_expr_value_id (e), e);
- bitmap_insert_into_set (TMP_GEN (ENTRY_BLOCK_PTR), e);
- bitmap_value_insert_into_set (AVAIL_OUT (ENTRY_BLOCK_PTR), e);
+ bitmap_insert_into_set (TMP_GEN (ENTRY_BLOCK_PTR_FOR_FN (cfun)), e);
+ bitmap_value_insert_into_set (AVAIL_OUT (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+ e);
}
if (dump_file && (dump_flags & TDF_DETAILS))
{
- print_bitmap_set (dump_file, TMP_GEN (ENTRY_BLOCK_PTR),
+ print_bitmap_set (dump_file, TMP_GEN (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
"tmp_gen", ENTRY_BLOCK);
- print_bitmap_set (dump_file, AVAIL_OUT (ENTRY_BLOCK_PTR),
+ print_bitmap_set (dump_file, AVAIL_OUT (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
"avail_out", ENTRY_BLOCK);
}
/* Seed the algorithm by putting the dominator children of the entry
block on the worklist. */
- for (son = first_dom_son (CDI_DOMINATORS, ENTRY_BLOCK_PTR);
+ for (son = first_dom_son (CDI_DOMINATORS, ENTRY_BLOCK_PTR_FOR_FN (cfun));
son;
son = next_dom_son (CDI_DOMINATORS, son))
worklist[sp++] = son;
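The seeding above pairs with the usual preorder walk over the dominator tree;
sketched here, with worklist, sp, and son assumed declared as in the
surrounding function:

  while (sp)
    {
      basic_block block = worklist[--sp];
      /* ... process BLOCK ... */
      for (son = first_dom_son (CDI_DOMINATORS, block);
           son;
           son = next_dom_son (CDI_DOMINATORS, son))
        worklist[sp++] = son;
    }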
{
bool head = false;
- gcc_assert (bb != ENTRY_BLOCK_PTR && bb != EXIT_BLOCK_PTR);
+ gcc_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (!bitmap_bit_p (bb_in_list, bb->index));
if (cfg_blocks_empty_p ())
add_control_edge (edge e)
{
basic_block bb = e->dest;
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
/* If the edge had already been executed, skip it. */
gimple_stmt_iterator gsi;
/* There is nothing to do for the exit block. */
- if (block == EXIT_BLOCK_PTR)
+ if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
if (dump_file && (dump_flags & TDF_DETAILS))
/* Seed the algorithm by adding the successors of the entry block to the
edge worklist. */
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
add_control_edge (e);
}
if ((!op1def || gimple_nop_p (op1def))
&& (!op2def || gimple_nop_p (op2def)))
{
- gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));
+ gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
if (gsi_end_p (gsi))
{
gimple_stmt_iterator gsi2
- = gsi_last_bb (single_succ (ENTRY_BLOCK_PTR));
+ = gsi_last_bb (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
gimple_set_uid (sum,
gsi_end_p (gsi2) ? 1 : gimple_uid (gsi_stmt (gsi2)));
}
static void
do_reassoc (void)
{
- break_up_subtract_bb (ENTRY_BLOCK_PTR);
- reassociate_bb (EXIT_BLOCK_PTR);
+ break_up_subtract_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ reassociate_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
}
/* Initialize the reassociation pass. */
}
/* Short circuit. Nothing dominates the entry block. */
- if (useblock == ENTRY_BLOCK_PTR)
+ if (useblock == ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
BITMAP_FREE (blocks);
return NULL;
memset (&sink_stats, 0, sizeof (sink_stats));
calculate_dominance_info (CDI_DOMINATORS);
calculate_dominance_info (CDI_POST_DOMINATORS);
- sink_code_in_bb (EXIT_BLOCK_PTR);
+ sink_code_in_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
statistics_counter_event (cfun, "Sunk statements", sink_stats.sunk);
free_dominance_info (CDI_POST_DOMINATORS);
remove_fake_exit_edges ();
FOR_EACH_BB (bb)
{
bool always_executed = dominated_by_p (CDI_POST_DOMINATORS,
- single_succ (ENTRY_BLOCK_PTR), bb);
+ single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), bb);
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
static inline basic_block
find_pdom (basic_block block)
{
- if (block == EXIT_BLOCK_PTR)
- return EXIT_BLOCK_PTR;
+ if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ return EXIT_BLOCK_PTR_FOR_FN (cfun);
else
{
basic_block bb
= get_immediate_dominator (CDI_POST_DOMINATORS, block);
if (! bb)
- return EXIT_BLOCK_PTR;
+ return EXIT_BLOCK_PTR_FOR_FN (cfun);
return bb;
}
}
static inline basic_block
find_dom (basic_block block)
{
- if (block == ENTRY_BLOCK_PTR)
- return ENTRY_BLOCK_PTR;
+ if (block == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ return ENTRY_BLOCK_PTR_FOR_FN (cfun);
else
{
basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
if (! bb)
- return ENTRY_BLOCK_PTR;
+ return ENTRY_BLOCK_PTR_FOR_FN (cfun);
return bb;
}
}
cd_bb = find_pdom (cd_bb);
post_dom_check++;
- if (cd_bb == EXIT_BLOCK_PTR || post_dom_check > MAX_POSTDOM_CHECK)
+ if (cd_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || post_dom_check > MAX_POSTDOM_CHECK)
break;
}
cur_cd_chain->pop ();
break;
}
- gcc_assert (src != ENTRY_BLOCK_PTR);
+ gcc_assert (src != ENTRY_BLOCK_PTR_FOR_FN (cfun));
if (! bitmap_bit_p (visited, src->index))
{
gcc_assert (is_gimple_call (stmt));
- first = single_succ (ENTRY_BLOCK_PTR);
+ first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
/* Remove the code after call_gsi that will become unreachable. The
possibly unreachable code in other blocks is removed later in
/* Number of executions of function has reduced by the tailcall. */
e = single_succ_edge (gsi_bb (t->call_gsi));
- decrease_profile (EXIT_BLOCK_PTR, e->count, EDGE_FREQUENCY (e));
- decrease_profile (ENTRY_BLOCK_PTR, e->count, EDGE_FREQUENCY (e));
- if (e->dest != EXIT_BLOCK_PTR)
+ decrease_profile (EXIT_BLOCK_PTR_FOR_FN (cfun), e->count,
+ EDGE_FREQUENCY (e));
+ decrease_profile (ENTRY_BLOCK_PTR_FOR_FN (cfun), e->count,
+ EDGE_FREQUENCY (e));
+ if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
decrease_profile (e->dest, e->count, EDGE_FREQUENCY (e));
/* Replace the call by a jump to the start of function. */
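Eliminating the tail call removes one trip through the function's prologue
and epilogue, so the entry block, the exit block, and the old return path
each give up edge E's count. A hypothetical shape of decrease_profile, for
orientation only (the real helper lives in tree-tailcall.c and also adjusts
the outgoing edge):

  static void
  decrease_profile (basic_block bb, gcov_type count, int frequency)
  {
    /* Lower the block's profile, clamped at zero.  */
    bb->count -= count;
    if (bb->count < 0)
      bb->count = 0;
    bb->frequency -= frequency;
    if (bb->frequency < 0)
      bb->frequency = 0;
  }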
bool phis_constructed = false;
struct tailcall *tailcalls = NULL, *act, *next;
bool changed = false;
- basic_block first = single_succ (ENTRY_BLOCK_PTR);
+ basic_block first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
tree param;
gimple stmt;
edge_iterator ei;
if (opt_tailcalls)
opt_tailcalls = suitable_for_tail_call_opt_p ();
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
/* Only traverse the normal exits, i.e. those that end with return
statement. */
or if there are existing degenerate PHI nodes. */
if (!single_pred_p (first)
|| !gimple_seq_empty_p (phi_nodes (first)))
- first = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
+ first =
+ split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
/* Copy the args if needed. */
for (param = DECL_ARGUMENTS (current_function_decl);
if (a_acc || m_acc)
{
/* Modify the remaining return statements. */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
stmt = last_stmt (e->src);
tree ret_addr, builtin_decl;
gimple g;
- succ_bb = single_succ (ENTRY_BLOCK_PTR);
+ succ_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
gsi = gsi_after_labels (succ_bb);
builtin_decl = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
edge_iterator ei;
/* Find all function exits. */
- exit_bb = EXIT_BLOCK_PTR;
+ exit_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
FOR_EACH_EDGE (e, ei, exit_bb->preds)
{
gsi = gsi_last_bb (e->src);
int sp;
/* Initialize entry block. */
- VTI (ENTRY_BLOCK_PTR)->visited = true;
- VTI (ENTRY_BLOCK_PTR)->in.stack_adjust = INCOMING_FRAME_SP_OFFSET;
- VTI (ENTRY_BLOCK_PTR)->out.stack_adjust = INCOMING_FRAME_SP_OFFSET;
+ VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->visited = true;
+ VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->in.stack_adjust =
+ INCOMING_FRAME_SP_OFFSET;
+ VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->out.stack_adjust =
+ INCOMING_FRAME_SP_OFFSET;
/* Allocate stack for back-tracking up CFG. */
stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Push the first edge on to the stack. */
- stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs);
+ stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
while (sp)
{
VTI (dest)->visited = true;
VTI (dest)->in.stack_adjust = offset = VTI (src)->out.stack_adjust;
- if (dest != EXIT_BLOCK_PTR)
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
for (insn = BB_HEAD (dest);
insn != NEXT_INSN (BB_END (dest));
insn = NEXT_INSN (insn))
{
FOR_EACH_EDGE (e, ei, bb->succs)
{
- if (e->dest == EXIT_BLOCK_PTR)
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
if (bitmap_bit_p (visited, e->dest->index))
if (!track_loc_p (incoming, parm, offset, false, &mode, &offset))
return;
- out = &VTI (ENTRY_BLOCK_PTR)->out;
+ out = &VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->out;
dv = dv_from_decl (parm);
for (;;)
{
edge e;
- if (bb->next_bb == EXIT_BLOCK_PTR
+ if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
|| ! single_pred_p (bb->next_bb))
break;
e = find_edge (bb, bb->next_bb);
}
hard_frame_pointer_adjustment = -1;
- VTI (ENTRY_BLOCK_PTR)->flooded = true;
+ VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->flooded = true;
cfa_base_rtx = NULL_RTX;
return true;
}
align the hot section and write out the hot section label.
But if the current function is a thunk, we do not have a CFG. */
if (!cfun->is_thunk
- && BB_PARTITION (ENTRY_BLOCK_PTR->next_bb) == BB_COLD_PARTITION)
+ && BB_PARTITION (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
+ == BB_COLD_PARTITION)
{
switch_to_section (text_section);
assemble_align (DECL_ALIGN (decl));