/* Instruction scheduling pass. Selective scheduler and pipeliner.
- Copyright (C) 2006-2017 Free Software Foundation, Inc.
+ Copyright (C) 2006-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "tm_p.h"
#include "regs.h"
#include "cfgbuild.h"
+#include "cfgcleanup.h"
#include "insn-config.h"
#include "insn-attr.h"
-#include "params.h"
#include "target.h"
#include "sched-int.h"
#include "rtlhooks-def.h"
#include "sel-sched-dump.h"
#include "sel-sched.h"
#include "dbgcnt.h"
+#include "function-abi.h"
/* Implementation of selective scheduling approach.
The below implementation follows the original approach with the following
that the whole set is not computed yet. */
HARD_REG_SET regs_for_rename[FIRST_PSEUDO_REGISTER];
- /* For every mode, this stores registers not available due to
- call clobbering. */
- HARD_REG_SET regs_for_call_clobbered[NUM_MACHINE_MODES];
-
/* All registers that are used or call used. */
HARD_REG_SET regs_ever_used;
/* These are *available* for renaming. */
HARD_REG_SET available_for_renaming;
- /* Whether this code motion path crosses a call. */
- bool crosses_call;
+ /* The set of ABIs used by calls that the code motion path crosses. */
+ unsigned int crossed_call_abis : NUM_ABI_IDS;
};
/* A global structure that contains the needed information about harg
/* Pointer to the list of original insns definitions. */
def_list_t *original_insns;
- /* True if a code motion path contains a CALL insn. */
- bool crosses_call;
+ /* The set of ABIs used by calls that the code motion path crosses. */
+ unsigned int crossed_call_abis : NUM_ABI_IDS;
};
typedef struct fur_static_params *fur_static_params_p;
/* Set of basic blocks that are forced to start new ebbs. This is a subset
of all the ebb heads. */
-static bitmap_head _forced_ebb_heads;
-bitmap_head *forced_ebb_heads = &_forced_ebb_heads;
+bitmap forced_ebb_heads;
/* Blocks that need to be rescheduled after pipelining. */
bitmap blocks_to_reschedule = NULL;
int cur_reg;
CLEAR_HARD_REG_SET (sel_hrd.regs_for_mode[mode]);
- CLEAR_HARD_REG_SET (sel_hrd.regs_for_call_clobbered[mode]);
for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
{
if (i >= 0)
continue;
- if (targetm.hard_regno_call_part_clobbered (cur_reg, mode))
- SET_HARD_REG_BIT (sel_hrd.regs_for_call_clobbered[mode],
- cur_reg);
-
/* If the CUR_REG passed all the checks above,
then it's ok. */
SET_HARD_REG_BIT (sel_hrd.regs_for_mode[mode], cur_reg);
CLEAR_HARD_REG_SET (sel_hrd.regs_ever_used);
for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
- if (df_regs_ever_live_p (cur_reg) || call_used_regs[cur_reg])
+ if (df_regs_ever_live_p (cur_reg)
+ || crtl->abi->clobbers_full_reg_p (cur_reg))
SET_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg);
/* Initialize registers that are valid based on mode when this is
SET_HARD_REG_SET (reg_rename_p->unavailable_hard_regs);
/* Give a chance for original register, if it isn't in used_regs. */
- if (!def->crosses_call)
+ if (!def->crossed_call_abis)
CLEAR_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno);
return;
The HARD_REGNO_RENAME_OK covers other cases in condition below. */
if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG)
&& REGNO_REG_SET_P (used_regs, FIRST_STACK_REG))
- IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
- sel_hrd.stack_regs);
+ reg_rename_p->unavailable_hard_regs |= sel_hrd.stack_regs;
#endif
- /* If there's a call on this path, make regs from call_used_reg_set
- unavailable. */
- if (def->crosses_call)
- IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
- call_used_reg_set);
+ mode = GET_MODE (orig_dest);
+
+ /* If there's a call on this path, make regs from full_reg_clobbers
+ unavailable.
+
+ ??? It would be better to track the set of clobbered registers
+ directly, but that would be quite expensive in a def_t. */
+ if (def->crossed_call_abis)
+ reg_rename_p->unavailable_hard_regs
+ |= call_clobbers_in_region (def->crossed_call_abis,
+ reg_class_contents[ALL_REGS], mode);
- /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and crosses_call,
- but not register classes. */
+ /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and
+ crossed_call_abis, but not register classes. */
if (!reload_completed)
return;
/* Leave regs as 'available' only from the current
register class. */
- COPY_HARD_REG_SET (reg_rename_p->available_for_renaming,
- reg_class_contents[cl]);
-
- mode = GET_MODE (orig_dest);
+ reg_rename_p->available_for_renaming = reg_class_contents[cl];
/* Leave only registers available for this mode. */
if (!sel_hrd.regs_for_mode_ok[mode])
init_regs_for_mode (mode);
- AND_HARD_REG_SET (reg_rename_p->available_for_renaming,
- sel_hrd.regs_for_mode[mode]);
-
- /* Exclude registers that are partially call clobbered. */
- if (def->crosses_call
- && !targetm.hard_regno_call_part_clobbered (regno, mode))
- AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
- sel_hrd.regs_for_call_clobbered[mode]);
+ reg_rename_p->available_for_renaming &= sel_hrd.regs_for_mode[mode];
/* Leave only those that are ok to rename. */
EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
cur_reg);
}
- AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
- reg_rename_p->unavailable_hard_regs);
+ reg_rename_p->available_for_renaming &= ~reg_rename_p->unavailable_hard_regs;
/* Regno is always ok from the renaming part of view, but it really
could be in *unavailable_hard_regs already, so set it here instead
/* Don't let register cross a call if it doesn't already
cross one. This condition is written in accordance with
that in sched-deps.c sched_analyze_reg(). */
- if (!reg_rename_p->crosses_call
+ if (!reg_rename_p->crossed_call_abis
|| REG_N_CALLS_CROSSED (orig_regno) > 0)
return gen_rtx_REG (mode, orig_regno);
}
max_regno = max_reg_num ();
maybe_extend_reg_info_p ();
- REG_N_CALLS_CROSSED (REGNO (new_reg)) = reg_rename_p->crosses_call ? 1 : 0;
+ REG_N_CALLS_CROSSED (REGNO (new_reg))
+ = reg_rename_p->crossed_call_abis ? 1 : 0;
return new_reg;
}
as well. */
gcc_assert (scheduled_something_on_previous_fence || !live_available
|| !hard_available
- || (!reload_completed && reg_rename_p->crosses_call
+ || (!reload_completed
+ && reg_rename_p->crossed_call_abis
&& REG_N_CALLS_CROSSED (regno) == 0));
}
/* Join hard registers unavailable due to register class
restrictions and live range intersection. */
- IOR_HARD_REG_SET (hard_regs_used,
- reg_rename_data.unavailable_hard_regs);
+ hard_regs_used |= reg_rename_data.unavailable_hard_regs;
best_reg = choose_best_reg (hard_regs_used, ®_rename_data,
original_insns, is_orig_reg_p);
preprocess_constraints (insn);
alternative_mask prefrred = get_preferred_alternatives (insn);
ira_implicitly_set_insn_hard_regs (&temp, prefrred);
- AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
+ temp &= ~ira_no_alloc_regs;
/* If any implicit clobber registers intersect with regular ones in
through_insn, we have a dependency and thus bail out. */
FOR_EACH_VEC_ELT (sinfo->succs_ok, is, succ)
{
basic_block succ_bb = BLOCK_FOR_INSN (succ);
+ av_set_t av_succ = (is_ineligible_successor (succ, p)
+ ? NULL
+ : BB_AV_SET (succ_bb));
gcc_assert (BB_LV_SET_VALID_P (succ_bb));
- mark_unavailable_targets (av1, BB_AV_SET (succ_bb),
- BB_LV_SET (succ_bb));
+ mark_unavailable_targets (av1, av_succ, BB_LV_SET (succ_bb));
}
/* Finally, check liveness restrictions on paths leaving the region. */
ORIGINAL_INSNS list.
REG_RENAME_P denotes the set of hardware registers that
- can not be used with renaming due to the register class restrictions,
+ cannot be used with renaming due to the register class restrictions,
mode restrictions and other (the register we'll choose should be
compatible class with the original uses, shouldn't be in call_used_regs,
should be HARD_REGNO_RENAME_OK etc).
All the original operations found during the traversal are saved in the
ORIGINAL_INSNS list.
- REG_RENAME_P->CROSSES_CALL is true, if there is a call insn on the path
+ REG_RENAME_P->CROSSED_CALL_ABIS is nonzero if there is a call insn on the path
from INSN to original insn. In this case CALL_USED_REG_SET will be added
to unavailable hard regs at the point original operation is found. */
bitmap_clear (code_motion_visited_blocks);
/* Init parameters for code_motion_path_driver. */
- sparams.crosses_call = false;
+ sparams.crossed_call_abis = 0;
sparams.original_insns = original_insns;
sparams.used_regs = used_regs;
res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
- reg_rename_p->crosses_call |= sparams.crosses_call;
+ reg_rename_p->crossed_call_abis |= sparams.crossed_call_abis;
gcc_assert (res == 1);
gcc_assert (original_insns && *original_insns);
/* If the priority has changed, adjust EXPR_PRIORITY_ADJ accordingly. */
EXPR_PRIORITY_ADJ (expr) = new_priority - EXPR_PRIORITY (expr);
- gcc_assert (EXPR_PRIORITY_ADJ (expr) >= 0);
-
if (sched_verbose >= 4)
sel_print ("sel_target_adjust_priority: insn %d, %d+%d = %d.\n",
INSN_UID (EXPR_INSN_RTX (expr)), EXPR_PRIORITY (expr),
else if (control_flow_insn_p (tmp2_insn) && !control_flow_insn_p (tmp_insn))
return 1;
- /* Prefer an expr with greater priority. */
- if (EXPR_USEFULNESS (tmp) != 0 || EXPR_USEFULNESS (tmp2) != 0)
- {
- int p2 = EXPR_PRIORITY (tmp2) + EXPR_PRIORITY_ADJ (tmp2),
- p1 = EXPR_PRIORITY (tmp) + EXPR_PRIORITY_ADJ (tmp);
+ /* Prefer an expr with non-zero usefulness. */
+ int u1 = EXPR_USEFULNESS (tmp), u2 = EXPR_USEFULNESS (tmp2);
- val = p2 * EXPR_USEFULNESS (tmp2) - p1 * EXPR_USEFULNESS (tmp);
+ if (u1 == 0)
+ {
+ if (u2 == 0)
+ u1 = u2 = 1;
+ else
+ return 1;
}
- else
- val = EXPR_PRIORITY (tmp2) - EXPR_PRIORITY (tmp)
- + EXPR_PRIORITY_ADJ (tmp2) - EXPR_PRIORITY_ADJ (tmp);
+ else if (u2 == 0)
+ return -1;
+
+ /* Prefer an expr with greater priority. */
+ val = (u2 * (EXPR_PRIORITY (tmp2) + EXPR_PRIORITY_ADJ (tmp2))
+ - u1 * (EXPR_PRIORITY (tmp) + EXPR_PRIORITY_ADJ (tmp)));
if (val)
return val;
FOR_EACH_EXPR_1 (expr, si, av_ptr)
{
if (EXPR_SCHED_TIMES (expr)
- >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES))
+ >= param_selsched_max_sched_times)
av_set_iter_remove (&si);
}
}
distinguishing between bookkeeping copies and original insns. */
static int max_uid_before_move_op = 0;
+/* When true, we're always scheduling next insn on the already scheduled code
+ to get the right insn data for the following bundling or other passes. */
+static int force_next_insn = 0;
+
/* Remove from AV_VLIW_P all instructions but next when debug counter
tells us so. Next instruction is fetched from BNDS. */
static void
remove_insns_for_debug (blist_t bnds, av_set_t *av_vliw_p)
{
- if (! dbg_cnt (sel_sched_insn_cnt))
+ if (! dbg_cnt (sel_sched_insn_cnt) || force_next_insn)
/* Leave only the next insn in av_vliw. */
{
av_set_iterator av_it;
bool insn_emitted = false;
rtx cur_reg;
- /* Bail out early when expression can not be renamed at all. */
+ /* Bail out early when expression cannot be renamed at all. */
if (!EXPR_SEPARABLE_P (params->c_expr))
return false;
/* The function is called when original expr is found.
INSN - current insn traversed, EXPR - the corresponding expr found,
- crosses_call and original_insns in STATIC_PARAMS are updated. */
+ crossed_call_abis and original_insns in STATIC_PARAMS are updated. */
static void
fur_orig_expr_found (insn_t insn, expr_t expr ATTRIBUTE_UNUSED,
cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
regset tmp;
if (CALL_P (insn))
- params->crosses_call = true;
+ params->crossed_call_abis |= 1 << insn_callee_abi (insn).id ();
- def_list_add (params->original_insns, insn, params->crosses_call);
+ def_list_add (params->original_insns, insn, params->crossed_call_abis);
/* Mark the registers that do not meet the following condition:
(2) not among the live registers of the point
least one insn in ORIGINAL_INSNS. */
gcc_assert (*sparams->original_insns);
- /* Adjust CROSSES_CALL, since we may have come to this block along
+ /* Adjust CROSSED_CALL_ABIS, since we may have come to this block along
different path. */
- DEF_LIST_DEF (*sparams->original_insns)->crosses_call
- |= sparams->crosses_call;
+ DEF_LIST_DEF (*sparams->original_insns)->crossed_call_abis
+ |= sparams->crossed_call_abis;
}
else
local_params->old_original_insns = *sparams->original_insns;
fur_static_params_p sparams = (fur_static_params_p) static_params;
if (CALL_P (insn))
- sparams->crosses_call = true;
+ sparams->crossed_call_abis |= 1 << insn_callee_abi (insn).id ();
else if (DEBUG_INSN_P (insn))
return true;
{
expr_t expr = NULL;
basic_block bb = BLOCK_FOR_INSN (insn);
- insn_t first_insn, bb_tail, before_first;
+ insn_t first_insn, original_insn, bb_tail, before_first;
bool removed_last_insn = false;
if (sched_verbose >= 6)
/* It is enough to place only heads and tails of visited basic blocks into
the PATH. */
ilist_add (&path, insn);
- first_insn = insn;
+ first_insn = original_insn = insn;
bb_tail = sel_bb_end (bb);
/* Descend the basic block in search of the original expr; this part
{
insn = sel_bb_end (bb);
first_insn = sel_bb_head (bb);
+ if (first_insn != original_insn)
+ first_insn = original_insn;
}
/* Remove bb tail from path. */
&& (flag_sel_sched_pipelining != 0)
&& current_loop_nest != NULL
&& loop_has_exit_edges (current_loop_nest));
- max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME);
+ max_insns_to_rename = param_selsched_insns_to_rename;
max_ws = MAX_WS;
}
memset (reg_rename_tick, 0, sizeof reg_rename_tick);
reg_rename_this_tick = 0;
- bitmap_initialize (forced_ebb_heads, 0);
- bitmap_clear (forced_ebb_heads);
+ forced_ebb_heads = BITMAP_ALLOC (NULL);
setup_nop_vinsn ();
current_copies = BITMAP_ALLOC (NULL);
sel_finish_global_and_expr ();
- bitmap_clear (forced_ebb_heads);
+ BITMAP_FREE (forced_ebb_heads);
free_nop_vinsn ();
if (schedule_p)
sel_sched_region_1 ();
else
- /* Force initialization of INSN_SCHED_CYCLEs for correct bundling. */
- reset_sched_cycles_p = true;
-
+ {
+ /* Schedule while always selecting the next insn so that the correct
+ data is produced for bundling and other later passes. */
+ pipelining_p = false;
+ reset_sched_cycles_p = false;
+ force_next_insn = 1;
+ sel_sched_region_1 ();
+ force_next_insn = 0;
+ }
sel_region_finish (reset_sched_cycles_p);
}
static void
sel_global_init (void)
{
+ /* Remove empty blocks: their presence can break assumptions elsewhere,
+ e.g. the logic to invoke update_liveness_on_insn in sel_region_init. */
+ cleanup_cfg (0);
+
calculate_dominance_info (CDI_DOMINATORS);
alloc_sched_pools ();