/* Variable tracking routines for the GNU compiler.
- Copyright (C) 2002-2015 Free Software Foundation, Inc.
+ Copyright (C) 2002-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "cfghooks.h"
#include "alloc-pool.h"
#include "tree-pass.h"
+#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "cselib.h"
-#include "params.h"
#include "tree-pretty-print.h"
#include "rtl-iter.h"
#include "fibonacci_heap.h"
+#include "print-rtl.h"
+#include "function-abi.h"
typedef fibonacci_heap <long, basic_block_def> bb_heap_t;
typedef fibonacci_node <long, basic_block_def> bb_heap_node_t;
/* Pointer to the BB's information specific to variable tracking pass. */
#define VTI(BB) ((variable_tracking_info *) (BB)->aux)
-/* Macro to access MEM_OFFSET as an HOST_WIDE_INT. Evaluates MEM twice. */
-#define INT_MEM_OFFSET(mem) (MEM_OFFSET_KNOWN_P (mem) ? MEM_OFFSET (mem) : 0)
+/* Return MEM_OFFSET (MEM) as a HOST_WIDE_INT, or 0 if the offset is
+   unknown or not a compile-time constant.  */
+
+static inline HOST_WIDE_INT
+int_mem_offset (const_rtx mem)
+{
+ HOST_WIDE_INT offset;
+ if (MEM_OFFSET_KNOWN_P (mem) && MEM_OFFSET (mem).is_constant (&offset))
+ return offset;
+ return 0;
+}
#if CHECKING_P && (GCC_VERSION >= 2007)
static void dataflow_set_destroy (dataflow_set *);
static bool track_expr_p (tree, bool);
-static bool same_variable_part_p (rtx, tree, HOST_WIDE_INT);
static void add_uses_1 (rtx *, void *);
static void add_stores (rtx, const_rtx, void *);
static bool compute_bb_dataflow (basic_block);
static void emit_notes_in_bb (basic_block, dataflow_set *);
static void vt_emit_notes (void);
-static bool vt_get_decl_and_offset (rtx, tree *, HOST_WIDE_INT *);
static void vt_add_function_parameters (void);
static bool vt_initialize (void);
static void vt_finalize (void);
pointer is often restored via a load-multiple instruction
and so no stack_adjust offset is recorded for it. This means
that the stack offset at the end of the epilogue block is the
- the same as the offset before the epilogue, whereas other paths
+ same as the offset before the epilogue, whereas other paths
to the exit block will have the correct stack_adjust.
It is safe to ignore these differences because (a) we never
or hard_frame_pointer_rtx. */
static inline rtx
-compute_cfa_pointer (HOST_WIDE_INT adjustment)
+compute_cfa_pointer (poly_int64 adjustment)
{
return plus_constant (Pmode, cfa_base_rtx, adjustment + cfa_base_offset);
}
/* Adjustment for hard_frame_pointer_rtx to cfa base reg,
or -1 if the replacement shouldn't be done. */
-static HOST_WIDE_INT hard_frame_pointer_adjustment = -1;
+static poly_int64 hard_frame_pointer_adjustment = -1;
/* Data for adjust_mems callback. */
-struct adjust_mem_data
+class adjust_mem_data
{
+public:
bool store;
machine_mode mem_mode;
HOST_WIDE_INT stack_adjust;
- rtx_expr_list *side_effects;
+ auto_vec<rtx> side_effects;
};
/* Helper for adjust_mems. Return true if X is suitable for
case MULT:
break;
case ASHIFT:
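+ /* If the shift count has a mode, using a narrower mode for the
+    whole expression is only safe when the count itself can be
+    represented in that narrower mode; punt if the required lowpart
+    subreg of the count would be invalid. */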
+ if (GET_MODE (XEXP (x, 1)) != VOIDmode)
+ {
+ machine_mode mode = GET_MODE (subreg);
+ rtx op1 = XEXP (x, 1);
+ machine_mode op1_mode = GET_MODE (op1);
+ if (GET_MODE_PRECISION (as_a <scalar_int_mode> (mode))
+ < GET_MODE_PRECISION (as_a <scalar_int_mode> (op1_mode)))
+ {
+ poly_uint64 byte = subreg_lowpart_offset (mode, op1_mode);
+ if (GET_CODE (op1) == SUBREG || GET_CODE (op1) == CONCAT)
+ {
+ if (!simplify_subreg (mode, op1, op1_mode, byte))
+ return false;
+ }
+ else if (!validate_subreg (mode, op1_mode, op1, byte))
+ return false;
+ }
+ }
iter.substitute (XEXP (x, 0));
break;
default:
/* Transform X into narrower mode MODE from wider mode WMODE. */
static rtx
-use_narrower_mode (rtx x, machine_mode mode, machine_mode wmode)
+use_narrower_mode (rtx x, scalar_int_mode mode, scalar_int_mode wmode)
{
rtx op0, op1;
if (CONSTANT_P (x))
/* Ensure shift amount is not wider than mode. */
if (GET_MODE (op1) == VOIDmode)
op1 = lowpart_subreg (mode, op1, wmode);
- else if (GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (op1)))
+ else if (GET_MODE_PRECISION (mode)
+ < GET_MODE_PRECISION (as_a <scalar_int_mode> (GET_MODE (op1))))
op1 = lowpart_subreg (mode, op1, GET_MODE (op1));
return simplify_gen_binary (ASHIFT, mode, op0, op1);
default:
static rtx
adjust_mems (rtx loc, const_rtx old_rtx, void *data)
{
- struct adjust_mem_data *amd = (struct adjust_mem_data *) data;
+ class adjust_mem_data *amd = (class adjust_mem_data *) data;
rtx mem, addr = loc, tem;
machine_mode mem_mode_save;
bool store_save;
+ scalar_int_mode tem_mode, tem_subreg_mode;
+ poly_int64 size;
switch (GET_CODE (loc))
{
case REG:
return compute_cfa_pointer (amd->stack_adjust);
else if (loc == hard_frame_pointer_rtx
&& frame_pointer_needed
- && hard_frame_pointer_adjustment != -1
+ && maybe_ne (hard_frame_pointer_adjustment, -1)
&& cfa_base_rtx)
return compute_cfa_pointer (hard_frame_pointer_adjustment);
gcc_checking_assert (loc != virtual_incoming_args_rtx);
return mem;
case PRE_INC:
case PRE_DEC:
- addr = gen_rtx_PLUS (GET_MODE (loc), XEXP (loc, 0),
- gen_int_mode (GET_CODE (loc) == PRE_INC
- ? GET_MODE_SIZE (amd->mem_mode)
- : -GET_MODE_SIZE (amd->mem_mode),
- GET_MODE (loc)));
+ size = GET_MODE_SIZE (amd->mem_mode);
+ addr = plus_constant (GET_MODE (loc), XEXP (loc, 0),
+ GET_CODE (loc) == PRE_INC ? size : -size);
+ /* FALLTHRU */
case POST_INC:
case POST_DEC:
if (addr == loc)
addr = XEXP (loc, 0);
gcc_assert (amd->mem_mode != VOIDmode && amd->mem_mode != BLKmode);
addr = simplify_replace_fn_rtx (addr, old_rtx, adjust_mems, data);
- tem = gen_rtx_PLUS (GET_MODE (loc), XEXP (loc, 0),
- gen_int_mode ((GET_CODE (loc) == PRE_INC
- || GET_CODE (loc) == POST_INC)
- ? GET_MODE_SIZE (amd->mem_mode)
- : -GET_MODE_SIZE (amd->mem_mode),
- GET_MODE (loc)));
+ size = GET_MODE_SIZE (amd->mem_mode);
+ tem = plus_constant (GET_MODE (loc), XEXP (loc, 0),
+ (GET_CODE (loc) == PRE_INC
+ || GET_CODE (loc) == POST_INC) ? size : -size);
store_save = amd->store;
amd->store = false;
tem = simplify_replace_fn_rtx (tem, old_rtx, adjust_mems, data);
amd->store = store_save;
- amd->side_effects = alloc_EXPR_LIST (0,
- gen_rtx_SET (XEXP (loc, 0), tem),
- amd->side_effects);
+ amd->side_effects.safe_push (gen_rtx_SET (XEXP (loc, 0), tem));
return addr;
case PRE_MODIFY:
addr = XEXP (loc, 1);
+ /* FALLTHRU */
case POST_MODIFY:
if (addr == loc)
addr = XEXP (loc, 0);
tem = simplify_replace_fn_rtx (XEXP (loc, 1), old_rtx,
adjust_mems, data);
amd->store = store_save;
- amd->side_effects = alloc_EXPR_LIST (0,
- gen_rtx_SET (XEXP (loc, 0), tem),
- amd->side_effects);
+ amd->side_effects.safe_push (gen_rtx_SET (XEXP (loc, 0), tem));
return addr;
case SUBREG:
/* First try without delegitimization of whole MEMs and
if (tem == NULL_RTX)
tem = gen_rtx_raw_SUBREG (GET_MODE (loc), addr, SUBREG_BYTE (loc));
finish_subreg:
- if (MAY_HAVE_DEBUG_INSNS
+ if (MAY_HAVE_DEBUG_BIND_INSNS
&& GET_CODE (tem) == SUBREG
&& (GET_CODE (SUBREG_REG (tem)) == PLUS
|| GET_CODE (SUBREG_REG (tem)) == MINUS
|| GET_CODE (SUBREG_REG (tem)) == MULT
|| GET_CODE (SUBREG_REG (tem)) == ASHIFT)
- && (GET_MODE_CLASS (GET_MODE (tem)) == MODE_INT
- || GET_MODE_CLASS (GET_MODE (tem)) == MODE_PARTIAL_INT)
- && (GET_MODE_CLASS (GET_MODE (SUBREG_REG (tem))) == MODE_INT
- || GET_MODE_CLASS (GET_MODE (SUBREG_REG (tem))) == MODE_PARTIAL_INT)
- && GET_MODE_PRECISION (GET_MODE (tem))
- < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (tem)))
+ && is_a <scalar_int_mode> (GET_MODE (tem), &tem_mode)
+ && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (tem)),
+ &tem_subreg_mode)
+ && (GET_MODE_PRECISION (tem_mode)
+ < GET_MODE_PRECISION (tem_subreg_mode))
&& subreg_lowpart_p (tem)
&& use_narrower_mode_test (SUBREG_REG (tem), tem))
- return use_narrower_mode (SUBREG_REG (tem), GET_MODE (tem),
- GET_MODE (SUBREG_REG (tem)));
+ return use_narrower_mode (SUBREG_REG (tem), tem_mode, tem_subreg_mode);
return tem;
case ASM_OPERANDS:
/* Don't do any replacements in second and following
static void
adjust_insn (basic_block bb, rtx_insn *insn)
{
- struct adjust_mem_data amd;
rtx set;
#ifdef HAVE_window_save
}
#endif
+ adjust_mem_data amd;
amd.mem_mode = VOIDmode;
amd.stack_adjust = -VTI (bb)->out.stack_adjust;
- amd.side_effects = NULL;
amd.store = true;
- note_stores (PATTERN (insn), adjust_mem_stores, &amd);
+ note_stores (insn, adjust_mem_stores, &amd);
amd.store = false;
if (GET_CODE (PATTERN (insn)) == PARALLEL
validate_change (NULL_RTX, &SET_SRC (set), XEXP (note, 0), true);
}
- if (amd.side_effects)
+ if (!amd.side_effects.is_empty ())
{
- rtx *pat, new_pat, s;
- int i, oldn, newn;
+ rtx *pat, new_pat;
+ int i, oldn;
pat = &PATTERN (insn);
if (GET_CODE (*pat) == COND_EXEC)
oldn = XVECLEN (*pat, 0);
else
oldn = 1;
- for (s = amd.side_effects, newn = 0; s; newn++)
- s = XEXP (s, 1);
+ unsigned int newn = amd.side_effects.length ();
new_pat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (oldn + newn));
if (GET_CODE (*pat) == PARALLEL)
for (i = 0; i < oldn; i++)
XVECEXP (new_pat, 0, i) = XVECEXP (*pat, 0, i);
else
XVECEXP (new_pat, 0, 0) = *pat;
- for (s = amd.side_effects, i = oldn; i < oldn + newn; i++, s = XEXP (s, 1))
- XVECEXP (new_pat, 0, i) = XEXP (s, 0);
- free_EXPR_LIST_list (&amd.side_effects);
+
+ rtx effect;
+ unsigned int j;
+ FOR_EACH_VEC_ELT_REVERSE (amd.side_effects, j, effect)
+ XVECEXP (new_pat, 0, j + oldn) = effect;
validate_change (NULL_RTX, pat, new_pat, true);
}
}
{
tree decl;
- if (!MAY_HAVE_DEBUG_INSNS)
+ if (!MAY_HAVE_DEBUG_BIND_INSNS)
return NOT_ONEPART;
if (dv_is_value_p (dv))
static inline tree
var_debug_decl (tree decl)
{
- if (decl && TREE_CODE (decl) == VAR_DECL
- && DECL_HAS_DEBUG_EXPR_P (decl))
+ if (decl && VAR_P (decl) && DECL_HAS_DEBUG_EXPR_P (decl))
{
tree debugdecl = DECL_DEBUG_EXPR (decl);
if (DECL_P (debugdecl))
set_variable_part (set, loc, dv, offset, initialized, set_src, iopt);
}
+/* Return true if we should track a location that is OFFSET bytes from
+ a variable. Store the constant offset in *OFFSET_OUT if so. */
+
+static bool
+track_offset_p (poly_int64 offset, HOST_WIDE_INT *offset_out)
+{
+ HOST_WIDE_INT const_offset;
+ if (!offset.is_constant (&const_offset)
+ || !IN_RANGE (const_offset, 0, MAX_VAR_PARTS - 1))
+ return false;
+ *offset_out = const_offset;
+ return true;
+}
+
+/* Return the offset of a register that track_offset_p says we
+ should track. */
+
+static HOST_WIDE_INT
+get_tracked_reg_offset (rtx loc)
+{
+ HOST_WIDE_INT offset;
+ if (!track_offset_p (REG_OFFSET (loc), &offset))
+ gcc_unreachable ();
+ return offset;
+}
+
/* Set the register to contain REG_EXPR (LOC), REG_OFFSET (LOC). */
static void
rtx set_src)
{
tree decl = REG_EXPR (loc);
- HOST_WIDE_INT offset = REG_OFFSET (loc);
+ HOST_WIDE_INT offset = get_tracked_reg_offset (loc);
var_reg_decl_set (set, loc, initialized,
dv_from_decl (decl), offset, set_src, INSERT);
enum var_init_status initialized, rtx set_src)
{
tree decl = REG_EXPR (loc);
- HOST_WIDE_INT offset = REG_OFFSET (loc);
+ HOST_WIDE_INT offset = get_tracked_reg_offset (loc);
attrs *node, *next;
attrs **nextp;
attrs **nextp = &set->regs[REGNO (loc)];
attrs *node, *next;
- if (clobber)
+ HOST_WIDE_INT offset;
+ if (clobber && track_offset_p (REG_OFFSET (loc), &offset))
{
tree decl = REG_EXPR (loc);
- HOST_WIDE_INT offset = REG_OFFSET (loc);
decl = var_debug_decl (decl);
negative_power_of_two_p (HOST_WIDE_INT i)
{
unsigned HOST_WIDE_INT x = -(unsigned HOST_WIDE_INT)i;
- return x == (x & -x);
+ return pow2_or_zerop (x);
}
/* Strip constant offsets and alignments off of LOC. Return the base
static rtx
vt_canonicalize_addr (dataflow_set *set, rtx oloc)
{
- HOST_WIDE_INT ofst = 0;
+ poly_int64 ofst = 0, term;
machine_mode mode = GET_MODE (oloc);
rtx loc = oloc;
rtx x;
while (retry)
{
while (GET_CODE (loc) == PLUS
- && GET_CODE (XEXP (loc, 1)) == CONST_INT)
+ && poly_int_rtx_p (XEXP (loc, 1), &term))
{
- ofst += INTVAL (XEXP (loc, 1));
+ ofst += term;
loc = XEXP (loc, 0);
}
loc = get_addr_from_global_cache (loc);
/* Consolidate plus_constants. */
- while (ofst && GET_CODE (loc) == PLUS
- && GET_CODE (XEXP (loc, 1)) == CONST_INT)
+ while (maybe_ne (ofst, 0)
+ && GET_CODE (loc) == PLUS
+ && poly_int_rtx_p (XEXP (loc, 1), &term))
{
- ofst += INTVAL (XEXP (loc, 1));
+ ofst += term;
loc = XEXP (loc, 0);
}
}
/* Add OFST back in. */
- if (ofst)
+ if (maybe_ne (ofst, 0))
{
/* Don't build new RTL if we can help it. */
- if (GET_CODE (oloc) == PLUS
- && XEXP (oloc, 0) == loc
- && INTVAL (XEXP (oloc, 1)) == ofst)
+ if (strip_offset (oloc, &term) == loc && known_eq (term, ofst))
return oloc;
loc = plus_constant (mode, loc, ofst);
};
/* Remove all MEMs that overlap with COMS->LOC from the location list
- of a hash table entry for a value. COMS->ADDR must be a
+ of a hash table entry for a onepart variable. COMS->ADDR must be a
canonicalized form of COMS->LOC's address, and COMS->LOC must be
canonicalized itself. */
rtx mloc = coms->loc, addr = coms->addr;
variable *var = *slot;
- if (var->onepart == ONEPART_VALUE)
+ if (var->onepart != NOT_ONEPART)
{
location_chain *loc, **locp;
bool changed = false;
rtx set_src)
{
tree decl = MEM_EXPR (loc);
- HOST_WIDE_INT offset = INT_MEM_OFFSET (loc);
+ HOST_WIDE_INT offset = int_mem_offset (loc);
var_mem_decl_set (set, loc, initialized,
dv_from_decl (decl), offset, set_src, INSERT);
enum var_init_status initialized, rtx set_src)
{
tree decl = MEM_EXPR (loc);
- HOST_WIDE_INT offset = INT_MEM_OFFSET (loc);
+ HOST_WIDE_INT offset = int_mem_offset (loc);
clobber_overlapping_mems (set, loc);
decl = var_debug_decl (decl);
var_mem_delete (dataflow_set *set, rtx loc, bool clobber)
{
tree decl = MEM_EXPR (loc);
- HOST_WIDE_INT offset = INT_MEM_OFFSET (loc);
+ HOST_WIDE_INT offset = int_mem_offset (loc);
clobber_overlapping_mems (set, loc);
decl = var_debug_decl (decl);
{
decl_or_value cdv = dv_from_value (cval);
- /* Keep the remaining values connected, accummulating links
+ /* Keep the remaining values connected, accumulating links
in the canonical value. */
for (node = var->var_part[0].loc_chain; node; node = node->next)
{
case ONEPART_DEXPR:
if (newv)
NO_LOC_P (DECL_RTL_KNOWN_SET (dv_as_decl (dv))) = false;
- /* Fall through... */
+ /* Fall through. */
default:
DECL_CHANGED (dv_as_decl (dv)) = newv;
else
return 1;
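+ /* The 'p' format is only used for polynomial values such as
+    SUBREG_BYTE; order them with compare_sizes_for_sort to get a
+    repeatable ordering. */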
+ case 'p':
+ r = compare_sizes_for_sort (SUBREG_BYTE (x), SUBREG_BYTE (y));
+ if (r != 0)
+ return r;
+ break;
+
case 'V':
case 'E':
/* Compare the vector length first. */
for (node = var->var_part[0].loc_chain; node; node = node->next)
if (MEM_P (node->loc)
&& MEM_EXPR (node->loc) == expr
- && INT_MEM_OFFSET (node->loc) == 0)
+ && int_mem_offset (node->loc) == 0)
{
where = node;
break;
{
for (loc = var->var_part[0].loc_chain; loc; loc = loc->next)
{
- /* We want to remove dying MEMs that doesn't refer to DECL. */
+ /* We want to remove dying MEMs that don't refer to DECL. */
if (GET_CODE (loc->loc) == MEM
&& (MEM_EXPR (loc->loc) != decl
- || INT_MEM_OFFSET (loc->loc) != 0)
- && !mem_dies_at_call (loc->loc))
+ || int_mem_offset (loc->loc) != 0)
+ && mem_dies_at_call (loc->loc))
break;
/* We want to move here MEMs that do refer to DECL. */
else if (GET_CODE (loc->loc) == VALUE
if (GET_CODE (loc->loc) != MEM
|| (MEM_EXPR (loc->loc) == decl
- && INT_MEM_OFFSET (loc->loc) == 0)
+ && int_mem_offset (loc->loc) == 0)
|| !mem_dies_at_call (loc->loc))
{
if (old_loc != loc->loc && emit_notes)
}
/* Remove all MEMs from the location list of a hash table entry for a
- value. */
+ onepart variable. */
int
dataflow_set_remove_mem_locs (variable **slot, dataflow_set *set)
{
variable *var = *slot;
- if (var->onepart == ONEPART_VALUE)
+ if (var->onepart != NOT_ONEPART)
{
location_chain *loc, **locp;
bool changed = false;
{
unsigned int r;
hard_reg_set_iterator hrsi;
- HARD_REG_SET invalidated_regs;
- get_call_reg_set_usage (call_insn, &invalidated_regs,
- regs_invalidated_by_call);
+ HARD_REG_SET callee_clobbers
+ = insn_callee_abi (call_insn).full_reg_clobbers ();
- EXECUTE_IF_SET_IN_HARD_REG_SET (invalidated_regs, 0, r, hrsi)
+ EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, r, hrsi)
var_regno_delete (set, r);
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
set->traversed_vars = set->vars;
shared_hash_htab (set->vars)
return lc1 != lc2;
}
+/* Dump the differences between the location chains of one-part
+   variables VAR1 and VAR2.  The chains must be in canonical order. */
+
+static void
+dump_onepart_variable_differences (variable *var1, variable *var2)
+{
+ location_chain *lc1, *lc2;
+
+ gcc_assert (var1 != var2);
+ gcc_assert (dump_file);
+ gcc_assert (dv_as_opaque (var1->dv) == dv_as_opaque (var2->dv));
+ gcc_assert (var1->n_var_parts == 1
+ && var2->n_var_parts == 1);
+
+ lc1 = var1->var_part[0].loc_chain;
+ lc2 = var2->var_part[0].loc_chain;
+
+ gcc_assert (lc1 && lc2);
+
+ while (lc1 && lc2)
+ {
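+ /* Both chains are sorted by loc_cmp, so walk them in lockstep;
+    a location present only in VAR1 has been removed and one
+    present only in VAR2 has been added. */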
+ switch (loc_cmp (lc1->loc, lc2->loc))
+ {
+ case -1:
+ fprintf (dump_file, "removed: ");
+ print_rtl_single (dump_file, lc1->loc);
+ lc1 = lc1->next;
+ continue;
+ case 0:
+ break;
+ case 1:
+ fprintf (dump_file, "added: ");
+ print_rtl_single (dump_file, lc2->loc);
+ lc2 = lc2->next;
+ continue;
+ default:
+ gcc_unreachable ();
+ }
+ lc1 = lc1->next;
+ lc2 = lc2->next;
+ }
+
+ while (lc1)
+ {
+ fprintf (dump_file, "removed: ");
+ print_rtl_single (dump_file, lc1->loc);
+ lc1 = lc1->next;
+ }
+
+ while (lc2)
+ {
+ fprintf (dump_file, "added: ");
+ print_rtl_single (dump_file, lc2->loc);
+ lc2 = lc2->next;
+ }
+}
+
/* Return true if variables VAR1 and VAR2 are different. */
static bool
{
variable_iterator_type hi;
variable *var1;
+ bool diffound = false;
+ bool details = (dump_file && (dump_flags & TDF_DETAILS));
+
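+/* Note that a difference was found; return immediately unless
+   details are being dumped, in which case keep going so that all
+   the differences get reported. */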
+#define RETRUE \
+ do \
+ { \
+ if (!details) \
+ return true; \
+ else \
+ diffound = true; \
+ } \
+ while (0)
if (old_set->vars == new_set->vars)
return false;
if (shared_hash_htab (old_set->vars)->elements ()
!= shared_hash_htab (new_set->vars)->elements ())
- return true;
+ RETRUE;
FOR_EACH_HASH_TABLE_ELEMENT (*shared_hash_htab (old_set->vars),
var1, variable, hi)
{
variable_table_type *htab = shared_hash_htab (new_set->vars);
variable *var2 = htab->find_with_hash (var1->dv, dv_htab_hash (var1->dv));
+
if (!var2)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "dataflow difference found: removal of:\n");
dump_var (var1);
}
- return true;
+ RETRUE;
}
-
- if (variable_different_p (var1, var2))
+ else if (variable_different_p (var1, var2))
{
- if (dump_file && (dump_flags & TDF_DETAILS))
+ if (details)
{
fprintf (dump_file, "dataflow difference found: "
"old and new follow:\n");
dump_var (var1);
+ if (dv_onepart_p (var1->dv))
+ dump_onepart_variable_differences (var1, var2);
dump_var (var2);
}
- return true;
+ RETRUE;
}
}
- /* No need to traverse the second hashtab, if both have the same number
- of elements and the second one had all entries found in the first one,
- then it can't have any extra entries. */
- return false;
+ /* There's no need to traverse the second hashtab unless we want to
+ print the details. If both have the same number of elements and
+ the second one had all entries found in the first one, then the
+ second can't have any extra entries. */
+ if (!details)
+ return diffound;
+
+ FOR_EACH_HASH_TABLE_ELEMENT (*shared_hash_htab (new_set->vars),
+ var1, variable, hi)
+ {
+ variable_table_type *htab = shared_hash_htab (old_set->vars);
+ variable *var2 = htab->find_with_hash (var1->dv, dv_htab_hash (var1->dv));
+ if (!var2)
+ {
+ if (details)
+ {
+ fprintf (dump_file, "dataflow difference found: addition of:\n");
+ dump_var (var1);
+ }
+ RETRUE;
+ }
+ }
+
+#undef RETRUE
+
+ return diffound;
}
/* Free the contents of dataflow set SET. */
set->vars = NULL;
}
+/* Return true if T is a tracked parameter with non-degenerate record type. */
+
+static bool
+tracked_record_parameter_p (tree t)
+{
+ if (TREE_CODE (t) != PARM_DECL)
+ return false;
+
+ if (DECL_MODE (t) == BLKmode)
+ return false;
+
+ tree type = TREE_TYPE (t);
+ if (TREE_CODE (type) != RECORD_TYPE)
+ return false;
+
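+ /* A record with fewer than two fields is degenerate; there is
+    nothing to gain from tracking it as multiple parts. */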
+ if (TYPE_FIELDS (type) == NULL_TREE
+ || DECL_CHAIN (TYPE_FIELDS (type)) == NULL_TREE)
+ return false;
+
+ return true;
+}
+
/* Shall EXPR be tracked? */
static bool
return DECL_RTL_SET_P (expr);
/* If EXPR is not a parameter or a variable do not track it. */
- if (TREE_CODE (expr) != VAR_DECL && TREE_CODE (expr) != PARM_DECL)
+ if (!VAR_P (expr) && TREE_CODE (expr) != PARM_DECL)
return 0;
/* It also must have a name... */
don't need to track this expression if the ultimate declaration is
ignored. */
realdecl = expr;
- if (TREE_CODE (realdecl) == VAR_DECL && DECL_HAS_DEBUG_EXPR_P (realdecl))
+ if (VAR_P (realdecl) && DECL_HAS_DEBUG_EXPR_P (realdecl))
{
realdecl = DECL_DEBUG_EXPR (realdecl);
if (!DECL_P (realdecl))
|| (TREE_CODE (realdecl) == MEM_REF
&& TREE_CODE (TREE_OPERAND (realdecl, 0)) == ADDR_EXPR))
{
- HOST_WIDE_INT bitsize, bitpos, maxsize;
+ HOST_WIDE_INT bitsize, bitpos;
bool reverse;
tree innerdecl
- = get_ref_base_and_extent (realdecl, &bitpos, &bitsize,
- &maxsize, &reverse);
- if (!DECL_P (innerdecl)
+ = get_ref_base_and_extent_hwi (realdecl, &bitpos,
+ &bitsize, &reverse);
+ if (!innerdecl
+ || !DECL_P (innerdecl)
|| DECL_IGNORED_P (innerdecl)
- /* Do not track declarations for parts of tracked parameters
- since we want to track them as a whole instead. */
- || (TREE_CODE (innerdecl) == PARM_DECL
- && DECL_MODE (innerdecl) != BLKmode
- && TREE_CODE (TREE_TYPE (innerdecl)) != UNION_TYPE)
+ /* Do not track declarations for parts of tracked record
+ parameters since we want to track them as a whole. */
+ || tracked_record_parameter_p (innerdecl)
|| TREE_STATIC (innerdecl)
- || bitsize <= 0
- || bitpos + bitsize > 256
- || bitsize != maxsize)
+ || bitsize == 0
+ || bitpos + bitsize > 256)
return 0;
else
realdecl = expr;
if (decl_rtl && MEM_P (decl_rtl))
{
/* Do not track structures and arrays. */
- if (GET_MODE (decl_rtl) == BLKmode
- || AGGREGATE_TYPE_P (TREE_TYPE (realdecl)))
+ if ((GET_MODE (decl_rtl) == BLKmode
+ || AGGREGATE_TYPE_P (TREE_TYPE (realdecl)))
+ && !tracked_record_parameter_p (realdecl))
return 0;
if (MEM_SIZE_KNOWN_P (decl_rtl)
- && MEM_SIZE (decl_rtl) > MAX_VAR_PARTS)
+ && maybe_gt (MEM_SIZE (decl_rtl), MAX_VAR_PARTS))
return 0;
}
EXPR+OFFSET. */
static bool
-same_variable_part_p (rtx loc, tree expr, HOST_WIDE_INT offset)
+same_variable_part_p (rtx loc, tree expr, poly_int64 offset)
{
tree expr2;
- HOST_WIDE_INT offset2;
+ poly_int64 offset2;
if (! DECL_P (expr))
return false;
else if (MEM_P (loc))
{
expr2 = MEM_EXPR (loc);
- offset2 = INT_MEM_OFFSET (loc);
+ offset2 = int_mem_offset (loc);
}
else
return false;
expr = var_debug_decl (expr);
expr2 = var_debug_decl (expr2);
- return (expr == expr2 && offset == offset2);
+ return (expr == expr2 && known_eq (offset, offset2));
}
/* LOC is a REG or MEM that we would like to track if possible.
from EXPR in *OFFSET_OUT (if nonnull). */
static bool
-track_loc_p (rtx loc, tree expr, HOST_WIDE_INT offset, bool store_reg_p,
+track_loc_p (rtx loc, tree expr, poly_int64 offset, bool store_reg_p,
machine_mode *mode_out, HOST_WIDE_INT *offset_out)
{
machine_mode mode;
machine_mode pseudo_mode;
pseudo_mode = PSEUDO_REGNO_MODE (ORIGINAL_REGNO (loc));
- if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (pseudo_mode))
+ if (paradoxical_subreg_p (mode, pseudo_mode))
{
offset += byte_lowpart_offset (pseudo_mode, mode);
mode = pseudo_mode;
because the real and imaginary parts are represented as separate
pseudo registers, even if the whole complex value fits into one
hard register. */
- if ((GET_MODE_SIZE (mode) > GET_MODE_SIZE (DECL_MODE (expr))
+ if ((paradoxical_subreg_p (mode, DECL_MODE (expr))
|| (store_reg_p
&& !COMPLEX_MODE_P (DECL_MODE (expr))
- && hard_regno_nregs[REGNO (loc)][DECL_MODE (expr)] == 1))
- && offset + byte_lowpart_offset (DECL_MODE (expr), mode) == 0)
+ && hard_regno_nregs (REGNO (loc), DECL_MODE (expr)) == 1))
+ && known_eq (offset + byte_lowpart_offset (DECL_MODE (expr), mode), 0))
{
mode = DECL_MODE (expr);
offset = 0;
}
- if (offset < 0 || offset >= MAX_VAR_PARTS)
+ HOST_WIDE_INT const_offset;
+ if (!track_offset_p (offset, &const_offset))
return false;
if (mode_out)
*mode_out = mode;
if (offset_out)
- *offset_out = offset;
+ *offset_out = const_offset;
return true;
}
static rtx
var_lowpart (machine_mode mode, rtx loc)
{
- unsigned int offset, reg_offset, regno;
+ unsigned int regno;
if (GET_MODE (loc) == mode)
return loc;
if (!REG_P (loc) && !MEM_P (loc))
return NULL;
- offset = byte_lowpart_offset (mode, GET_MODE (loc));
+ poly_uint64 offset = byte_lowpart_offset (mode, GET_MODE (loc));
if (MEM_P (loc))
return adjust_address_nv (loc, mode, offset);
- reg_offset = subreg_lowpart_offset (mode, GET_MODE (loc));
+ poly_uint64 reg_offset = subreg_lowpart_offset (mode, GET_MODE (loc));
regno = REGNO (loc) + subreg_regno_offset (REGNO (loc), GET_MODE (loc),
reg_offset, mode);
return gen_rtx_REG_offset (loc, mode, regno, offset);
return MO_CLOBBER;
else if (target_for_debug_bind (var_debug_decl (expr)))
return MO_CLOBBER;
- else if (track_loc_p (loc, expr, INT_MEM_OFFSET (loc),
+ else if (track_loc_p (loc, expr, int_mem_offset (loc),
false, modep, NULL)
/* Multi-part variables shouldn't refer to one-part
variable names such as VALUEs (never happens) or
DEBUG_EXPRs (only happens in the presence of debug
insns). */
- && (!MAY_HAVE_DEBUG_INSNS
+ && (!MAY_HAVE_DEBUG_BIND_INSNS
|| !rtx_debug_expr_p (XEXP (loc, 0))))
return MO_USE;
else
compile time for ridiculously complex expressions, although they're
seldom useful, and they may often have to be discarded as not
representable anyway. */
-#define EXPR_USE_DEPTH (PARAM_VALUE (PARAM_MAX_VARTRACK_EXPR_DEPTH))
+#define EXPR_USE_DEPTH (param_max_vartrack_expr_depth)
/* Attempt to reverse the EXPR operation in the debug info and record
it in the cselib table. Say for reg1 = reg2 + 6 even when reg2 is
&& (GET_CODE (l->loc) != CONST || !references_value_p (l->loc, 0)))
return;
/* Avoid creating too large locs lists. */
- else if (count == PARAM_VALUE (PARAM_MAX_VARTRACK_REVERSE_OP_SIZE))
+ else if (count == param_max_vartrack_reverse_op_size)
return;
switch (GET_CODE (src))
return;
}
ret = simplify_gen_binary (code, GET_MODE (val), val, arg);
- if (ret == val)
- /* Ensure ret isn't VALUE itself (which can happen e.g. for
- (plus (reg1) (reg2)) when reg2 is known to be 0), as that
- breaks a lot of routines during var-tracking. */
- ret = gen_rtx_fmt_ee (PLUS, GET_MODE (val), val, const0_rtx);
break;
default:
gcc_unreachable ();
mo.type = MO_CLOBBER;
mo.u.loc = loc;
if (GET_CODE (expr) == SET
- && SET_DEST (expr) == loc
+ && (SET_DEST (expr) == loc
+ || (GET_CODE (SET_DEST (expr)) == STRICT_LOW_PART
+ && XEXP (SET_DEST (expr), 0) == loc))
&& !unsuitable_loc (SET_SRC (expr))
&& find_use_val (loc, mode, cui))
{
rtx xexpr = gen_rtx_SET (loc, src);
if (same_variable_part_p (SET_SRC (xexpr),
MEM_EXPR (loc),
- INT_MEM_OFFSET (loc)))
+ int_mem_offset (loc)))
mo.type = MO_COPY;
else
mo.type = MO_SET;
resolve = preserve = !cselib_preserved_value_p (v);
/* We cannot track values for multiple-part variables, so we track only
- locations for tracked parameters passed either by invisible reference
- or directly in multiple locations. */
+ locations for tracked record parameters. */
if (track_p
&& REG_P (loc)
&& REG_EXPR (loc)
- && TREE_CODE (REG_EXPR (loc)) == PARM_DECL
- && DECL_MODE (REG_EXPR (loc)) != BLKmode
- && TREE_CODE (TREE_TYPE (REG_EXPR (loc))) != UNION_TYPE
- && ((MEM_P (DECL_INCOMING_RTL (REG_EXPR (loc)))
- && XEXP (DECL_INCOMING_RTL (REG_EXPR (loc)), 0) != arg_pointer_rtx)
- || (GET_CODE (DECL_INCOMING_RTL (REG_EXPR (loc))) == PARALLEL
- && XVECLEN (DECL_INCOMING_RTL (REG_EXPR (loc)), 0) > 1)))
+ && tracked_record_parameter_p (REG_EXPR (loc)))
{
/* Although we don't use the value here, it could be used later by the
mere virtue of its existence as the operand of the reverse operation
}
if (loc == stack_pointer_rtx
- && hard_frame_pointer_adjustment != -1
+ && (maybe_ne (hard_frame_pointer_adjustment, -1)
+ || (!frame_pointer_needed && !ACCUMULATE_OUTGOING_ARGS))
&& preserve)
cselib_set_value_sp_based (v);
+ /* Don't record MO_VAL_SET for VALUEs that can be described using
+ cfa_base_rtx or cfa_base_rtx + CONST_INT, cselib already knows
+ all the needed equivalences and they shouldn't change depending
+ on which register holds that VALUE in some instruction. */
+ if (!frame_pointer_needed
+ && cfa_base_rtx
+ && cselib_sp_derived_value_p (v))
+ {
+ if (preserve)
+ preserve_value (v);
+ return;
+ }
+
nloc = replace_expr_with_values (oloc);
if (nloc)
oloc = nloc;
&& targetm.calls.struct_value_rtx (type, 0) == 0)
{
tree struct_addr = build_pointer_type (TREE_TYPE (type));
- machine_mode mode = TYPE_MODE (struct_addr);
+ function_arg_info arg (struct_addr, /*named=*/true);
rtx reg;
INIT_CUMULATIVE_ARGS (args_so_far_v, type, NULL_RTX, fndecl,
nargs + 1);
- reg = targetm.calls.function_arg (args_so_far, mode,
- struct_addr, true);
- targetm.calls.function_arg_advance (args_so_far, mode,
- struct_addr, true);
+ reg = targetm.calls.function_arg (args_so_far, arg);
+ targetm.calls.function_arg_advance (args_so_far, arg);
if (reg == NULL_RTX)
{
for (; link; link = XEXP (link, 1))
nargs);
if (obj_type_ref && TYPE_ARG_TYPES (type) != void_list_node)
{
- machine_mode mode;
t = TYPE_ARG_TYPES (type);
- mode = TYPE_MODE (TREE_VALUE (t));
- this_arg = targetm.calls.function_arg (args_so_far, mode,
- TREE_VALUE (t), true);
+ function_arg_info arg (TREE_VALUE (t), /*named=*/true);
+ this_arg = targetm.calls.function_arg (args_so_far, arg);
if (this_arg && !REG_P (this_arg))
this_arg = NULL_RTX;
else if (this_arg == NULL_RTX)
else if (REG_P (x))
{
cselib_val *val = cselib_lookup (x, GET_MODE (x), 0, VOIDmode);
+ scalar_int_mode mode;
if (val && cselib_preserved_value_p (val))
item = val->val_rtx;
- else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
- || GET_MODE_CLASS (GET_MODE (x)) == MODE_PARTIAL_INT)
+ else if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
{
- machine_mode mode = GET_MODE (x);
-
- while ((mode = GET_MODE_WIDER_MODE (mode)) != VOIDmode
- && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD)
+ opt_scalar_int_mode mode_iter;
+ FOR_EACH_WIDER_MODE (mode_iter, mode)
{
- rtx reg = simplify_subreg (mode, x, GET_MODE (x), 0);
+ mode = mode_iter.require ();
+ if (GET_MODE_BITSIZE (mode) > BITS_PER_WORD)
+ break;
+ rtx reg = simplify_subreg (mode, x, GET_MODE (x), 0);
if (reg == NULL_RTX || !REG_P (reg))
continue;
val = cselib_lookup (reg, mode, 0, VOIDmode);
if (!frame_pointer_needed)
{
- struct adjust_mem_data amd;
+ class adjust_mem_data amd;
amd.mem_mode = VOIDmode;
amd.stack_adjust = -VTI (bb)->out.stack_adjust;
- amd.side_effects = NULL;
amd.store = true;
mem = simplify_replace_fn_rtx (mem, NULL_RTX, adjust_mems,
&amd);
- gcc_assert (amd.side_effects == NULL_RTX);
+ gcc_assert (amd.side_effects.is_empty ());
}
val = cselib_lookup (mem, GET_MODE (mem), 0, VOIDmode);
if (val && cselib_preserved_value_p (val))
{
/* For non-integer stack argument see also if they weren't
initialized by integers. */
- machine_mode imode = int_mode_for_mode (GET_MODE (mem));
- if (imode != GET_MODE (mem) && imode != BLKmode)
+ scalar_int_mode imode;
+ if (int_mode_for_mode (GET_MODE (mem)).exists (&imode)
+ && imode != GET_MODE (mem))
{
val = cselib_lookup (adjust_address_nv (mem, imode, 0),
imode, 0, VOIDmode);
}
if (t && t != void_list_node)
{
- tree argtype = TREE_VALUE (t);
- machine_mode mode = TYPE_MODE (argtype);
rtx reg;
- if (pass_by_reference (&args_so_far_v, mode, argtype, true))
- {
- argtype = build_pointer_type (argtype);
- mode = TYPE_MODE (argtype);
- }
- reg = targetm.calls.function_arg (args_so_far, mode,
- argtype, true);
- if (TREE_CODE (argtype) == REFERENCE_TYPE
- && INTEGRAL_TYPE_P (TREE_TYPE (argtype))
+ function_arg_info arg (TREE_VALUE (t), /*named=*/true);
+ apply_pass_by_reference_rules (&args_so_far_v, arg);
+ reg = targetm.calls.function_arg (args_so_far, arg);
+ if (TREE_CODE (arg.type) == REFERENCE_TYPE
+ && INTEGRAL_TYPE_P (TREE_TYPE (arg.type))
&& reg
&& REG_P (reg)
- && GET_MODE (reg) == mode
- && (GET_MODE_CLASS (mode) == MODE_INT
- || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ && GET_MODE (reg) == arg.mode
+ && (GET_MODE_CLASS (arg.mode) == MODE_INT
+ || GET_MODE_CLASS (arg.mode) == MODE_PARTIAL_INT)
&& REG_P (x)
&& REGNO (x) == REGNO (reg)
- && GET_MODE (x) == mode
+ && GET_MODE (x) == arg.mode
&& item)
{
machine_mode indmode
- = TYPE_MODE (TREE_TYPE (argtype));
+ = TYPE_MODE (TREE_TYPE (arg.type));
rtx mem = gen_rtx_MEM (indmode, x);
cselib_val *val = cselib_lookup (mem, indmode, 0, VOIDmode);
if (val && cselib_preserved_value_p (val))
}
}
}
- targetm.calls.function_arg_advance (args_so_far, mode,
- argtype, true);
+ targetm.calls.function_arg_advance (args_so_far, arg);
t = TREE_CHAIN (t);
}
}
insert notes before it without worrying about any
notes that MO_USEs might emit after the insn. */
cui.store_p = true;
- note_stores (PATTERN (insn), add_stores, &cui);
+ note_stores (insn, add_stores, &cui);
n2 = VTI (bb)->mos.length () - 1;
mos = VTI (bb)->mos.address ();
dataflow_set_copy (&old_out, out);
dataflow_set_copy (out, in);
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
local_get_addr_cache = new hash_map<rtx, rtx>;
FOR_EACH_VEC_ELT (VTI (bb)->mos, i, mo)
}
}
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
delete local_get_addr_cache;
local_get_addr_cache = NULL;
{
bb_heap_t *worklist = new bb_heap_t (LONG_MIN);
bb_heap_t *pending = new bb_heap_t (LONG_MIN);
- sbitmap visited, in_worklist, in_pending;
+ sbitmap in_worklist, in_pending;
basic_block bb;
edge e;
int *bb_order;
int *rc_order;
int i;
int htabsz = 0;
- int htabmax = PARAM_VALUE (PARAM_MAX_VARTRACK_SIZE);
+ int htabmax = param_max_vartrack_size;
bool success = true;
timevar_push (TV_VAR_TRACKING_DATAFLOW);
bb_order[rc_order[i]] = i;
free (rc_order);
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
in_worklist = sbitmap_alloc (last_basic_block_for_fn (cfun));
in_pending = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (in_worklist);
else
oldinsz = oldoutsz = 0;
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
dataflow_set *in = &VTI (bb)->in, *first_out = NULL;
bool first = true, adjust = false;
if (htabmax && htabsz > htabmax)
{
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
inform (DECL_SOURCE_LOCATION (cfun->decl),
"variable tracking size limit exceeded with "
- "-fvar-tracking-assignments, retrying without");
+ "%<-fvar-tracking-assignments%>, retrying without");
else
inform (DECL_SOURCE_LOCATION (cfun->decl),
"variable tracking size limit exceeded");
}
}
- if (success && MAY_HAVE_DEBUG_INSNS)
+ if (success && MAY_HAVE_DEBUG_BIND_INSNS)
FOR_EACH_BB_FN (bb, cfun)
gcc_assert (VTI (bb)->flooded);
free (bb_order);
delete worklist;
delete pending;
- sbitmap_free (visited);
sbitmap_free (in_worklist);
sbitmap_free (in_pending);
static void
dump_vars (variable_table_type *vars)
{
- if (vars->elements () > 0)
+ if (!vars->is_empty ())
{
fprintf (dump_file, "Variables:\n");
vars->traverse <void *, dump_var_tracking_slot> (NULL);
/* Structure for passing some other parameters to function
vt_expand_loc_callback. */
-struct expand_loc_callback_data
+class expand_loc_callback_data
{
+public:
/* The variables and values active at this point. */
variable_table_type *vars;
vt_expand_var_loc_chain (variable *var, bitmap regs, void *data,
bool *pendrecp)
{
- struct expand_loc_callback_data *elcd
- = (struct expand_loc_callback_data *) data;
+ class expand_loc_callback_data *elcd
+ = (class expand_loc_callback_data *) data;
location_chain *loc, *next;
rtx result = NULL;
int first_child, result_first_child, last_child;
int max_depth ATTRIBUTE_UNUSED,
void *data)
{
- struct expand_loc_callback_data *elcd
- = (struct expand_loc_callback_data *) data;
+ class expand_loc_callback_data *elcd
+ = (class expand_loc_callback_data *) data;
decl_or_value dv;
variable *var;
rtx result, subreg;
/* Invalid SUBREGs are ok in debug info. ??? We could try
alternate expansions for the VALUE as well. */
- if (!result)
+ if (!result && GET_MODE (subreg) != VOIDmode)
result = gen_rtx_raw_SUBREG (GET_MODE (x), subreg, SUBREG_BYTE (x));
return result;
static rtx
vt_expand_loc (rtx loc, variable_table_type *vars)
{
- struct expand_loc_callback_data data;
+ class expand_loc_callback_data data;
rtx result;
- if (!MAY_HAVE_DEBUG_INSNS)
+ if (!MAY_HAVE_DEBUG_BIND_INSNS)
return loc;
INIT_ELCD (data, vars);
static rtx
vt_expand_1pvar (variable *var, variable_table_type *vars)
{
- struct expand_loc_callback_data data;
+ class expand_loc_callback_data data;
rtx loc;
gcc_checking_assert (var->onepart && var->n_var_parts == 1);
bool complete;
enum var_init_status initialized = VAR_INIT_STATUS_UNINITIALIZED;
HOST_WIDE_INT last_limit;
- tree type_size_unit;
HOST_WIDE_INT offsets[MAX_VAR_PARTS];
rtx loc[MAX_VAR_PARTS];
tree decl;
{
machine_mode mode, wider_mode;
rtx loc2;
- HOST_WIDE_INT offset;
+ HOST_WIDE_INT offset, size, wider_size;
if (i == 0 && var->onepart)
{
mode = GET_MODE (var->var_part[i].cur_loc);
if (mode == VOIDmode && var->onepart)
mode = DECL_MODE (decl);
- last_limit = offsets[n_var_parts] + GET_MODE_SIZE (mode);
+ /* We only track subparts of constant-sized objects, since at present
+ there's no representation for polynomial pieces. */
+ if (!GET_MODE_SIZE (mode).is_constant (&size))
+ {
+ complete = false;
+ continue;
+ }
+ last_limit = offsets[n_var_parts] + size;
/* Attempt to merge adjacent registers or memory. */
- wider_mode = GET_MODE_WIDER_MODE (mode);
for (j = i + 1; j < var->n_var_parts; j++)
if (last_limit <= VAR_PART_OFFSET (var, j))
break;
if (j < var->n_var_parts
- && wider_mode != VOIDmode
+ && GET_MODE_WIDER_MODE (mode).exists (&wider_mode)
+ && GET_MODE_SIZE (wider_mode).is_constant (&wider_size)
&& var->var_part[j].cur_loc
&& mode == GET_MODE (var->var_part[j].cur_loc)
&& (REG_P (loc[n_var_parts]) || MEM_P (loc[n_var_parts]))
&& GET_CODE (loc[n_var_parts]) == GET_CODE (loc2))
{
rtx new_loc = NULL;
+ poly_int64 offset2;
if (REG_P (loc[n_var_parts])
- && hard_regno_nregs[REGNO (loc[n_var_parts])][mode] * 2
- == hard_regno_nregs[REGNO (loc[n_var_parts])][wider_mode]
+ && hard_regno_nregs (REGNO (loc[n_var_parts]), mode) * 2
+ == hard_regno_nregs (REGNO (loc[n_var_parts]), wider_mode)
&& end_hard_regno (mode, REGNO (loc[n_var_parts]))
== REGNO (loc2))
{
else if (MEM_P (loc[n_var_parts])
&& GET_CODE (XEXP (loc2, 0)) == PLUS
&& REG_P (XEXP (XEXP (loc2, 0), 0))
- && CONST_INT_P (XEXP (XEXP (loc2, 0), 1)))
+ && poly_int_rtx_p (XEXP (XEXP (loc2, 0), 1), &offset2))
{
- if ((REG_P (XEXP (loc[n_var_parts], 0))
- && rtx_equal_p (XEXP (loc[n_var_parts], 0),
- XEXP (XEXP (loc2, 0), 0))
- && INTVAL (XEXP (XEXP (loc2, 0), 1))
- == GET_MODE_SIZE (mode))
- || (GET_CODE (XEXP (loc[n_var_parts], 0)) == PLUS
- && CONST_INT_P (XEXP (XEXP (loc[n_var_parts], 0), 1))
- && rtx_equal_p (XEXP (XEXP (loc[n_var_parts], 0), 0),
- XEXP (XEXP (loc2, 0), 0))
- && INTVAL (XEXP (XEXP (loc[n_var_parts], 0), 1))
- + GET_MODE_SIZE (mode)
- == INTVAL (XEXP (XEXP (loc2, 0), 1))))
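+ /* The two MEMs are adjacent if the second one's address equals
+    the first one's address plus the first one's size. */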
+ poly_int64 end1 = size;
+ rtx base1 = strip_offset_and_add (XEXP (loc[n_var_parts], 0),
+ &end1);
+ if (rtx_equal_p (base1, XEXP (XEXP (loc2, 0), 0))
+ && known_eq (end1, offset2))
new_loc = adjust_address_nv (loc[n_var_parts],
wider_mode, 0);
}
{
loc[n_var_parts] = new_loc;
mode = wider_mode;
- last_limit = offsets[n_var_parts] + GET_MODE_SIZE (mode);
+ last_limit = offsets[n_var_parts] + wider_size;
i = j;
}
}
++n_var_parts;
}
- type_size_unit = TYPE_SIZE_UNIT (TREE_TYPE (decl));
- if ((unsigned HOST_WIDE_INT) last_limit < TREE_INT_CST_LOW (type_size_unit))
+ poly_uint64 type_size_unit
+ = tree_to_poly_uint64 (TYPE_SIZE_UNIT (TREE_TYPE (decl)));
+ if (maybe_lt (poly_uint64 (last_limit), type_size_unit))
complete = false;
if (! flag_var_tracking_uninit)
/* Make sure that the call related notes come first. */
while (NEXT_INSN (insn)
&& NOTE_P (insn)
- && ((NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION
- && NOTE_DURING_CALL_P (insn))
- || NOTE_KIND (insn) == NOTE_INSN_CALL_ARG_LOCATION))
+ && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION
+ && NOTE_DURING_CALL_P (insn))
insn = NEXT_INSN (insn);
if (NOTE_P (insn)
- && ((NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION
- && NOTE_DURING_CALL_P (insn))
- || NOTE_KIND (insn) == NOTE_INSN_CALL_ARG_LOCATION))
+ && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION
+ && NOTE_DURING_CALL_P (insn))
note = emit_note_after (NOTE_INSN_VAR_LOCATION, insn);
else
note = emit_note_before (NOTE_INSN_VAR_LOCATION, insn);
emit_note_data data;
variable_table_type *htab = shared_hash_htab (vars);
- if (!changed_variables->elements ())
+ if (changed_variables->is_empty ())
return;
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
process_changed_values (htab);
data.insn = insn;
emit_notes_for_changes (insn, EMIT_NOTE_AFTER_CALL_INSN, set->vars);
{
rtx arguments = mo->u.loc, *p = &arguments;
- rtx_note *note;
while (*p)
{
XEXP (XEXP (*p, 0), 1)
shared_hash_htab (set->vars));
/* If expansion is successful, keep it in the list. */
if (XEXP (XEXP (*p, 0), 1))
- p = &XEXP (*p, 1);
+ {
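+ /* REG notes are subject to RTL sharing verification, so make
+    sure the expanded location is unshared. */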
+ XEXP (XEXP (*p, 0), 1)
+ = copy_rtx_if_shared (XEXP (XEXP (*p, 0), 1));
+ p = &XEXP (*p, 1);
+ }
/* Otherwise, if the following item is data_value for it,
drop it too. */
else if (XEXP (*p, 1)
else
*p = XEXP (*p, 1);
}
- note = emit_note_after (NOTE_INSN_CALL_ARG_LOCATION, insn);
- NOTE_VAR_LOCATION (note) = arguments;
+ add_reg_note (insn, REG_CALL_ARG_LOCATION, arguments);
}
break;
basic_block bb;
dataflow_set cur;
- gcc_assert (!changed_variables->elements ());
+ gcc_assert (changed_variables->is_empty ());
/* Free memory occupied by the out hash tables, as they aren't used
anymore. */
delete_variable_part). */
emit_notes = true;
- if (MAY_HAVE_DEBUG_INSNS)
- {
- dropped_values = new variable_table_type (cselib_get_next_uid () * 2);
- }
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
+ dropped_values = new variable_table_type (cselib_get_next_uid () * 2);
dataflow_set_init (&cur);
subsequent basic blocks. */
emit_notes_for_differences (BB_HEAD (bb), &cur, &VTI (bb)->in);
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
local_get_addr_cache = new hash_map<rtx, rtx>;
/* Emit the notes for the changes in the basic block itself. */
emit_notes_in_bb (bb, &cur);
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
delete local_get_addr_cache;
local_get_addr_cache = NULL;
dataflow_set_destroy (&cur);
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
delete dropped_values;
dropped_values = NULL;
assign declaration to *DECLP and offset to *OFFSETP, and return true. */
static bool
-vt_get_decl_and_offset (rtx rtl, tree *declp, HOST_WIDE_INT *offsetp)
+vt_get_decl_and_offset (rtx rtl, tree *declp, poly_int64 *offsetp)
{
if (REG_P (rtl))
{
decl = REG_EXPR (reg);
if (REG_EXPR (reg) != decl)
break;
- if (REG_OFFSET (reg) < offset)
- offset = REG_OFFSET (reg);
+ HOST_WIDE_INT this_offset;
+ if (!track_offset_p (REG_OFFSET (reg), &this_offset))
+ break;
+ offset = MIN (offset, this_offset);
}
if (i == len)
if (MEM_ATTRS (rtl))
{
*declp = MEM_EXPR (rtl);
- *offsetp = INT_MEM_OFFSET (rtl);
+ *offsetp = int_mem_offset (rtl);
return true;
}
}
rtx incoming = DECL_INCOMING_RTL (parm);
tree decl;
machine_mode mode;
- HOST_WIDE_INT offset;
+ poly_int64 offset;
dataflow_set *out;
decl_or_value dv;
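+ /* Records whether vt_get_decl_and_offset succeeded on INCOMING;
+    the PARALLEL handling at the end depends on it. */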
+ bool incoming_ok = true;
if (TREE_CODE (parm) != PARM_DECL)
return;
rewrite the incoming location of parameters passed on the stack
into MEMs based on the argument pointer, so that incoming doesn't
depend on a pseudo. */
+ poly_int64 incoming_offset = 0;
if (MEM_P (incoming)
- && (XEXP (incoming, 0) == crtl->args.internal_arg_pointer
- || (GET_CODE (XEXP (incoming, 0)) == PLUS
- && XEXP (XEXP (incoming, 0), 0)
- == crtl->args.internal_arg_pointer
- && CONST_INT_P (XEXP (XEXP (incoming, 0), 1)))))
+ && (strip_offset (XEXP (incoming, 0), &incoming_offset)
+ == crtl->args.internal_arg_pointer))
{
HOST_WIDE_INT off = -FIRST_PARM_OFFSET (current_function_decl);
- if (GET_CODE (XEXP (incoming, 0)) == PLUS)
- off += INTVAL (XEXP (XEXP (incoming, 0), 1));
incoming
= replace_equiv_address_nv (incoming,
plus_constant (Pmode,
- arg_pointer_rtx, off));
+ arg_pointer_rtx,
+ off + incoming_offset));
}
#ifdef HAVE_window_save
if (!vt_get_decl_and_offset (incoming, &decl, &offset))
{
+ incoming_ok = false;
if (MEM_P (incoming))
{
/* This means argument is passed by invisible reference. */
offset = 0;
}
- if (!track_loc_p (incoming, parm, offset, false, &mode, &offset))
+ HOST_WIDE_INT const_offset;
+ if (!track_loc_p (incoming, parm, offset, false, &mode, &const_offset))
return;
out = &VTI (ENTRY_BLOCK_PTR_FOR_FN (cfun))->out;
arguments passed by invisible reference aren't dealt with
above: incoming-rtl will have Pmode rather than the
expected mode for the type. */
- if (offset)
+ if (const_offset)
return;
lowpart = var_lowpart (mode, incoming);
if (val)
{
preserve_value (val);
- set_variable_part (out, val->val_rtx, dv, offset,
+ set_variable_part (out, val->val_rtx, dv, const_offset,
VAR_INIT_STATUS_INITIALIZED, NULL, INSERT);
dv = dv_from_value (val->val_rtx);
}
{
incoming = var_lowpart (mode, incoming);
gcc_assert (REGNO (incoming) < FIRST_PSEUDO_REGISTER);
- attrs_list_insert (&out->regs[REGNO (incoming)], dv, offset,
+ attrs_list_insert (&out->regs[REGNO (incoming)], dv, const_offset,
incoming);
- set_variable_part (out, incoming, dv, offset,
+ set_variable_part (out, incoming, dv, const_offset,
VAR_INIT_STATUS_INITIALIZED, NULL, INSERT);
if (dv_is_value_p (dv))
{
{
int i;
+ /* The following code relies on vt_get_decl_and_offset returning true for
+ incoming, which might not always be the case. */
+ if (!incoming_ok)
+ return;
for (i = 0; i < XVECLEN (incoming, 0); i++)
{
rtx reg = XEXP (XVECEXP (incoming, 0, i), 0);
- offset = REG_OFFSET (reg);
+ /* vt_get_decl_and_offset has already checked that the offset
+ is a valid variable part. */
+ const_offset = get_tracked_reg_offset (reg);
gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
- attrs_list_insert (&out->regs[REGNO (reg)], dv, offset, reg);
- set_variable_part (out, reg, dv, offset,
+ attrs_list_insert (&out->regs[REGNO (reg)], dv, const_offset, reg);
+ set_variable_part (out, reg, dv, const_offset,
VAR_INIT_STATUS_INITIALIZED, NULL, INSERT);
}
}
else if (MEM_P (incoming))
{
incoming = var_lowpart (mode, incoming);
- set_variable_part (out, incoming, dv, offset,
+ set_variable_part (out, incoming, dv, const_offset,
VAR_INIT_STATUS_INITIALIZED, NULL, INSERT);
}
}
for (parm = DECL_ARGUMENTS (current_function_decl);
parm; parm = DECL_CHAIN (parm))
- if (!POINTER_BOUNDS_P (parm))
- vt_add_function_parameter (parm);
+ vt_add_function_parameter (parm);
if (DECL_HAS_VALUE_EXPR_P (DECL_RESULT (current_function_decl)))
{
cfa_base_rtx = NULL_RTX;
return;
}
- if (!MAY_HAVE_DEBUG_INSNS)
+ if (!MAY_HAVE_DEBUG_BIND_INSNS)
return;
/* Tell alias analysis that cfa_base_rtx should share
cselib_preserve_cfa_base_value (val, REGNO (cfa_base_rtx));
}
+/* Reemit INSN, a MARKER_DEBUG_INSN, as a note. */
+
+static rtx_insn *
+reemit_marker_as_note (rtx_insn *insn)
+{
+ gcc_checking_assert (DEBUG_MARKER_INSN_P (insn));
+
+ enum insn_note kind = INSN_DEBUG_MARKER_KIND (insn);
+
+ switch (kind)
+ {
+ case NOTE_INSN_BEGIN_STMT:
+ case NOTE_INSN_INLINE_ENTRY:
+ {
+ rtx_insn *note = NULL;
+ if (cfun->debug_nonbind_markers)
+ {
+ note = emit_note_before (kind, insn);
+ NOTE_MARKER_LOCATION (note) = INSN_LOCATION (insn);
+ }
+ delete_insn (insn);
+ return note;
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
/* Allocate and initialize the data structures for variable tracking
and parse the RTL to get the micro operations. */
vt_initialize (void)
{
basic_block bb;
- HOST_WIDE_INT fp_cfa_offset = -1;
+ poly_int64 fp_cfa_offset = -1;
alloc_aux_for_blocks (sizeof (variable_tracking_info));
- empty_shared_hash = new shared_hash;
+ empty_shared_hash = shared_hash_pool.allocate ();
empty_shared_hash->refcount = 1;
empty_shared_hash->htab = new variable_table_type (1);
changed_variables = new variable_table_type (10);
VTI (bb)->permp = NULL;
}
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
cselib_init (CSELIB_RECORD_MEMORY | CSELIB_PRESERVE_CONSTANTS);
scratch_regs = BITMAP_ALLOC (NULL);
global_get_addr_cache = NULL;
}
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
rtx reg, expr;
int ofst;
preserve_value (val);
if (reg != hard_frame_pointer_rtx && fixed_regs[REGNO (reg)])
cselib_preserve_cfa_base_value (val, REGNO (reg));
- expr = plus_constant (GET_MODE (stack_pointer_rtx),
- stack_pointer_rtx, -ofst);
- cselib_add_permanent_equiv (val, expr, get_insns ());
-
if (ofst)
{
- val = cselib_lookup_from_insn (stack_pointer_rtx,
- GET_MODE (stack_pointer_rtx), 1,
- VOIDmode, get_insns ());
- preserve_value (val);
+ cselib_val *valsp
+ = cselib_lookup_from_insn (stack_pointer_rtx,
+ GET_MODE (stack_pointer_rtx), 1,
+ VOIDmode, get_insns ());
+ preserve_value (valsp);
expr = plus_constant (GET_MODE (reg), reg, ofst);
- cselib_add_permanent_equiv (val, expr, get_insns ());
+ /* This cselib_add_permanent_equiv call needs to be done before
+ the other cselib_add_permanent_equiv a few lines later,
+ because after that one is done, cselib_lookup on this expr
+ will, due to the cselib SP_DERIVED_VALUE_P optimizations,
+ return valsp and so no permanent equivalence will be added. */
+ cselib_add_permanent_equiv (valsp, expr, get_insns ());
}
+
+ expr = plus_constant (GET_MODE (stack_pointer_rtx),
+ stack_pointer_rtx, -ofst);
+ cselib_add_permanent_equiv (val, expr, get_insns ());
}
/* In order to factor out the adjustments made to the stack pointer or to
{
if (GET_CODE (elim) == PLUS)
{
- fp_cfa_offset -= INTVAL (XEXP (elim, 1));
+ fp_cfa_offset -= rtx_to_poly_int64 (XEXP (elim, 1));
elim = XEXP (elim, 0);
}
if (elim != hard_frame_pointer_rtx)
vt_add_function_parameters ();
+ bool record_sp_value = false;
FOR_EACH_BB_FN (bb, cfun)
{
rtx_insn *insn;
- HOST_WIDE_INT pre, post = 0;
basic_block first_bb, last_bb;
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
cselib_record_sets_hook = add_with_sets;
if (dump_file && (dump_flags & TDF_DETAILS))
cselib_get_next_uid ());
}
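+ /* For every block but the first, record at its start how the stack
+    pointer relates to the CFA base register, so that sp-derived
+    VALUEs remain expressible when not using a frame pointer. */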
+ if (MAY_HAVE_DEBUG_BIND_INSNS
+ && cfa_base_rtx
+ && !frame_pointer_needed
+ && record_sp_value)
+ cselib_record_sp_cfa_base_equiv (-cfa_base_offset
+ - VTI (bb)->in.stack_adjust,
+ BB_HEAD (bb));
+ record_sp_value = true;
+
first_bb = bb;
for (;;)
{
{
HOST_WIDE_INT offset = VTI (bb)->out.stack_adjust;
VTI (bb)->out.stack_adjust = VTI (bb)->in.stack_adjust;
- for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb));
- insn = NEXT_INSN (insn))
+
+ rtx_insn *next;
+ FOR_BB_INSNS_SAFE (bb, insn, next)
{
if (INSN_P (insn))
{
+ HOST_WIDE_INT pre = 0, post = 0;
+
if (!frame_pointer_needed)
{
insn_stack_adjust_offset_pre_post (insn, &pre, &post);
log_op_type (PATTERN (insn), bb, insn,
MO_ADJUST, dump_file);
VTI (bb)->mos.safe_push (mo);
- VTI (bb)->out.stack_adjust += pre;
}
}
cselib_hook_called = false;
adjust_insn (bb, insn);
- if (MAY_HAVE_DEBUG_INSNS)
+
+ if (pre)
+ VTI (bb)->out.stack_adjust += pre;
+
+ if (DEBUG_MARKER_INSN_P (insn))
+ {
+ reemit_marker_as_note (insn);
+ continue;
+ }
+
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
if (CALL_P (insn))
prepare_call_arguments (bb, insn);
cselib_process_insn (insn);
if (dump_file && (dump_flags & TDF_DETAILS))
{
- print_rtl_single (dump_file, insn);
+ if (dump_flags & TDF_SLIM)
+ dump_insn_slim (dump_file, insn);
+ else
+ print_rtl_single (dump_file, insn);
dump_cselib_table (dump_file);
}
}
add_with_sets (insn, 0, 0);
cancel_changes (0);
- if (!frame_pointer_needed && post)
+ if (post)
{
micro_operation mo;
mo.type = MO_ADJUST;
VTI (bb)->out.stack_adjust += post;
}
- if (fp_cfa_offset != -1
- && hard_frame_pointer_adjustment == -1
+ if (maybe_ne (fp_cfa_offset, -1)
+ && known_eq (hard_frame_pointer_adjustment, -1)
&& fp_setter_insn (insn))
{
vt_init_cfa_base ();
hard_frame_pointer_adjustment = fp_cfa_offset;
/* Disassociate sp from fp now. */
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
cselib_val *v;
cselib_invalidate_rtx (stack_pointer_rtx);
bb = last_bb;
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
cselib_preserve_only_values ();
cselib_reset_table (cselib_get_next_uid ());
static int debug_label_num = 1;
-/* Get rid of all debug insns from the insn stream. */
+/* Remove from the insn stream a single debug insn used for
+ variable tracking at assignments. */
-static void
-delete_debug_insns (void)
+static inline void
+delete_vta_debug_insn (rtx_insn *insn)
+{
+ if (DEBUG_MARKER_INSN_P (insn))
+ {
+ reemit_marker_as_note (insn);
+ return;
+ }
+
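+ /* Debug binds of named labels that have no DECL_RTL yet become
+    "deleted label" notes, so that the label name is still available
+    for the debug info; everything else is simply deleted. */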
+ tree decl = INSN_VAR_LOCATION_DECL (insn);
+ if (TREE_CODE (decl) == LABEL_DECL
+ && DECL_NAME (decl)
+ && !DECL_RTL_SET_P (decl))
+ {
+ PUT_CODE (insn, NOTE);
+ NOTE_KIND (insn) = NOTE_INSN_DELETED_DEBUG_LABEL;
+ NOTE_DELETED_LABEL_NAME (insn)
+ = IDENTIFIER_POINTER (DECL_NAME (decl));
+ SET_DECL_RTL (decl, insn);
+ CODE_LABEL_NUMBER (insn) = debug_label_num++;
+ }
+ else
+ delete_insn (insn);
+}
+
+/* Remove from the insn stream all debug insns used for variable
+ tracking at assignments. USE_CFG should be false if the cfg is no
+ longer usable. */
+
+void
+delete_vta_debug_insns (bool use_cfg)
{
basic_block bb;
rtx_insn *insn, *next;
if (!MAY_HAVE_DEBUG_INSNS)
return;
- FOR_EACH_BB_FN (bb, cfun)
- {
- FOR_BB_INSNS_SAFE (bb, insn, next)
+ if (use_cfg)
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ FOR_BB_INSNS_SAFE (bb, insn, next)
+ if (DEBUG_INSN_P (insn))
+ delete_vta_debug_insn (insn);
+ }
+ else
+ for (insn = get_insns (); insn; insn = next)
+ {
+ next = NEXT_INSN (insn);
if (DEBUG_INSN_P (insn))
- {
- tree decl = INSN_VAR_LOCATION_DECL (insn);
- if (TREE_CODE (decl) == LABEL_DECL
- && DECL_NAME (decl)
- && !DECL_RTL_SET_P (decl))
- {
- PUT_CODE (insn, NOTE);
- NOTE_KIND (insn) = NOTE_INSN_DELETED_DEBUG_LABEL;
- NOTE_DELETED_LABEL_NAME (insn)
- = IDENTIFIER_POINTER (DECL_NAME (decl));
- SET_DECL_RTL (decl, insn);
- CODE_LABEL_NUMBER (insn) = debug_label_num++;
- }
- else
- delete_insn (insn);
- }
- }
+ delete_vta_debug_insn (insn);
+ }
}
/* Run a fast, BB-local only version of var tracking, to take care of
vt_debug_insns_local (bool skipped ATTRIBUTE_UNUSED)
{
/* ??? Just skip it all for now. */
- delete_debug_insns ();
+ delete_vta_debug_insns (true);
}
/* Free the data structures needed for variable tracking. */
location_chain_pool.release ();
shared_hash_pool.release ();
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
{
if (global_get_addr_cache)
delete global_get_addr_cache;
{
bool success;
- if (flag_var_tracking_assignments < 0
+ /* We won't be called as a separate pass if flag_var_tracking is not
+ set, but final may call us to turn debug markers into notes. */
+ if ((!flag_var_tracking && MAY_HAVE_DEBUG_INSNS)
+ || flag_var_tracking_assignments < 0
/* Var-tracking right now assumes the IR doesn't contain
any pseudos at this point. */
|| targetm.no_register_allocation)
{
- delete_debug_insns ();
+ delete_vta_debug_insns (true);
return 0;
}
- if (n_basic_blocks_for_fn (cfun) > 500 &&
- n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun) >= 20)
+ if (!flag_var_tracking)
+ return 0;
+
+ if (n_basic_blocks_for_fn (cfun) > 500
+ && n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun) >= 20)
{
vt_debug_insns_local (true);
return 0;
{
vt_finalize ();
- delete_debug_insns ();
+ delete_vta_debug_insns (true);
/* This is later restored by our caller. */
flag_var_tracking_assignments = 0;