/* A pass for lowering trees to RTL.
- Copyright (C) 2004-2017 Free Software Foundation, Inc.
+ Copyright (C) 2004-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "gimple-pretty-print.h"
#include "toplev.h"
#include "debug.h"
-#include "params.h"
#include "tree-inline.h"
#include "value-prof.h"
#include "tree-ssa-live.h"
#include "tree-ssa-address.h"
#include "output.h"
#include "builtins.h"
-#include "tree-chkp.h"
-#include "rtl-chkp.h"
/* Some systems use __main in a way incompatible with its use in gcc; in these
cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
gimple_assign_rhs_to_tree (gimple *stmt)
{
tree t;
- enum gimple_rhs_class grhs_class;
-
- grhs_class = get_gimple_rhs_class (gimple_expr_code (stmt));
-
- if (grhs_class == GIMPLE_TERNARY_RHS)
- t = build3 (gimple_assign_rhs_code (stmt),
- TREE_TYPE (gimple_assign_lhs (stmt)),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt),
- gimple_assign_rhs3 (stmt));
- else if (grhs_class == GIMPLE_BINARY_RHS)
- t = build2 (gimple_assign_rhs_code (stmt),
- TREE_TYPE (gimple_assign_lhs (stmt)),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt));
- else if (grhs_class == GIMPLE_UNARY_RHS)
- t = build1 (gimple_assign_rhs_code (stmt),
- TREE_TYPE (gimple_assign_lhs (stmt)),
- gimple_assign_rhs1 (stmt));
- else if (grhs_class == GIMPLE_SINGLE_RHS)
- {
- t = gimple_assign_rhs1 (stmt);
- /* Avoid modifying this tree in place below. */
- if ((gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)
- && gimple_location (stmt) != EXPR_LOCATION (t))
- || (gimple_block (stmt)
- && currently_expanding_to_rtl
- && EXPR_P (t)))
- t = copy_node (t);
+ switch (get_gimple_rhs_class (gimple_expr_code (stmt)))
+ {
+ case GIMPLE_TERNARY_RHS:
+ t = build3 (gimple_assign_rhs_code (stmt),
+ TREE_TYPE (gimple_assign_lhs (stmt)),
+ gimple_assign_rhs1 (stmt), gimple_assign_rhs2 (stmt),
+ gimple_assign_rhs3 (stmt));
+ break;
+ case GIMPLE_BINARY_RHS:
+ t = build2 (gimple_assign_rhs_code (stmt),
+ TREE_TYPE (gimple_assign_lhs (stmt)),
+ gimple_assign_rhs1 (stmt), gimple_assign_rhs2 (stmt));
+ break;
+ case GIMPLE_UNARY_RHS:
+ t = build1 (gimple_assign_rhs_code (stmt),
+ TREE_TYPE (gimple_assign_lhs (stmt)),
+ gimple_assign_rhs1 (stmt));
+ break;
+ case GIMPLE_SINGLE_RHS:
+ {
+ t = gimple_assign_rhs1 (stmt);
+ /* Avoid modifying this tree in place below. */
+ if ((gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)
+ && gimple_location (stmt) != EXPR_LOCATION (t))
+ || (gimple_block (stmt) && currently_expanding_to_rtl
+ && EXPR_P (t)))
+ t = copy_node (t);
+ break;
+ }
+ default:
+ gcc_unreachable ();
}
- else
- gcc_unreachable ();
if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t))
SET_EXPR_LOCATION (t, gimple_location (stmt));
/* This structure holds data relevant to one variable that will be
placed in a stack slot. */
-struct stack_var
+class stack_var
{
+public:
/* The variable. */
tree decl;
/* Initially, the size of the variable. Later, the size of the partition,
if this variable becomes its partition's representative. */
- HOST_WIDE_INT size;
+ poly_uint64 size;
/* The *byte* alignment required for this variable. Or, as with the
size, the alignment for this partition. */
#define EOC ((size_t)-1)
/* We have an array of such objects while deciding allocation. */
-static struct stack_var *stack_vars;
+static class stack_var *stack_vars;
static size_t stack_vars_alloc;
static size_t stack_vars_num;
static hash_map<tree, size_t> *decl_to_stack_part;
we can't do with expected alignment of the stack boundary. */
static unsigned int
-align_local_variable (tree decl)
+align_local_variable (tree decl, bool really_expand)
{
unsigned int align;
else
{
align = LOCAL_DECL_ALIGNMENT (decl);
- SET_DECL_ALIGN (decl, align);
+ /* Don't change DECL_ALIGN when called from estimated_stack_frame_size.
+ That is done before IPA and could bump alignment based on host
+ backend even for offloaded code which wants different
+ LOCAL_DECL_ALIGNMENT. */
+ if (really_expand)
+ SET_DECL_ALIGN (decl, align);
}
return align / BITS_PER_UNIT;
}
/* Allocate SIZE bytes at byte alignment ALIGN from the stack frame.
Return the frame offset. */
-static HOST_WIDE_INT
-alloc_stack_frame_space (HOST_WIDE_INT size, unsigned HOST_WIDE_INT align)
+static poly_int64
+alloc_stack_frame_space (poly_int64 size, unsigned HOST_WIDE_INT align)
{
- HOST_WIDE_INT offset, new_frame_offset;
+ poly_int64 offset, new_frame_offset;
if (FRAME_GROWS_DOWNWARD)
{
new_frame_offset
- = align_base (frame_offset - frame_phase - size,
- align, false) + frame_phase;
+ = aligned_lower_bound (frame_offset - frame_phase - size,
+ align) + frame_phase;
offset = new_frame_offset;
}
else
{
new_frame_offset
- = align_base (frame_offset - frame_phase, align, true) + frame_phase;
+ = aligned_upper_bound (frame_offset - frame_phase,
+ align) + frame_phase;
offset = new_frame_offset;
new_frame_offset += size;
}
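
/* Illustrative sketch, not part of this patch: for purely constant
   (non-poly_int) values, the new aligned_lower_bound/aligned_upper_bound
   helpers compute the same mask arithmetic that align_base used to
   perform.  ALIGN must be a power of two; the sketch_* names are
   hypothetical.  */

#include <stdint.h>

static inline int64_t
sketch_aligned_lower_bound (int64_t value, uint64_t align)
{
  return value & -(int64_t) align;	/* round down to multiple of ALIGN */
}

static inline int64_t
sketch_aligned_upper_bound (int64_t value, uint64_t align)
{
  return (value + (int64_t) align - 1) & -(int64_t) align;	/* round up */
}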
/* Accumulate DECL into STACK_VARS. */
static void
-add_stack_var (tree decl)
+add_stack_var (tree decl, bool really_expand)
{
- struct stack_var *v;
+ class stack_var *v;
if (stack_vars_num >= stack_vars_alloc)
{
else
stack_vars_alloc = 32;
stack_vars
- = XRESIZEVEC (struct stack_var, stack_vars, stack_vars_alloc);
+ = XRESIZEVEC (class stack_var, stack_vars, stack_vars_alloc);
}
if (!decl_to_stack_part)
decl_to_stack_part = new hash_map<tree, size_t>;
tree size = TREE_CODE (decl) == SSA_NAME
? TYPE_SIZE_UNIT (TREE_TYPE (decl))
: DECL_SIZE_UNIT (decl);
- v->size = tree_to_uhwi (size);
+ v->size = tree_to_poly_uint64 (size);
/* Ensure that all variables have size, so that &a != &b for any two
variables that are simultaneously live. */
- if (v->size == 0)
+ if (known_eq (v->size, 0U))
v->size = 1;
- v->alignb = align_local_variable (decl);
+ v->alignb = align_local_variable (decl, really_expand);
/* An alignment of zero can mightily confuse us later. */
gcc_assert (v->alignb != 0);
static void
add_stack_var_conflict (size_t x, size_t y)
{
- struct stack_var *a = &stack_vars[x];
- struct stack_var *b = &stack_vars[y];
+ class stack_var *a = &stack_vars[x];
+ class stack_var *b = &stack_vars[y];
+ if (x == y)
+ return;
if (!a->conflicts)
a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
if (!b->conflicts)
static bool
stack_var_conflict_p (size_t x, size_t y)
{
- struct stack_var *a = &stack_vars[x];
- struct stack_var *b = &stack_vars[y];
+ class stack_var *a = &stack_vars[x];
+ class stack_var *b = &stack_vars[y];
if (x == y)
return false;
/* Partitions containing an SSA name result from gimple registers
unsigned i;
EXECUTE_IF_SET_IN_BITMAP (work, 0, i, bi)
{
- struct stack_var *a = &stack_vars[i];
+ class stack_var *a = &stack_vars[i];
if (!a->conflicts)
a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
bitmap_ior_into (a->conflicts, work);
size_t ib = *(const size_t *)b;
unsigned int aligna = stack_vars[ia].alignb;
unsigned int alignb = stack_vars[ib].alignb;
- HOST_WIDE_INT sizea = stack_vars[ia].size;
- HOST_WIDE_INT sizeb = stack_vars[ib].size;
+ poly_int64 sizea = stack_vars[ia].size;
+ poly_int64 sizeb = stack_vars[ib].size;
tree decla = stack_vars[ia].decl;
tree declb = stack_vars[ib].decl;
bool largea, largeb;
return (int)largeb - (int)largea;
/* Secondary compare on size, decreasing. */
- if (sizea > sizeb)
- return -1;
- if (sizea < sizeb)
- return 1;
+ int diff = compare_sizes_for_sort (sizeb, sizea);
+ if (diff != 0)
+ return diff;
/* Tertiary compare on true alignment, decreasing. */
if (aligna < alignb)
static void
union_stack_vars (size_t a, size_t b)
{
- struct stack_var *vb = &stack_vars[b];
+ class stack_var *vb = &stack_vars[b];
bitmap_iterator bi;
unsigned u;
stack_vars[b].representative = a;
stack_vars[a].next = b;
+ /* Make sure A is big enough to hold B. */
+ stack_vars[a].size = upper_bound (stack_vars[a].size, stack_vars[b].size);
+
/* Update the required alignment of partition A to account for B. */
if (stack_vars[a].alignb < stack_vars[b].alignb)
stack_vars[a].alignb = stack_vars[b].alignb;
{
size_t i = stack_vars_sorted[si];
unsigned int ialign = stack_vars[i].alignb;
- HOST_WIDE_INT isize = stack_vars[i].size;
+ poly_int64 isize = stack_vars[i].size;
/* Ignore objects that aren't partition representatives. If we
see a var that is not a partition representative, it must
{
size_t j = stack_vars_sorted[sj];
unsigned int jalign = stack_vars[j].alignb;
- HOST_WIDE_INT jsize = stack_vars[j].size;
+ poly_int64 jsize = stack_vars[j].size;
/* Ignore objects that aren't partition representatives. */
if (stack_vars[j].representative != j)
sizes, as the shorter vars wouldn't be adequately protected.
Don't do that for "large" (unsupported) alignment objects,
those aren't protected anyway. */
- if ((asan_sanitize_stack_p ())
- && isize != jsize
+ if (asan_sanitize_stack_p ()
+ && maybe_ne (isize, jsize)
&& ialign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
break;
if (stack_vars[i].representative != i)
continue;
- fprintf (dump_file, "Partition %lu: size " HOST_WIDE_INT_PRINT_DEC
- " align %u\n", (unsigned long) i, stack_vars[i].size,
- stack_vars[i].alignb);
+ fprintf (dump_file, "Partition %lu: size ", (unsigned long) i);
+ print_dec (stack_vars[i].size, dump_file);
+ fprintf (dump_file, " align %u\n", stack_vars[i].alignb);
for (j = i; j != EOC; j = stack_vars[j].next)
{
static void
expand_one_stack_var_at (tree decl, rtx base, unsigned base_align,
- HOST_WIDE_INT offset)
+ poly_int64 offset)
{
unsigned align;
rtx x;
/* If this fails, we've overflowed the stack frame. Error nicely? */
- gcc_assert (offset == trunc_int_for_mode (offset, Pmode));
+ gcc_assert (known_eq (offset, trunc_int_for_mode (offset, Pmode)));
x = plus_constant (Pmode, base, offset);
x = gen_rtx_MEM (TREE_CODE (decl) == SSA_NAME
important, we'll simply use the alignment that is already set. */
if (base == virtual_stack_vars_rtx)
offset -= frame_phase;
- align = least_bit_hwi (offset);
+ align = known_alignment (offset);
align *= BITS_PER_UNIT;
if (align == 0 || align > base_align)
align = base_align;
set_rtl (decl, x);
}
-struct stack_vars_data
+class stack_vars_data
{
+public:
/* Vector of offset pairs, always end of some padding followed
by start of the padding that needs Address Sanitizer protection.
The vector is reversed; highest offset pairs come first. */
with that location. */
static void
-expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data)
+expand_stack_vars (bool (*pred) (size_t), class stack_vars_data *data)
{
size_t si, i, j, n = stack_vars_num;
- HOST_WIDE_INT large_size = 0, large_alloc = 0;
+ poly_uint64 large_size = 0, large_alloc = 0;
rtx large_base = NULL;
unsigned large_align = 0;
bool large_allocation_done = false;
: DECL_RTL (decl) != pc_rtx)
continue;
- large_size += alignb - 1;
- large_size &= -(HOST_WIDE_INT)alignb;
+ large_size = aligned_upper_bound (large_size, alignb);
large_size += stack_vars[i].size;
}
}
{
rtx base;
unsigned base_align, alignb;
- HOST_WIDE_INT offset;
+ poly_int64 offset;
i = stack_vars_sorted[si];
if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
{
base = virtual_stack_vars_rtx;
- if ((asan_sanitize_stack_p ())
- && pred)
+ /* ASAN description strings don't yet have a syntax for expressing
+ polynomial offsets. */
+ HOST_WIDE_INT prev_offset;
+ if (asan_sanitize_stack_p ()
+ && pred
+ && frame_offset.is_constant (&prev_offset)
+ && stack_vars[i].size.is_constant ())
{
- HOST_WIDE_INT prev_offset
- = align_base (frame_offset,
- MAX (alignb, ASAN_RED_ZONE_SIZE),
- !FRAME_GROWS_DOWNWARD);
+ if (data->asan_vec.is_empty ())
+ {
+ alloc_stack_frame_space (0, ASAN_RED_ZONE_SIZE);
+ prev_offset = frame_offset.to_constant ();
+ }
+ prev_offset = align_base (prev_offset,
+ ASAN_MIN_RED_ZONE_SIZE,
+ !FRAME_GROWS_DOWNWARD);
tree repr_decl = NULL_TREE;
- offset
- = alloc_stack_frame_space (stack_vars[i].size
- + ASAN_RED_ZONE_SIZE,
- MAX (alignb, ASAN_RED_ZONE_SIZE));
+ unsigned HOST_WIDE_INT size
+ = asan_var_and_redzone_size (stack_vars[i].size.to_constant ());
+ if (data->asan_vec.is_empty ())
+ size = MAX (size, ASAN_RED_ZONE_SIZE);
+
+ unsigned HOST_WIDE_INT alignment = MAX (alignb,
+ ASAN_MIN_RED_ZONE_SIZE);
+ offset = alloc_stack_frame_space (size, alignment);
data->asan_vec.safe_push (prev_offset);
- data->asan_vec.safe_push (offset + stack_vars[i].size);
+ /* Allocating a constant amount of space from a constant
+ starting offset must give a constant result. */
+ data->asan_vec.safe_push ((offset + stack_vars[i].size)
+ .to_constant ());
/* Find best representative of the partition.
Prefer those with DECL_NAME, even better
satisfying asan_protect_stack_decl predicate. */
if (repr_decl == NULL_TREE)
repr_decl = stack_vars[i].decl;
data->asan_decl_vec.safe_push (repr_decl);
+
+ /* Make sure a representative is unpoisoned if another
+ variable in the partition is handled by
+ use-after-scope sanitization. */
+ if (asan_handled_variables != NULL
+ && !asan_handled_variables->contains (repr_decl))
+ {
+ for (j = i; j != EOC; j = stack_vars[j].next)
+ if (asan_handled_variables->contains (stack_vars[j].decl))
+ break;
+ if (j != EOC)
+ asan_handled_variables->add (repr_decl);
+ }
+
data->asan_alignb = MAX (data->asan_alignb, alignb);
if (data->asan_base == NULL)
data->asan_base = gen_reg_rtx (Pmode);
/* If there were any variables requiring "large" alignment, allocate
space. */
- if (large_size > 0 && ! large_allocation_done)
+ if (maybe_ne (large_size, 0U) && ! large_allocation_done)
{
- HOST_WIDE_INT loffset;
+ poly_int64 loffset;
rtx large_allocsize;
- large_allocsize = GEN_INT (large_size);
+ large_allocsize = gen_int_mode (large_size, Pmode);
get_dynamic_stack_size (&large_allocsize, 0, large_align, NULL);
loffset = alloc_stack_frame_space
- (INTVAL (large_allocsize),
+ (rtx_to_poly_int64 (large_allocsize),
PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
large_base = get_dynamic_stack_base (loffset, large_align);
large_allocation_done = true;
}
gcc_assert (large_base != NULL);
- large_alloc += alignb - 1;
- large_alloc &= -(HOST_WIDE_INT)alignb;
+ large_alloc = aligned_upper_bound (large_alloc, alignb);
offset = large_alloc;
large_alloc += stack_vars[i].size;
}
}
- gcc_assert (large_alloc == large_size);
+ gcc_assert (known_eq (large_alloc, large_size));
}
/* Take into account all sizes of partitions and reset DECL_RTLs. */
-static HOST_WIDE_INT
+static poly_uint64
account_stack_vars (void)
{
size_t si, j, i, n = stack_vars_num;
- HOST_WIDE_INT size = 0;
+ poly_uint64 size = 0;
for (si = 0; si < n; ++si)
{
allocate it, which means that the in-frame portion is just a
pointer. ??? We've got a pseudo for sure here; do we
actually dynamically allocate its spilling area if needed?
- ??? Isn't it a problem when POINTER_SIZE also exceeds
- MAX_SUPPORTED_STACK_ALIGNMENT, as on cris and lm32? */
+ ??? Isn't it a problem when Pmode alignment also exceeds
+ MAX_SUPPORTED_STACK_ALIGNMENT, as can happen on cris and lm32? */
if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
- align = POINTER_SIZE;
+ align = GET_MODE_ALIGNMENT (Pmode);
record_alignment_for_reg_var (align);
}
static void
expand_one_stack_var_1 (tree var)
{
- HOST_WIDE_INT size, offset;
+ poly_uint64 size;
+ poly_int64 offset;
unsigned byte_align;
if (TREE_CODE (var) == SSA_NAME)
{
tree type = TREE_TYPE (var);
- size = tree_to_uhwi (TYPE_SIZE_UNIT (type));
+ size = tree_to_poly_uint64 (TYPE_SIZE_UNIT (type));
byte_align = TYPE_ALIGN_UNIT (type);
}
else
{
- size = tree_to_uhwi (DECL_SIZE_UNIT (var));
- byte_align = align_local_variable (var);
+ size = tree_to_poly_uint64 (DECL_SIZE_UNIT (var));
+ byte_align = align_local_variable (var, true);
}
/* We handle highly aligned variables in expand_stack_vars. */
/* If the variable alignment is very large we'll dynamically allocate
it, which means that the in-frame portion is just a pointer. */
if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
- align = POINTER_SIZE;
+ align = GET_MODE_ALIGNMENT (Pmode);
record_alignment_for_reg_var (align);
if (!use_register_for_decl (var))
{
if (defer_stack_allocation (var, true))
- add_stack_var (var);
+ add_stack_var (var, true);
else
expand_one_stack_var_1 (var);
return;
tree size_unit = TREE_CODE (var) == SSA_NAME
? TYPE_SIZE_UNIT (TREE_TYPE (var))
: DECL_SIZE_UNIT (var);
+ poly_uint64 size;
/* Whether the variable is small enough for immediate allocation not to be
a problem with regard to the frame size. */
bool smallish
- = ((HOST_WIDE_INT) tree_to_uhwi (size_unit)
- < PARAM_VALUE (PARAM_MIN_SIZE_FOR_STACK_SHARING));
+ = (poly_int_tree_p (size_unit, &size)
+ && (estimated_poly_value (size)
+ < param_min_size_for_stack_sharing));
/* If stack protection is enabled, *all* stack variables must be deferred,
so that we can re-order the strings to the top of the frame.
Return the stack usage this variable is supposed to take.
*/
-static HOST_WIDE_INT
+static poly_uint64
expand_one_var (tree var, bool toplevel, bool really_expand)
{
unsigned int align = BITS_PER_UNIT;
/* If the variable alignment is very large we'll dynamically allocate
it, which means that the in-frame portion is just a pointer. */
if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
- align = POINTER_SIZE;
+ align = GET_MODE_ALIGNMENT (Pmode);
}
record_alignment_for_reg_var (align);
+ poly_uint64 size;
if (TREE_CODE (origvar) == SSA_NAME)
{
gcc_assert (!VAR_P (var)
if (really_expand)
expand_one_register_var (origvar);
}
- else if (! valid_constant_size_p (DECL_SIZE_UNIT (var)))
+ else if (!poly_int_tree_p (DECL_SIZE_UNIT (var), &size)
+ || !valid_constant_size_p (DECL_SIZE_UNIT (var)))
{
/* Reject variables which cover more than half of the address-space. */
if (really_expand)
{
- error ("size of variable %q+D is too large", var);
+ if (DECL_NONLOCAL_FRAME (var))
+ error_at (DECL_SOURCE_LOCATION (current_function_decl),
+ "total size of local objects is too large");
+ else
+ error_at (DECL_SOURCE_LOCATION (var),
+ "size of variable %q+D is too large", var);
expand_one_error_var (var);
}
}
else if (defer_stack_allocation (var, toplevel))
- add_stack_var (origvar);
+ add_stack_var (origvar, really_expand);
else
{
if (really_expand)
{
if (lookup_attribute ("naked",
DECL_ATTRIBUTES (current_function_decl)))
- error ("cannot allocate stack for variable %q+D, naked function.",
+ error ("cannot allocate stack for variable %q+D, naked function",
var);
expand_one_stack_var (origvar);
}
-
-
- return tree_to_uhwi (DECL_SIZE_UNIT (var));
+ return size;
}
return 0;
}
|| t == signed_char_type_node
|| t == unsigned_char_type_node)
{
- unsigned HOST_WIDE_INT max = PARAM_VALUE (PARAM_SSP_BUFFER_SIZE);
+ unsigned HOST_WIDE_INT max = param_ssp_buffer_size;
unsigned HOST_WIDE_INT len;
if (!TYPE_SIZE_UNIT (type)
}
/* Ensure that variables in different stack protection phases conflict
- so that they are not merged and share the same stack slot. */
+ so that they are not merged and share the same stack slot.
+ Return true if there are any address taken variables. */
-static void
+static bool
add_stack_protection_conflicts (void)
{
size_t i, j, n = stack_vars_num;
unsigned char *phase;
+ bool ret = false;
phase = XNEWVEC (unsigned char, n);
for (i = 0; i < n; ++i)
- phase[i] = stack_protect_decl_phase (stack_vars[i].decl);
+ {
+ phase[i] = stack_protect_decl_phase (stack_vars[i].decl);
+ if (TREE_ADDRESSABLE (stack_vars[i].decl))
+ ret = true;
+ }
for (i = 0; i < n; ++i)
{
}
XDELETEVEC (phase);
+ return ret;
}
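
/* Illustrative sketch, not part of this patch: under
   -fstack-protector-strong, the address-taken local below is the kind of
   variable add_stack_protection_conflicts now detects, letting the caller
   set gen_stack_protect_signal without the old separate decl walk.
   consume and sketch_protected are hypothetical names.  */

extern void consume (int *);

int
sketch_protected (void)
{
  int x = 0;
  consume (&x);		/* X is TREE_ADDRESSABLE, so a guard is emitted.  */
  return x;
}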
/* Create a decl for the guard at the top of the stack frame. */
HOST_WIDE_INT
estimated_stack_frame_size (struct cgraph_node *node)
{
- HOST_WIDE_INT size = 0;
+ poly_int64 size = 0;
size_t i;
tree var;
struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
fini_vars_expansion ();
pop_cfun ();
- return size;
-}
-
-/* Helper routine to check if a record or union contains an array field. */
-
-static int
-record_or_union_type_has_array_p (const_tree tree_type)
-{
- tree fields = TYPE_FIELDS (tree_type);
- tree f;
-
- for (f = fields; f; f = DECL_CHAIN (f))
- if (TREE_CODE (f) == FIELD_DECL)
- {
- tree field_type = TREE_TYPE (f);
- if (RECORD_OR_UNION_TYPE_P (field_type)
- && record_or_union_type_has_array_p (field_type))
- return 1;
- if (TREE_CODE (field_type) == ARRAY_TYPE)
- return 1;
- }
- return 0;
-}
-
-/* Check if the current function has local referenced variables that
- have their addresses taken, contain an array, or are arrays. */
-
-static bool
-stack_protect_decl_p ()
-{
- unsigned i;
- tree var;
-
- FOR_EACH_LOCAL_DECL (cfun, i, var)
- if (!is_global_var (var))
- {
- tree var_type = TREE_TYPE (var);
- if (VAR_P (var)
- && (TREE_CODE (var_type) == ARRAY_TYPE
- || TREE_ADDRESSABLE (var)
- || (RECORD_OR_UNION_TYPE_P (var_type)
- && record_or_union_type_has_array_p (var_type))))
- return true;
- }
- return false;
+ return estimated_poly_value (size);
}
/* Check if the current function has calls that use a return slot. */
}
if (flag_stack_protect == SPCT_FLAG_STRONG)
- gen_stack_protect_signal
- = stack_protect_decl_p () || stack_protect_return_slot_p ();
+ gen_stack_protect_signal = stack_protect_return_slot_p ();
/* At this point all variables on the local_decls with TREE_USED
set are not associated with any block scope. Lay them out. */
if (stack_vars_num > 0)
{
+ bool has_addressable_vars = false;
+
add_scope_conflicts ();
/* If stack protection is enabled, we don't share space between
|| (flag_stack_protect == SPCT_FLAG_EXPLICIT
&& lookup_attribute ("stack_protect",
DECL_ATTRIBUTES (current_function_decl)))))
- add_stack_protection_conflicts ();
+ has_addressable_vars = add_stack_protection_conflicts ();
+
+ if (flag_stack_protect == SPCT_FLAG_STRONG && has_addressable_vars)
+ gen_stack_protect_signal = true;
/* Now that we have collected all stack variables, and have computed a
minimal interference graph, attempt to save some stack space. */
case SPCT_FLAG_STRONG:
if (gen_stack_protect_signal
- || cfun->calls_alloca || has_protected_decls
+ || cfun->calls_alloca
+ || has_protected_decls
|| lookup_attribute ("stack_protect",
DECL_ATTRIBUTES (current_function_decl)))
create_stack_guard ();
break;
case SPCT_FLAG_DEFAULT:
- if (cfun->calls_alloca || has_protected_decls
+ if (cfun->calls_alloca
+ || has_protected_decls
|| lookup_attribute ("stack_protect",
DECL_ATTRIBUTES (current_function_decl)))
create_stack_guard ();
DECL_ATTRIBUTES (current_function_decl)))
create_stack_guard ();
break;
+
default:
- ;
+ break;
}
/* Assign rtl to each variable based on these partitions. */
if (stack_vars_num > 0)
{
- struct stack_vars_data data;
+ class stack_vars_data data;
data.asan_base = NULL_RTX;
data.asan_alignb = 0;
in addition to phase 1 and 2. */
expand_stack_vars (asan_decl_phase_3, &data);
- if (!data.asan_vec.is_empty ())
+ /* ASAN description strings don't yet have a syntax for expressing
+ polynomial offsets. */
+ HOST_WIDE_INT prev_offset;
+ if (!data.asan_vec.is_empty ()
+ && frame_offset.is_constant (&prev_offset))
{
- HOST_WIDE_INT prev_offset = frame_offset;
HOST_WIDE_INT offset, sz, redzonesz;
redzonesz = ASAN_RED_ZONE_SIZE;
sz = data.asan_vec[0] - prev_offset;
&& sz + ASAN_RED_ZONE_SIZE >= (int) data.asan_alignb)
redzonesz = ((sz + ASAN_RED_ZONE_SIZE + data.asan_alignb - 1)
& ~(data.asan_alignb - HOST_WIDE_INT_1)) - sz;
- offset
- = alloc_stack_frame_space (redzonesz, ASAN_RED_ZONE_SIZE);
+ /* Allocating a constant amount of space from a constant
+ starting offset must give a constant result. */
+ offset = (alloc_stack_frame_space (redzonesz, ASAN_RED_ZONE_SIZE)
+ .to_constant ());
data.asan_vec.safe_push (prev_offset);
data.asan_vec.safe_push (offset);
/* Leave space for alignment if STRICT_ALIGNMENT. */
if (STACK_ALIGNMENT_NEEDED)
{
HOST_WIDE_INT align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
- if (!FRAME_GROWS_DOWNWARD)
- frame_offset += align - 1;
- frame_offset &= -align;
+ if (FRAME_GROWS_DOWNWARD)
+ frame_offset = aligned_lower_bound (frame_offset, align);
+ else
+ frame_offset = aligned_upper_bound (frame_offset, align);
}
return var_end_seq;
}
}
+ /* Optimize (x % C1) == C2 or (x % C1) != C2 if it is beneficial
+ into (x - C2) * C3 < C4. */
+ if ((code == EQ_EXPR || code == NE_EXPR)
+ && TREE_CODE (op0) == SSA_NAME
+ && TREE_CODE (op1) == INTEGER_CST)
+ code = maybe_optimize_mod_cmp (code, &op0, &op1);
+
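
/* Illustrative sketch, not part of this patch: the rewrite performed by
   maybe_optimize_mod_cmp is the classic multiplicative-inverse trick.
   For the odd divisor 3 on uint32_t, 0xAAAAAAAB is the inverse of 3
   modulo 2^32 and 0x55555555 is UINT32_MAX / 3, so with C2 == 0 the two
   tests below agree for every x.  sketch_* names are hypothetical.  */

#include <assert.h>
#include <stdint.h>

static int
sketch_divisible_by_3 (uint32_t x)
{
  uint32_t t = (uint32_t) (x * UINT32_C (0xAAAAAAAB));
  return t <= UINT32_C (0x55555555);	/* equivalent to x % 3 == 0 */
}

static void
sketch_selftest (void)
{
  for (uint32_t x = 0; x < 1000; x++)
    assert (sketch_divisible_by_3 (x) == (x % 3 == 0));
}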
last2 = last = get_last_insn ();
extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
exp = build_vl_exp (CALL_EXPR, gimple_call_num_args (stmt) + 3);
CALL_EXPR_FN (exp) = gimple_call_fn (stmt);
- builtin_p = decl && DECL_BUILT_IN (decl);
+ builtin_p = decl && fndecl_built_in_p (decl);
/* If this is not a builtin function, the function type through which the
call is made may be different from the type of the function. */
if (gimple_call_nothrow_p (stmt))
TREE_NOTHROW (exp) = 1;
+ if (gimple_no_warning_p (stmt))
+ TREE_NO_WARNING (exp) = 1;
+
CALL_EXPR_TAILCALL (exp) = gimple_call_tail_p (stmt);
CALL_EXPR_MUST_TAIL_CALL (exp) = gimple_call_must_tail_p (stmt);
CALL_EXPR_RETURN_SLOT_OPT (exp) = gimple_call_return_slot_opt_p (stmt);
if (decl
- && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
+ && fndecl_built_in_p (decl, BUILT_IN_NORMAL)
&& ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (decl)))
CALL_ALLOCA_FOR_VAR_P (exp) = gimple_call_alloca_for_var_p (stmt);
else
CALL_EXPR_VA_ARG_PACK (exp) = gimple_call_va_arg_pack_p (stmt);
CALL_EXPR_BY_DESCRIPTOR (exp) = gimple_call_by_descriptor_p (stmt);
SET_EXPR_LOCATION (exp, gimple_location (stmt));
- CALL_WITH_BOUNDS_P (exp) = gimple_call_with_bounds_p (stmt);
/* Ensure RTL is created for debug args. */
if (decl && DECL_HAS_DEBUG_ARGS_P (decl))
if (overlap)
{
- error ("asm-specifier for variable %qE conflicts with asm clobber list",
+ error ("%<asm%> specifier for variable %qE conflicts with "
+ "%<asm%> clobber list",
DECL_NAME (overlap));
/* Reset registerness to stop multiple errors emitted for a single
return false;
}
+/* Check that the given REGNO spanning NREGS is a valid
+ asm clobber operand. Some HW registers cannot be
+ saved/restored, hence they should not be clobbered by
+ asm statements. */
+static bool
+asm_clobber_reg_is_valid (int regno, int nregs, const char *regname)
+{
+ bool is_valid = true;
+ HARD_REG_SET regset;
+
+ CLEAR_HARD_REG_SET (regset);
+
+ add_range_to_hard_reg_set (®set, regno, nregs);
+
+ /* Clobbering the PIC register is an error. */
+ if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+ && overlaps_hard_reg_set_p (regset, Pmode, PIC_OFFSET_TABLE_REGNUM))
+ {
+ /* ??? Diagnose during gimplification? */
+ error ("PIC register clobbered by %qs in %<asm%>", regname);
+ is_valid = false;
+ }
+ else if (!in_hard_reg_set_p
+ (accessible_reg_set, reg_raw_mode[regno], regno))
+ {
+ /* ??? Diagnose during gimplification? */
+ error ("the register %qs cannot be clobbered in %<asm%>"
+ " for the current target", regname);
+ is_valid = false;
+ }
+
+ /* Clobbering the stack pointer register is deprecated. GCC expects
+ the value of the stack pointer after an asm statement to be the same
+ as it was before, so no asm can validly clobber the stack pointer in
+ the usual sense. Adding the stack pointer to the clobber list has
+ traditionally had some undocumented and somewhat obscure side-effects. */
+ if (overlaps_hard_reg_set_p (regset, Pmode, STACK_POINTER_REGNUM)
+ && warning (OPT_Wdeprecated, "listing the stack pointer register"
+ " %qs in a clobber list is deprecated", regname))
+ inform (input_location, "the value of the stack pointer after an %<asm%>"
+ " statement must be the same as it was before the statement");
+
+ return is_valid;
+}
+
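/* Illustrative sketch, not part of this patch: a user-level asm statement
   that the new validation diagnoses.  Register names are target-dependent;
   x86-64 is assumed here, and sketch_sp_clobber is a hypothetical name.  */

void
sketch_sp_clobber (void)
{
  /* Warns with -Wdeprecated: the stack pointer must have the same value
     after the asm as before, so listing it as clobbered is suspect.  */
  asm volatile ("" ::: "rsp");
}
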
/* Generate RTL for an asm statement with arguments.
STRING is the instruction template.
OUTPUTS is a list of output arguments (lvalues); INPUTS a list of inputs.
else
for (int reg = j; reg < j + nregs; reg++)
{
- /* Clobbering the PIC register is an error. */
- if (reg == (int) PIC_OFFSET_TABLE_REGNUM)
- {
- /* ??? Diagnose during gimplification? */
- error ("PIC register clobbered by %qs in %<asm%>",
- regname);
- return;
- }
+ if (!asm_clobber_reg_is_valid (reg, nregs, regname))
+ return;
SET_HARD_REG_BIT (clobbered_regs, reg);
rtx x = gen_rtx_REG (reg_raw_mode[reg], reg);
}
}
}
- unsigned nclobbers = clobber_rvec.length();
/* First pass over inputs and outputs checks validity and sets
mark_addressable if needed. */
&allows_mem, &allows_reg, &is_inout))
return;
+ /* If the output is a hard register, verify it doesn't conflict with
+ any other operand's possible hard register use. */
+ if (DECL_P (val)
+ && REG_P (DECL_RTL (val))
+ && HARD_REGISTER_P (DECL_RTL (val)))
+ {
+ unsigned j, output_hregno = REGNO (DECL_RTL (val));
+ bool early_clobber_p = strchr (constraints[i], '&') != NULL;
+ unsigned long match;
+
+ /* Verify the other outputs do not use the same hard register. */
+ for (j = i + 1; j < noutputs; ++j)
+ if (DECL_P (output_tvec[j])
+ && REG_P (DECL_RTL (output_tvec[j]))
+ && HARD_REGISTER_P (DECL_RTL (output_tvec[j]))
+ && output_hregno == REGNO (DECL_RTL (output_tvec[j])))
+ error ("invalid hard register usage between output operands");
+
+ /* Verify matching constraint operands use the same hard register
+ and that the non-matching constraint operands do not use the same
+ hard register if the output is an early clobber operand. */
+ for (j = 0; j < ninputs; ++j)
+ if (DECL_P (input_tvec[j])
+ && REG_P (DECL_RTL (input_tvec[j]))
+ && HARD_REGISTER_P (DECL_RTL (input_tvec[j])))
+ {
+ unsigned input_hregno = REGNO (DECL_RTL (input_tvec[j]));
+ switch (*constraints[j + noutputs])
+ {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ match = strtoul (constraints[j + noutputs], NULL, 10);
+ break;
+ default:
+ match = ULONG_MAX;
+ break;
+ }
+ if (i == match
+ && output_hregno != input_hregno)
+ error ("invalid hard register usage between output operand "
+ "and matching constraint operand");
+ else if (early_clobber_p
+ && i != match
+ && output_hregno == input_hregno)
+ error ("invalid hard register usage between earlyclobber "
+ "operand and input operand");
+ }
+ }
+
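/* Illustrative sketch, not part of this patch: the new checks reject asm
   operands pinned to conflicting hard registers through explicit register
   variables.  The register name is target-dependent (x86 assumed), and
   sketch_hard_reg_conflict is a hypothetical name.  */

int
sketch_hard_reg_conflict (void)
{
  register int out1 asm ("eax");
  register int out2 asm ("eax");
  /* Both outputs use the same hard register, which is now reported as
     "invalid hard register usage between output operands".  */
  asm ("" : "=r" (out1), "=r" (out2));
  return out1 + out2;
}
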
if (! allows_reg
&& (allows_mem
|| is_inout
generating_concat_p = 0;
- if ((TREE_CODE (val) == INDIRECT_REF
- && allows_mem)
+ if ((TREE_CODE (val) == INDIRECT_REF && allows_mem)
|| (DECL_P (val)
&& (allows_mem || REG_P (DECL_RTL (val)))
&& ! (REG_P (DECL_RTL (val))
&& GET_MODE (DECL_RTL (val)) != TYPE_MODE (type)))
|| ! allows_reg
- || is_inout)
+ || is_inout
+ || TREE_ADDRESSABLE (type))
{
op = expand_expr (val, NULL_RTX, VOIDmode,
!allows_reg ? EXPAND_MEMORY : EXPAND_WRITE);
if (! allows_reg && !MEM_P (op))
error ("output number %d not directly addressable", i);
- if ((! allows_mem && MEM_P (op))
+ if ((! allows_mem && MEM_P (op) && GET_MODE (op) != BLKmode)
|| GET_CODE (op) == CONCAT)
{
rtx old_op = op;
if (allows_reg && TYPE_MODE (type) != BLKmode)
op = force_reg (TYPE_MODE (type), op);
else if (!allows_mem)
- warning (0, "asm operand %d probably doesn%'t match constraints",
+ warning (0, "%<asm%> operand %d probably does not match "
+ "constraints",
i + noutputs);
else if (MEM_P (op))
{
gcc_assert (constraints.length() == noutputs + ninputs);
/* But it certainly can adjust the clobbers. */
- nclobbers = clobber_rvec.length();
+ unsigned nclobbers = clobber_rvec.length ();
/* Third pass checks for easy conflicts. */
/* ??? Why are we doing this on trees instead of rtx. */
may insert further instructions into the same basic block after
asm goto and if we don't do this, insertion of instructions on
the fallthru edge might misbehave. See PR58670. */
- if (fallthru_bb && label_to_block_fn (cfun, label) == fallthru_bb)
+ if (fallthru_bb && label_to_block (cfun, label) == fallthru_bb)
{
if (fallthru_label == NULL_RTX)
fallthru_label = gen_label_rtx ();
tripping over the under-construction body. */
for (unsigned k = 0; k < noutputs; ++k)
if (reg_overlap_mentioned_p (clobbered_reg, output_rvec[k]))
- internal_error ("asm clobber conflict with output operand");
+ internal_error ("%<asm%> clobber conflict with "
+ "output operand");
for (unsigned k = 0; k < ninputs - ninout; ++k)
if (reg_overlap_mentioned_p (clobbered_reg, input_rvec[k]))
- internal_error ("asm clobber conflict with input operand");
+ internal_error ("%<asm%> clobber conflict with "
+ "input operand");
}
XVECEXP (body, 0, i++) = gen_rtx_CLOBBER (VOIDmode, clobbered_reg);
from the current function. */
static void
-expand_return (tree retval, tree bounds)
+expand_return (tree retval)
{
rtx result_rtl;
rtx val = 0;
tree retval_rhs;
- rtx bounds_rtl;
/* If function wants no value, give it none. */
if (TREE_CODE (TREE_TYPE (TREE_TYPE (current_function_decl))) == VOID_TYPE)
result_rtl = DECL_RTL (DECL_RESULT (current_function_decl));
- /* Put returned bounds to the right place. */
- bounds_rtl = DECL_BOUNDS_RTL (DECL_RESULT (current_function_decl));
- if (bounds_rtl)
- {
- rtx addr = NULL;
- rtx bnd = NULL;
-
- if (bounds && bounds != error_mark_node)
- {
- bnd = expand_normal (bounds);
- targetm.calls.store_returned_bounds (bounds_rtl, bnd);
- }
- else if (REG_P (bounds_rtl))
- {
- if (bounds)
- bnd = chkp_expand_zero_bounds ();
- else
- {
- addr = expand_normal (build_fold_addr_expr (retval_rhs));
- addr = gen_rtx_MEM (Pmode, addr);
- bnd = targetm.calls.load_bounds_for_arg (addr, NULL, NULL);
- }
-
- targetm.calls.store_returned_bounds (bounds_rtl, bnd);
- }
- else
- {
- int n;
-
- gcc_assert (GET_CODE (bounds_rtl) == PARALLEL);
-
- if (bounds)
- bnd = chkp_expand_zero_bounds ();
- else
- {
- addr = expand_normal (build_fold_addr_expr (retval_rhs));
- addr = gen_rtx_MEM (Pmode, addr);
- }
-
- for (n = 0; n < XVECLEN (bounds_rtl, 0); n++)
- {
- rtx slot = XEXP (XVECEXP (bounds_rtl, 0, n), 0);
- if (!bounds)
- {
- rtx offs = XEXP (XVECEXP (bounds_rtl, 0, n), 1);
- rtx from = adjust_address (addr, Pmode, INTVAL (offs));
- bnd = targetm.calls.load_bounds_for_arg (from, NULL, NULL);
- }
- targetm.calls.store_returned_bounds (slot, bnd);
- }
- }
- }
- else if (chkp_function_instrumented_p (current_function_decl)
- && !BOUNDED_P (retval_rhs)
- && chkp_type_has_pointer (TREE_TYPE (retval_rhs))
- && TREE_CODE (retval_rhs) != RESULT_DECL)
- {
- rtx addr = expand_normal (build_fold_addr_expr (retval_rhs));
- addr = gen_rtx_MEM (Pmode, addr);
-
- gcc_assert (MEM_P (result_rtl));
-
- chkp_copy_bounds_for_stack_parm (result_rtl, addr, TREE_TYPE (retval_rhs));
- }
-
/* If we are returning the RESULT_DECL, then the value has already
been stored into it, so we don't have to do anything special. */
if (TREE_CODE (retval_rhs) == RESULT_DECL)
}
}
+/* Expand a clobber of LHS. If LHS is stored in a multi-part
+ register, tell the rtl optimizers that its value is no longer
+ needed. */
+
+static void
+expand_clobber (tree lhs)
+{
+ if (DECL_P (lhs))
+ {
+ rtx decl_rtl = DECL_RTL_IF_SET (lhs);
+ if (decl_rtl && REG_P (decl_rtl))
+ {
+ machine_mode decl_mode = GET_MODE (decl_rtl);
+ if (maybe_gt (GET_MODE_SIZE (decl_mode),
+ REGMODE_NATURAL_SIZE (decl_mode)))
+ emit_clobber (decl_rtl);
+ }
+ }
+}
+
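/* Illustrative sketch, not part of this patch: a scope exit such as the
   one below produces a gimple clobber ("v ={v} {CLOBBER};").  When V is
   kept in a multi-register pseudo (e.g. __int128 on a 64-bit target),
   expand_clobber now emits an rtl clobber so the allocator knows both
   halves are dead.  sketch_clobbered is a hypothetical name.  */

unsigned long long
sketch_clobbered (int cond)
{
  unsigned long long r = 0;
  if (cond)
    {
      __int128 v = (__int128) cond * cond;
      r = (unsigned long long) v;
    }				/* V goes out of scope here.  */
  return r;
}
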
/* A subroutine of expand_gimple_stmt, expanding one gimple statement
STMT that doesn't require special handling for outgoing edges. That
is no tailcalls and no GIMPLE_COND. */
case GIMPLE_RETURN:
{
- tree bnd = gimple_return_retbnd (as_a <greturn *> (stmt));
op0 = gimple_return_retval (as_a <greturn *> (stmt));
+ /* If a return doesn't have a location, it very likely represents
+ multiple user returns, so we cannot let it inherit the location
+ of the last statement of the previous basic block in RTL. */
+ if (!gimple_has_location (stmt))
+ set_curr_insn_location (cfun->function_end_locus);
+
if (op0 && op0 != error_mark_node)
{
tree result = DECL_RESULT (current_function_decl);
- /* Mark we have return statement with missing bounds. */
- if (!bnd
- && chkp_function_instrumented_p (cfun->decl)
- && !DECL_P (op0))
- bnd = error_mark_node;
-
/* If we are not returning the current function's RESULT_DECL,
build an assignment to it. */
if (op0 != result)
if (!op0)
expand_null_return ();
else
- expand_return (op0, bnd);
+ expand_return (op0);
}
break;
if (TREE_CLOBBER_P (rhs))
/* This is a clobber to mark the going out of scope for
this LHS. */
- ;
+ expand_clobber (lhs);
else
expand_assignment (lhs, rhs,
gimple_assign_nontemporal_move_p (
case SAD_EXPR:
case WIDEN_MULT_PLUS_EXPR:
case WIDEN_MULT_MINUS_EXPR:
- case FMA_EXPR:
goto ternary;
case TRUTH_ANDIF_EXPR:
binary:
case tcc_binary:
+ if (mode == BLKmode)
+ return NULL_RTX;
op1 = expand_debug_expr (TREE_OPERAND (exp, 1));
if (!op1)
return NULL_RTX;
unary:
case tcc_unary:
+ if (mode == BLKmode)
+ return NULL_RTX;
inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
if (!op0)
op0 = expand_expr (exp, NULL_RTX, mode, EXPAND_INITIALIZER);
return op0;
+ case POLY_INT_CST:
+ return immed_wide_int_const (poly_int_cst_value (exp), mode);
+
case COMPLEX_CST:
gcc_assert (COMPLEX_MODE_P (mode));
op0 = expand_debug_expr (TREE_REALPART (exp));
op0 = DECL_RTL_IF_SET (exp);
/* This decl was probably optimized away. */
- if (!op0)
+ if (!op0
+ /* At least label RTXen are sometimes replaced by
+ NOTE_INSN_DELETED_LABEL. Any notes here are not
+ handled by copy_rtx. */
+ || NOTE_P (op0))
{
if (!VAR_P (exp)
|| DECL_EXTERNAL (exp)
goto component_ref;
op1 = expand_debug_expr (TREE_OPERAND (exp, 1));
- if (!op1 || !CONST_INT_P (op1))
+ poly_int64 offset;
+ if (!op1 || !poly_int_rtx_p (op1, &offset))
return NULL;
- op0 = plus_constant (inner_mode, op0, INTVAL (op1));
+ op0 = plus_constant (inner_mode, op0, offset);
}
as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
case VIEW_CONVERT_EXPR:
{
machine_mode mode1;
- HOST_WIDE_INT bitsize, bitpos;
+ poly_int64 bitsize, bitpos;
tree offset;
int reversep, volatilep = 0;
tree tem
&unsignedp, &reversep, &volatilep);
rtx orig_op0;
- if (bitsize == 0)
+ if (known_eq (bitsize, 0))
return NULL;
orig_op0 = op0 = expand_debug_expr (tem);
if (MEM_P (op0))
{
if (mode1 == VOIDmode)
- /* Bitfield. */
- mode1 = smallest_int_mode_for_size (bitsize);
- if (bitpos >= BITS_PER_UNIT)
{
- op0 = adjust_address_nv (op0, mode1, bitpos / BITS_PER_UNIT);
- bitpos %= BITS_PER_UNIT;
+ if (maybe_gt (bitsize, MAX_BITSIZE_MODE_ANY_INT))
+ return NULL;
+ /* Bitfield. */
+ mode1 = smallest_int_mode_for_size (bitsize);
}
- else if (bitpos < 0)
+ poly_int64 bytepos = bits_to_bytes_round_down (bitpos);
+ if (maybe_ne (bytepos, 0))
{
- HOST_WIDE_INT units
- = (-bitpos + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
- op0 = adjust_address_nv (op0, mode1, -units);
- bitpos += units * BITS_PER_UNIT;
+ op0 = adjust_address_nv (op0, mode1, bytepos);
+ bitpos = num_trailing_bits (bitpos);
}
- else if (bitpos == 0 && bitsize == GET_MODE_BITSIZE (mode))
+ else if (known_eq (bitpos, 0)
+ && known_eq (bitsize, GET_MODE_BITSIZE (mode)))
op0 = adjust_address_nv (op0, mode, 0);
else if (GET_MODE (op0) != mode1)
op0 = adjust_address_nv (op0, mode1, 0);
set_mem_attributes (op0, exp, 0);
}
- if (bitpos == 0 && mode == GET_MODE (op0))
+ if (known_eq (bitpos, 0) && mode == GET_MODE (op0))
return op0;
- if (bitpos < 0)
+ if (maybe_lt (bitpos, 0))
return NULL;
- if (GET_MODE (op0) == BLKmode)
+ if (GET_MODE (op0) == BLKmode || mode == BLKmode)
return NULL;
- if ((bitpos % BITS_PER_UNIT) == 0
- && bitsize == GET_MODE_BITSIZE (mode1))
+ poly_int64 bytepos;
+ if (multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
+ && known_eq (bitsize, GET_MODE_BITSIZE (mode1)))
{
machine_mode opmode = GET_MODE (op0);
debug stmts). The gen_subreg below would rightfully
crash, and the address doesn't really exist, so just
drop it. */
- if (bitpos >= GET_MODE_BITSIZE (opmode))
+ if (known_ge (bitpos, GET_MODE_BITSIZE (opmode)))
return NULL;
- if ((bitpos % GET_MODE_BITSIZE (mode)) == 0)
- return simplify_gen_subreg (mode, op0, opmode,
- bitpos / BITS_PER_UNIT);
+ if (multiple_p (bitpos, GET_MODE_BITSIZE (mode)))
+ return simplify_gen_subreg (mode, op0, opmode, bytepos);
}
return simplify_gen_ternary (SCALAR_INT_MODE_P (GET_MODE (op0))
GET_MODE (op0) != VOIDmode
? GET_MODE (op0)
: TYPE_MODE (TREE_TYPE (tem)),
- op0, GEN_INT (bitsize), GEN_INT (bitpos));
+ op0, gen_int_mode (bitsize, word_mode),
+ gen_int_mode (bitpos, word_mode));
}
case ABS_EXPR:
+ case ABSU_EXPR:
return simplify_gen_unary (ABS, mode, op0, mode);
case NEGATE_EXPR:
if (handled_component_p (TREE_OPERAND (exp, 0)))
{
- HOST_WIDE_INT bitoffset, bitsize, maxsize;
+ poly_int64 bitoffset, bitsize, maxsize, byteoffset;
bool reverse;
tree decl
= get_ref_base_and_extent (TREE_OPERAND (exp, 0), &bitoffset,
|| TREE_CODE (decl) == RESULT_DECL)
&& (!TREE_ADDRESSABLE (decl)
|| target_for_debug_bind (decl))
- && (bitoffset % BITS_PER_UNIT) == 0
- && bitsize > 0
- && bitsize == maxsize)
+ && multiple_p (bitoffset, BITS_PER_UNIT, &byteoffset)
+ && known_gt (bitsize, 0)
+ && known_eq (bitsize, maxsize))
{
rtx base = gen_rtx_DEBUG_IMPLICIT_PTR (mode, decl);
- return plus_constant (mode, base, bitoffset / BITS_PER_UNIT);
+ return plus_constant (mode, base, byteoffset);
}
}
{
op1 = expand_debug_expr (TREE_OPERAND (TREE_OPERAND (exp, 0),
1));
- if (!op1 || !CONST_INT_P (op1))
+ poly_int64 offset;
+ if (!op1 || !poly_int_rtx_p (op1, &offset))
return NULL;
- return plus_constant (mode, op0, INTVAL (op1));
+ return plus_constant (mode, op0, offset);
}
}
case VECTOR_CST:
{
- unsigned i, nelts;
+ unsigned HOST_WIDE_INT i, nelts;
+
+ if (!VECTOR_CST_NELTS (exp).is_constant (&nelts))
+ return NULL;
- nelts = VECTOR_CST_NELTS (exp);
op0 = gen_rtx_CONCATN (mode, rtvec_alloc (nelts));
for (i = 0; i < nelts; ++i)
else if (TREE_CODE (TREE_TYPE (exp)) == VECTOR_TYPE)
{
unsigned i;
+ unsigned HOST_WIDE_INT nelts;
tree val;
- op0 = gen_rtx_CONCATN
- (mode, rtvec_alloc (TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp))));
+ if (!TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)).is_constant (&nelts))
+ goto flag_unsupported;
+
+ op0 = gen_rtx_CONCATN (mode, rtvec_alloc (nelts));
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), i, val)
{
XVECEXP (op0, 0, i) = op1;
}
- if (i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)))
+ if (i < nelts)
{
op1 = expand_debug_expr
(build_zero_cst (TREE_TYPE (TREE_TYPE (exp))));
if (!op1)
return NULL;
- for (; i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)); i++)
+ for (; i < nelts; i++)
XVECEXP (op0, 0, i) = op1;
}
/* Vector stuff. For most of the codes we don't have rtl codes. */
case REALIGN_LOAD_EXPR:
- case REDUC_MAX_EXPR:
- case REDUC_MIN_EXPR:
- case REDUC_PLUS_EXPR:
case VEC_COND_EXPR:
case VEC_PACK_FIX_TRUNC_EXPR:
+ case VEC_PACK_FLOAT_EXPR:
case VEC_PACK_SAT_EXPR:
case VEC_PACK_TRUNC_EXPR:
+ case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
+ case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
case VEC_UNPACK_FLOAT_HI_EXPR:
case VEC_UNPACK_FLOAT_LO_EXPR:
case VEC_UNPACK_HI_EXPR:
case VEC_WIDEN_LSHIFT_HI_EXPR:
case VEC_WIDEN_LSHIFT_LO_EXPR:
case VEC_PERM_EXPR:
+ case VEC_DUPLICATE_EXPR:
+ case VEC_SERIES_EXPR:
+ case SAD_EXPR:
return NULL;
/* Misc codes. */
}
return NULL;
- case FMA_EXPR:
- return simplify_gen_ternary (FMA, mode, inner_mode, op0, op1, op2);
-
default:
flag_unsupported:
if (flag_checking)
switch (TREE_CODE (exp))
{
+ case VAR_DECL:
+ if (DECL_ABSTRACT_ORIGIN (exp))
+ return expand_debug_source_expr (DECL_ABSTRACT_ORIGIN (exp));
+ break;
case PARM_DECL:
{
mode = DECL_MODE (exp);
flag_strict_aliasing = 0;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
- if (DEBUG_INSN_P (insn))
+ if (DEBUG_BIND_INSN_P (insn))
{
tree value = (tree)INSN_VAR_LOCATION_LOC (insn);
rtx val;
gimple_stmt_iterator gsi;
gimple_seq stmts;
gimple *stmt = NULL;
- rtx_note *note;
+ rtx_note *note = NULL;
rtx_insn *last;
edge e;
edge_iterator ei;
if (stmt || elt)
{
+ gcc_checking_assert (!note);
last = get_last_insn ();
if (stmt)
BB_HEAD (bb) = NEXT_INSN (last);
if (NOTE_P (BB_HEAD (bb)))
BB_HEAD (bb) = NEXT_INSN (BB_HEAD (bb));
+ gcc_assert (LABEL_P (BB_HEAD (bb)));
note = emit_note_after (NOTE_INSN_BASIC_BLOCK, BB_HEAD (bb));
maybe_dump_rtl_for_gimple_stmt (stmt, last);
else
BB_HEAD (bb) = note = emit_note (NOTE_INSN_BASIC_BLOCK);
- NOTE_BASIC_BLOCK (note) = bb;
+ if (note)
+ NOTE_BASIC_BLOCK (note) = bb;
for (; !gsi_end_p (gsi); gsi_next (&gsi))
{
a_2 = ...
#DEBUG ... => #D1
*/
- if (MAY_HAVE_DEBUG_INSNS
+ if (MAY_HAVE_DEBUG_BIND_INSNS
&& SA.values
&& !is_gimple_debug (stmt))
{
if (new_bb)
return new_bb;
}
- else if (gimple_debug_bind_p (stmt))
+ else if (is_gimple_debug (stmt))
{
location_t sloc = curr_insn_location ();
gimple_stmt_iterator nsi = gsi;
for (;;)
{
- tree var = gimple_debug_bind_get_var (stmt);
- tree value;
- rtx val;
+ tree var;
+ tree value = NULL_TREE;
+ rtx val = NULL_RTX;
machine_mode mode;
- if (TREE_CODE (var) != DEBUG_EXPR_DECL
- && TREE_CODE (var) != LABEL_DECL
- && !target_for_debug_bind (var))
+ if (!gimple_debug_nonbind_marker_p (stmt))
+ {
+ if (gimple_debug_bind_p (stmt))
+ {
+ var = gimple_debug_bind_get_var (stmt);
+
+ if (TREE_CODE (var) != DEBUG_EXPR_DECL
+ && TREE_CODE (var) != LABEL_DECL
+ && !target_for_debug_bind (var))
+ goto delink_debug_stmt;
+
+ if (DECL_P (var))
+ mode = DECL_MODE (var);
+ else
+ mode = TYPE_MODE (TREE_TYPE (var));
+
+ if (gimple_debug_bind_has_value_p (stmt))
+ value = gimple_debug_bind_get_value (stmt);
+
+ val = gen_rtx_VAR_LOCATION
+ (mode, var, (rtx)value, VAR_INIT_STATUS_INITIALIZED);
+ }
+ else if (gimple_debug_source_bind_p (stmt))
+ {
+ var = gimple_debug_source_bind_get_var (stmt);
+
+ value = gimple_debug_source_bind_get_value (stmt);
+
+ mode = DECL_MODE (var);
+
+ val = gen_rtx_VAR_LOCATION (mode, var, (rtx)value,
+ VAR_INIT_STATUS_UNINITIALIZED);
+ }
+ else
+ gcc_unreachable ();
+ }
+ /* If this function was first compiled with markers
+ enabled, but they're now disabled (e.g. LTO), drop
+ them on the floor. */
+ else if (gimple_debug_nonbind_marker_p (stmt)
+ && !MAY_HAVE_DEBUG_MARKER_INSNS)
goto delink_debug_stmt;
+ else if (gimple_debug_begin_stmt_p (stmt))
+ val = GEN_RTX_DEBUG_MARKER_BEGIN_STMT_PAT ();
+ else if (gimple_debug_inline_entry_p (stmt))
+ {
+ tree block = gimple_block (stmt);
- if (gimple_debug_bind_has_value_p (stmt))
- value = gimple_debug_bind_get_value (stmt);
+ if (block)
+ val = GEN_RTX_DEBUG_MARKER_INLINE_ENTRY_PAT ();
+ else
+ goto delink_debug_stmt;
+ }
else
- value = NULL_TREE;
+ gcc_unreachable ();
last = get_last_insn ();
set_curr_insn_location (gimple_location (stmt));
- if (DECL_P (var))
- mode = DECL_MODE (var);
- else
- mode = TYPE_MODE (TREE_TYPE (var));
-
- val = gen_rtx_VAR_LOCATION
- (mode, var, (rtx)value, VAR_INIT_STATUS_INITIALIZED);
-
emit_debug_insn (val);
if (dump_file && (dump_flags & TDF_DETAILS))
{
/* We can't dump the insn with a TREE where an RTX
is expected. */
- PAT_VAR_LOCATION_LOC (val) = const0_rtx;
+ if (GET_CODE (val) == VAR_LOCATION)
+ {
+ gcc_checking_assert (PAT_VAR_LOCATION_LOC (val) == (rtx)value);
+ PAT_VAR_LOCATION_LOC (val) = const0_rtx;
+ }
maybe_dump_rtl_for_gimple_stmt (stmt, last);
- PAT_VAR_LOCATION_LOC (val) = (rtx)value;
+ if (GET_CODE (val) == VAR_LOCATION)
+ PAT_VAR_LOCATION_LOC (val) = (rtx)value;
}
delink_debug_stmt:
if (gsi_end_p (nsi))
break;
stmt = gsi_stmt (nsi);
- if (!gimple_debug_bind_p (stmt))
+ if (!is_gimple_debug (stmt))
break;
}
- set_curr_insn_location (sloc);
- }
- else if (gimple_debug_source_bind_p (stmt))
- {
- location_t sloc = curr_insn_location ();
- tree var = gimple_debug_source_bind_get_var (stmt);
- tree value = gimple_debug_source_bind_get_value (stmt);
- rtx val;
- machine_mode mode;
-
- last = get_last_insn ();
-
- set_curr_insn_location (gimple_location (stmt));
-
- mode = DECL_MODE (var);
-
- val = gen_rtx_VAR_LOCATION (mode, var, (rtx)value,
- VAR_INIT_STATUS_UNINITIALIZED);
-
- emit_debug_insn (val);
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- /* We can't dump the insn with a TREE where an RTX
- is expected. */
- PAT_VAR_LOCATION_LOC (val) = const0_rtx;
- maybe_dump_rtl_for_gimple_stmt (stmt, last);
- PAT_VAR_LOCATION_LOC (val) = (rtx)value;
- }
-
set_curr_insn_location (sloc);
}
else
last = PREV_INSN (last);
if (JUMP_TABLE_DATA_P (last))
last = PREV_INSN (PREV_INSN (last));
+ if (BARRIER_P (last))
+ last = PREV_INSN (last);
BB_END (bb) = last;
update_bb_for_insn (bb);
{
first_block = e->dest;
redirect_edge_succ (e, init_block);
- e = make_single_succ_edge (init_block, first_block, flags);
+ make_single_succ_edge (init_block, first_block, flags);
}
else
- e = make_single_succ_edge (init_block, EXIT_BLOCK_PTR_FOR_FN (cfun),
- EDGE_FALLTHRU);
+ make_single_succ_edge (init_block, EXIT_BLOCK_PTR_FOR_FN (cfun),
+ EDGE_FALLTHRU);
update_bb_for_insn (init_block);
return init_block;
*walk_subtrees = 0;
}
+ /* References of size POLY_INT_CST to a fixed-size object must go
+ through memory. It's more efficient to force that here than
+ to create temporary slots on the fly. */
+ else if ((TREE_CODE (t) == MEM_REF || TREE_CODE (t) == TARGET_MEM_REF)
+ && TYPE_SIZE (TREE_TYPE (t))
+ && POLY_INT_CST_P (TYPE_SIZE (TREE_TYPE (t))))
+ {
+ tree base = get_base_address (t);
+ if (base
+ && DECL_P (base)
+ && DECL_MODE (base) != BLKmode
+ && GET_MODE_SIZE (DECL_MODE (base)).is_constant ())
+ TREE_ADDRESSABLE (base) = 1;
+ *walk_subtrees = 0;
+ }
return NULL_TREE;
}
+/* If there's a chance to get a pseudo for T, and it would be of float mode
+ while the actual access is via an integer mode (lowered memcpy or similar
+ access), then avoid the register expansion if the mode is likely not
+ suitable as storage for raw bit processing (like XFmode on i?86). */
+
+static void
+avoid_type_punning_on_regs (tree t)
+{
+ machine_mode access_mode = TYPE_MODE (TREE_TYPE (t));
+ if (access_mode != BLKmode
+ && !SCALAR_INT_MODE_P (access_mode))
+ return;
+ tree base = get_base_address (t);
+ if (DECL_P (base)
+ && !TREE_ADDRESSABLE (base)
+ && FLOAT_MODE_P (DECL_MODE (base))
+ && maybe_lt (GET_MODE_PRECISION (DECL_MODE (base)),
+ GET_MODE_BITSIZE (GET_MODE_INNER (DECL_MODE (base))))
+ /* Double check in the expensive way we really would get a pseudo. */
+ && use_register_for_decl (base))
+ TREE_ADDRESSABLE (base) = 1;
+}
+
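/* Illustrative sketch, not part of this patch: the lowered-memcpy pattern
   guarded against above.  On i?86, D has XFmode, whose 80 significant bits
   do not fill its 96- or 128-bit storage, so reading its bytes through an
   integer mode out of a register pseudo could expose junk padding; marking
   D addressable keeps the access in memory.  sketch_punning is a
   hypothetical name.  */

#include <string.h>

unsigned long long
sketch_punning (long double d)
{
  unsigned long long bits;
  memcpy (&bits, &d, sizeof bits);	/* integer-mode access to D */
  return bits;
}
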
/* RTL expansion is not able to compile array references with variable
offsets for arrays stored in a single register. Discover such
expressions and mark variables as addressable to avoid this
{
gimple *stmt = gsi_stmt (gsi);
if (!is_gimple_debug (stmt))
- walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
+ {
+ walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
+ gcall *call = dyn_cast <gcall *> (stmt);
+ if (call && gimple_call_internal_p (call))
+ switch (gimple_call_internal_fn (call))
+ {
+ case IFN_LOAD_LANES:
+ /* The source must be a MEM. */
+ mark_addressable (gimple_call_arg (call, 0));
+ break;
+ case IFN_STORE_LANES:
+ /* The destination must be a MEM. */
+ mark_addressable (gimple_call_lhs (call));
+ break;
+ default:
+ break;
+ }
+ if (gimple_vdef (stmt))
+ {
+ tree t = gimple_get_lhs (stmt);
+ if (t && REFERENCE_CLASS_P (t))
+ avoid_type_punning_on_regs (t);
+ }
+ }
}
}
gcc_assert ((stack_realign_drap != 0) == (drap_rtx != NULL));
/* Do nothing if NULL is returned, which means DRAP is not needed. */
- if (NULL != drap_rtx)
+ if (drap_rtx != NULL)
{
crtl->args.internal_arg_pointer = drap_rtx;
tree guard_decl = targetm.stack_protect_guard ();
rtx x, y;
+ crtl->stack_protect_guard_decl = guard_decl;
x = expand_normal (crtl->stack_protect_guard);
+
+ if (targetm.have_stack_protect_combined_set () && guard_decl)
+ {
+ gcc_assert (DECL_P (guard_decl));
+ y = DECL_RTL (guard_decl);
+
+ /* Allow the target to compute address of Y and copy it to X without
+ leaking Y into a register. This combined address + copy pattern
+ allows the target to prevent spilling of any intermediate results by
+ splitting it after register allocator. */
+ if (rtx_insn *insn = targetm.gen_stack_protect_combined_set (x, y))
+ {
+ emit_insn (insn);
+ return;
+ }
+ }
+
if (guard_decl)
y = expand_normal (guard_decl);
else
timevar_pop (TV_OUT_OF_SSA);
SA.partition_to_pseudo = XCNEWVEC (rtx, SA.map->num_partitions);
- if (MAY_HAVE_DEBUG_STMTS && flag_tree_ter)
+ if (MAY_HAVE_DEBUG_BIND_STMTS && flag_tree_ter)
{
gimple_stmt_iterator gsi;
FOR_EACH_BB_FN (bb, cfun)
avoid_deep_ter_for_debug (gsi_stmt (gsi), 0);
}
+ /* Mark arrays indexed with non-constant indices with TREE_ADDRESSABLE. */
+ discover_nonconstant_array_refs ();
+
/* Make sure all values used by the optimization passes have sane
defaults. */
reg_renumber = 0;
rtl_profile_for_bb (ENTRY_BLOCK_PTR_FOR_FN (fun));
- if (chkp_function_instrumented_p (current_function_decl))
- chkp_reset_rtl_bounds ();
-
insn_locations_init ();
if (!DECL_IS_BUILTIN (current_function_decl))
{
Also, final expects a note to appear there. */
emit_note (NOTE_INSN_DELETED);
- /* Mark arrays indexed with non-constant indices with TREE_ADDRESSABLE. */
- discover_nonconstant_array_refs ();
-
targetm.expand_to_rtl_hook ();
crtl->init_stack_alignment ();
fun->cfg->max_jumptable_ents = 0;
warning (OPT_Wstack_protector,
"stack protector not protecting function: "
"all local arrays are less than %d bytes long",
- (int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
+ (int) param_ssp_buffer_size);
}
/* Set up parameters and prepare for return, for the function. */
FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (fun)->succs)
e->flags &= ~EDGE_EXECUTABLE;
+ /* If the function has too many markers, drop them while expanding. */
+ if (cfun->debug_marker_count
+ >= param_max_debug_marker_count)
+ cfun->debug_nonbind_markers = false;
+
lab_rtx_for_bb = new hash_map<basic_block, rtx_code_label *>;
FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR_FOR_FN (fun),
next_bb)
bb = expand_gimple_basic_block (bb, var_ret_seq != NULL_RTX);
- if (MAY_HAVE_DEBUG_INSNS)
+ if (MAY_HAVE_DEBUG_BIND_INSNS)
expand_debug_locations ();
if (deep_ter_debug_map)
split edges which edge insertions might do. */
rebuild_jump_labels (get_insns ());
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (fun),
- EXIT_BLOCK_PTR_FOR_FN (fun), next_bb)
+ /* If we have a single successor to the entry block, put the pending insns
+ after parm birth, but before NOTE_INSNS_FUNCTION_BEG. */
+ if (single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (fun)))
{
- edge e;
- edge_iterator ei;
- for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+ edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (fun));
+ if (e->insns.r)
{
- if (e->insns.r)
- {
- rebuild_jump_labels_chain (e->insns.r);
- /* Put insns after parm birth, but before
- NOTE_INSNS_FUNCTION_BEG. */
- if (e->src == ENTRY_BLOCK_PTR_FOR_FN (fun)
- && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (fun)))
- {
- rtx_insn *insns = e->insns.r;
- e->insns.r = NULL;
- if (NOTE_P (parm_birth_insn)
- && NOTE_KIND (parm_birth_insn) == NOTE_INSN_FUNCTION_BEG)
- emit_insn_before_noloc (insns, parm_birth_insn, e->dest);
- else
- emit_insn_after_noloc (insns, parm_birth_insn, e->dest);
- }
- else
- commit_one_edge_insertion (e);
- }
+ rtx_insn *insns = e->insns.r;
+ e->insns.r = NULL;
+ rebuild_jump_labels_chain (insns);
+ if (NOTE_P (parm_birth_insn)
+ && NOTE_KIND (parm_birth_insn) == NOTE_INSN_FUNCTION_BEG)
+ emit_insn_before_noloc (insns, parm_birth_insn, e->dest);
else
- ei_next (&ei);
+ emit_insn_after_noloc (insns, parm_birth_insn, e->dest);
}
}
+ /* Otherwise, as well as for other edges, take the usual way. */
+ commit_edge_insertions ();
+
/* We're done expanding trees to RTL. */
currently_expanding_to_rtl = 0;
find_many_sub_basic_blocks (blocks);
purge_all_dead_edges ();
+ /* After initial rtl generation, call back to finish generating
+ exception support code. We need to do this before cleaning up
+ the CFG as the code does not expect dead landing pads. */
+ if (fun->eh->region_tree != NULL)
+ finish_eh_generation ();
+
+ /* Call expand_stack_alignment after finishing all
+ updates to crtl->preferred_stack_boundary. */
expand_stack_alignment ();
/* Fixup REG_EQUIV notes in the prologue if there are tailcalls in this
if (crtl->tail_call_emit)
fixup_tail_calls ();
- /* After initial rtl generation, call back to finish generating
- exception support code. We need to do this before cleaning up
- the CFG as the code does not expect dead landing pads. */
- if (fun->eh->region_tree != NULL)
- finish_eh_generation ();
+ unsigned HOST_WIDE_INT patch_area_size = function_entry_patch_area_size;
+ unsigned HOST_WIDE_INT patch_area_entry = function_entry_patch_area_start;
+
+ tree patchable_function_entry_attr
+ = lookup_attribute ("patchable_function_entry",
+ DECL_ATTRIBUTES (cfun->decl));
+ if (patchable_function_entry_attr)
+ {
+ tree pp_val = TREE_VALUE (patchable_function_entry_attr);
+ tree patchable_function_entry_value1 = TREE_VALUE (pp_val);
+
+ patch_area_size = tree_to_uhwi (patchable_function_entry_value1);
+ patch_area_entry = 0;
+ if (TREE_CHAIN (pp_val) != NULL_TREE)
+ {
+ tree patchable_function_entry_value2
+ = TREE_VALUE (TREE_CHAIN (pp_val));
+ patch_area_entry = tree_to_uhwi (patchable_function_entry_value2);
+ }
+ }
+
+ if (patch_area_entry > patch_area_size)
+ {
+ if (patch_area_size > 0)
+ warning (OPT_Wattributes,
+ "patchable function entry %wu exceeds size %wu",
+ patch_area_entry, patch_area_size);
+ patch_area_entry = 0;
+ }
+
+ crtl->patch_area_size = patch_area_size;
+ crtl->patch_area_entry = patch_area_entry;
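
/* Illustrative sketch, not part of this patch: the attribute parsed above.
   This requests 3 NOPs in total with the entry point placed after the
   first 1, the per-function equivalent of -fpatchable-function-entry=3,1.
   sketch_patchable is a hypothetical name.  */

__attribute__ ((patchable_function_entry (3, 1)))
void
sketch_patchable (void)
{
}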
- /* BB subdivision may have created basic blocks that are are only reachable
+ /* BB subdivision may have created basic blocks that are only reachable
from unlikely bbs but not marked as such in the profile. */
if (optimize)
propagate_unlikely_bbs_forward ();