/* A pass for lowering trees to RTL.
- Copyright (C) 2004-2019 Free Software Foundation, Inc.
+ Copyright (C) 2004-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "gimple-pretty-print.h"
#include "toplev.h"
#include "debug.h"
-#include "params.h"
#include "tree-inline.h"
#include "value-prof.h"
#include "tree-ssa-live.h"
gimple_assign_rhs_to_tree (gimple *stmt)
{
tree t;
- enum gimple_rhs_class grhs_class;
-
- grhs_class = get_gimple_rhs_class (gimple_expr_code (stmt));
-
- if (grhs_class == GIMPLE_TERNARY_RHS)
- t = build3 (gimple_assign_rhs_code (stmt),
- TREE_TYPE (gimple_assign_lhs (stmt)),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt),
- gimple_assign_rhs3 (stmt));
- else if (grhs_class == GIMPLE_BINARY_RHS)
- t = build2 (gimple_assign_rhs_code (stmt),
- TREE_TYPE (gimple_assign_lhs (stmt)),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt));
- else if (grhs_class == GIMPLE_UNARY_RHS)
- t = build1 (gimple_assign_rhs_code (stmt),
- TREE_TYPE (gimple_assign_lhs (stmt)),
- gimple_assign_rhs1 (stmt));
- else if (grhs_class == GIMPLE_SINGLE_RHS)
- {
- t = gimple_assign_rhs1 (stmt);
- /* Avoid modifying this tree in place below. */
- if ((gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)
- && gimple_location (stmt) != EXPR_LOCATION (t))
- || (gimple_block (stmt)
- && currently_expanding_to_rtl
- && EXPR_P (t)))
- t = copy_node (t);
+ switch (get_gimple_rhs_class (gimple_expr_code (stmt)))
+ {
+ case GIMPLE_TERNARY_RHS:
+ t = build3 (gimple_assign_rhs_code (stmt),
+ TREE_TYPE (gimple_assign_lhs (stmt)),
+ gimple_assign_rhs1 (stmt), gimple_assign_rhs2 (stmt),
+ gimple_assign_rhs3 (stmt));
+ break;
+ case GIMPLE_BINARY_RHS:
+ t = build2 (gimple_assign_rhs_code (stmt),
+ TREE_TYPE (gimple_assign_lhs (stmt)),
+ gimple_assign_rhs1 (stmt), gimple_assign_rhs2 (stmt));
+ break;
+ case GIMPLE_UNARY_RHS:
+ t = build1 (gimple_assign_rhs_code (stmt),
+ TREE_TYPE (gimple_assign_lhs (stmt)),
+ gimple_assign_rhs1 (stmt));
+ break;
+ case GIMPLE_SINGLE_RHS:
+ {
+ t = gimple_assign_rhs1 (stmt);
+ /* Avoid modifying this tree in place below. */
+ if ((gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)
+ && gimple_location (stmt) != EXPR_LOCATION (t))
+ || (gimple_block (stmt) && currently_expanding_to_rtl
+ && EXPR_P (t)))
+ t = copy_node (t);
+ break;
+ }
+ default:
+ gcc_unreachable ();
}
- else
- gcc_unreachable ();
if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t))
SET_EXPR_LOCATION (t, gimple_location (stmt));
/* This structure holds data relevant to one variable that will be
placed in a stack slot. */
-struct stack_var
+class stack_var
{
+public:
/* The Variable. */
tree decl;
#define EOC ((size_t)-1)
/* We have an array of such objects while deciding allocation. */
-static struct stack_var *stack_vars;
+static class stack_var *stack_vars;
static size_t stack_vars_alloc;
static size_t stack_vars_num;
static hash_map<tree, size_t> *decl_to_stack_part;
we can't do with expected alignment of the stack boundary. */
static unsigned int
-align_local_variable (tree decl)
+align_local_variable (tree decl, bool really_expand)
{
unsigned int align;
else
{
align = LOCAL_DECL_ALIGNMENT (decl);
- SET_DECL_ALIGN (decl, align);
+ /* Don't change DECL_ALIGN when called from estimated_stack_frame_size.
+ That is done before IPA and could bump alignment based on the host
+ backend even for offloaded code which wants a different
+ LOCAL_DECL_ALIGNMENT. */
+ if (really_expand)
+ SET_DECL_ALIGN (decl, align);
}
return align / BITS_PER_UNIT;
}
/* Accumulate DECL into STACK_VARS. */
static void
-add_stack_var (tree decl)
+add_stack_var (tree decl, bool really_expand)
{
- struct stack_var *v;
+ class stack_var *v;
if (stack_vars_num >= stack_vars_alloc)
{
else
stack_vars_alloc = 32;
stack_vars
- = XRESIZEVEC (struct stack_var, stack_vars, stack_vars_alloc);
+ = XRESIZEVEC (class stack_var, stack_vars, stack_vars_alloc);
}
if (!decl_to_stack_part)
decl_to_stack_part = new hash_map<tree, size_t>;
variables that are simultaneously live. */
if (known_eq (v->size, 0U))
v->size = 1;
- v->alignb = align_local_variable (decl);
+ v->alignb = align_local_variable (decl, really_expand);
/* An alignment of zero can mightily confuse us later. */
gcc_assert (v->alignb != 0);
static void
add_stack_var_conflict (size_t x, size_t y)
{
- struct stack_var *a = &stack_vars[x];
- struct stack_var *b = &stack_vars[y];
+ class stack_var *a = &stack_vars[x];
+ class stack_var *b = &stack_vars[y];
if (x == y)
return;
if (!a->conflicts)
static bool
stack_var_conflict_p (size_t x, size_t y)
{
- struct stack_var *a = &stack_vars[x];
- struct stack_var *b = &stack_vars[y];
+ class stack_var *a = &stack_vars[x];
+ class stack_var *b = &stack_vars[y];
if (x == y)
return false;
/* Partitions containing an SSA name result from gimple registers
unsigned i;
EXECUTE_IF_SET_IN_BITMAP (work, 0, i, bi)
{
- struct stack_var *a = &stack_vars[i];
+ class stack_var *a = &stack_vars[i];
if (!a->conflicts)
a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
bitmap_ior_into (a->conflicts, work);
static void
union_stack_vars (size_t a, size_t b)
{
- struct stack_var *vb = &stack_vars[b];
+ class stack_var *vb = &stack_vars[b];
bitmap_iterator bi;
unsigned u;
stack_vars[b].representative = a;
stack_vars[a].next = b;
+ /* Make sure A is big enough to hold B. */
+ stack_vars[a].size = upper_bound (stack_vars[a].size, stack_vars[b].size);
+
/* Update the required alignment of partition A to account for B. */
if (stack_vars[a].alignb < stack_vars[b].alignb)
stack_vars[a].alignb = stack_vars[b].alignb;
set_rtl (decl, x);
}
-struct stack_vars_data
+class stack_vars_data
{
+public:
/* Vector of offset pairs, always the end of some padding followed
by the start of the padding that needs Address Sanitizer protection.
The vector is in reverse order: highest-offset pairs come first. */
with that location. */
static void
-expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data)
+expand_stack_vars (bool (*pred) (size_t), class stack_vars_data *data)
{
size_t si, i, j, n = stack_vars_num;
poly_uint64 large_size = 0, large_alloc = 0;
else
{
size = tree_to_poly_uint64 (DECL_SIZE_UNIT (var));
- byte_align = align_local_variable (var);
+ byte_align = align_local_variable (var, true);
}
/* We handle highly aligned variables in expand_stack_vars. */
if (!use_register_for_decl (var))
{
if (defer_stack_allocation (var, true))
- add_stack_var (var);
+ add_stack_var (var, true);
else
expand_one_stack_var_1 (var);
return;
bool smallish
= (poly_int_tree_p (size_unit, &size)
&& (estimated_poly_value (size)
- < PARAM_VALUE (PARAM_MIN_SIZE_FOR_STACK_SHARING)));
+ < param_min_size_for_stack_sharing));
/* If stack protection is enabled, *all* stack variables must be deferred,
so that we can re-order the strings to the top of the frame.
}
}
else if (defer_stack_allocation (var, toplevel))
- add_stack_var (origvar);
+ add_stack_var (origvar, really_expand);
else
{
if (really_expand)
{
if (lookup_attribute ("naked",
DECL_ATTRIBUTES (current_function_decl)))
- error ("cannot allocate stack for variable %q+D, naked function.",
+ error ("cannot allocate stack for variable %q+D, naked function",
var);
expand_one_stack_var (origvar);
|| t == signed_char_type_node
|| t == unsigned_char_type_node)
{
- unsigned HOST_WIDE_INT max = PARAM_VALUE (PARAM_SSP_BUFFER_SIZE);
+ unsigned HOST_WIDE_INT max = param_ssp_buffer_size;
unsigned HOST_WIDE_INT len;
if (!TYPE_SIZE_UNIT (type)
}
/* Ensure that variables in different stack protection phases conflict
- so that they are not merged and share the same stack slot. */
+ so that they are not merged and share the same stack slot.
+ Return true if there are any address taken variables. */
-static void
+static bool
add_stack_protection_conflicts (void)
{
size_t i, j, n = stack_vars_num;
unsigned char *phase;
+ bool ret = false;
phase = XNEWVEC (unsigned char, n);
for (i = 0; i < n; ++i)
- phase[i] = stack_protect_decl_phase (stack_vars[i].decl);
+ {
+ phase[i] = stack_protect_decl_phase (stack_vars[i].decl);
+ if (TREE_ADDRESSABLE (stack_vars[i].decl))
+ ret = true;
+ }
for (i = 0; i < n; ++i)
{
}
XDELETEVEC (phase);
+ return ret;
}
/* Create a decl for the guard at the top of the stack frame. */
return estimated_poly_value (size);
}
-/* Helper routine to check if a record or union contains an array field. */
-
-static int
-record_or_union_type_has_array_p (const_tree tree_type)
-{
- tree fields = TYPE_FIELDS (tree_type);
- tree f;
-
- for (f = fields; f; f = DECL_CHAIN (f))
- if (TREE_CODE (f) == FIELD_DECL)
- {
- tree field_type = TREE_TYPE (f);
- if (RECORD_OR_UNION_TYPE_P (field_type)
- && record_or_union_type_has_array_p (field_type))
- return 1;
- if (TREE_CODE (field_type) == ARRAY_TYPE)
- return 1;
- }
- return 0;
-}
-
-/* Check if the current function has local referenced variables that
- have their addresses taken, contain an array, or are arrays. */
-
-static bool
-stack_protect_decl_p ()
-{
- unsigned i;
- tree var;
-
- FOR_EACH_LOCAL_DECL (cfun, i, var)
- if (!is_global_var (var))
- {
- tree var_type = TREE_TYPE (var);
- if (VAR_P (var)
- && (TREE_CODE (var_type) == ARRAY_TYPE
- || TREE_ADDRESSABLE (var)
- || (RECORD_OR_UNION_TYPE_P (var_type)
- && record_or_union_type_has_array_p (var_type))))
- return true;
- }
- return false;
-}
-
/* Check if the current function has calls that use a return slot. */
static bool
}
if (flag_stack_protect == SPCT_FLAG_STRONG)
- gen_stack_protect_signal
- = stack_protect_decl_p () || stack_protect_return_slot_p ();
+ gen_stack_protect_signal = stack_protect_return_slot_p ();
/* At this point all variables on the local_decls with TREE_USED
set are not associated with any block scope. Lay them out. */
if (stack_vars_num > 0)
{
+ bool has_addressable_vars = false;
+
add_scope_conflicts ();
/* If stack protection is enabled, we don't share space between
|| (flag_stack_protect == SPCT_FLAG_EXPLICIT
&& lookup_attribute ("stack_protect",
DECL_ATTRIBUTES (current_function_decl)))))
- add_stack_protection_conflicts ();
+ has_addressable_vars = add_stack_protection_conflicts ();
+
+ if (flag_stack_protect == SPCT_FLAG_STRONG && has_addressable_vars)
+ gen_stack_protect_signal = true;
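With this change, -fstack-protector-strong learns about address-taken locals from the stack-variable conflict computation instead of the removed stack_protect_decl_p walk. A minimal sketch (the helper name is hypothetical) of a function that still gets a guard through this path:

extern void consume (int *);

void
use_address (void)
{
  int x = 0;
  /* Address-taken local: add_stack_protection_conflicts now reports it,
     gen_stack_protect_signal is set, and a stack guard is created.  */
  consume (&x);
}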
/* Now that we have collected all stack variables, and have computed a
minimal interference graph, attempt to save some stack space. */
case SPCT_FLAG_STRONG:
if (gen_stack_protect_signal
- || cfun->calls_alloca || has_protected_decls
+ || cfun->calls_alloca
+ || has_protected_decls
|| lookup_attribute ("stack_protect",
DECL_ATTRIBUTES (current_function_decl)))
create_stack_guard ();
break;
case SPCT_FLAG_DEFAULT:
- if (cfun->calls_alloca || has_protected_decls
+ if (cfun->calls_alloca
+ || has_protected_decls
|| lookup_attribute ("stack_protect",
DECL_ATTRIBUTES (current_function_decl)))
create_stack_guard ();
DECL_ATTRIBUTES (current_function_decl)))
create_stack_guard ();
break;
+
default:
- ;
+ break;
}
/* Assign rtl to each variable based on these partitions. */
if (stack_vars_num > 0)
{
- struct stack_vars_data data;
+ class stack_vars_data data;
data.asan_base = NULL_RTX;
data.asan_alignb = 0;
if (overlap)
{
- error ("asm-specifier for variable %qE conflicts with asm clobber list",
+ error ("%<asm%> specifier for variable %qE conflicts with "
+ "%<asm%> clobber list",
DECL_NAME (overlap));
/* Reset registerness to stop multiple errors emitted for a single
}
}
}
- unsigned nclobbers = clobber_rvec.length();
/* First pass over inputs and outputs checks validity and sets
mark_addressable if needed. */
if (allows_reg && TYPE_MODE (type) != BLKmode)
op = force_reg (TYPE_MODE (type), op);
else if (!allows_mem)
- warning (0, "asm operand %d probably doesn%'t match constraints",
+ warning (0, "%<asm%> operand %d probably does not match "
+ "constraints",
i + noutputs);
else if (MEM_P (op))
{
gcc_assert (constraints.length() == noutputs + ninputs);
/* But it certainly can adjust the clobbers. */
- nclobbers = clobber_rvec.length();
+ unsigned nclobbers = clobber_rvec.length ();
/* Third pass checks for easy conflicts. */
/* ??? Why are we doing this on trees instead of rtx. */
tripping over the under-construction body. */
for (unsigned k = 0; k < noutputs; ++k)
if (reg_overlap_mentioned_p (clobbered_reg, output_rvec[k]))
- internal_error ("asm clobber conflict with output operand");
+ internal_error ("%<asm%> clobber conflict with "
+ "output operand");
for (unsigned k = 0; k < ninputs - ninout; ++k)
if (reg_overlap_mentioned_p (clobbered_reg, input_rvec[k]))
- internal_error ("asm clobber conflict with input operand");
+ internal_error ("%<asm%> clobber conflict with "
+ "input operand");
}
XVECEXP (body, 0, i++) = gen_rtx_CLOBBER (VOIDmode, clobbered_reg);
{
op0 = gimple_return_retval (as_a <greturn *> (stmt));
+ /* If a return doesn't have a location, it very likely represents
+ multiple user returns, so we cannot let it inherit the location
+ of the last statement of the previous basic block in RTL. */
+ if (!gimple_has_location (stmt))
+ set_curr_insn_location (cfun->function_end_locus);
+
if (op0 && op0 != error_mark_node)
{
tree result = DECL_RESULT (current_function_decl);
/* If we want exceptions for non-call insns, any
may_trap_p instruction may throw. */
&& GET_CODE (PATTERN (insn)) != CLOBBER
- && GET_CODE (PATTERN (insn)) != CLOBBER_HIGH
&& GET_CODE (PATTERN (insn)) != USE
&& insn_could_throw_p (insn))
make_reg_eh_region_note (insn, 0, lp_nr);
op0 = DECL_RTL_IF_SET (exp);
/* This decl was probably optimized away. */
- if (!op0)
+ if (!op0
+ /* At least label RTXen are sometimes replaced by
+ NOTE_INSN_DELETED_LABEL. Any notes here are not
+ handled by copy_rtx. */
+ || NOTE_P (op0))
{
if (!VAR_P (exp)
|| DECL_EXTERNAL (exp)
case VEC_PERM_EXPR:
case VEC_DUPLICATE_EXPR:
case VEC_SERIES_EXPR:
+ case SAD_EXPR:
return NULL;
/* Misc codes. */
{
first_block = e->dest;
redirect_edge_succ (e, init_block);
- e = make_single_succ_edge (init_block, first_block, flags);
+ make_single_succ_edge (init_block, first_block, flags);
}
else
- e = make_single_succ_edge (init_block, EXIT_BLOCK_PTR_FOR_FN (cfun),
- EDGE_FALLTHRU);
+ make_single_succ_edge (init_block, EXIT_BLOCK_PTR_FOR_FN (cfun),
+ EDGE_FALLTHRU);
update_bb_for_insn (init_block);
return init_block;
*walk_subtrees = 0;
}
+ /* References of size POLY_INT_CST to a fixed-size object must go
+ through memory. It's more efficient to force that here than
+ to create temporary slots on the fly. */
+ else if ((TREE_CODE (t) == MEM_REF || TREE_CODE (t) == TARGET_MEM_REF)
+ && TYPE_SIZE (TREE_TYPE (t))
+ && POLY_INT_CST_P (TYPE_SIZE (TREE_TYPE (t))))
+ {
+ tree base = get_base_address (t);
+ if (base
+ && DECL_P (base)
+ && DECL_MODE (base) != BLKmode
+ && GET_MODE_SIZE (DECL_MODE (base)).is_constant ())
+ TREE_ADDRESSABLE (base) = 1;
+ *walk_subtrees = 0;
+ }
return NULL_TREE;
}
+/* If there's a chance that the object accessed through T gets a pseudo, but
+ that object is of float mode while the actual access is via an integer mode
+ (a lowered memcpy or similar access), avoid the register expansion when the
+ float mode is likely not suitable storage for raw bit processing (like
+ XFmode on i?86). */
+
+static void
+avoid_type_punning_on_regs (tree t)
+{
+ machine_mode access_mode = TYPE_MODE (TREE_TYPE (t));
+ if (access_mode != BLKmode
+ && !SCALAR_INT_MODE_P (access_mode))
+ return;
+ tree base = get_base_address (t);
+ if (DECL_P (base)
+ && !TREE_ADDRESSABLE (base)
+ && FLOAT_MODE_P (DECL_MODE (base))
+ && maybe_lt (GET_MODE_PRECISION (DECL_MODE (base)),
+ GET_MODE_BITSIZE (GET_MODE_INNER (DECL_MODE (base))))
+ /* Double check in the expensive way we really would get a pseudo. */
+ && use_register_for_decl (base))
+ TREE_ADDRESSABLE (base) = 1;
+}
+
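A user-level sketch of the store pattern this new helper guards against, assuming the 8-byte memcpy below is folded to a single integer-mode store:

long double
load_bits (unsigned long long bits)
{
  long double ld = 0.0L;
  /* On i?86 "ld" has XFmode: 80 bits of precision in wider storage, so
     writing raw bytes into it through a DImode reference makes
     avoid_type_punning_on_regs force "ld" into memory, not a pseudo.  */
  __builtin_memcpy (&ld, &bits, sizeof bits);
  return ld;
}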
/* RTL expansion is not able to compile array references with variable
offsets for arrays stored in a single register. Discover such
expressions and mark variables as addressable to avoid this
{
gimple *stmt = gsi_stmt (gsi);
if (!is_gimple_debug (stmt))
- walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
+ {
+ walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
+ gcall *call = dyn_cast <gcall *> (stmt);
+ if (call && gimple_call_internal_p (call))
+ switch (gimple_call_internal_fn (call))
+ {
+ case IFN_LOAD_LANES:
+ /* The source must be a MEM. */
+ mark_addressable (gimple_call_arg (call, 0));
+ break;
+ case IFN_STORE_LANES:
+ /* The destination must be a MEM. */
+ mark_addressable (gimple_call_lhs (call));
+ break;
+ default:
+ break;
+ }
+ if (gimple_vdef (stmt))
+ {
+ tree t = gimple_get_lhs (stmt);
+ if (t && REFERENCE_CLASS_P (t))
+ avoid_type_punning_on_regs (t);
+ }
+ }
}
}
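For illustration, a hypothetical function showing the kind of reference the existing walk (see the comment above) is about: a variable index into an array small enough to be given a non-BLK mode.

short
pick_lane (short i)
{
  short v[4] = { 1, 2, 3, 4 };
  /* Non-constant index: discover_nonconstant_array_refs marks "v"
     TREE_ADDRESSABLE so the reference is expanded from memory rather
     than from a single register.  */
  return v[i];
}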
avoid_deep_ter_for_debug (gsi_stmt (gsi), 0);
}
+ /* Mark arrays indexed with non-constant indices with TREE_ADDRESSABLE. */
+ discover_nonconstant_array_refs ();
+
/* Make sure all values used by the optimization passes have sane
defaults. */
reg_renumber = 0;
Also, final expects a note to appear there. */
emit_note (NOTE_INSN_DELETED);
- /* Mark arrays indexed with non-constant indices with TREE_ADDRESSABLE. */
- discover_nonconstant_array_refs ();
-
targetm.expand_to_rtl_hook ();
crtl->init_stack_alignment ();
fun->cfg->max_jumptable_ents = 0;
warning (OPT_Wstack_protector,
"stack protector not protecting function: "
"all local arrays are less than %d bytes long",
- (int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
+ (int) param_ssp_buffer_size);
}
/* Set up parameters and prepare for return, for the function. */
/* If the function has too many markers, drop them while expanding. */
if (cfun->debug_marker_count
- >= PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
+ >= param_max_debug_marker_count)
cfun->debug_nonbind_markers = false;
lab_rtx_for_bb = new hash_map<basic_block, rtx_code_label *>;
split edges which edge insertions might do. */
rebuild_jump_labels (get_insns ());
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (fun),
- EXIT_BLOCK_PTR_FOR_FN (fun), next_bb)
+ /* If we have a single successor to the entry block, put the pending insns
+ after parm birth, but before NOTE_INSN_FUNCTION_BEG. */
+ if (single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (fun)))
{
- edge e;
- edge_iterator ei;
- for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+ edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (fun));
+ if (e->insns.r)
{
- if (e->insns.r)
- {
- rebuild_jump_labels_chain (e->insns.r);
- /* Put insns after parm birth, but before
- NOTE_INSNS_FUNCTION_BEG. */
- if (e->src == ENTRY_BLOCK_PTR_FOR_FN (fun)
- && single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (fun)))
- {
- rtx_insn *insns = e->insns.r;
- e->insns.r = NULL;
- if (NOTE_P (parm_birth_insn)
- && NOTE_KIND (parm_birth_insn) == NOTE_INSN_FUNCTION_BEG)
- emit_insn_before_noloc (insns, parm_birth_insn, e->dest);
- else
- emit_insn_after_noloc (insns, parm_birth_insn, e->dest);
- }
- else
- commit_one_edge_insertion (e);
- }
+ rtx_insn *insns = e->insns.r;
+ e->insns.r = NULL;
+ rebuild_jump_labels_chain (insns);
+ if (NOTE_P (parm_birth_insn)
+ && NOTE_KIND (parm_birth_insn) == NOTE_INSN_FUNCTION_BEG)
+ emit_insn_before_noloc (insns, parm_birth_insn, e->dest);
else
- ei_next (&ei);
+ emit_insn_after_noloc (insns, parm_birth_insn, e->dest);
}
}
+ /* Otherwise, as well as for other edges, take the usual way. */
+ commit_edge_insertions ();
+
/* We're done expanding trees to RTL. */
currently_expanding_to_rtl = 0;
if (crtl->tail_call_emit)
fixup_tail_calls ();
- /* BB subdivision may have created basic blocks that are are only reachable
+ unsigned HOST_WIDE_INT patch_area_size = function_entry_patch_area_size;
+ unsigned HOST_WIDE_INT patch_area_entry = function_entry_patch_area_start;
+
+ tree patchable_function_entry_attr
+ = lookup_attribute ("patchable_function_entry",
+ DECL_ATTRIBUTES (cfun->decl));
+ if (patchable_function_entry_attr)
+ {
+ tree pp_val = TREE_VALUE (patchable_function_entry_attr);
+ tree patchable_function_entry_value1 = TREE_VALUE (pp_val);
+
+ patch_area_size = tree_to_uhwi (patchable_function_entry_value1);
+ patch_area_entry = 0;
+ if (TREE_CHAIN (pp_val) != NULL_TREE)
+ {
+ tree patchable_function_entry_value2
+ = TREE_VALUE (TREE_CHAIN (pp_val));
+ patch_area_entry = tree_to_uhwi (patchable_function_entry_value2);
+ }
+ }
+
+ if (patch_area_entry > patch_area_size)
+ {
+ if (patch_area_size > 0)
+ warning (OPT_Wattributes,
+ "patchable function entry %wu exceeds size %wu",
+ patch_area_entry, patch_area_size);
+ patch_area_entry = 0;
+ }
+
+ crtl->patch_area_size = patch_area_size;
+ crtl->patch_area_entry = patch_area_entry;
+
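A hypothetical use of the attribute parsed above; for this function cfgexpand records crtl->patch_area_size = 5 and crtl->patch_area_entry = 2, overriding any -fpatchable-function-entry= command-line value:

__attribute__ ((patchable_function_entry (5, 2)))
void
traced (void)
{
  /* 2 NOPs are emitted before the entry label and 3 after it, leaving
     room for runtime tooling to patch in a call at function entry.  */
}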
+ /* BB subdivision may have created basic blocks that are only reachable
from unlikely bbs but not marked as such in the profile. */
if (optimize)
propagate_unlikely_bbs_forward ();