/* Default target hook functions.
- Copyright (C) 2003-2018 Free Software Foundation, Inc.
+ Copyright (C) 2003-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "opts.h"
#include "gimplify.h"
#include "predict.h"
-#include "params.h"
#include "real.h"
#include "langhooks.h"
#include "sbitmap.h"
+#include "function-abi.h"
bool
default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
rtx
default_expand_builtin_saveregs (void)
{
- error ("__builtin_saveregs not supported by this target");
+ error ("%<__builtin_saveregs%> not supported by this target");
return const0_rtx;
}
void
-default_setup_incoming_varargs (cumulative_args_t ca ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED,
- tree type ATTRIBUTE_UNUSED,
- int *pretend_arg_size ATTRIBUTE_UNUSED,
- int second_time ATTRIBUTE_UNUSED)
+default_setup_incoming_varargs (cumulative_args_t,
+ const function_arg_info &, int *, int)
{
}
of the TARGET_PASS_BY_REFERENCE hook uses just MUST_PASS_IN_STACK. */
bool
-hook_pass_by_reference_must_pass_in_stack (cumulative_args_t c ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED, const_tree type ATTRIBUTE_UNUSED,
- bool named_arg ATTRIBUTE_UNUSED)
+hook_pass_by_reference_must_pass_in_stack (cumulative_args_t,
+ const function_arg_info &arg)
{
- return targetm.calls.must_pass_in_stack (mode, type);
+ return targetm.calls.must_pass_in_stack (arg);
}
/* Return true if a parameter follows the callee-copies convention. This
version of the hook returns true for all named arguments. */
bool
-hook_callee_copies_named (cumulative_args_t ca ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED,
- const_tree type ATTRIBUTE_UNUSED, bool named)
+hook_callee_copies_named (cumulative_args_t, const function_arg_info &arg)
{
- return named;
+ return arg.named;
}
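+/* To illustrate the new interface: where a hook previously received MODE,
+   TYPE and NAMED as separate parameters, it now reads them from ARG.  A
+   hypothetical port's hook (sketch only, not an in-tree target) could be:
+
+     static bool
+     example_callee_copies (cumulative_args_t, const function_arg_info &arg)
+     {
+       // Treat named aggregate arguments as callee-copied.
+       return arg.named && arg.type && AGGREGATE_TYPE_P (arg.type);
+     }
+*/
+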
/* Emit to STREAM the assembler syntax for insn operand X. */
return HAVE_GNU_INDIRECT_FUNCTION;
}
+/* Return true if we predict the loop LOOP will be transformed to a
+ low-overhead loop, otherwise return false.
+
+ By default, false is returned, as this hook's applicability should be
+ verified for each target. Target maintainers should redefine the hook
+ if their target can take advantage of it. */
+
+bool
+default_predict_doloop_p (class loop *loop ATTRIBUTE_UNUSED)
+{
+ return false;
+}
+
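+/* A target wanting to use the hook above could, for example, predict a
+   doloop whenever the loop body contains no calls (a hypothetical
+   sketch; example_predict_doloop_p is not an in-tree hook):
+
+     static bool
+     example_predict_doloop_p (class loop *loop)
+     {
+       basic_block *bbs = get_loop_body (loop);
+       for (unsigned i = 0; i < loop->num_nodes; i++)
+         for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
+              !gsi_end_p (gsi); gsi_next (&gsi))
+           if (is_gimple_call (gsi_stmt (gsi)))
+             {
+               free (bbs);
+               return false;
+             }
+       free (bbs);
+       return true;
+     }
+*/
+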
/* NULL if INSN is valid within a low-overhead loop, otherwise returns
an error message.
return NULL_TREE;
}
-/* Vectorized conversion. */
-
-tree
-default_builtin_vectorized_conversion (unsigned int code ATTRIBUTE_UNUSED,
- tree dest_type ATTRIBUTE_UNUSED,
- tree src_type ATTRIBUTE_UNUSED)
-{
- return NULL_TREE;
-}
-
/* Default vectorizer cost model values. */
int
}
bool
-hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false (
- cumulative_args_t ca ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED,
- const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED)
+hook_bool_CUMULATIVE_ARGS_arg_info_false (cumulative_args_t,
+ const function_arg_info &)
{
return false;
}
bool
-hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true (
- cumulative_args_t ca ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED,
- const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED)
+hook_bool_CUMULATIVE_ARGS_arg_info_true (cumulative_args_t,
+ const function_arg_info &)
{
return true;
}
int
-hook_int_CUMULATIVE_ARGS_mode_tree_bool_0 (
- cumulative_args_t ca ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED,
- tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED)
+hook_int_CUMULATIVE_ARGS_arg_info_0 (cumulative_args_t,
+ const function_arg_info &)
{
return 0;
}
}
void
-default_function_arg_advance (cumulative_args_t ca ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED,
- const_tree type ATTRIBUTE_UNUSED,
- bool named ATTRIBUTE_UNUSED)
+default_function_arg_advance (cumulative_args_t, const function_arg_info &)
{
gcc_unreachable ();
}
}
rtx
-default_function_arg (cumulative_args_t ca ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED,
- const_tree type ATTRIBUTE_UNUSED,
- bool named ATTRIBUTE_UNUSED)
+default_function_arg (cumulative_args_t, const function_arg_info &)
{
gcc_unreachable ();
}
rtx
-default_function_incoming_arg (cumulative_args_t ca ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED,
- const_tree type ATTRIBUTE_UNUSED,
- bool named ATTRIBUTE_UNUSED)
+default_function_incoming_arg (cumulative_args_t, const function_arg_info &)
{
gcc_unreachable ();
}
return 0;
}
-reg_class_t
-default_branch_target_register_class (void)
-{
- return NO_REGS;
-}
-
reg_class_t
default_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
reg_class_t cl,
return align;
}
-/* Default to natural alignment for vector types. */
+/* Default to natural alignment for vector types, bounded by
+ MAX_OFILE_ALIGNMENT. */
+
HOST_WIDE_INT
default_vector_alignment (const_tree type)
{
- HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type));
- if (align > MAX_OFILE_ALIGNMENT)
- align = MAX_OFILE_ALIGNMENT;
- return align;
+ unsigned HOST_WIDE_INT align = MAX_OFILE_ALIGNMENT;
+ tree size = TYPE_SIZE (type);
+ if (tree_fits_uhwi_p (size))
+ align = tree_to_uhwi (size);
+
+ return align < MAX_OFILE_ALIGNMENT ? align : MAX_OFILE_ALIGNMENT;
}
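+/* For instance, under this default a generic 16-byte vector type such as
+
+     typedef int v4si __attribute__ ((vector_size (16)));
+
+   has a TYPE_SIZE of 128 bits, so the hook returns 128-bit (natural)
+   alignment, assuming MAX_OFILE_ALIGNMENT does not cap it lower.  */
+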
/* The default implementation of
TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT. */
-HOST_WIDE_INT
+poly_uint64
default_preferred_vector_alignment (const_tree type)
{
return TYPE_ALIGN (type);
return mode;
}
-/* By default only the size derived from the preferred vector mode
- is tried. */
+/* By default only the preferred vector mode is tried. */
-void
-default_autovectorize_vector_sizes (vector_sizes *)
+unsigned int
+default_autovectorize_vector_modes (vector_modes *, bool)
{
+ return 0;
}
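+/* A target offering several vector widths would instead push them in
+   order of preference; e.g. a hypothetical 256-bit port (sketch only):
+
+     static unsigned int
+     example_autovectorize_vector_modes (vector_modes *modes, bool)
+     {
+       modes->safe_push (V32QImode);
+       modes->safe_push (V16QImode);
+       // Returning 0 rather than VECT_COMPARE_COSTS means: take the
+       // first mode that works instead of costing the alternatives.
+       return 0;
+     }
+*/
+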
-/* By default a vector of integers is used as a mask. */
+/* The default implementation of TARGET_VECTORIZE_RELATED_MODE. */
opt_machine_mode
-default_get_mask_mode (poly_uint64 nunits, poly_uint64 vector_size)
-{
- unsigned int elem_size = vector_element_size (vector_size, nunits);
- scalar_int_mode elem_mode
- = smallest_int_mode_for_size (elem_size * BITS_PER_UNIT);
- machine_mode vector_mode;
+default_vectorize_related_mode (machine_mode vector_mode,
+ scalar_mode element_mode,
+ poly_uint64 nunits)
+{
+ machine_mode result_mode;
+ if ((maybe_ne (nunits, 0U)
+ || multiple_p (GET_MODE_SIZE (vector_mode),
+ GET_MODE_SIZE (element_mode), &nunits))
+ && mode_for_vector (element_mode, nunits).exists (&result_mode)
+ && VECTOR_MODE_P (result_mode)
+ && targetm.vector_mode_supported_p (result_mode))
+ return result_mode;
- gcc_assert (known_eq (elem_size * nunits, vector_size));
+ return opt_machine_mode ();
+}
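+/* For example, asking for the HImode-element analogue of V16QImode with
+   NUNITS of 0 keeps the total size: 16 bytes at 2 bytes per element
+   gives 8 elements, so the hook returns V8HImode if the target
+   supports that mode.  */
+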
- if (mode_for_vector (elem_mode, nunits).exists (&vector_mode)
- && VECTOR_MODE_P (vector_mode)
- && targetm.vector_mode_supported_p (vector_mode))
- return vector_mode;
+/* By default a vector of integers is used as a mask. */
- return opt_machine_mode ();
+opt_machine_mode
+default_get_mask_mode (machine_mode mode)
+{
+ return related_int_vector_mode (mode);
}
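+/* E.g. for V4SFmode this yields V4SImode when supported: one integer
+   mask element of the same width per vector element.  */
+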
/* By default consider masked stores to be expensive. */
array of three unsigned ints, set it to zero, and return its address. */
void *
-default_init_cost (struct loop *loop_info ATTRIBUTE_UNUSED)
+default_init_cost (class loop *loop_info ATTRIBUTE_UNUSED)
{
unsigned *cost = XNEWVEC (unsigned, 3);
cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
it into the cost specified by WHERE, and returns the cost added. */
unsigned
-default_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
- struct _stmt_vec_info *stmt_info, int misalign,
+default_add_stmt_cost (class vec_info *vinfo, void *data, int count,
+ enum vect_cost_for_stmt kind,
+ class _stmt_vec_info *stmt_info, tree vectype,
+ int misalign,
enum vect_cost_model_location where)
{
unsigned *cost = (unsigned *) data;
unsigned retval = 0;
-
- tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
int stmt_cost = targetm.vectorize.builtin_vectorization_cost (kind, vectype,
misalign);
/* Statements in an inner loop relative to the loop being
vectorized are weighted more heavily. The value here is
arbitrary and could potentially be improved with analysis. */
- if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
+ if (where == vect_body && stmt_info
+ && stmt_in_inner_loop_p (vinfo, stmt_info))
count *= 50; /* FIXME. */
retval = (unsigned) (count * stmt_cost);
if (TYPE_UNSIGNED (TREE_TYPE (base))
|| TYPE_MODE (TREE_TYPE (base)) != TYPE_MODE (integer_type_node))
return false;
- /* The default implementation assumes an errno location
- declaration is never defined in the current compilation unit. */
+ /* The default implementation assumes an errno location declaration
+ is never defined in the current compilation unit and may not be
+ aliased by a local variable. */
if (DECL_P (base)
+ && DECL_EXTERNAL (base)
&& !TREE_STATIC (base))
return true;
else if (TREE_CODE (base) == MEM_REF
return false;
}
+extern bool default_new_address_profitable_p (rtx, rtx_insn *, rtx);
+
+/* The default implementation of TARGET_NEW_ADDRESS_PROFITABLE_P. */
+
+bool
+default_new_address_profitable_p (rtx memref ATTRIBUTE_UNUSED,
+ rtx_insn *insn ATTRIBUTE_UNUSED,
+ rtx new_addr ATTRIBUTE_UNUSED)
+{
+ return true;
+}
+
bool
default_target_option_valid_attribute_p (tree ARG_UNUSED (fndecl),
tree ARG_UNUSED (name),
do not have the "target" pragma. */
if (args)
warning (OPT_Wpragmas,
- "#pragma GCC target is not supported for this machine");
+ "%<#pragma GCC target%> is not supported for this machine");
return false;
}
return false;
}
+/* By default, assume that libc does not provide a fast implementation. */
+
+bool
+default_libc_has_fast_function (int fcode ATTRIBUTE_UNUSED)
+{
+ return false;
+}
+
bool
gnu_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED)
{
#ifdef MOVE_RATIO
move_ratio = (unsigned int) MOVE_RATIO (speed_p);
#else
-#if defined (HAVE_movmemqi) || defined (HAVE_movmemhi) || defined (HAVE_movmemsi) || defined (HAVE_movmemdi) || defined (HAVE_movmemti)
+#if defined (HAVE_cpymemqi) || defined (HAVE_cpymemhi) || defined (HAVE_cpymemsi) || defined (HAVE_cpymemdi) || defined (HAVE_cpymemti)
move_ratio = 2;
-#else /* No movmem patterns, pick a default. */
+#else /* No cpymem patterns, pick a default. */
move_ratio = ((speed_p) ? 15 : 3);
#endif
#endif
}
/* Return TRUE if the move_by_pieces/set_by_pieces infrastructure should be
- used; return FALSE if the movmem/setmem optab should be expanded, or
+ used; return FALSE if the cpymem/setmem optab should be expanded, or
a call to memcpy emitted. */
bool
char buf[256];
static int patch_area_number;
section *previous_section = in_section;
+ const char *asm_op = integer_asm_op (POINTER_SIZE_UNITS, false);
+ gcc_assert (asm_op != NULL);
patch_area_number++;
ASM_GENERATE_INTERNAL_LABEL (buf, "LPFE", patch_area_number);
switch_to_section (get_section ("__patchable_function_entries",
- 0, NULL));
- fputs (integer_asm_op (POINTER_SIZE_UNITS, false), file);
+ SECTION_WRITE | SECTION_RELRO, NULL));
+ assemble_align (POINTER_SIZE);
+ fputs (asm_op, file);
assemble_name_raw (file, buf);
fputc ('\n', file);
unsigned i;
for (i = 0; i < patch_area_size; ++i)
- fprintf (file, "\t%s\n", nop_templ);
+ output_asm_insn (nop_templ, NULL);
}
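+/* User-visible effect: compiling with -fpatchable-function-entry=2 makes
+   this hook emit two NOPs at each function entry and record the address
+   of the patch area in the (writable, RELRO)
+   __patchable_function_entries section.  */
+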
bool
{
machine_mode save_mode = reg_raw_mode[regno];
- if (targetm.hard_regno_call_part_clobbered (regno, save_mode))
- save_mode = choose_hard_reg_mode (regno, 1, true);
+ if (targetm.hard_regno_call_part_clobbered (eh_edge_abi.id (),
+ regno, save_mode))
+ save_mode = choose_hard_reg_mode (regno, 1, &eh_edge_abi);
return save_mode;
}
/* -fpic and -fpie also usually make a PCH invalid. */
if (data[0] != flag_pic)
- return _("created and used with different settings of -fpic");
+ return _("created and used with different settings of %<-fpic%>");
if (data[1] != flag_pie)
- return _("created and used with different settings of -fpie");
+ return _("created and used with different settings of %<-fpie%>");
data += 2;
/* Check target_flags. */
if (ARGS_GROW_DOWNWARD)
gcc_unreachable ();
- indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
+ indirect = pass_va_arg_by_reference (type);
if (indirect)
type = build_pointer_type (type);
real_part = std_gimplify_va_arg_expr (valist,
TREE_TYPE (type), pre_p, NULL);
- real_part = get_initialized_tmp_var (real_part, pre_p, NULL);
+ real_part = get_initialized_tmp_var (real_part, pre_p);
imag_part = std_gimplify_va_arg_expr (unshare_expr (valist),
TREE_TYPE (type), pre_p, NULL);
- imag_part = get_initialized_tmp_var (imag_part, pre_p, NULL);
+ imag_part = get_initialized_tmp_var (imag_part, pre_p);
return build2 (COMPLEX_EXPR, type, real_part, imag_part);
}
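+  /* So for a call such as
+
+       _Complex double x = va_arg (ap, _Complex double);
+
+     the two halves are fetched by scalar va_arg expansions and
+     recombined with a COMPLEX_EXPR.  */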
boundary /= BITS_PER_UNIT;
/* Hoist the valist value into a temporary for the moment. */
- valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
+ valist_tmp = get_initialized_tmp_var (valist, pre_p);
/* va_list pointer is aligned to PARM_BOUNDARY. If argument actually
requires greater alignment, we must perform dynamic alignment. */
return build_va_arg_indirect_ref (addr);
}
-void
-default_setup_incoming_vararg_bounds (cumulative_args_t ca ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED,
- tree type ATTRIBUTE_UNUSED,
- int *pretend_arg_size ATTRIBUTE_UNUSED,
- int second_time ATTRIBUTE_UNUSED)
-{
-}
-
/* An implementation of TARGET_CAN_USE_DOLOOP_P for targets that do
not support nested low-overhead loops. */
{
bool predictable_p = predictable_edge_p (e);
- enum compiler_param param
- = (predictable_p
- ? PARAM_MAX_RTL_IF_CONVERSION_PREDICTABLE_COST
- : PARAM_MAX_RTL_IF_CONVERSION_UNPREDICTABLE_COST);
-
- /* If we have a parameter set, use that, otherwise take a guess using
- BRANCH_COST. */
- if (global_options_set.x_param_values[param])
- return PARAM_VALUE (param);
+ if (predictable_p)
+ {
+ if (global_options_set.x_param_max_rtl_if_conversion_predictable_cost)
+ return param_max_rtl_if_conversion_predictable_cost;
+ }
else
- return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
+ {
+ if (global_options_set.x_param_max_rtl_if_conversion_unpredictable_cost)
+ return param_max_rtl_if_conversion_unpredictable_cost;
+ }
+
+ return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
}
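+/* Both limits are user-tunable; e.g.
+
+     gcc --param max-rtl-if-conversion-predictable-cost=100 ...
+
+   sets the predictable-branch limit explicitly, which the
+   global_options_set checks above detect.  */
+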
/* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION. */