bool overlap_op_by_pieces;
unsigned int fusible_ops;
const struct cpu_vector_cost *vec_costs;
+ const char *function_align = nullptr;
+ const char *jump_align = nullptr;
+ const char *loop_align = nullptr;
};
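
For reference, the three new fields hold the same strings that the -falign-functions=, -falign-jumps= and -falign-loops= options accept, i.e. "N[:M[:N2[:M2]]]". A minimal standalone sketch of the kind of values a per-CPU tuning entry might supply (the struct name and the numbers here are illustrative, not taken from the patch):

/* Trimmed-down model of the new fields only, to show the expected
   string format; a real tuning entry sits inside the full struct.  */
struct align_defaults {
  const char *function_align;
  const char *jump_align;
  const char *loop_align;
};

/* Hypothetical defaults: "16:8" means align to a 16-byte boundary
   only if that costs at most 8 bytes of padding.  */
static const struct align_defaults example_tune_align = {
  "16",    /* function_align */
  "4",     /* jump_align */
  "16:8",  /* loop_align */
};
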
? &optimize_size_tune_info
: cpu->tune_param;
+ /* If not optimizing for size, set the default function, jump and
+    loop alignments to whatever the selected -mtune prefers.  */
+ if (!opts->x_optimize_size)
+ {
+ if (opts->x_flag_align_loops && !opts->x_str_align_loops)
+ opts->x_str_align_loops = tune_param->loop_align;
+ if (opts->x_flag_align_jumps && !opts->x_str_align_jumps)
+ opts->x_str_align_jumps = tune_param->jump_align;
+ if (opts->x_flag_align_functions && !opts->x_str_align_functions)
+ opts->x_str_align_functions = tune_param->function_align;
+ }
+
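
To make the precedence concrete, here is a small self-contained sketch of the same resolution rule: a tune-supplied alignment string is used only when the corresponding -falign-* flag is on, the user gave no explicit string, and we are not optimizing for size. The names (struct opts, resolve_loop_align) are illustrative stand-ins for the GCC internals, not part of the patch:

#include <stdio.h>

struct opts {
  int optimize_size;            /* -Os in effect?  */
  int flag_align_loops;         /* -falign-loops enabled?  */
  const char *str_align_loops;  /* user's -falign-loops= string, if any  */
};

/* Mirror of the hunk above for a single option: the tune default is
   only a fallback, never an override.  */
static const char *
resolve_loop_align (struct opts *o, const char *tune_loop_align)
{
  if (!o->optimize_size && o->flag_align_loops && !o->str_align_loops)
    o->str_align_loops = tune_loop_align;
  return o->str_align_loops;
}

int
main (void)
{
  struct opts a = { 0, 1, NULL };  /* no user string: tune default wins  */
  struct opts b = { 0, 1, "32" };  /* explicit -falign-loops=32: kept  */
  struct opts c = { 1, 1, NULL };  /* -Os: tune default is ignored  */
  const char *rc = resolve_loop_align (&c, "8");
  printf ("a=%s b=%s c=%s\n",
          resolve_loop_align (&a, "8"),
          resolve_loop_align (&b, "8"),
          rc ? rc : "(none)");
  return 0;
}

This prints "a=8 b=32 c=(none)", matching the behavior of the hunk above.
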
/* Use -mtune's setting for slow_unaligned_access, even when optimizing
for size. For architectures that trap and emulate unaligned accesses,
the performance cost is too great, even for -Os. Similarly, if