* postreload-gcse.c (gate_handle_gcse2): Disable for functions
optimized for size.
* final.c (compute_alignments): Use optimize_bb_for_size_p.
* tree-call-cdce.c (gate_call_cdce): Use optimize_function_for_speed_p.
* opts.c (flag_predictive_commoning_set, flag_unswitch_loops_set,
flag_gcse_after_reload_set): New static vars.
(common_handle_option): Enable those flags for profile-use.
(decode_options): Remove optimize_size flags that are now handled
at finer granularity.
* tree-vectorizer.c (vectorize_loops): Use
optimize_loop_nest_for_speed_p.
* tree-ssa-pre.c (do_pre): Use optimize_function_for_speed_p.
* tree-predcom.c (tree_predictive_commoning): Use
optimize_loop_for_speed_p.
* varasm.c (assemble_start_function): Use optimize_function_for_speed_p.
* bb-reorder.c (rest_of_handle_reorder_blocks): Likewise.
* predict.c (optimize_loop_nest_for_speed_p): Fix walk of the loop tree.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@139801 138bc75d-0d04-0410-961f-82ee72b054a4
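The common thread in these changes is that pass gates stop consulting the
global optimize_size flag and instead ask the profile-aware predicates in
predict.c, so that under -fprofile-use individual functions, basic blocks
and loops can be treated as hot or cold. A minimal sketch of the converted
gate shape, assuming the GCC-internal API of this period (gate_some_pass
and flag_some_pass are hypothetical names):

/* Hypothetical pass gate sketching the pattern applied here to
   gate_handle_gcse2, gate_call_cdce and do_pre: run a potentially
   size-increasing optimization only where speed matters.  */
static bool
gate_some_pass (void)
{
  /* optimize_function_for_speed_p (cfun) is false for functions that
     are cold in the profile or compiled for size, so those keep the
     smaller code.  */
  return (optimize > 0 && flag_some_pass
	  && optimize_function_for_speed_p (cfun));
}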
+2008-08-30 Jan Hubicka <jh@suse.cz>
+
+ * postreload-gcse.c (gate_handle_gcse2): Disable for functions
+ optimized for size.
+ * final.c (compute_alignments): Use optimize_bb_for_size_p.
+ * tree-call-cdce.c (gate_call_cdce): Use optimize_function_for_speed_p.
+ * opts.c (flag_predictive_commoning_set, flag_unswitch_loops_set,
+ flag_gcse_after_reload_set): New static vars.
+ (common_handle_option): Enable those flags for profile-use.
+ (decode_options): Remove optimize_size flags that are now handled
+ at finer granularity.
+ * tree-vectorizer.c (vectorize_loops): Use
+ optimize_loop_nest_for_speed_p.
+ * tree-ssa-pre.c (do_pre): Use optimize_function_for_speed_p.
+ * tree-predcom.c (tree_predictive_commoning): Use
+ optimize_loop_for_speed_p.
+ * varasm.c (assemble_start_function): Use optimize_function_for_speed_p.
+ * bb-reorder.c (rest_of_handle_reorder_blocks): Likewise.
+ * predict.c (optimize_loop_nest_for_speed_p): Fix walk of the loop tree.
+
2008-08-30 Jan Hubicka <jh@suse.cz>
* ipa-inline.c (cgraph_estimate_growth): Discover self recursive
/* Last attempt to optimize CFG, as scheduling, peepholing and insn
splitting possibly introduced more crossjumping opportunities. */
cfg_layout_initialize (CLEANUP_EXPENSIVE);
- if (flag_reorder_blocks || flag_reorder_blocks_and_partition)
+ if ((flag_reorder_blocks || flag_reorder_blocks_and_partition)
+ /* Don't reorder blocks when optimizing for size because extra jump insns may
+ be created; also barrier may create extra padding.
+
+ More correctly we should have a block reordering mode that tried to
+ minimize the combined size of all the jumps. This would more or less
+ automatically remove extra jumps, but would also try to use more short
+ jumps instead of long jumps. */
+ && optimize_function_for_speed_p (cfun))
{
reorder_basic_blocks ();
cleanup_cfg (CLEANUP_EXPENSIVE);
edge_iterator ei;
if (!LABEL_P (label)
- || probably_never_executed_bb_p (bb))
+ || optimize_bb_for_size_p (bb))
{
if (dump_file)
fprintf(dump_file, "BB %4i freq %4i loop %2i loop_depth %2i skipped.\n",
2008-08-29 Jan Hubicka <jh@suse.cz>
- * parse.c (parse_interface): Likewise.
+ * parse.c (parse_interface): Silence uninitialized var warning.
2008-08-29 Jakub Jelinek <jakub@redhat.com>
static bool flag_value_profile_transformations_set;
static bool flag_peel_loops_set, flag_branch_probabilities_set;
static bool flag_inline_functions_set, flag_ipa_cp_set, flag_ipa_cp_clone_set;
+static bool flag_predictive_commoning_set, flag_unswitch_loops_set, flag_gcse_after_reload_set;
/* Functions excluded from profiling. */
if (optimize_size)
{
- /* Conditional DCE generates bigger code. */
- flag_tree_builtin_call_dce = 0;
-
- /* PRE tends to generate bigger code. */
- flag_tree_pre = 0;
-
- /* These options are set with -O3, so reset for -Os */
- flag_predictive_commoning = 0;
- flag_gcse_after_reload = 0;
- flag_tree_vectorize = 0;
-
- /* Don't reorder blocks when optimizing for size because extra jump insns may
- be created; also barrier may create extra padding.
-
- More correctly we should have a block reordering mode that tried to
- minimize the combined size of all the jumps. This would more or less
- automatically remove extra jumps, but would also try to use more short
- jumps instead of long jumps. */
- flag_reorder_blocks = 0;
- flag_reorder_blocks_and_partition = 0;
-
/* Inlining of functions reducing size is a good idea regardless of them
being declared inline. */
flag_inline_functions = 1;
- /* Don't align code. */
- align_loops = 1;
- align_jumps = 1;
- align_labels = 1;
- align_functions = 1;
-
/* Basic optimization options. */
optimize_size = 1;
if (optimize > 2)
if (!flag_ipa_cp_clone_set
&& value && flag_ipa_cp)
flag_ipa_cp_clone = value;
+ if (!flag_predictive_commoning_set)
+ flag_predictive_commoning = value;
+ if (!flag_unswitch_loops_set)
+ flag_unswitch_loops = value;
+ if (!flag_gcse_after_reload_set)
+ flag_gcse_after_reload = value;
break;
case OPT_fprofile_generate_:
flag_ipa_cp_clone_set = true;
break;
+ case OPT_fpredictive_commoning:
+ flag_predictive_commoning_set = true;
+ break;
+
+ case OPT_funswitch_loops:
+ flag_unswitch_loops_set = true;
+ break;
+
+ case OPT_fgcse_after_reload:
+ flag_gcse_after_reload_set = true;
+ break;
+
case OPT_funroll_loops:
flag_unroll_loops_set = true;
break;
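The pattern in the opts.c hunks above is worth spelling out:
common_handle_option records in a *_set variable whenever the user passes
an option explicitly, and the profile-use case then enables a flag only
when it is still at its default. A self-contained sketch of that
interaction, with hypothetical flag names standing in for the real OPT_*
machinery:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real flags in opts.c.  */
static bool flag_foo;      /* the optimization itself */
static bool flag_foo_set;  /* set iff the user passed -ffoo/-fno-foo */

/* Mirrors the OPT_f... cases above: remember the explicit choice.  */
static void
handle_opt_foo (bool value)
{
  flag_foo = value;
  flag_foo_set = true;
}

/* Mirrors the profile-use case: only touch flags the user left alone.  */
static void
handle_profile_use (bool value)
{
  if (!flag_foo_set)
    flag_foo = value;
}

int
main (void)
{
  handle_opt_foo (false);     /* -fno-foo on the command line...  */
  handle_profile_use (true);  /* ...so -fprofile-use must not override it */
  printf ("flag_foo = %d\n", flag_foo);  /* prints 0 */
  return 0;
}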
static bool
gate_handle_gcse2 (void)
{
- return (optimize > 0 && flag_gcse_after_reload);
+ return (optimize > 0 && flag_gcse_after_reload
+ && optimize_function_for_speed_p (cfun));
}
else if (l->next)
l = l->next;
else
- l = loop_outer (l);
+ {
+ while (l != loop && !l->next)
+ l = loop_outer (l);
+ if (l != loop)
+ l = l->next;
+ }
}
return false;
}
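The walk fix above is easy to miss: as the removed line reads, the old
code, on exhausting a subtree, stepped to the parent with loop_outer and
then immediately re-descended into the inner child it had just finished,
so nests more than one level deep never terminated. The fixed code climbs
until an unvisited sibling exists. A self-contained sketch of the
corrected traversal, with a minimal stand-in for GCC's struct loop:

#include <stdio.h>

/* Minimal stand-in for GCC's loop tree links (sketch only).  */
struct loop
{
  const char *name;
  struct loop *inner;   /* first child */
  struct loop *next;    /* next sibling */
  struct loop *outer;   /* parent; GCC spells this loop_outer (l) */
};

/* Non-recursive preorder walk mirroring the fixed traversal in
   optimize_loop_nest_for_speed_p: descend into inner, then take
   siblings; when a subtree is exhausted, climb until a sibling
   remains to be visited.  */
static void
walk_loop_nest (struct loop *loop)
{
  struct loop *l = loop->inner;
  while (l && l != loop)
    {
      printf ("visiting %s\n", l->name);
      if (l->inner)
	l = l->inner;
      else if (l->next)
	l = l->next;
      else
	{
	  while (l != loop && !l->next)
	    l = l->outer;
	  if (l != loop)
	    l = l->next;
	}
    }
}

int
main (void)
{
  struct loop root = { "root", 0, 0, 0 };
  struct loop a    = { "a",    0, 0, &root };
  struct loop a1   = { "a.1",  0, 0, &a };
  struct loop b    = { "b",    0, 0, &root };
  root.inner = &a; a.next = &b; a.inner = &a1;
  walk_loop_nest (&root);   /* visits a, a.1, b exactly once */
  return 0;
}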
/* The limit constants used in the implementation
assume IEEE floating point format. Other formats
can be supported in the future if needed. */
- return flag_tree_builtin_call_dce != 0;
+ return flag_tree_builtin_call_dce != 0 && optimize_function_for_speed_p (cfun);
}
struct gimple_opt_pass pass_call_cdce =
initialize_original_copy_tables ();
FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST)
- {
- unrolled |= tree_predictive_commoning_loop (loop);
- }
+ if (optimize_loop_for_speed_p (loop))
+ {
+ unrolled |= tree_predictive_commoning_loop (loop);
+ }
if (unrolled)
{
static bool
gate_pre (void)
{
- return flag_tree_pre != 0;
+ /* PRE tends to generate bigger code. */
+ return flag_tree_pre != 0 && optimize_function_for_speed_p (cfun);
}
struct gimple_opt_pass pass_pre =
than all previously defined loops. This fact allows us to run
only over initial loops skipping newly generated ones. */
FOR_EACH_LOOP (li, loop, 0)
- {
- loop_vec_info loop_vinfo;
+ if (optimize_loop_nest_for_speed_p (loop))
+ {
+ loop_vec_info loop_vinfo;
- vect_loop_location = find_loop_location (loop);
- loop_vinfo = vect_analyze_loop (loop);
- loop->aux = loop_vinfo;
+ vect_loop_location = find_loop_location (loop);
+ loop_vinfo = vect_analyze_loop (loop);
+ loop->aux = loop_vinfo;
- if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
- continue;
+ if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
+ continue;
- vect_transform_loop (loop_vinfo);
- num_vectorized_loops++;
- }
+ vect_transform_loop (loop_vinfo);
+ num_vectorized_loops++;
+ }
vect_loop_location = UNKNOWN_LOC;
statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
because ASM_OUTPUT_MAX_SKIP_ALIGN might not do any alignment at all. */
if (! DECL_USER_ALIGN (decl)
&& align_functions_log > align
- && cfun->function_frequency != FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
+ && optimize_function_for_speed_p (cfun))
{
#ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
ASM_OUTPUT_MAX_SKIP_ALIGN (asm_out_file,