parse.c (parse_interface): Silence uninitialized var warning.
authorJan Hubicka <jh@suse.cz>
Sat, 30 Aug 2008 10:38:55 +0000 (12:38 +0200)
committerJan Hubicka <hubicka@gcc.gnu.org>
Sat, 30 Aug 2008 10:38:55 +0000 (10:38 +0000)
* parse.c (parse_interface): Silence uninitialized var warning.

* postreload-gcse.c (gate_handle_gcse2): Disable for functions
optimized for speed.
* final.c (compute_alignments): Use optimize_bb_for_size_p.
* tree-call-cdce.c (gate_call_cdce): Use optimize_function_for_speed_p.
* opts.c (flag_predictive_commoning_set, flag_unswitch_loops_set,
flag_gcse_after_reload_set): New static vars.
(common_handle_option): Enable those flags for profile-use.
(decode_options): Remove optimize_size flags that are handled
on higher granularity.
* tree-vectorizer.c (vectorize_loops): Use
optimize_loop_nest_for_speed_p.
* tree-ssa-pre.c (do_pre): Use optimize_function_for_speed_p.
* tree-predcom.c (tree_predictive_commoning): Use
optimize_loop_for_speed_p.
* varasm.c (assemble_start_function): Use optimize_function_for_speed_p.
* bb-reorder.c (rest_of_handle_reorder_blocks): Likewise.
* predict.c (optimize_loop_for_speed_p): Fix walk.

From-SVN: r139801

12 files changed:
gcc/ChangeLog
gcc/bb-reorder.c
gcc/final.c
gcc/fortran/ChangeLog
gcc/opts.c
gcc/postreload-gcse.c
gcc/predict.c
gcc/tree-call-cdce.c
gcc/tree-predcom.c
gcc/tree-ssa-pre.c
gcc/tree-vectorizer.c
gcc/varasm.c

index b712a12dec985259478369af0f9bd8118fb1e734..5a2ec127e49007b4488c424607c879f4182b8751 100644 (file)
@@ -1,3 +1,23 @@
+2008-08-30  Jan Hubicka  <jh@suse.cz>
+
+       * postreload-gcse.c (gate_handle_gcse2): Disable for functions
+       optimized for speed.
+       * final.c (compute_alignments): Use optimize_bb_for_size_p.
+       * tree-call-cdce.c (gate_call_cdce): Use optimize_function_for_speed_p.
+       * opts.c (flag_predictive_commoning_set, flag_unswitch_loops_set,
+       flag_gcse_after_reload_set): New static vars.
+       (common_handle_option): Enable those flags for profile-use.
+       (decode_options): Remove optimize_size flags that are handled
+       on higher granularity.
+       * tree-vectorizer.c (vectorize_loops): Use
+       optimize_loop_nest_for_speed_p.
+       * tree-ssa-pre.c (do_pre): Use optimize_function_for_speed_p.
+       * tree-predcom.c (tree_predictive_commoning): Use
+       optimize_loop_for_speed_p.
+       * varasm.c (assemble_start_function): Use optimize_function_for_speed_p.
+       * bb-reorder.c (rest_of_handle_reorder_blocks): Likewise.
+       * predict.c (optimize_loop_for_speed_p): Fix walk.
+
 2008-08-30  Jan Hubicka  <jh@suse.cz>
 
        * ipa-inline.c (cgraph_estimate_growth): Discover self recursive
index 3bf2dc72c7efb0b5e8e7e7febc4299d492820b0f..b636c1e3a1dfeffd9e15bda33f433d83977f942e 100644 (file)
@@ -2225,7 +2225,15 @@ rest_of_handle_reorder_blocks (void)
      splitting possibly introduced more crossjumping opportunities.  */
   cfg_layout_initialize (CLEANUP_EXPENSIVE);
 
-  if (flag_reorder_blocks || flag_reorder_blocks_and_partition)
+  if ((flag_reorder_blocks || flag_reorder_blocks_and_partition)
+      /* Don't reorder blocks when optimizing for size because extra jump insns may
+        be created; also barrier may create extra padding.
+
+        More correctly we should have a block reordering mode that tried to
+        minimize the combined size of all the jumps.  This would more or less
+        automatically remove extra jumps, but would also try to use more short
+        jumps instead of long jumps.  */
+      && optimize_function_for_speed_p (cfun))
     {
       reorder_basic_blocks ();
       cleanup_cfg (CLEANUP_EXPENSIVE);
index 01689c16301e18f3873c44e98091af8024bef006..c1359e8410a6d27624b374408be9a64760644159 100644 (file)
@@ -707,7 +707,7 @@ compute_alignments (void)
       edge_iterator ei;
 
       if (!LABEL_P (label)
-         || probably_never_executed_bb_p (bb))
+         || optimize_bb_for_size_p (bb))
        {
          if (dump_file)
            fprintf(dump_file, "BB %4i freq %4i loop %2i loop_depth %2i skipped.\n",
index e6e9cca653defa62726bdca88df82a8c35d303df..6a88c38e724fcdb2060e57d945e1d82d54cdbabc 100644 (file)
@@ -1,6 +1,6 @@
 2008-08-29  Jan Hubicka  <jh@suse.cz>
        
-       * parse.c (parse_interface): Likewise.
+       * parse.c (parse_interface): Silence uninitialized var warning.
 
 2008-08-29  Jakub Jelinek  <jakub@redhat.com>
 
index fbe67569c2d8756f8c2db1aaf9e929efb068d09a..621f6b3fff3dc62cd2523a85a48d3ebf027abcc1 100644 (file)
@@ -348,6 +348,7 @@ static bool flag_unroll_loops_set, flag_tracer_set;
 static bool flag_value_profile_transformations_set;
 static bool flag_peel_loops_set, flag_branch_probabilities_set;
 static bool flag_inline_functions_set, flag_ipa_cp_set, flag_ipa_cp_clone_set;
+static bool flag_predictive_commoning_set, flag_unswitch_loops_set, flag_gcse_after_reload_set;
 
 /* Functions excluded from profiling.  */
 
@@ -993,37 +994,10 @@ decode_options (unsigned int argc, const char **argv)
 
   if (optimize_size)
     {
-      /* Conditional DCE generates bigger code.  */
-      flag_tree_builtin_call_dce = 0;
-
-      /* PRE tends to generate bigger code.  */
-      flag_tree_pre = 0;
-
-      /* These options are set with -O3, so reset for -Os */
-      flag_predictive_commoning = 0;
-      flag_gcse_after_reload = 0;
-      flag_tree_vectorize = 0;
-
-      /* Don't reorder blocks when optimizing for size because extra jump insns may
-        be created; also barrier may create extra padding.
-
-        More correctly we should have a block reordering mode that tried to
-        minimize the combined size of all the jumps.  This would more or less
-        automatically remove extra jumps, but would also try to use more short
-        jumps instead of long jumps.  */
-      flag_reorder_blocks = 0;
-      flag_reorder_blocks_and_partition = 0;
-
       /* Inlining of functions reducing size is a good idea regardless of them
         being declared inline.  */
       flag_inline_functions = 1;
 
-      /* Don't align code.  */
-      align_loops = 1;
-      align_jumps = 1;
-      align_labels = 1;
-      align_functions = 1;
-
       /* Basic optimization options.  */
       optimize_size = 1;
       if (optimize > 2)
@@ -1839,6 +1813,12 @@ common_handle_option (size_t scode, const char *arg, int value,
       if (!flag_ipa_cp_clone_set
          && value && flag_ipa_cp)
        flag_ipa_cp_clone = value;
+      if (!flag_predictive_commoning_set)
+       flag_predictive_commoning = value;
+      if (!flag_unswitch_loops_set)
+       flag_unswitch_loops = value;
+      if (!flag_gcse_after_reload_set)
+       flag_gcse_after_reload = value;
       break;
 
     case OPT_fprofile_generate_:
@@ -2004,6 +1984,18 @@ common_handle_option (size_t scode, const char *arg, int value,
       flag_ipa_cp_clone_set = true;
       break;
 
+    case OPT_fpredictive_commoning:
+      flag_predictive_commoning_set = true;
+      break;
+
+    case OPT_funswitch_loops:
+      flag_unswitch_loops_set = true;
+      break;
+
+    case OPT_fgcse_after_reload:
+      flag_gcse_after_reload_set = true;
+      break;
+
     case OPT_funroll_loops:
       flag_unroll_loops_set = true;
       break;
index 352503fcaae481bbbd060b1ea1ce1ff6ac6e17db..884830abebd2c1c912e13ca7757abf2bda202635 100644 (file)
@@ -1306,7 +1306,8 @@ gcse_after_reload_main (rtx f ATTRIBUTE_UNUSED)
 static bool
 gate_handle_gcse2 (void)
 {
-  return (optimize > 0 && flag_gcse_after_reload);
+  return (optimize > 0 && flag_gcse_after_reload
+         && optimize_function_for_speed_p (cfun));
 }
 
 
index 4e17b9ac676146f0ae0452fef50604850f96265d..d5de938ccfaca35704722d64162a18160bd7bbaa 100644 (file)
@@ -300,7 +300,12 @@ optimize_loop_nest_for_speed_p (struct loop *loop)
       else if (l->next)
         l = l->next;
       else
-       l = loop_outer (l);
+        {
+         while (l != loop && !l->next)
+           l = loop_outer (l);
+         if (l != loop)
+           l = l->next;
+       }
     }
   return false;
 }
index ce9572ca142870cca0f0da0f514059ceaf43132f..f59f083d5c987bc5cc3aeb9feff9d1ecdf91e484 100644 (file)
@@ -913,7 +913,7 @@ gate_call_cdce (void)
   /* The limit constants used in the implementation
      assume IEEE floating point format.  Other formats
      can be supported in the future if needed.  */
-  return flag_tree_builtin_call_dce != 0; 
+  return flag_tree_builtin_call_dce != 0 && optimize_function_for_speed_p (cfun);
 }
 
 struct gimple_opt_pass pass_call_cdce =
index e26149fd1b40ae0b9b6852f20dea25df31efb14b..85cfbd66d898dd35a8d7acbd76e36c12b97bf722 100644 (file)
@@ -2650,9 +2650,10 @@ tree_predictive_commoning (void)
 
   initialize_original_copy_tables ();
   FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST)
-    {
-      unrolled |= tree_predictive_commoning_loop (loop);
-    }
+    if (optimize_loop_for_speed_p (loop))
+      {
+       unrolled |= tree_predictive_commoning_loop (loop);
+      }
 
   if (unrolled)
     {
index 606cafa1792d726b2e7165136563197bd09a3636..8324f09aaa2ae59de382c17ff10350db9d866a44 100644 (file)
@@ -4230,7 +4230,8 @@ do_pre (void)
 static bool
 gate_pre (void)
 {
-  return flag_tree_pre != 0;
+  /* PRE tends to generate bigger code.  */
+  return flag_tree_pre != 0 && optimize_function_for_speed_p (cfun);
 }
 
 struct gimple_opt_pass pass_pre =
index cdab0b54df0db51fc60dc69658dcfa3e186154df..474860adedbcbc5f515cd4fb699a5048d5b4064a 100644 (file)
@@ -2806,19 +2806,20 @@ vectorize_loops (void)
      than all previously defined loops. This fact allows us to run 
      only over initial loops skipping newly generated ones.  */
   FOR_EACH_LOOP (li, loop, 0)
-    {
-      loop_vec_info loop_vinfo;
+    if (optimize_loop_nest_for_speed_p (loop))
+      {
+       loop_vec_info loop_vinfo;
 
-      vect_loop_location = find_loop_location (loop);
-      loop_vinfo = vect_analyze_loop (loop);
-      loop->aux = loop_vinfo;
+       vect_loop_location = find_loop_location (loop);
+       loop_vinfo = vect_analyze_loop (loop);
+       loop->aux = loop_vinfo;
 
-      if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
-       continue;
+       if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
+         continue;
 
-      vect_transform_loop (loop_vinfo);
-      num_vectorized_loops++;
-    }
+       vect_transform_loop (loop_vinfo);
+       num_vectorized_loops++;
+      }
   vect_loop_location = UNKNOWN_LOC;
 
   statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
index 863ffd010f148b98163d7b8199eec9b3e460b982..5aa0140e002a071e84683abfb82bb8021a281a37 100644 (file)
@@ -1723,7 +1723,7 @@ assemble_start_function (tree decl, const char *fnname)
      because ASM_OUTPUT_MAX_SKIP_ALIGN might not do any alignment at all.  */
   if (! DECL_USER_ALIGN (decl)
       && align_functions_log > align
-      && cfun->function_frequency != FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
+      && optimize_function_for_speed_p (cfun))
     {
 #ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
       ASM_OUTPUT_MAX_SKIP_ALIGN (asm_out_file,