bool check_calls_comdat_local_p ();
-/* Return true if function should be optimized for size. */
+/* Return level at which function should be optimized for size. */
- bool optimize_for_size_p (void);
+ enum optimize_size_level optimize_for_size_p (void);
/* Dump the callgraph to file F. */
static void dump_cgraph (FILE *f);
-/* Return true if function should be optimized for size. */
+/* Return level at which function should be optimized for size. */
-inline bool
+inline enum optimize_size_level
cgraph_node::optimize_for_size_p (void)
{
if (opt_for_fn (decl, optimize_size))
- return true;
+ return OPTIMIZE_SIZE_MAX;
+ if (count == profile_count::zero ())
+ return OPTIMIZE_SIZE_MAX;
if (frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
- return true;
+ return OPTIMIZE_SIZE_BALANCED;
else
- return false;
+ return OPTIMIZE_SIZE_NO;
}
/* Return symtab_node for NODE or create one if it is not present
EXCESS_PRECISION_TYPE_FAST
};
+/* Level of size optimization. */
+
+enum optimize_size_level
+{
+ /* Do not optimize for size. */
+ OPTIMIZE_SIZE_NO,
+ /* Optimize for size but not at extreme performance costs. */
+ OPTIMIZE_SIZE_BALANCED,
+ /* Optimize for size as much as possible. */
+ OPTIMIZE_SIZE_MAX
+};
+
/* Support for user-provided GGC and PCH markers. The first parameter
is a pointer to a pointer, the second a cookie. */
typedef void (*gt_pointer_operator) (void *, void *);
static bool
unlikely_executed_edge_p (edge e)
{
- return (e->count () == profile_count::zero ()
+ return (e->src->count == profile_count::zero ()
|| e->probability == profile_probability::never ())
|| (e->flags & (EDGE_EH | EDGE_FAKE));
}
-/* Return true if function FUN should always be optimized for size. */
+/* Return level at which function FUN should always be optimized for size. */
-bool
+optimize_size_level
optimize_function_for_size_p (struct function *fun)
{
if (!fun || !fun->decl)
- return optimize_size;
+ return optimize_size ? OPTIMIZE_SIZE_MAX : OPTIMIZE_SIZE_NO;
cgraph_node *n = cgraph_node::get (fun->decl);
- return n && n->optimize_for_size_p ();
+ if (n)
+ return n->optimize_for_size_p ();
+ return OPTIMIZE_SIZE_NO;
}
/* Return true if function FUN should always be optimized for speed. */
-/* Return TRUE if basic block BB should be optimized for size. */
+/* Return level at which basic block BB should be optimized for size. */
-bool
+optimize_size_level
optimize_bb_for_size_p (const_basic_block bb)
{
- return (optimize_function_for_size_p (cfun)
- || (bb && !maybe_hot_bb_p (cfun, bb)));
+ enum optimize_size_level ret = optimize_function_for_size_p (cfun);
+
+ if (bb && ret < OPTIMIZE_SIZE_MAX && bb->count == profile_count::zero ())
+ ret = OPTIMIZE_SIZE_MAX;
+ if (bb && ret < OPTIMIZE_SIZE_BALANCED && !maybe_hot_bb_p (cfun, bb))
+ ret = OPTIMIZE_SIZE_BALANCED;
+ return ret;
}
/* Return TRUE if basic block BB should be optimized for speed. */
-/* Return TRUE if edge E should be optimized for size. */
+/* Return level at which edge E should be optimized for size. */
-bool
+optimize_size_level
optimize_edge_for_size_p (edge e)
{
- return optimize_function_for_size_p (cfun) || !maybe_hot_edge_p (e);
+ enum optimize_size_level ret = optimize_function_for_size_p (cfun);
+
+ if (ret < OPTIMIZE_SIZE_MAX && unlikely_executed_edge_p (e))
+ ret = OPTIMIZE_SIZE_MAX;
+ if (ret < OPTIMIZE_SIZE_BALANCED && !maybe_hot_edge_p (e))
+ ret = OPTIMIZE_SIZE_BALANCED;
+ return ret;
}
/* Return TRUE if edge E should be optimized for speed. */
-/* Return TRUE if the current function is optimized for size. */
+/* Return level at which the current function should be optimized for size. */
-bool
+optimize_size_level
optimize_insn_for_size_p (void)
{
- return optimize_function_for_size_p (cfun) || !crtl->maybe_hot_insn_p;
+ enum optimize_size_level ret = optimize_function_for_size_p (cfun);
+ if (ret < OPTIMIZE_SIZE_BALANCED && !crtl->maybe_hot_insn_p)
+ ret = OPTIMIZE_SIZE_BALANCED;
+ return ret;
}
/* Return TRUE if the current function is optimized for speed. */
-/* Return TRUE if LOOP should be optimized for size. */
+/* Return level at which LOOP should be optimized for size. */
-bool
+optimize_size_level
optimize_loop_for_size_p (class loop *loop)
{
return optimize_bb_for_size_p (loop->header);
-/* Return TRUE if nest rooted at LOOP should be optimized for size. */
+/* Return level at which nest rooted at LOOP should be optimized for size. */
-bool
+optimize_size_level
optimize_loop_nest_for_size_p (class loop *loop)
{
- return !optimize_loop_nest_for_speed_p (loop);
+ enum optimize_size_level ret = optimize_loop_for_size_p (loop);
+ class loop *l = loop;
+
+ l = loop->inner;
+ while (l && l != loop)
+ {
+ if (ret == OPTIMIZE_SIZE_NO)
+ break;
+ ret = MIN (optimize_loop_for_size_p (l), ret);
+ if (l->inner)
+ l = l->inner;
+ else if (l->next)
+ l = l->next;
+ else
+ {
+ while (l != loop && !l->next)
+ l = loop_outer (l);
+ if (l != loop)
+ l = l->next;
+ }
+ }
+ return ret;
}
/* Return true if edge E is likely to be well predictable by branch
extern bool maybe_hot_edge_p (edge);
extern bool probably_never_executed_bb_p (struct function *, const_basic_block);
extern bool probably_never_executed_edge_p (struct function *, edge);
-extern bool optimize_function_for_size_p (struct function *);
+extern enum optimize_size_level optimize_function_for_size_p (struct function *);
extern bool optimize_function_for_speed_p (struct function *);
extern optimization_type function_optimization_type (struct function *);
-extern bool optimize_bb_for_size_p (const_basic_block);
+extern enum optimize_size_level optimize_bb_for_size_p (const_basic_block);
extern bool optimize_bb_for_speed_p (const_basic_block);
extern optimization_type bb_optimization_type (const_basic_block);
-extern bool optimize_edge_for_size_p (edge);
+extern enum optimize_size_level optimize_edge_for_size_p (edge);
extern bool optimize_edge_for_speed_p (edge);
-extern bool optimize_insn_for_size_p (void);
+extern enum optimize_size_level optimize_insn_for_size_p (void);
extern bool optimize_insn_for_speed_p (void);
-extern bool optimize_loop_for_size_p (class loop *);
+extern enum optimize_size_level optimize_loop_for_size_p (class loop *);
extern bool optimize_loop_for_speed_p (class loop *);
extern bool optimize_loop_nest_for_speed_p (class loop *);
-extern bool optimize_loop_nest_for_size_p (class loop *);
+extern enum optimize_size_level optimize_loop_nest_for_size_p (class loop *);
extern bool predictable_edge_p (edge);
extern void rtl_profile_for_bb (basic_block);
extern void rtl_profile_for_edge (edge);