* ipa-fnsummary.c: Fix comment typos.
* ipa-ref.h: Likewise.
* ipa-predicate.h: Likewise.
* ipa-split.c: Likewise.
* ipa-inline-analysis.c: Likewise.
* ipa-predicate.c: Likewise.
* ipa-devirt.c: Likewise.
* ipa-icf.h: Likewise.
* profile-count.c: Likewise.
* ipa-icf.c: Likewise.
(sem_function::equals_wpa): Fix typos in dump messages.
* ipa-icf-gimple.h: Fix comment typos.
* ipa-inline-transform.c: Likewise.
* ipa-polymorphic-call.c: Likewise.
* ipa-fnsummary.h: Likewise.
* ipa-inline.c: Likewise.
(dump_inline_stats): Fix typo in debug dump message.
* profile-count.h: Fix comment typos.
From-SVN: r278643
+2019-11-23  Jakub Jelinek  <jakub@redhat.com>
+ * ipa-fnsummary.c: Fix comment typos.
+ * ipa-ref.h: Likewise.
+ * ipa-predicate.h: Likewise.
+ * ipa-split.c: Likewise.
+ * ipa-inline-analysis.c: Likewise.
+ * ipa-predicate.c: Likewise.
+ * ipa-devirt.c: Likewise.
+ * ipa-icf.h: Likewise.
+ * profile-count.c: Likewise.
+ * ipa-icf.c: Likewise.
+ (sem_function::equals_wpa): Fix typos in dump messages.
+ * ipa-icf-gimple.h: Fix comment typos.
+ * ipa-inline-transform.c: Likewise.
+ * ipa-polymorphic-call.c: Likewise.
+ * ipa-fnsummary.h: Likewise.
+ * ipa-inline.c: Likewise.
+ (dump_inline_stats): Fix typo in debug dump message.
+ * profile-count.h: Fix comment typos.
+
PR target/92615
* config/i386/i386.c (ix86_md_asm_adjust): If dest_mode is
GET_MODE (dest), is not QImode, using ZERO_EXTEND and dest is not
bool all_derivations_known;
/* Did we report ODR violation here? */
bool odr_violated;
- /* Set when virtual table without RTTI previaled table with. */
+ /* Set when a virtual table without RTTI prevailed over a table with it. */
bool rtti_broken;
/* Set when the canonical type is determined using the type name. */
bool tbaa_enabled;
end2 = !vtable->iterate_reference (n2, ref2);
/* !DECL_VIRTUAL_P means RTTI entry;
- We warn when RTTI is lost because non-RTTI previals; we silently
+ We warn when RTTI is lost because non-RTTI prevails; we silently
accept the other case. */
while (!end2
&& (end1
class_type->odr_violated = true;
- /* Complain about size mismatch. Either we have too many virutal
+ /* Complain about size mismatch. Either we have too many virtual
functions or too many virtual table pointers. */
if (end1 || end2)
{
if (!warn || !TYPE_NAME (TYPE_MAIN_VARIANT (t1)))
return;
- /* ODR warnings are output druing LTO streaming; we must apply location
+ /* ODR warnings are output during LTO streaming; we must apply location
cache for potential warnings to be output correctly. */
if (lto_location_cache::current_cache)
lto_location_cache::current_cache->apply_location_cache ();
*warned = true;
}
-/* Return ture if T1 and T2 are incompatible and we want to recusively
+/* Return true if T1 and T2 are incompatible and we want to recursively
dive into them from warn_type_mismatch to give sensible answer. */
static bool
This is hard to do in general. We basically handle the common cases.
If LOC1 and LOC2 are meaningful locations, use it in the case the types
- themselves do no thave one.*/
+ themselves do not have one. */
void
warn_types_mismatch (tree t1, tree t2, location_t loc1, location_t loc2)
n1 = DECL_NAME (n1);
if (n2 && TREE_CODE (n2) == TYPE_DECL)
n2 = DECL_NAME (n2);
- /* Most of the time, the type names will match, do not be unnecesarily
+ /* Most of the time, the type names will match, do not be unnecessarily
verbose. */
if (n1 != n2)
inform (loc_t1,
if (types_odr_comparable (t1, t2)
/* We make assign integers mangled names to be able to handle
signed/unsigned chars. Accepting them here would however lead to
- confussing message like
+ confusing message like
"type ‘const int’ itself violates the C++ One Definition Rule" */
&& TREE_CODE (t1) != INTEGER_TYPE
&& types_same_for_odr (t1, t2))
inform (loc_t2, "the incompatible type is defined here");
}
-/* Return true if T should be ignored in TYPE_FIELDS for ODR comparsion. */
+/* Return true if T should be ignored in TYPE_FIELDS for ODR comparison. */
static bool
skip_in_fields_list_p (tree t)
return get_odr_type (type, false)->odr_violated;
}
-/* Add TYPE od ODR type hash. */
+/* Add TYPE to ODR type hash. */
void
register_odr_type (tree type)
odr_hash = new odr_hash_type (23);
if (type == TYPE_MAIN_VARIANT (type))
{
- /* To get ODR warings right, first register all sub-types. */
+ /* To get ODR warnings right, first register all sub-types. */
if (RECORD_OR_UNION_TYPE_P (type)
&& COMPLETE_TYPE_P (type))
{
continue;
/* To aid ODR warnings we also mangle integer constants but do
- not consinder duplicates there. */
+ not consider duplicates there. */
if (TREE_CODE (odr_types[i]->type) == INTEGER_TYPE)
continue;
If INCLUDE_BASES is true, walk also base types of OUTER_TYPES containing
OTR_TYPE and include their virtual method. This is useful for types
possibly in construction or destruction where the virtual table may
- temporarily change to one of base types. INCLUDE_DERIVER_TYPES make
+ temporarily change to one of base types. INCLUDE_DERIVED_TYPES make
us to walk the inheritance graph for all derivations.
If COMPLETEP is non-NULL, store true if the list is complete.
itself.
This may need to be revisited once we add further ways to use
- the may edges, but it is a resonable thing to do right now. */
+ the may edges, but it is a reasonable thing to do right now. */
if ((e->indirect_info->param_index == -1
|| (!opt_for_fn (n->decl, flag_devirtualize_speculatively)
/* Record SIZE and TIME to SUMMARY.
The accounted code will be executed when EXEC_PRED is true.
- When NONCONST_PRED is false the code will evaulate to constant and
+ When NONCONST_PRED is false the code will evaluate to constant and
will get optimized out in specialized clones of the function.
If CALL is true account to call_size_time_table rather than
size_time_table. */
if (nonconst_pred == false)
return;
- /* We need to create initial empty unconitional clause, but otherwie
+ /* We need to create initial empty unconditional clause, but otherwise
we don't need to account empty times and sizes. */
if (!size && time == 0 && table)
return;
- /* Only for calls we are unaccounting what we previously recoreded. */
+ /* Only for calls we are unaccounting what we previously recorded. */
gcc_checking_assert (time >= 0 || call);
for (i = 0; vec_safe_iterate (table, i, &e); i++)
}
}
-/* We proved E to be unreachable, redirect it to __bultin_unreachable. */
+/* We proved E to be unreachable, redirect it to __builtin_unreachable. */
static struct cgraph_edge *
redirect_to_unreachable (struct cgraph_edge *e)
}
-/* Compute what conditions may or may not hold given invormation about
+/* Compute what conditions may or may not hold given information about
parameters. RET_CLAUSE returns truths that may hold in a specialized copy,
- whie RET_NONSPEC_CLAUSE returns truths that may hold in an nonspecialized
+ while RET_NONSPEC_CLAUSE returns truths that may hold in a nonspecialized
copy when called in a given context. It is a bitmask of conditions. Bit
0 means that condition is known to be false, while bit 1 means that condition
may or may not be true. These differs - for example NOT_INLINED condition
the fact that parameter is indeed a constant.
KNOWN_VALS is partial mapping of parameters of NODE to constant values.
- KNOWN_AGGS is a vector of aggreggate known offset/value set for each
+ KNOWN_AGGS is a vector of aggregate known offset/value set for each
parameter. Return clause of possible truths. When INLINE_P is true, assume
that we are inlining.
/* Work out what conditions might be true at invocation of E.
Compute costs for inlined edge if INLINE_P is true.
- Return in CLAUSE_PTR the evaluated condistions and in NONSPEC_CLAUSE_PTR
+ Return in CLAUSE_PTR the evaluated conditions and in NONSPEC_CLAUSE_PTR
(if non-NULL) conditions evaluated for nonspecialized clone called
in a given context.
KNOWN_VALS_PTR and KNOWN_AGGS_PTR must be non-NULL and will be filled by
- known canstant and aggregate values of parameters.
+ known constant and aggregate values of parameters.
KNOWN_CONTEXT_PTR, if non-NULL, will be filled by polymorphic call contexts
of parameter used by a polymorphic call. */
info->account_size_time (0, 0, true_pred, true_pred);
/* Remap size_time vectors.
- Simplify the predicate by prunning out alternatives that are known
+ Simplify the predicate by pruning out alternatives that are known
to be false.
TODO: as on optimization, we can also eliminate conditions known
to be true. */
edge_set_predicate (edge, &new_predicate);
}
- /* Remap indirect edge predicates with the same simplificaiton as above.
+ /* Remap indirect edge predicates with the same simplification as above.
Also copy constantness arrays. */
for (edge = dst->indirect_calls; edge; edge = next)
{
/* If inliner or someone after inliner will ever start producing
non-trivial clones, we will get trouble with lack of information
about updating self sizes, because size vectors already contains
- sizes of the calees. */
+ sizes of the callees. */
gcc_assert (!inlined_to_p || !optimized_out_size);
}
else
/* Casts of parameters, loads from parameters passed by reference
and stores to return value or parameters are often free after
- inlining dua to SRA and further combining.
+ inlining due to SRA and further combining.
Assume that half of statements goes away. */
if (CONVERT_EXPR_CODE_P (rhs_code)
|| rhs_code == VIEW_CONVERT_EXPR
lhs_free = true;
/* Writes to parameters, parameters passed by value and return value
- (either dirrectly or passed via invisible reference) are free.
+ (either directly or passed via invisible reference) are free.
TODO: We ought to handle testcase like
struct a {int a,b;};
struct a
- retrurnsturct (void)
+ returnstruct (void)
{
struct a a ={1,2};
return a;
This translate into:
- retrurnsturct ()
+ returnstruct ()
{
int a$b;
int a$a;
enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
? code : inverted_code);
/* invert_tree_comparison will return ERROR_MARK on FP
- comparsions that are not EQ/NE instead of returning proper
+ comparisons that are not EQ/NE instead of returning proper
unordered one. Be sure it is not confused with NON_CONSTANT.
And if the edge's target is the final block of diamond CFG graph
Here we can predicate nonconstant_code. We can't
really handle constant_code since we have no predicate
for this and also the constant code is not known to be
- optimized away when inliner doen't see operand is constant.
+ optimized away when inliner doesn't see operand is constant.
Other optimizers might think otherwise. */
if (gimple_cond_code (last) != NE_EXPR
|| !integer_zerop (gimple_cond_rhs (last)))
int base_index;
struct agg_position_info aggpos;
- /* What statments might be optimized away
+ /* What statements might be optimized away
when their arguments are constant. */
if (gimple_code (stmt) != GIMPLE_ASSIGN
&& gimple_code (stmt) != GIMPLE_COND
gimple *stmt;
};
-/* Value is initialized in INIT_BB and used in USE_BB. We want to copute
+/* Value is initialized in INIT_BB and used in USE_BB. We want to compute
probability how often it changes between USE_BB.
INIT_BB->count/USE_BB->count is an estimate, but if INIT_BB
is in different loop nest, we can do better.
presence of EH and will be optimized out by optimize_clobbers later in the
game.
- NEED_EH is used to recurse in case the clobber has non-EH predecestors
+ NEED_EH is used to recurse in case the clobber has non-EH predecessors
that can be clobber only, too.. When it is false, the RESX is not necessary
on the end of basic block. */
return false;
}
- /* See if all predecestors are either throws or clobber only BBs. */
+ /* See if all predecessors are either throws or clobber only BBs. */
FOR_EACH_EDGE (e, ei, bb->preds)
if (!(e->flags & EDGE_EH)
&& !clobber_only_eh_bb_p (e->src, false))
predicate will_be_nonconstant;
/* This relation stmt should be folded after we remove
- buildin_expect call. Adjust the cost here. */
+ __builtin_expect call. Adjust the cost here. */
if (stmt == fix_builtin_expect_stmt)
{
this_size--;
}
}
- /* TODO: When conditional jump or swithc is known to be constant, but
+ /* TODO: When conditional jump or switch is known to be constant, but
we did not translate it into the predicates, we really can account
just maximum of the possible paths. */
if (fbi.info)
|| es->predicate->evaluate (possible_truths))
{
/* Predicates of calls shall not use NOT_CHANGED codes,
- sowe do not need to compute probabilities. */
+ so we do not need to compute probabilities. */
estimate_edge_size_and_time (e, size,
es->predicate ? NULL : min_size,
time,
}
/* Default constructor for ipa call context.
- Memory alloction of known_vals, known_contexts
+ Memory allocation of known_vals, known_contexts
and known_aggs vectors is owned by the caller, but can
be release by ipa_call_context::release.
/* Release memory used by known_vals/contexts/aggs vectors.
If ALL is true release also inline_param_summary.
- This happens when context was previously duplciated to be stored
+ This happens when context was previously duplicated to be stored
into cache. */
void
}
/* Estimate size and time needed to execute call in the given context.
- Additionally detemine hints determined by the context. Finally compute
+ Additionally determine hints determined by the context. Finally compute
minimal size needed for the call that is independent on the call context and
can be used for fast estimates. Return the values in RET_SIZE,
RET_MIN_SIZE, RET_TIME and RET_HINTS. */
gcc_checking_assert ((nonspecialized_time - time * 99 / 100) >= -1);
/* Roundoff issues may make specialized time bigger than nonspecialized
- time. We do not really want that to happen because some heurstics
+ time. We do not really want that to happen because some heuristics
may get confused by seeing negative speedups. */
if (time > nonspecialized_time)
time = nonspecialized_time;
/* Update change_prob of EDGE after INLINED_EDGE has been inlined.
When function A is inlined in B and A calls C with parameter that
- changes with probability PROB1 and C is known to be passthroug
+ changes with probability PROB1 and C is known to be passthrough
of argument if B that change with probability PROB2, the probability
of change is now PROB1*PROB2. */
/* Hints are reasons why IPA heuristics should prefer specializing given
- function. They are represtented as bitmap of the following values. */
+ function. They are represented as bitmap of the following values. */
enum ipa_hints_vals {
/* When specialization turns indirect call into a direct call,
it is good idea to do so. */
INLINE_HINT_indirect_call = 1,
/* Inlining may make loop iterations or loop stride known. It is good idea
- to do so because it enables loop optimizatoins. */
+ to do so because it enables loop optimizations. */
INLINE_HINT_loop_iterations = 2,
INLINE_HINT_loop_stride = 4,
/* Inlining within same strongly connected component of callgraph is often
/* Conditional size/time information. The summaries are being
merged during inlining. */
conditions conds;
- /* Normal code is acocunted in size_time_table, while calls are
+ /* Normal code is accounted in size_time_table, while calls are
accounted in call_size_time_table. This is because calls
are often adjusted by IPA optimizations and thus this summary
is generated from call summary information when needed. */
/* This object describe a context of call. That is a summary of known
information about its parameters. Main purpose of this context is
- to give more realistic esitmations of function runtime, size and
+ to give more realistic estimations of function runtime, size and
inline hints. */
class ipa_call_context
{
/* Called function. */
cgraph_node *m_node;
/* Clause describing what predicate conditionals can be satisfied
- in this context if function is inlined/specialised. */
+ in this context if function is inlined/specialized. */
clause_t m_possible_truths;
/* Clause describing what predicate conditionals can be satisfied
in this context if function is kept offline. */
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-/* Gimple identical code folding (class func_checker) is an infastructure
+/* Gimple identical code folding (class func_checker) is an infrastructure
capable of comparing two given functions. The class compares every
gimple statement and uses many dictionaries to map source and target
SSA_NAMEs, declarations and other components.
- To use the infrastructure, create an instanse of func_checker and call
- a comparsion function based on type of gimple statement. */
+ To use the infrastructure, create an instance of func_checker and call
+ a comparison function based on type of gimple statement. */
/* Prints string STRING to a FILE with a given number of SPACE_COUNT. */
#define FPUTS_SPACES(file, space_count, string) \
bool compare_loops (basic_block bb1, basic_block bb2);
/* Return true if types are compatible for polymorphic call analysis.
- COMPARE_PTR indicates if polymorphic type comparsion should be
+ COMPARE_PTR indicates if polymorphic type comparison should be
done for pointers, too. */
static bool compatible_polymorphic_types_p (tree t1, tree t2,
bool compare_ptr);
/* Compare properties of symbols N1 and N2 that does not affect semantics of
symbol itself but affects semantics of its references from USED_BY (which
- may be NULL if it is unknown). If comparsion is false, symbols
+ may be NULL if it is unknown). If comparison is false, symbols
can still be merged but any symbols referring them can't.
If ADDRESS is true, do extra checking needed for IPA_REF_ADDR.
if (DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (decl)
!= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (item->decl))
- return return_false_with_msg ("intrument function entry exit "
+ return return_false_with_msg ("instrument function entry exit "
"attributes are different");
if (DECL_NO_LIMIT_STACK (decl) != DECL_NO_LIMIT_STACK (item->decl))
&& TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
{
if (TREE_CODE (TREE_TYPE (item->decl)) != METHOD_TYPE)
- return return_false_with_msg ("DECL_CXX_CONSTURCTOR type mismatch");
+ return return_false_with_msg ("DECL_CXX_CONSTRUCTOR type mismatch");
else if (!func_checker::compatible_polymorphic_types_p
(TYPE_METHOD_BASETYPE (TREE_TYPE (decl)),
TYPE_METHOD_BASETYPE (TREE_TYPE (item->decl)), false))
}
/* Update hash by address sensitive references. We iterate over all
- sensitive references (address_matters_p) and we hash ultime alias
+ sensitive references (address_matters_p) and we hash ultimate alias
target of these nodes, which can improve a semantic item hash.
Also hash in referenced symbols properties. This can be done at any time
}
/* Do not turn function in one comdat group into wrapper to another
comdat group. Other compiler producing the body of the
- another comdat group may make opossite decision and with unfortunate
+ another comdat group may make opposite decision and with unfortunate
linker choices this may close a loop. */
else if (DECL_COMDAT_GROUP (original->decl)
&& DECL_COMDAT_GROUP (alias->decl)
else
create_wrapper = true;
- /* We can redirect local calls in the case both alias and orignal
+ /* We can redirect local calls in the case both alias and original
are not interposable. */
redirect_callers
= alias->get_availability () > AVAIL_INTERPOSABLE
return false;
}
- /* We cannot merge if address comparsion metters. */
+ /* We cannot merge if address comparison matters. */
if (alias_address_matters && flag_merge_constants < 2)
{
if (dump_enabled_p ())
fixup_pt_set (&SSA_NAME_PTR_INFO (name)->pt);
fixup_pt_set (&fn->gimple_df->escaped);
- /* The above get's us to 99% I guess, at least catching the
+ /* The above gets us to 99% I guess, at least catching the
address compares. Below also gets us aliasing correct
but as said we're giving leeway to the situation with
readonly vars anyway, so ... */
optimizer->register_hooks ();
}
-/* Semantic equality exection function. */
+/* Semantic equality execution function. */
static unsigned int
ipa_icf_driver (void)
/* Congruence class encompasses a collection of either functions or
read-only variables. These items are considered to be equivalent
- if not proved the oposite. */
+ if not proved the opposite. */
class congruence_class
{
public:
virtual bool equals_wpa (sem_item *item,
hash_map <symtab_node *, sem_item *> &ignored_nodes) = 0;
- /* Returns true if the item equals to ITEM given as arguemnt. */
+ /* Returns true if the item equals ITEM given as argument. */
virtual bool equals (sem_item *item,
hash_map <symtab_node *, sem_item *> &ignored_nodes) = 0;
ipa_icf_gimple::func_checker *checker);
/* Perform additional checks needed to match types of used function
- paramters. */
+ parameters. */
bool compatible_parm_types_p (tree, tree);
/* Exception handling region tree. */
static bool release_split_map (congruence_class * const &cls, bitmap const &b,
traverse_split_pair *pair);
- /* Process split operation for a cognruence class CLS,
+ /* Process split operation for a congruence class CLS,
where bitmap B splits congruence class members. DATA is used
as argument of split pair. */
static bool traverse_congruence_split (congruence_class * const &cls,
node_context_cache_clear = 0;
}
-/* Return hints derrived from EDGE. */
+/* Return hints derived from EDGE. */
int
simple_edge_hints (struct cgraph_edge *edge)
&& (!DECL_VIRTUAL_P (node->decl)
|| !opt_for_fn (node->decl, flag_devirtualize))
/* During early inlining some unanalyzed cgraph nodes might be in the
- callgraph and they might reffer the function in question. */
+ callgraph and they might refer to the function in question. */
&& !cgraph_new_nodes.exists ());
}
{
/* We may eliminate the need for out-of-line copy to be output.
In that case just go ahead and re-use it. This is not just an
- memory optimization. Making offline copy of fuction disappear
+ memory optimization. Making offline copy of function disappear
from the program will improve future decisions on inlining. */
if (!e->callee->callers->next_caller
/* Recursive inlining never wants the master clone to
need small function inlining to register edge removal hook to
maintain the priority queue.
- For now we keep the ohter functions in the group in program until
+ For now we keep the other functions in the group in program until
cgraph_remove_unreachable_functions gets rid of them. */
gcc_assert (!e->callee->inlined_to);
e->callee->remove_from_same_comdat_group ();
&& DECL_FUNCTION_PERSONALITY (callee->decl))
|| (check_maybe_up (flag_exceptions)
&& DECL_FUNCTION_PERSONALITY (callee->decl))
- /* When devirtualization is diabled for callee, it is not safe
+ /* When devirtualization is disabled for callee, it is not safe
to inline it as we possibly mangled the type info.
Allow early inlining of always inlines. */
|| (!early && check_maybe_down (flag_devirtualize)))
|| DECL_DISREGARD_INLINE_LIMITS (callee->decl))
;
/* If mismatch is caused by merging two LTO units with different
- optimizationflags we want to be bit nicer. However never inline
+ optimization flags we want to be a bit nicer. However never inline
if one of functions is not optimized at all. */
else if (!opt_for_fn (callee->decl, optimize)
|| !opt_for_fn (caller->decl, optimize))
return time;
}
-/* Determine time saved by inlininig EDGE of frequency FREQ
- where callee's runtime w/o inlineing is UNINLINED_TYPE
+/* Determine time saved by inlining EDGE of frequency FREQ
+ where callee's runtime w/o inlining is UNINLINED_TYPE
and with inlined is INLINED_TYPE. */
inline sreal
if (need_more_work)
noninline_callee ();
}
- Withhout penalizing this case, we usually inline noninline_callee
+ Without penalizing this case, we usually inline noninline_callee
into the inline_caller because overall_growth is small preventing
further inlining of inline_caller.
}
}
/* When function local profile is not available or it does not give
- useful information (ie frequency is zero), base the cost on
+ useful information (i.e. frequency is zero), base the cost on
loop nest and overall size growth, so we optimize for overall number
of functions fully inlined in program. */
else
gcc_checking_assert (n->get_data () == edge);
/* fibonacci_heap::replace_key does busy updating of the
- heap that is unnecesarily expensive.
+ heap that is unnecessarily expensive.
We do lazy increases: after extracting minimum if the key
turns out to be out of date, it is re-inserted into heap
with correct value. */
/* NODE was inlined.
- All caller edges needs to be resetted because
+ All caller edges need to be reset because
size estimates change. Similarly callees needs reset
because better context may be known. */
update_edge_key (heap, e);
}
/* We do not reset callee growth cache here. Since we added a new call,
- growth chould have just increased and consequentely badness metric
+ growth should have just increased and consequently badness metric
don't need updating. */
else if (e->inline_failed
&& (callee = e->callee->ultimate_alias_target (&avail,
edge_growth_cache->get (edge)->hints = old_hints_est + 1;
/* When updating the edge costs, we only decrease badness in the keys.
- Increases of badness are handled lazilly; when we see key with out
+ Increases of badness are handled lazily; when we see key with out
of date value on it, we re-insert it now. */
current_badness = edge_badness (edge, false);
gcc_assert (cached_badness == current_badness);
add_new_edges_to_heap (&edge_heap, new_indirect_edges);
/* If caller's size and time increased we do not need to update
- all edges becuase badness is not going to decrease. */
+ all edges because badness is not going to decrease. */
if (old_size <= ipa_size_summaries->get (where)->size
&& old_time <= ipa_fn_summaries->get (where)->time
/* Wrapper penalty may be non-monotonous in this respect.
"%" PRId64 " + previously indirect "
"%" PRId64 " + virtual "
"%" PRId64 " + virtual and previously indirect "
- "%" PRId64 " + stil indirect "
+ "%" PRId64 " + still indirect "
"%" PRId64 " + still indirect polymorphic "
"%" PRId64 "\n", inlined_cnt,
inlined_speculative, inlined_speculative_ply,
into callee often leads to better optimization of callee due to
increased context for optimization.
For example if main() function calls a function that outputs help
- and then function that does the main optmization, we should inline
+ and then function that does the main optimization, we should inline
the second with priority even if both calls are cold by themselves.
We probably want to implement new predicate replacing our use of
{
struct cgraph_node *callee = e->callee->ultimate_alias_target ();
- /* We can enounter not-yet-analyzed function during
+ /* We can encounter not-yet-analyzed function during
early inlining on callgraphs with strongly
connected components. */
ipa_fn_summary *s = ipa_fn_summaries->get (callee);
}
/* Return true if it seems valid to use placement new to build EXPECTED_TYPE
- at possition CUR_OFFSET within TYPE.
+ at position CUR_OFFSET within TYPE.
POD can be changed to an instance of a polymorphic type by
placement new. Here we play safe and assume that any
to represent it.
If OTR_TYPE is NULL, just find outermost polymorphic type with
- virtual table present at possition OFFSET.
+ virtual table present at position OFFSET.
For example when THIS represents type
class A
If we cannot find corresponding class, give up by setting
THIS->OUTER_TYPE to OTR_TYPE and THIS->OFFSET to NULL.
- Return true when lookup was sucesful.
+ Return true when lookup was successful.
When CONSIDER_PLACEMENT_NEW is false, reject contexts that may be made
valid only via allocation of new polymorphic type inside by means
Because the instance type may contain field whose type is of OUTER_TYPE,
we cannot derive any effective information about it.
- TODO: In the case we know all derrived types, we can definitely do better
+ TODO: In the case we know all derived types, we can definitely do better
here. */
else if (TYPE_SIZE (outer_type)
&& tree_fits_shwi_p (TYPE_SIZE (outer_type))
if (cur_offset != 0)
goto no_useful_type_info;
/* If we determined type precisely or we have no clue on
- speuclation, we are done. */
+ speculation, we are done. */
if (!maybe_derived_type || !speculative_outer_type
|| !speculation_consistent_p (speculative_outer_type,
speculative_offset,
{
outer_type = type;
offset = cur_offset;
- /* As soon as we se an field containing the type,
+ /* As soon as we see a field containing the type,
we know we are not looking for derivations. */
maybe_derived_type = false;
}
else
return true;
}
- /* We found no way to embedd EXPECTED_TYPE in TYPE.
+ /* We found no way to embed EXPECTED_TYPE in TYPE.
We still permit two special cases - placement new and
the case of variadic types containing themselves. */
if (!speculative
return true;
/* Pure functions cannot do any changes on the dynamic type;
- that require writting to memory. */
+ that require writing to memory. */
if ((!base || !auto_var_in_fn_p (base, function))
&& flags_from_decl_or_type (function) & (ECF_PURE | ECF_CONST))
return false;
}
}
-/* Proudce polymorphic call context for call method of instance
+/* Produce polymorphic call context for call method of instance
that is located within BASE (that is assumed to be a decl) at offset OFF. */
void
if (TREE_CODE (base) == MEM_REF || DECL_P (base))
{
/* We found dereference of a pointer. Type of the pointer
- and MEM_REF is meaningless, but we can look futher. */
+ and MEM_REF is meaningless, but we can look further. */
offset_int mem_offset;
if (TREE_CODE (base) == MEM_REF
&& mem_ref_offset (base).is_constant (&mem_offset))
0-thunk.fixed_offset. It starts with code that adds
think.fixed_offset to the pointer to compensate for this.
- Because we walked all the way to the begining of thunk, we now
+ Because we walked all the way to the beginning of thunk, we now
see pointer &bar-thunk.fixed_offset and need to compensate
for it. */
if (node->thunk.fixed_offset)
offset -= node->thunk.fixed_offset * BITS_PER_UNIT;
/* Dynamic casting has possibly upcasted the type
- in the hiearchy. In this case outer type is less
+ in the hierarchy. In this case outer type is less
informative than inner type and we should forget
about it. */
if ((otr_type
offset,
true, NULL /* Do not change type here */);
/* TODO: There are multiple ways to derive a type. For instance
- if BASE_POINTER is passed to an constructor call prior our refernece.
+ if BASE_POINTER is passed to a constructor call prior to our reference.
We do not make this type of flow sensitive analysis yet. */
if (instance)
*instance = base_pointer;
{
if (dump_file)
fprintf (dump_file, " Construction vtable used\n");
- /* FIXME: We should suport construction contexts. */
+ /* FIXME: We should support construction contexts. */
return NULL;
}
AA_WALK_BUDGET_P, if not NULL, is how statements we should allow
walk_aliased_vdefs to examine. The value should be decremented by the
- number of stetements we examined or set to zero if exhausted. */
+ number of statements we examined or set to zero if exhausted. */
bool
ipa_polymorphic_call_context::get_dynamic_type (tree instance,
otr_type = TYPE_MAIN_VARIANT (otr_type);
/* Walk into inner type. This may clear maybe_derived_type and save us
- from useless work. It also makes later comparsions with static type
+ from useless work. It also makes later comparisons with static type
easier. */
if (outer_type && otr_type)
{
if (TREE_CODE (instance) == MEM_REF)
return false;
- /* We need to obtain refernce to virtual table pointer. It is better
+ /* We need to obtain reference to virtual table pointer. It is better
to look it up in the code rather than build our own. This require bit
of pattern matching, but we end up verifying that what we found is
correct.
Therefore if the static outer type was found (outer_type)
we can safely ignore tci.speculative that is set on calls and give up
- only if there was dyanmic type store that may affect given variable
+ only if there was dynamic type store that may affect given variable
(seen_unanalyzed_store) */
if (walked < 0)
return false;
/* restrict_to_inner_class may eliminate wrong speculation making our job
- easeier. */
+ easier. */
if (otr_type)
restrict_to_inner_class (otr_type);
}
/* Choose type that contains the other. This one either contains the outer
as a field (thus giving exactly one target) or is deeper in the type
- hiearchy. */
+ hierarchy. */
else if (speculative_outer_type
&& speculative_maybe_derived_type
&& (new_offset > speculative_offset
}
/* restrict_to_inner_class may eliminate wrong speculation making our job
- easeier. */
+ easier. */
if (otr_type)
restrict_to_inner_class (otr_type);
}
}
-/* Assume that both THIS and a given context is valid and strenghten THIS
- if possible. Return true if any strenghtening was made.
+/* Assume that both THIS and a given context is valid and strengthen THIS
+ if possible. Return true if any strengthening was made.
If actual type the context is being used in is known, OTR_TYPE should be
set accordingly. This improves quality of combined result. */
goto invalidate;
}
}
- /* Pick variant deeper in the hiearchy. */
+ /* Pick variant deeper in the hierarchy. */
else
{
outer_type = ctx.outer_type;
}
}
}
- /* TODO handle merging using hiearchy. */
+ /* TODO handle merging using hierarchy. */
else if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Giving up on merge\n");
if (!dynamic && ctx.dynamic)
dynamic = true;
}
- /* TODO handle merging using hiearchy. */
+ /* TODO handle merging using hierarchy. */
else
{
if (dump_file && (dump_flags & TDF_DETAILS))
}
-/* Dump THIS to F. CONDS a vector of conditions used when evauating
- predicats. When NL is true new line is output at the end of dump. */
+/* Dump THIS to F. CONDS is a vector of conditions used when evaluating
+ predicates. When NL is true, a new line is output at the end of the dump. */
void
predicate::dump (FILE *f, conditions conds, bool nl) const
INFO is ipa_fn_summary of function we are adding predicate into, CALLEE_INFO
is summary of function predicate P is from. OPERAND_MAP is array giving
- callee formal IDs the caller formal IDs. POSSSIBLE_TRUTHS is clausule of all
+ callee formal IDs the caller formal IDs. POSSIBLE_TRUTHS is a clause of all
callee conditions that may be true in caller context. TOPLEV_PREDICATE is
predicate under which callee is executed. OFFSET_MAP is an array of of
offsets that need to be added to conditions, negative offset means that
passed by reference and by value. */
unsigned by_ref : 1;
/* A set of sequential operations on the parameter, which can be seen as
- a mathmatical function on the parameter. */
+ a mathematical function on the parameter. */
expr_eval_ops param_ops;
};
typedef vec<condition, va_gc> *conditions;
-/* Predicates are used to repesent function parameters (such as runtime)
+/* Predicates are used to represent function parameters (such as runtime)
which depend on a context function is called in.
Predicates are logical formulas in conjunctive-disjunctive form consisting
first_dynamic_condition = 2
};
- /* Maximal number of conditions predicate can reffer to. This is limited
+ /* Maximal number of conditions a predicate can refer to. This is limited
by using clause_t to be 32bit. */
static const int num_conditions = 32;
function. */
bool cannot_lead_to_return ();
- /* Return true if refernece may be used in address compare. */
+ /* Return true if reference may be used in address compare. */
bool address_matters_p ();
/* Return reference list this reference is in. */
|| (VAR_P (t)
&& auto_var_in_fn_p (t, current_function_decl))
|| TREE_CODE (t) == RESULT_DECL
- /* Normal labels are part of CFG and will be handled gratefuly.
+ /* Normal labels are part of CFG and will be handled gracefully.
Forced labels however can be used directly by statements and
need to stay in one partition along with their uses. */
|| (TREE_CODE (t) == LABEL_DECL
(param_partial_inlining_entry_probability, 100))))
{
/* When profile is guessed, we cannot expect it to give us
- realistic estimate on likelyness of function taking the
+ realistic estimate on likeliness of function taking the
complex path. As a special case, when tail of the function is
a loop, enable splitting since inlining code skipping the loop
is likely noticeable win. */
fprintf (stderr, "\n");
}
-/* Return true if THIS differs from OTHER; tolerate small diferences. */
+/* Return true if THIS differs from OTHER; tolerate small differences. */
bool
profile_count::differs_from_p (profile_count other) const
fprintf (stderr, "\n");
}
-/* Return true if THIS differs from OTHER; tolerate small diferences. */
+/* Return true if THIS differs from OTHER; tolerate small differences. */
bool
profile_probability::differs_from_p (profile_probability other) const
}
/* COUNT1 times event happens with *THIS probability, COUNT2 times OTHER
- happens with COUNT2 probablity. Return probablity that either *THIS or
+ happens with COUNT2 probability. Return probability that either *THIS or
OTHER happens. */
profile_probability
{
/* If probabilities are same, we are done.
If counts are nonzero we can distribute accordingly. In remaining
- cases just avreage the values and hope for the best. */
+ cases just average the values and hope for the best. */
if (*this == other || count1 == count2
|| (count2 == profile_count::zero ()
&& !(count1 == profile_count::zero ())))
GUESSED_LOCAL,
/* Profile was read by feedback and was 0, we used local heuristics to guess
- better. This is the case of functions not run in profile fedback.
+ better. This is the case of functions not run in profile feedback.
Never used by probabilities. */
GUESSED_GLOBAL0,
not reflect the reality but it can be compared interprocedurally
(for example, we inlined function w/o profile feedback into function
with feedback and propagated from that).
- Never used by probablities. */
+ Never used by probabilities. */
GUESSED,
/* Profile was determined by autofdo. */
In addition to actual value the quality of profile is tracked and propagated
through all operations. Special value UNINITIALIZED_PROFILE is used for probabilities
- that has not been determined yet (for example bacause of
+ that have not been determined yet (for example because of
-fno-guess-branch-probability)
Typically probabilities are derived from profile feedback (via
- never (0 probability)
- guessed_never
- very_unlikely (1/2000 probability)
- - unlikely (1/5 probablity)
+ - unlikely (1/5 probability)
- even (1/2 probability)
- likely (4/5 probability)
- very_likely (1999/2000 probability)
/* The following is equivalent to:
*this = cprob.invert () * *this / ret.invert ();
Avoid scaling when overall outcome is supposed to be always.
- Without knowing that one is inverse of toher, the result would be
+ Without knowing that one is the inverse of the other, the result would be
conservative. */
if (!(*this == always ()))
*this = (*this - ret) / ret.invert ();
/* Return true when the probability of edge is reliable.
- The profile guessing code is good at predicting branch outcome (ie.
+ The profile guessing code is good at predicting branch outcome (i.e.
taken/not taken), that is predicted right slightly over 75% of time.
It is however notoriously poor on predicting the probability itself.
In general the profile appear a lot flatter (with probabilities closer
return m_val <= max_probability;
}
- /* Comparsions are three-state and conservative. False is returned if
+ /* Comparisons are three-state and conservative. False is returned if
the inequality cannot be decided. */
bool operator< (const profile_probability &other) const
{
bool differs_lot_from_p (profile_probability other) const;
/* COUNT1 times event happens with *THIS probability, COUNT2 times OTHER
- happens with COUNT2 probablity. Return probablity that either *THIS or
+ happens with COUNT2 probability. Return probability that either *THIS or
OTHER happens. */
profile_probability combine_with_count (profile_count count1,
profile_probability other,
estimation.
2) ipa counters which are result of profile feedback or special case
of static profile estimation (such as in function main).
- 3) counters which counts as 0 inter-procedurally (beause given function
+ 3) counters which count as 0 inter-procedurally (because the given function
was never run in train feedback) but they hold local static profile
estimate.
well defined.
To take local counter and use it inter-procedurally use ipa member function
- which strips information irelevant at the inter-procedural level.
+ which strips information irrelevant at the inter-procedural level.
Counters are 61bit integers representing number of executions during the
train run or normalized frequency within the function.
and they do end up in uninitialized scale if any of the parameters is
uninitialized.
- All comparsions that are three state and handling of probabilities. Thus
+ All comparisons are three-state, and so is the handling of probabilities. Thus
a < b is not equal to !(a >= b).
The following pre-defined counts are available:
return m_quality >= ADJUSTED;
}
- /* Return true if vlaue can be operated inter-procedurally. */
+ /* Return true if the value can be used inter-procedurally. */
bool ipa_p () const
{
return !initialized_p () || m_quality >= GUESSED_GLOBAL0;
return m_val != uninitialized_count || m_quality == GUESSED_LOCAL;
}
- /* Comparsions are three-state and conservative. False is returned if
+ /* Comparisons are three-state and conservative. False is returned if
the inequality cannot be decided. */
bool operator< (const profile_count &other) const
{
return initialized_p () && m_val != 0;
}
- /* Make counter forcingly nonzero. */
+ /* Make counter forcibly nonzero. */
profile_count force_nonzero () const
{
if (!initialized_p ())
return ret;
}
- /* Return variant of profile counte which is always safe to compare
- acorss functions. */
+ /* Return variant of profile count which is always safe to compare
+ across functions. */
profile_count ipa () const
{
if (m_quality > GUESSED_GLOBAL0_ADJUSTED)