From 956d615d66d06a9810000a5b7941be3ee1da7f8e Mon Sep 17 00:00:00 2001
From: Jakub Jelinek <jakub@redhat.com>
Date: Sat, 23 Nov 2019 12:44:51 +0100
Subject: [PATCH] ipa-fnsummary.c: Fix comment typos.

	* ipa-fnsummary.c: Fix comment typos.
	* ipa-ref.h: Likewise.
	* ipa-predicate.h: Likewise.
	* ipa-split.c: Likewise.
	* ipa-inline-analysis.c: Likewise.
	* ipa-predicate.c: Likewise.
	* ipa-devirt.c: Likewise.
	* ipa-icf.h: Likewise.
	* profile-count.c: Likewise.
	* ipa-icf.c: Likewise.
	(sem_function::equals_wpa): Fix typos in dump messages.
	* ipa-icf-gimple.h: Fix comment typos.
	* ipa-inline-transform.c: Likewise.
	* ipa-polymorphic-call.c: Likewise.
	* ipa-fnsummary.h: Likewise.
	* ipa-inline.c: Likewise.
	(dump_inline_stats): Fix typo in debug dump message.
	* profile-count.h: Fix comment typos.

From-SVN: r278643
---
 gcc/ChangeLog              | 19 ++++++++++++
 gcc/ipa-devirt.c           | 28 +++++++++---------
 gcc/ipa-fnsummary.c        | 60 +++++++++++++++++++-------------------
 gcc/ipa-fnsummary.h        | 10 +++----
 gcc/ipa-icf-gimple.h       |  8 ++---
 gcc/ipa-icf.c              | 18 ++++++------
 gcc/ipa-icf.h              |  8 ++---
 gcc/ipa-inline-analysis.c  |  2 +-
 gcc/ipa-inline-transform.c |  6 ++--
 gcc/ipa-inline.c           | 28 +++++++++---------
 gcc/ipa-polymorphic-call.c | 52 ++++++++++++++++-----------------
 gcc/ipa-predicate.c        |  6 ++--
 gcc/ipa-predicate.h        |  6 ++--
 gcc/ipa-ref.h              |  2 +-
 gcc/ipa-split.c            |  4 +--
 gcc/profile-count.c        |  8 ++---
 gcc/profile-count.h        | 32 ++++++++++----------
 17 files changed, 158 insertions(+), 139 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index eadbfc082d0..b7d223ca8d0 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,24 @@
 2019-11-23  Jakub Jelinek  <jakub@redhat.com>
 
+	* ipa-fnsummary.c: Fix comment typos.
+	* ipa-ref.h: Likewise.
+	* ipa-predicate.h: Likewise.
+	* ipa-split.c: Likewise.
+	* ipa-inline-analysis.c: Likewise.
+	* ipa-predicate.c: Likewise.
+	* ipa-devirt.c: Likewise.
+	* ipa-icf.h: Likewise.
+	* profile-count.c: Likewise.
+	* ipa-icf.c: Likewise.
+	(sem_function::equals_wpa): Fix typos in dump messages.
+	* ipa-icf-gimple.h: Fix comment typos.
+	* ipa-inline-transform.c: Likewise.
+	* ipa-polymorphic-call.c: Likewise.
+	* ipa-fnsummary.h: Likewise.
+	* ipa-inline.c: Likewise.
+	(dump_inline_stats): Fix typo in debug dump message.
+	* profile-count.h: Fix comment typos.
+
 	PR target/92615
 	* config/i386/i386.c (ix86_md_asm_adjust): If dest_mode is
 	GET_MODE (dest), is not QImode, using ZERO_EXTEND and dest is not
diff --git a/gcc/ipa-devirt.c b/gcc/ipa-devirt.c
index d1c462a1ac1..c158d3c968d 100644
--- a/gcc/ipa-devirt.c
+++ b/gcc/ipa-devirt.c
@@ -216,7 +216,7 @@ struct GTY(()) odr_type_d
   bool all_derivations_known;
   /* Did we report ODR violation here?  */
   bool odr_violated;
-  /* Set when virtual table without RTTI previaled table with.  */
+  /* Set when virtual table without RTTI prevailed table with.  */
   bool rtti_broken;
   /* Set when the canonical type is determined using the type name.  */
   bool tbaa_enabled;
@@ -655,7 +655,7 @@ compare_virtual_tables (varpool_node *prevailing, varpool_node *vtable)
       end2 = !vtable->iterate_reference (n2, ref2);
 
       /* !DECL_VIRTUAL_P means RTTI entry;
-	 We warn when RTTI is lost because non-RTTI previals; we silently
+	 We warn when RTTI is lost because non-RTTI prevails; we silently
	 accept the other case.  */
       while (!end2
	     && (end1
@@ -767,7 +767,7 @@ compare_virtual_tables (varpool_node *prevailing, varpool_node *vtable)
	  class_type->odr_violated = true;
 
-	  /* Complain about size mismatch.  Either we have too many virutal
+	  /* Complain about size mismatch.  Either we have too many virtual
	     functions or too many virtual table pointers.  */
	  if (end1 || end2)
	    {
@@ -861,7 +861,7 @@ warn_odr (tree t1, tree t2, tree st1, tree st2,
   if (!warn || !TYPE_NAME(TYPE_MAIN_VARIANT (t1)))
     return;
 
-  /* ODR warnings are output druing LTO streaming; we must apply location
+  /* ODR warnings are output during LTO streaming; we must apply location
     cache for potential warnings to be output correctly.  */
   if (lto_location_cache::current_cache)
     lto_location_cache::current_cache->apply_location_cache ();
@@ -920,7 +920,7 @@ warn_odr (tree t1, tree t2, tree st1, tree st2,
     *warned = true;
 }
 
-/* Return ture if T1 and T2 are incompatible and we want to recusively
+/* Return true if T1 and T2 are incompatible and we want to recursively
   dive into them from warn_type_mismatch to give sensible answer.  */
 
 static bool
@@ -941,7 +941,7 @@ type_mismatch_p (tree t1, tree t2)
   This is hard to do in general.  We basically handle the common cases.
 
   If LOC1 and LOC2 are meaningful locations, use it in the case the types
-   themselves do no thave one.*/
+   themselves do not have one.  */
 
 void
 warn_types_mismatch (tree t1, tree t2, location_t loc1, location_t loc2)
@@ -1006,7 +1006,7 @@ warn_types_mismatch (tree t1, tree t2, location_t loc1, location_t loc2)
     n1 = DECL_NAME (n1);
   if (n2 && TREE_CODE (n2) == TYPE_DECL)
     n2 = DECL_NAME (n2);
-  /* Most of the time, the type names will match, do not be unnecesarily
+  /* Most of the time, the type names will match, do not be unnecessarily
     verbose.  */
   if (n1 != n2)
     inform (loc_t1,
@@ -1132,7 +1132,7 @@ warn_types_mismatch (tree t1, tree t2, location_t loc1, location_t loc2)
   if (types_odr_comparable (t1, t2)
       /* We make assign integers mangled names to be able to handle
	 signed/unsigned chars.  Accepting them here would however lead to
-	 confussing message like
+	 confusing message like
	 "type ‘const int’ itself violates the C++ One Definition Rule"  */
       && TREE_CODE (t1) != INTEGER_TYPE
       && types_same_for_odr (t1, t2))
@@ -1149,7 +1149,7 @@ warn_types_mismatch (tree t1, tree t2, location_t loc1, location_t loc2)
     inform (loc_t2, "the incompatible type is defined here");
 }
 
-/* Return true if T should be ignored in TYPE_FIELDS for ODR comparsion.  */
+/* Return true if T should be ignored in TYPE_FIELDS for ODR comparison.  */
 
 static bool
 skip_in_fields_list_p (tree t)
@@ -2047,7 +2047,7 @@ odr_type_violation_reported_p (tree type)
   return get_odr_type (type, false)->odr_violated;
 }
 
-/* Add TYPE od ODR type hash.  */
+/* Add TYPE of ODR type hash.  */
 
 void
 register_odr_type (tree type)
@@ -2056,7 +2056,7 @@ register_odr_type (tree type)
     odr_hash = new odr_hash_type (23);
   if (type == TYPE_MAIN_VARIANT (type))
     {
-      /* To get ODR warings right, first register all sub-types.  */
+      /* To get ODR warnings right, first register all sub-types.  */
       if (RECORD_OR_UNION_TYPE_P (type)
	  && COMPLETE_TYPE_P (type))
	{
@@ -2157,7 +2157,7 @@ dump_type_inheritance_graph (FILE *f)
	continue;
 
       /* To aid ODR warnings we also mangle integer constants but do
-	 not consinder duplicates there.  */
+	 not consider duplicates there.  */
       if (TREE_CODE (odr_types[i]->type) == INTEGER_TYPE)
	continue;
@@ -2987,7 +2987,7 @@ class final_warning_record *final_warning_records;
   If INCLUDE_BASES is true, walk also base types of OUTER_TYPES containing
   OTR_TYPE and include their virtual method.  This is useful for types
   possibly in construction or destruction where the virtual table may
-   temporarily change to one of base types.  INCLUDE_DERIVER_TYPES make
+   temporarily change to one of base types.  INCLUDE_DERIVED_TYPES make
   us to walk the inheritance graph for all derivations.
 
   If COMPLETEP is non-NULL, store true if the list is complete.
@@ -3672,7 +3672,7 @@ ipa_devirt (void)
	     itself.
 
	     This may need to be revisited once we add further ways to use
-	     the may edges, but it is a resonable thing to do right now.  */
+	     the may edges, but it is a reasonable thing to do right now.  */
 
	  if ((e->indirect_info->param_index == -1
	       || (!opt_for_fn (n->decl, flag_devirtualize_speculatively)
diff --git a/gcc/ipa-fnsummary.c b/gcc/ipa-fnsummary.c
index 9ac998482b5..e53d9e9013c 100644
--- a/gcc/ipa-fnsummary.c
+++ b/gcc/ipa-fnsummary.c
@@ -145,7 +145,7 @@ ipa_dump_hints (FILE *f, ipa_hints hints)
 /* Record SIZE and TIME to SUMMARY.
   The accounted code will be executed when EXEC_PRED is true.
-   When NONCONST_PRED is false the code will evaulate to constant and
+   When NONCONST_PRED is false the code will evaluate to constant and
   will get optimized out in specialized clones of the function.
   If CALL is true account to call_size_time_table rather than
   size_time_table.   */
@@ -171,12 +171,12 @@ ipa_fn_summary::account_size_time (int size, sreal time,
   if (nonconst_pred == false)
     return;
 
-  /* We need to create initial empty unconitional clause, but otherwie
+  /* We need to create initial empty unconditional clause, but otherwise
     we don't need to account empty times and sizes.  */
   if (!size && time == 0 && table)
     return;
 
-  /* Only for calls we are unaccounting what we previously recoreded.  */
+  /* Only for calls we are unaccounting what we previously recorded.  */
   gcc_checking_assert (time >= 0 || call);
 
   for (i = 0; vec_safe_iterate (table, i, &e); i++)
@@ -234,7 +234,7 @@ ipa_fn_summary::account_size_time (int size, sreal time,
     }
 }
 
-/* We proved E to be unreachable, redirect it to __bultin_unreachable.  */
+/* We proved E to be unreachable, redirect it to __builtin_unreachable.  */
 
 static struct cgraph_edge *
 redirect_to_unreachable (struct cgraph_edge *e)
@@ -309,9 +309,9 @@ set_hint_predicate (predicate **p, predicate new_predicate)
 }
 
 
-/* Compute what conditions may or may not hold given invormation about
+/* Compute what conditions may or may not hold given information about
   parameters.  RET_CLAUSE returns truths that may hold in a specialized copy,
-   whie RET_NONSPEC_CLAUSE returns truths that may hold in an nonspecialized
+   while RET_NONSPEC_CLAUSE returns truths that may hold in an nonspecialized
   copy when called in a given context.  It is a bitmask of conditions.  Bit
   0 means that condition is known to be false, while bit 1 means that condition
   may or may not be true.  These differs - for example NOT_INLINED condition
@@ -319,7 +319,7 @@ set_hint_predicate (predicate **p, predicate new_predicate)
   the fact that parameter is indeed a constant.
 
   KNOWN_VALS is partial mapping of parameters of NODE to constant values.
-   KNOWN_AGGS is a vector of aggreggate known offset/value set for each
+   KNOWN_AGGS is a vector of aggregate known offset/value set for each
   parameter.  Return clause of possible truths.
 
   When INLINE_P is true, assume that we are inlining.
@@ -506,12 +506,12 @@ evaluate_conditions_for_known_args (struct cgraph_node *node,
 /* Work out what conditions might be true at invocation of E.
   Compute costs for inlined edge if INLINE_P is true.
 
-   Return in CLAUSE_PTR the evaluated condistions and in NONSPEC_CLAUSE_PTR
+   Return in CLAUSE_PTR the evaluated conditions and in NONSPEC_CLAUSE_PTR
   (if non-NULL) conditions evaluated for nonspecialized clone called
   in a given context.
 
   KNOWN_VALS_PTR and KNOWN_AGGS_PTR must be non-NULL and will be filled by
-   known canstant and aggregate values of parameters.
+   known constant and aggregate values of parameters.
 
   KNOWN_CONTEXT_PTR, if non-NULL, will be filled by polymorphic call contexts
   of parameter used by a polymorphic call.  */
@@ -784,7 +784,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
       info->account_size_time (0, 0, true_pred, true_pred);
 
       /* Remap size_time vectors.
-	 Simplify the predicate by prunning out alternatives that are known
+	 Simplify the predicate by pruning out alternatives that are known
	 to be false.
	 TODO: as on optimization, we can also eliminate conditions known
	 to be true.  */
@@ -822,7 +822,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
	  edge_set_predicate (edge, &new_predicate);
	}
 
-      /* Remap indirect edge predicates with the same simplificaiton as above.
+      /* Remap indirect edge predicates with the same simplification as above.
	 Also copy constantness arrays.   */
       for (edge = dst->indirect_calls; edge; edge = next)
	{
@@ -847,7 +847,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
       /* If inliner or someone after inliner will ever start producing
	 non-trivial clones, we will get trouble with lack of information
	 about updating self sizes, because size vectors already contains
-	 sizes of the calees.  */
+	 sizes of the callees.  */
       gcc_assert (!inlined_to_p || !optimized_out_size);
     }
   else
@@ -1202,7 +1202,7 @@ eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
 
	    /* Casts of parameters, loads from parameters passed by reference
	       and stores to return value or parameters are often free after
-	       inlining dua to SRA and further combining.
+	       inlining due to SRA and further combining.
	       Assume that half of statements goes away.  */
	    if (CONVERT_EXPR_CODE_P (rhs_code)
		|| rhs_code == VIEW_CONVERT_EXPR
@@ -1256,12 +1256,12 @@ eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
		   lhs_free = true;
 
		 /* Writes to parameters, parameters passed by value
-		    and return value (either dirrectly or passed via invisible
+		    and return value (either directly or passed via invisible
		    reference) are free.
 
		    TODO: We ought to handle testcase like
		    struct a {int a,b;};
		    struct a
-		    retrurnsturct (void)
+		    returnstruct (void)
		    {
		      struct a a ={1,2};
		      return a;
@@ -1269,7 +1269,7 @@ eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
 
		    This translate into:
 
-		    retrurnsturct ()
+		    returnstruct ()
		    {
		      int a$b;
		      int a$a;
@@ -1467,7 +1467,7 @@ set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
	  enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
				      ? code : inverted_code);
	  /* invert_tree_comparison will return ERROR_MARK on FP
-	     comparsions that are not EQ/NE instead of returning proper
+	     comparisons that are not EQ/NE instead of returning proper
	     unordered one.  Be sure it is not confused with NON_CONSTANT.
 
	     And if the edge's target is the final block of diamond CFG graph
@@ -1498,7 +1498,7 @@ set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
     Here we can predicate nonconstant_code.  We can't really handle
     constant_code since we have no predicate for this and also the
     constant code is not known to be
-     optimized away when inliner doen't see operand is constant.
+     optimized away when inliner doesn't see operand is constant.
     Other optimizers might think otherwise.  */
   if (gimple_cond_code (last) != NE_EXPR
       || !integer_zerop (gimple_cond_rhs (last)))
@@ -1921,7 +1921,7 @@ will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
   int base_index;
   struct agg_position_info aggpos;
 
-  /* What statments might be optimized away
+  /* What statements might be optimized away
     when their arguments are constant.  */
   if (gimple_code (stmt) != GIMPLE_ASSIGN
       && gimple_code (stmt) != GIMPLE_COND
@@ -2004,7 +2004,7 @@ struct record_modified_bb_info
   gimple *stmt;
 };
 
-/* Value is initialized in INIT_BB and used in USE_BB.  We want to copute
+/* Value is initialized in INIT_BB and used in USE_BB.  We want to compute
   probability how often it changes between USE_BB.
   INIT_BB->count/USE_BB->count is an estimate, but if INIT_BB
   is in different loop nest, we can do better.
@@ -2333,7 +2333,7 @@ find_foldable_builtin_expect (basic_block bb)
   presence of EH and will be optimized out by optimize_clobbers later in the
   game.
 
-   NEED_EH is used to recurse in case the clobber has non-EH predecestors
+   NEED_EH is used to recurse in case the clobber has non-EH predecessors
   that can be clobber only, too..  When it is false, the RESX is not necessary
   on the end of basic block.  */
 
@@ -2367,7 +2367,7 @@ clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
	return false;
     }
 
-  /* See if all predecestors are either throws or clobber only BBs.  */
+  /* See if all predecessors are either throws or clobber only BBs.  */
   FOR_EACH_EDGE (e, ei, bb->preds)
     if (!(e->flags & EDGE_EH)
	&& !clobber_only_eh_bb_p (e->src, false))
@@ -2543,7 +2543,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
	  predicate will_be_nonconstant;
 
	  /* This relation stmt should be folded after we remove
-	     buildin_expect call. Adjust the cost here.  */
+	     __builtin_expect call. Adjust the cost here.  */
	  if (stmt == fix_builtin_expect_stmt)
	    {
	      this_size--;
@@ -2609,7 +2609,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
	    }
	}
 
-      /* TODO: When conditional jump or swithc is known to be constant, but
+      /* TODO: When conditional jump or switch is known to be constant, but
	 we did not translate it into the predicates, we really can account
	 just maximum of the possible paths.  */
       if (fbi.info)
@@ -3066,7 +3066,7 @@ estimate_calls_size_and_time_1 (struct cgraph_node *node, int *size,
	  || es->predicate->evaluate (possible_truths))
	{
	  /* Predicates of calls shall not use NOT_CHANGED codes,
-	     sowe do not need to compute probabilities.  */
+	     so we do not need to compute probabilities.  */
	  estimate_edge_size_and_time (e, size,
				       es->predicate ? NULL : min_size,
				       time,
@@ -3239,7 +3239,7 @@ estimate_calls_size_and_time (struct cgraph_node *node, int *size,
 }
 
 /* Default constructor for ipa call context.
-   Memory alloction of known_vals, known_contexts
+   Memory allocation of known_vals, known_contexts
   and known_aggs vectors is owned by the caller, but can
   be release by ipa_call_context::release.
@@ -3334,7 +3334,7 @@ ipa_call_context::duplicate_from (const ipa_call_context &ctx)
 
 /* Release memory used by known_vals/contexts/aggs vectors.
   If ALL is true release also inline_param_summary.
-   This happens when context was previously duplciated to be stored
+   This happens when context was previously duplicated to be stored
   into cache.  */
 
 void
@@ -3471,7 +3471,7 @@ ipa_call_context::equal_to (const ipa_call_context &ctx)
 }
 
 /* Estimate size and time needed to execute call in the given context.
-   Additionally detemine hints determined by the context.  Finally compute
+   Additionally determine hints determined by the context.  Finally compute
   minimal size needed for the call that is independent on the call context and
   can be used for fast estimates.  Return the values in RET_SIZE,
   RET_MIN_SIZE, RET_TIME and RET_HINTS.  */
@@ -3575,7 +3575,7 @@ ipa_call_context::estimate_size_and_time (int *ret_size,
   gcc_checking_assert ((nonspecialized_time - time * 99 / 100) >= -1);
 
   /* Roundoff issues may make specialized time bigger than nonspecialized
-     time.  We do not really want that to happen because some heurstics
+     time.  We do not really want that to happen because some heuristics
     may get confused by seeing negative speedups.  */
   if (time > nonspecialized_time)
     time = nonspecialized_time;
@@ -3684,7 +3684,7 @@ inline_update_callee_summaries (struct cgraph_node *node, int depth)
 
 /* Update change_prob of EDGE after INLINED_EDGE has been inlined.
   When function A is inlined in B and A calls C with parameter that
-   changes with probability PROB1 and C is known to be passthroug
+   changes with probability PROB1 and C is known to be passthrough
   of argument if B that change with probability PROB2, the probability
   of change is now PROB1*PROB2.  */
 
diff --git a/gcc/ipa-fnsummary.h b/gcc/ipa-fnsummary.h
index ebd475b6d98..21ecd54293f 100644
--- a/gcc/ipa-fnsummary.h
+++ b/gcc/ipa-fnsummary.h
@@ -26,13 +26,13 @@ along with GCC; see the file COPYING3.  If not see
 
 
 /* Hints are reasons why IPA heuristics should prefer specializing given
-   function.  They are represtented as bitmap of the following values.  */
+   function.  They are represented as bitmap of the following values.  */
 enum ipa_hints_vals {
   /* When specialization turns indirect call into a direct call,
     it is good idea to do so.  */
   INLINE_HINT_indirect_call = 1,
   /* Inlining may make loop iterations or loop stride known.  It is good idea
-     to do so because it enables loop optimizatoins.  */
+     to do so because it enables loop optimizations.  */
   INLINE_HINT_loop_iterations = 2,
   INLINE_HINT_loop_stride = 4,
   /* Inlining within same strongly connected component of callgraph is often
@@ -162,7 +162,7 @@ public:
   /* Conditional size/time information.  The summaries are being
     merged during inlining.  */
   conditions conds;
-  /* Normal code is acocunted in size_time_table, while calls are
+  /* Normal code is accounted in size_time_table, while calls are
     accounted in call_size_time_table.  This is because calls
     are often adjusted by IPA optimizations and thus this summary
     is generated from call summary information when needed.  */
@@ -292,7 +292,7 @@ public:
 
 /* This object describe a context of call.  That is a summary of known
   information about its parameters.  Main purpose of this context is
-   to give more realistic esitmations of function runtime, size and
+   to give more realistic estimations of function runtime, size and
   inline hints.  */
 class ipa_call_context
 {
@@ -323,7 +323,7 @@ private:
   /* Called function.  */
   cgraph_node *m_node;
   /* Clause describing what predicate conditionals can be satisfied
-     in this context if function is inlined/specialised.  */
+     in this context if function is inlined/specialized.  */
   clause_t m_possible_truths;
   /* Clause describing what predicate conditionals can be satisfied
     in this context if function is kept offline.  */
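An aside on the PROB1*PROB2 rule in the inline_update_callee_summaries comment fixed above: the composition is a plain fixed-point multiply. A minimal, self-contained sketch, assuming the REG_BR_PROB_BASE-style scale of 10000 that GCC uses for these probabilities; the helper name is illustrative, not GCC's:

#include <cstdio>

/* Illustrative only: if B's argument to C changes with probability
   prob1, and C passes its own parameter through with change
   probability prob2, the argument seen after inlining changes with
   probability prob1 * prob2 (treating the events as independent).
   Probabilities are scaled integers with base 10000.  */
static const int prob_base = 10000;

static int
combine_change_prob (int prob1, int prob2)
{
  /* Fixed-point multiply, rounding to nearest.  */
  return (prob1 * prob2 + prob_base / 2) / prob_base;
}

int
main ()
{
  /* 50% combined with 30% gives 15%, i.e. 1500 on the scaled base.  */
  printf ("%d\n", combine_change_prob (5000, 3000));
  return 0;
}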
diff --git a/gcc/ipa-icf-gimple.h b/gcc/ipa-icf-gimple.h
index b59d05fd605..67ad60241cd 100644
--- a/gcc/ipa-icf-gimple.h
+++ b/gcc/ipa-icf-gimple.h
@@ -19,13 +19,13 @@ You should have received a copy of the GNU General Public License
 along with GCC; see the file COPYING3.  If not see
 <http://www.gnu.org/licenses/>.  */
 
-/* Gimple identical code folding (class func_checker) is an infastructure
+/* Gimple identical code folding (class func_checker) is an infrastructure
   capable of comparing two given functions. The class compares every
   gimple statement and uses many dictionaries to map source and target
   SSA_NAMEs, declarations and other components.
 
-   To use the infrastructure, create an instanse of func_checker and call
-   a comparsion function based on type of gimple statement.  */
+   To use the infrastructure, create an instance of func_checker and call
+   a comparison function based on type of gimple statement.  */
 
 /* Prints string STRING to a FILE with a given number of SPACE_COUNT.  */
 #define FPUTS_SPACES(file, space_count, string) \
@@ -220,7 +220,7 @@ public:
   bool compare_loops (basic_block bb1, basic_block bb2);
 
   /* Return true if types are compatible for polymorphic call analysis.
-     COMPARE_PTR indicates if polymorphic type comparsion should be
+     COMPARE_PTR indicates if polymorphic type comparison should be
     done for pointers, too.  */
   static bool compatible_polymorphic_types_p (tree t1, tree t2,
					      bool compare_ptr);
diff --git a/gcc/ipa-icf.c b/gcc/ipa-icf.c
index 7762f939391..20e92e12bab 100644
--- a/gcc/ipa-icf.c
+++ b/gcc/ipa-icf.c
@@ -300,7 +300,7 @@ sem_function::get_hash (void)
 
 /* Compare properties of symbols N1 and N2 that does not affect semantics of
   symbol itself but affects semantics of its references from USED_BY (which
-   may be NULL if it is unknown).  If comparsion is false, symbols
+   may be NULL if it is unknown).  If comparison is false, symbols
   can still be merged but any symbols referring them can't.
 
   If ADDRESS is true, do extra checking needed for IPA_REF_ADDR.
@@ -550,7 +550,7 @@ sem_function::equals_wpa (sem_item *item,
 
   if (DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (decl)
       != DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (item->decl))
-    return return_false_with_msg ("intrument function entry exit "
+    return return_false_with_msg ("instrument function entry exit "
				  "attributes are different");
 
   if (DECL_NO_LIMIT_STACK (decl) != DECL_NO_LIMIT_STACK (item->decl))
@@ -576,7 +576,7 @@ sem_function::equals_wpa (sem_item *item,
       && TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
     {
       if (TREE_CODE (TREE_TYPE (item->decl)) != METHOD_TYPE)
-	return return_false_with_msg ("DECL_CXX_CONSTURCTOR type mismatch");
+	return return_false_with_msg ("DECL_CXX_CONSTRUCTOR type mismatch");
       else if (!func_checker::compatible_polymorphic_types_p
		 (TYPE_METHOD_BASETYPE (TREE_TYPE (decl)),
		  TYPE_METHOD_BASETYPE (TREE_TYPE (item->decl)), false))
@@ -726,7 +726,7 @@ sem_function::equals_wpa (sem_item *item,
     }
 
   /* Update hash by address sensitive references. We iterate over all
-     sensitive references (address_matters_p) and we hash ultime alias
+     sensitive references (address_matters_p) and we hash ultimate alias
     target of these nodes, which can improve a semantic item hash.
 
     Also hash in referenced symbols properties.  This can be done at any time
@@ -1114,7 +1114,7 @@ sem_function::merge (sem_item *alias_item)
	}
       /* Do not turn function in one comdat group into wrapper to another
	 comdat group. Other compiler producing the body of the
-	 another comdat group may make opossite decision and with unfortunate
+	 another comdat group may make opposite decision and with unfortunate
	 linker choices this may close a loop.  */
       else if (DECL_COMDAT_GROUP (original->decl)
	       && DECL_COMDAT_GROUP (alias->decl)
@@ -1160,7 +1160,7 @@ sem_function::merge (sem_item *alias_item)
   else
     create_wrapper = true;
 
-  /* We can redirect local calls in the case both alias and orignal
+  /* We can redirect local calls in the case both alias and original
     are not interposable.  */
   redirect_callers
     = alias->get_availability () > AVAIL_INTERPOSABLE
@@ -1989,7 +1989,7 @@ sem_variable::merge (sem_item *alias_item)
       return false;
     }
 
-  /* We cannot merge if address comparsion metters.  */
+  /* We cannot merge if address comparison matters.  */
   if (alias_address_matters && flag_merge_constants < 2)
     {
       if (dump_enabled_p ())
@@ -3420,7 +3420,7 @@ sem_item_optimizer::fixup_points_to_sets (void)
	fixup_pt_set (&SSA_NAME_PTR_INFO (name)->pt);
   fixup_pt_set (&fn->gimple_df->escaped);
 
-  /* The above get's us to 99% I guess, at least catching the
+  /* The above gets us to 99% I guess, at least catching the
     address compares.  Below also gets us aliasing correct
     but as said we're giving leeway to the situation with
     readonly vars anyway, so ... */
@@ -3505,7 +3505,7 @@ ipa_icf_read_summary (void)
   optimizer->register_hooks ();
 }
 
-/* Semantic equality exection function.  */
+/* Semantic equality execution function.  */
 
 static unsigned int
 ipa_icf_driver (void)
diff --git a/gcc/ipa-icf.h b/gcc/ipa-icf.h
index 3098fd1f0ce..261911799e2 100644
--- a/gcc/ipa-icf.h
+++ b/gcc/ipa-icf.h
@@ -24,7 +24,7 @@ class sem_item;
 
 /* Congruence class encompasses a collection of either functions or
   read-only variables. These items are considered to be equivalent
-   if not proved the oposite.  */
+   if not proved the opposite.  */
 class congruence_class
 {
 public:
@@ -200,7 +200,7 @@ public:
   virtual bool equals_wpa (sem_item *item,
			   hash_map <symtab_node *, sem_item *> &ignored_nodes) = 0;
 
-  /* Returns true if the item equals to ITEM given as arguemnt.  */
+  /* Returns true if the item equals to ITEM given as argument.  */
   virtual bool equals (sem_item *item,
		       hash_map <symtab_node *, sem_item *> &ignored_nodes) = 0;
 
@@ -350,7 +350,7 @@ public:
		 ipa_icf_gimple::func_checker *checker);
 
   /* Perform additional checks needed to match types of used function
-     paramters.  */
+     parameters.  */
   bool compatible_parm_types_p (tree, tree);
 
   /* Exception handling region tree.  */
@@ -606,7 +606,7 @@ private:
   static bool release_split_map (congruence_class * const &cls, bitmap const &b,
				 traverse_split_pair *pair);
 
-  /* Process split operation for a cognruence class CLS,
+  /* Process split operation for a congruence class CLS,
     where bitmap B splits congruence class members.  DATA is used
     as argument of split pair.  */
   static bool traverse_congruence_split (congruence_class * const &cls,
diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index e36902073f5..ae8e5db3d9c 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -148,7 +148,7 @@ free_growth_caches (void)
   node_context_cache_clear = 0;
 }
 
-/* Return hints derrived from EDGE.   */
+/* Return hints derived from EDGE.   */
 
 int
 simple_edge_hints (struct cgraph_edge *edge)
diff --git a/gcc/ipa-inline-transform.c b/gcc/ipa-inline-transform.c
index e54752191b7..e7ec7f911e1 100644
--- a/gcc/ipa-inline-transform.c
+++ b/gcc/ipa-inline-transform.c
@@ -105,7 +105,7 @@ can_remove_node_now_p_1 (struct cgraph_node *node, struct cgraph_edge *e)
	  && (!DECL_VIRTUAL_P (node->decl)
	      || !opt_for_fn (node->decl, flag_devirtualize))
	  /* During early inlining some unanalyzed cgraph nodes might be in the
-	     callgraph and they might reffer the function in question.  */
+	     callgraph and they might refer the function in question.  */
	  && !cgraph_new_nodes.exists ());
 }
@@ -176,7 +176,7 @@ clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
     {
       /* We may eliminate the need for out-of-line copy to be output.
	 In that case just go ahead and re-use it.  This is not just an
-	 memory optimization.  Making offline copy of fuction disappear
+	 memory optimization.  Making offline copy of function disappear
	 from the program will improve future decisions on inlining.  */
       if (!e->callee->callers->next_caller
	  /* Recursive inlining never wants the master clone to
@@ -192,7 +192,7 @@ clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
	     need small function inlining to register edge removal hook to
	     maintain the priority queue.
 
-	     For now we keep the ohter functions in the group in program until
+	     For now we keep the other functions in the group in program until
	     cgraph_remove_unreachable_functions gets rid of them.  */
	  gcc_assert (!e->callee->inlined_to);
	  e->callee->remove_from_same_comdat_group ();
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 879da84cfe1..4dd4de157f1 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -517,7 +517,7 @@ can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
	   && DECL_FUNCTION_PERSONALITY (callee->decl))
	  || (check_maybe_up (flag_exceptions)
	      && DECL_FUNCTION_PERSONALITY (callee->decl))
-	  /* When devirtualization is diabled for callee, it is not safe
+	  /* When devirtualization is disabled for callee, it is not safe
	     to inline it as we possibly mangled the type info.
	     Allow early inlining of always inlines.  */
	  || (!early && check_maybe_down (flag_devirtualize)))
@@ -547,7 +547,7 @@ can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
	   || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
     ;
   /* If mismatch is caused by merging two LTO units with different
-     optimizationflags we want to be bit nicer.  However never inline
+     optimization flags we want to be bit nicer.  However never inline
     if one of functions is not optimized at all.  */
   else if (!opt_for_fn (callee->decl, optimize)
	   || !opt_for_fn (caller->decl, optimize))
@@ -783,8 +783,8 @@ compute_inlined_call_time (struct cgraph_edge *edge,
   return time;
 }
 
-/* Determine time saved by inlininig EDGE of frequency FREQ
-   where callee's runtime w/o inlineing is UNINLINED_TYPE
+/* Determine time saved by inlining EDGE of frequency FREQ
+   where callee's runtime w/o inlining is UNINLINED_TYPE
   and with inlined is INLINED_TYPE.  */
 
 inline sreal
@@ -1222,7 +1222,7 @@ edge_badness (struct cgraph_edge *edge, bool dump)
	     if (need_more_work)
	       noninline_callee ();
	   }
-	 Withhout penalizing this case, we usually inline noninline_callee
+	 Without penalizing this case, we usually inline noninline_callee
	 into the inline_caller because overall_growth is small preventing
	 further inlining of inline_caller.
 
@@ -1297,7 +1297,7 @@ edge_badness (struct cgraph_edge *edge, bool dump)
	}
     }
   /* When function local profile is not available or it does not give
-     useful information (ie frequency is zero), base the cost on
+     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in program.  */
   else
@@ -1349,7 +1349,7 @@ update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
       gcc_checking_assert (n->get_data () == edge);
 
       /* fibonacci_heap::replace_key does busy updating of the
-	 heap that is unnecesarily expensive.
+	 heap that is unnecessarily expensive.
	 We do lazy increases: after extracting minimum if the key
	 turns out to be out of date, it is re-inserted into heap with
	 correct value.  */
@@ -1383,7 +1383,7 @@ update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
 
 /* NODE was inlined.
-   All caller edges needs to be resetted because
+   All caller edges needs to be reset because
   size estimates change. Similarly callees needs reset
   because better context may be known.  */
 
@@ -1520,7 +1520,7 @@ update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
	  update_edge_key (heap, e);
	}
       /* We do not reset callee growth cache here.  Since we added a new call,
-	 growth chould have just increased and consequentely badness metric
+	 growth should have just increased and consequently badness metric
	 don't need updating.  */
       else if (e->inline_failed
	       && (callee = e->callee->ultimate_alias_target (&avail,
@@ -2082,7 +2082,7 @@ inline_small_functions (void)
	    edge_growth_cache->get (edge)->hints = old_hints_est + 1;
 
	  /* When updating the edge costs, we only decrease badness in the keys.
-	     Increases of badness are handled lazilly; when we see key with out
+	     Increases of badness are handled lazily; when we see key with out
	     of date value on it, we re-insert it now.  */
	  current_badness = edge_badness (edge, false);
	  gcc_assert (cached_badness == current_badness);
@@ -2225,7 +2225,7 @@ inline_small_functions (void)
	  add_new_edges_to_heap (&edge_heap, new_indirect_edges);
 
	  /* If caller's size and time increased we do not need to update
-	     all edges becuase badness is not going to decrease.  */
+	     all edges because badness is not going to decrease.  */
	  if (old_size <= ipa_size_summaries->get (where)->size
	      && old_time <= ipa_fn_summaries->get (where)->time
	      /* Wrapper penalty may be non-monotonous in this respect.  */
@@ -2569,7 +2569,7 @@ dump_inline_stats (void)
	   "%" PRId64 " + previously indirect "
	   "%" PRId64 " + virtual "
	   "%" PRId64 " + virtual and previously indirect "
-	   "%" PRId64 " + stil indirect "
+	   "%" PRId64 " + still indirect "
	   "%" PRId64 " + still indirect polymorphic "
	   "%" PRId64 "\n", inlined_cnt,
	   inlined_speculative, inlined_speculative_ply,
@@ -2725,7 +2725,7 @@ ipa_inline (void)
     into callee often leads to better optimization of callee due to
     increased context for optimization.
     For example if main() function calls a function that outputs help
-     and then function that does the main optmization, we should inline
+     and then function that does the main optimization, we should inline
     the second with priority even if both calls are cold by
     themselves.
 
     We probably want to implement new predicate replacing our use of
@@ -2850,7 +2850,7 @@ early_inline_small_functions (struct cgraph_node *node)
     {
       struct cgraph_node *callee = e->callee->ultimate_alias_target ();
 
-      /* We can enounter not-yet-analyzed function during
+      /* We can encounter not-yet-analyzed function during
	 early inlining on callgraphs with strongly connected
	 components.  */
       ipa_fn_summary *s = ipa_fn_summaries->get (callee);
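A note on the quantity named in the "time saved by inlining" comment fixed above: in the simplest view it is the edge frequency times the per-call time difference. The sketch below is a toy linear model only; GCC's sreal-based code also folds in call overhead and context, so treat the form (and the function name) as an assumption:

#include <cstdio>

/* Toy model: an edge executed FREQ times saves
   (uninlined_time - inlined_time) time units per call once the
   callee body is inlined.  Plain doubles stand in for GCC's sreal.  */
static double
inlining_speedup (double freq, double uninlined_time, double inlined_time)
{
  return freq * (uninlined_time - inlined_time);
}

int
main ()
{
  /* An edge running 200 times, saving 3 time units per call.  */
  printf ("%g\n", inlining_speedup (200.0, 10.0, 7.0));
  return 0;
}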
diff --git a/gcc/ipa-polymorphic-call.c b/gcc/ipa-polymorphic-call.c
index 78e102c3947..a50d2c5c8c3 100644
--- a/gcc/ipa-polymorphic-call.c
+++ b/gcc/ipa-polymorphic-call.c
@@ -69,7 +69,7 @@ contains_polymorphic_type_p (const_tree type)
 }
 
 /* Return true if it seems valid to use placement new to build EXPECTED_TYPE
-   at possition CUR_OFFSET within TYPE.
+   at position CUR_OFFSET within TYPE.
 
   POD can be changed to an instance of a polymorphic type by
   placement new.  Here we play safe and assume that any
@@ -99,7 +99,7 @@ possible_placement_new (tree type, tree expected_type,
   to represent it.
 
   If OTR_TYPE is NULL, just find outermost polymorphic type with
-   virtual table present at possition OFFSET.
+   virtual table present at position OFFSET.
 
   For example when THIS represents type
   class A
@@ -113,7 +113,7 @@ possible_placement_new (tree type, tree expected_type,
   If we cannot find corresponding class, give up by setting
   THIS->OUTER_TYPE to OTR_TYPE and THIS->OFFSET to NULL.
 
-   Return true when lookup was sucesful.
+   Return true when lookup was successful.
 
   When CONSIDER_PLACEMENT_NEW is false, reject contexts that may be made
   valid only via allocation of new polymorphic type inside by means
@@ -147,7 +147,7 @@ ipa_polymorphic_call_context::restrict_to_inner_class (tree otr_type,
	 Because the instance type may contain field whose type is of OUTER_TYPE,
	 we cannot derive any effective information about it.
 
-	 TODO: In the case we know all derrived types, we can definitely do better
+	 TODO: In the case we know all derived types, we can definitely do better
	 here.  */
       else if (TYPE_SIZE (outer_type)
	       && tree_fits_shwi_p (TYPE_SIZE (outer_type))
@@ -240,7 +240,7 @@ ipa_polymorphic_call_context::restrict_to_inner_class (tree otr_type,
	      if (cur_offset != 0)
		goto no_useful_type_info;
	      /* If we determined type precisely or we have no clue on
-		 speuclation, we are done.  */
+		 speculation, we are done.  */
	      if (!maybe_derived_type || !speculative_outer_type
		  || !speculation_consistent_p (speculative_outer_type,
						speculative_offset,
@@ -317,7 +317,7 @@ ipa_polymorphic_call_context::restrict_to_inner_class (tree otr_type,
	    {
	      outer_type = type;
	      offset = cur_offset;
-	      /* As soon as we se an field containing the type,
+	      /* As soon as we see an field containing the type,
		 we know we are not looking for derivations.  */
	      maybe_derived_type = false;
	    }
@@ -395,7 +395,7 @@ no_useful_type_info:
	  else
	    return true;
	}
-      /* We found no way to embedd EXPECTED_TYPE in TYPE.
+      /* We found no way to embed EXPECTED_TYPE in TYPE.
	 We still permit two special cases - placement new and
	 the case of variadic types containing themselves.  */
       if (!speculative
@@ -552,7 +552,7 @@ decl_maybe_in_construction_p (tree base, tree outer_type,
     return true;
 
   /* Pure functions cannot do any changes on the dynamic type;
-     that require writting to memory.  */
+     that require writing to memory.  */
   if ((!base || !auto_var_in_fn_p (base, function))
       && flags_from_decl_or_type (function) & (ECF_PURE | ECF_CONST))
     return false;
@@ -721,7 +721,7 @@ ipa_polymorphic_call_context::stream_in (class lto_input_block *ib,
     }
 }
 
-/* Proudce polymorphic call context for call method of instance
+/* Produce polymorphic call context for call method of instance
   that is located within BASE (that is assumed to be a decl) at offset OFF. */
 
 void
@@ -915,7 +915,7 @@ ipa_polymorphic_call_context::ipa_polymorphic_call_context (tree fndecl,
       if (TREE_CODE (base) == MEM_REF || DECL_P (base))
	{
	  /* We found dereference of a pointer.  Type of the pointer
-	     and MEM_REF is meaningless, but we can look futher.  */
+	     and MEM_REF is meaningless, but we can look further.  */
	  offset_int mem_offset;
	  if (TREE_CODE (base) == MEM_REF
	      && mem_ref_offset (base).is_constant (&mem_offset))
@@ -1004,14 +1004,14 @@ ipa_polymorphic_call_context::ipa_polymorphic_call_context (tree fndecl,
	 0-thunk.fixed_offset.  It starts with code that adds
	 think.fixed_offset to the pointer to compensate for this.
 
-	 Because we walked all the way to the begining of thunk, we now
+	 Because we walked all the way to the beginning of thunk, we now
	 see pointer &bar-thunk.fixed_offset and need to compensate
	 for it.  */
       if (node->thunk.fixed_offset)
	offset -= node->thunk.fixed_offset * BITS_PER_UNIT;
 
       /* Dynamic casting has possibly upcasted the type
-	 in the hiearchy.  In this case outer type is less
+	 in the hierarchy.  In this case outer type is less
	 informative than inner type and we should forget
	 about it.  */
       if ((otr_type
@@ -1113,7 +1113,7 @@ ipa_polymorphic_call_context::ipa_polymorphic_call_context (tree fndecl,
				      offset,
				      true, NULL /* Do not change type here */);
       /* TODO: There are multiple ways to derive a type.  For instance
-	 if BASE_POINTER is passed to an constructor call prior our refernece.
+	 if BASE_POINTER is passed to an constructor call prior our reference.
	 We do not make this type of flow sensitive analysis yet.  */
       if (instance)
	*instance = base_pointer;
@@ -1323,7 +1323,7 @@ extr_type_from_vtbl_ptr_store (gimple *stmt, struct type_change_info *tci,
     {
       if (dump_file)
	fprintf (dump_file, "  Construction vtable used\n");
-      /* FIXME: We should suport construction contexts.  */
+      /* FIXME: We should support construction contexts.  */
       return NULL;
     }
 
@@ -1557,7 +1557,7 @@ check_stmt_for_type_change (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
 
   AA_WALK_BUDGET_P, if not NULL, is how statements we should allow
   walk_aliased_vdefs to examine.  The value should be decremented by the
-   number of stetements we examined or set to zero if exhausted.  */
+   number of statements we examined or set to zero if exhausted.  */
 
 bool
 ipa_polymorphic_call_context::get_dynamic_type (tree instance,
@@ -1583,7 +1583,7 @@ ipa_polymorphic_call_context::get_dynamic_type (tree instance,
   otr_type = TYPE_MAIN_VARIANT (otr_type);
 
   /* Walk into inner type. This may clear maybe_derived_type and save us
-     from useless work.  It also makes later comparsions with static type
+     from useless work.  It also makes later comparisons with static type
     easier.  */
   if (outer_type && otr_type)
     {
@@ -1599,7 +1599,7 @@ ipa_polymorphic_call_context::get_dynamic_type (tree instance,
   if (TREE_CODE (instance) == MEM_REF)
     return false;
 
-  /* We need to obtain refernce to virtual table pointer.  It is better
+  /* We need to obtain reference to virtual table pointer.  It is better
     to look it up in the code rather than build our own.  This require bit
     of pattern matching, but we end up verifying that what we found is
     correct.
@@ -1778,7 +1778,7 @@ ipa_polymorphic_call_context::get_dynamic_type (tree instance,
     Therefore if the static outer type was found (outer_type)
     we can safely ignore tci.speculative that is set on calls and give up
-     only if there was dyanmic type store that may affect given variable
+     only if there was dynamic type store that may affect given variable
     (seen_unanalyzed_store)  */
 
   if (walked < 0)
@@ -1915,7 +1915,7 @@ ipa_polymorphic_call_context::combine_speculation_with
     return false;
 
   /* restrict_to_inner_class may eliminate wrong speculation making our job
-     easeier.  */
+     easier.  */
   if (otr_type)
     restrict_to_inner_class (otr_type);
 
@@ -1963,7 +1963,7 @@ ipa_polymorphic_call_context::combine_speculation_with
     }
   /* Choose type that contains the other.  This one either contains the outer
     as a field (thus giving exactly one target) or is deeper in the type
-     hiearchy.  */
+     hierarchy.  */
   else if (speculative_outer_type
	   && speculative_maybe_derived_type
	   && (new_offset > speculative_offset
@@ -2015,7 +2015,7 @@ ipa_polymorphic_call_context::meet_speculation_with
     }
 
   /* restrict_to_inner_class may eliminate wrong speculation making our job
-     easeier.  */
+     easier.  */
   if (otr_type)
     restrict_to_inner_class (otr_type);
 
@@ -2095,8 +2095,8 @@ ipa_polymorphic_call_context::meet_speculation_with
     }
 }
 
-/* Assume that both THIS and a given context is valid and strenghten THIS
-   if possible.  Return true if any strenghtening was made.
+/* Assume that both THIS and a given context is valid and strengthen THIS
+   if possible.  Return true if any strengthening was made.
 
   If actual type the context is being used in is known, OTR_TYPE should be
   set accordingly. This improves quality of combined result.  */
@@ -2261,7 +2261,7 @@ ipa_polymorphic_call_context::combine_with (ipa_polymorphic_call_context ctx,
	      goto invalidate;
	    }
	}
-      /* Pick variant deeper in the hiearchy.  */
+      /* Pick variant deeper in the hierarchy.  */
       else
	{
	  outer_type = ctx.outer_type;
@@ -2299,7 +2299,7 @@ ipa_polymorphic_call_context::combine_with (ipa_polymorphic_call_context ctx,
	    }
	}
     }
-  /* TODO handle merging using hiearchy.  */
+  /* TODO handle merging using hierarchy.  */
   else if (dump_file && (dump_flags & TDF_DETAILS))
     fprintf (dump_file, "Giving up on merge\n");
 
@@ -2587,7 +2587,7 @@ ipa_polymorphic_call_context::meet_with (ipa_polymorphic_call_context ctx,
       if (!dynamic && ctx.dynamic)
	dynamic = true;
     }
-  /* TODO handle merging using hiearchy.  */
+  /* TODO handle merging using hierarchy.  */
   else
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
diff --git a/gcc/ipa-predicate.c b/gcc/ipa-predicate.c
index f9ec7ab490c..9cd21ae5060 100644
--- a/gcc/ipa-predicate.c
+++ b/gcc/ipa-predicate.c
@@ -444,8 +444,8 @@ dump_clause (FILE *f, conditions conds, clause_t clause)
 }
 
 
-/* Dump THIS to F. CONDS a vector of conditions used when evauating
-   predicats.  When NL is true new line is output at the end of dump.  */
+/* Dump THIS to F. CONDS a vector of conditions used when evaluating
+   predicates.  When NL is true new line is output at the end of dump.  */
 
 void
 predicate::dump (FILE *f, conditions conds, bool nl) const
@@ -495,7 +495,7 @@ predicate::remap_after_duplication (clause_t possible_truths)
   INFO is ipa_fn_summary of function we are adding predicate into,
   CALLEE_INFO is summary of function predicate P is from. OPERAND_MAP is
   array giving
-   callee formal IDs the caller formal IDs. POSSSIBLE_TRUTHS is clausule of all
+   callee formal IDs the caller formal IDs. POSSSIBLE_TRUTHS is clause of all
   callee conditions that may be true in caller context.  TOPLEV_PREDICATE is
   predicate under which callee is executed.  OFFSET_MAP is an array of of
   offsets that need to be added to conditions, negative offset means that
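The ipa-predicate.h comments below describe predicates as conjunctive-disjunctive formulas over at most 32 conditions, with clause_t a 32-bit mask. A minimal sketch of how such a representation evaluates against a clause of known-true conditions (illustrative types and names, not GCC's exact code):

#include <cstdint>
#include <cstdio>
#include <vector>

/* A clause is a bitmask of conditions.  The predicate is a
   conjunction of clauses, each clause a disjunction of conditions,
   so the predicate may hold iff every clause shares at least one
   bit with the set of possibly-true conditions.  */
typedef uint32_t clause_t;

static bool
predicate_holds (const std::vector<clause_t> &clauses,
		 clause_t possible_truths)
{
  for (clause_t c : clauses)
    if (!(c & possible_truths))
      return false;  /* Some clause cannot be satisfied.  */
  return true;
}

int
main ()
{
  /* (cond0 | cond2) & cond1, i.e. clauses {0b101, 0b010}.  */
  std::vector<clause_t> p = { 0b101, 0b010 };
  printf ("%d\n", predicate_holds (p, 0b011));  /* cond0, cond1 true -> 1 */
  printf ("%d\n", predicate_holds (p, 0b001));  /* cond1 false -> 0 */
  return 0;
}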
diff --git a/gcc/ipa-predicate.h b/gcc/ipa-predicate.h
index 25cd5f2797d..265b1d163b2 100644
--- a/gcc/ipa-predicate.h
+++ b/gcc/ipa-predicate.h
@@ -62,7 +62,7 @@ struct GTY(()) condition
     passed by reference and by value.  */
   unsigned by_ref : 1;
   /* A set of sequential operations on the parameter, which can be
-     seen as a mathmatical function on the parameter.  */
+     seen as a mathematical function on the parameter.  */
   expr_eval_ops param_ops;
 };
 
@@ -89,7 +89,7 @@ struct inline_param_summary
 
 typedef vec<condition, va_gc> *conditions;
 
-/* Predicates are used to repesent function parameters (such as runtime)
+/* Predicates are used to represent function parameters (such as runtime)
   which depend on a context function is called in.
 
   Predicates are logical formulas in conjunctive-disjunctive form consisting
@@ -117,7 +117,7 @@ public:
     first_dynamic_condition = 2
   };
 
-  /* Maximal number of conditions predicate can reffer to.  This is limited
+  /* Maximal number of conditions predicate can refer to.  This is limited
     by using clause_t to be 32bit.  */
   static const int num_conditions = 32;
 
diff --git a/gcc/ipa-ref.h b/gcc/ipa-ref.h
index 0d8e509c932..00af24c77db 100644
--- a/gcc/ipa-ref.h
+++ b/gcc/ipa-ref.h
@@ -46,7 +46,7 @@ public:
     function.  */
   bool cannot_lead_to_return ();
 
-  /* Return true if refernece may be used in address compare.  */
+  /* Return true if reference may be used in address compare.  */
   bool address_matters_p ();
 
   /* Return reference list this reference is in.  */
diff --git a/gcc/ipa-split.c b/gcc/ipa-split.c
index 6ffadc2a60f..2310c1abd0e 100644
--- a/gcc/ipa-split.c
+++ b/gcc/ipa-split.c
@@ -168,7 +168,7 @@ test_nonssa_use (gimple *, tree t, tree, void *data)
       || (VAR_P (t) && auto_var_in_fn_p (t, current_function_decl))
       || TREE_CODE (t) == RESULT_DECL
-	 /* Normal labels are part of CFG and will be handled gratefuly.
+	 /* Normal labels are part of CFG and will be handled gratefully.
	    Forced labels however can be used directly by statements and
	    need to stay in one partition along with their uses.  */
       || (TREE_CODE (t) == LABEL_DECL
@@ -455,7 +455,7 @@ consider_split (class split_point *current, bitmap non_ssa_vars,
		     (param_partial_inlining_entry_probability, 100))))
     {
       /* When profile is guessed, we cannot expect it to give us
-	 realistic estimate on likelyness of function taking the
+	 realistic estimate on likeliness of function taking the
	 complex path.  As a special case, when tail of the function is
	 a loop, enable splitting since inlining code skipping the loop
	 is likely noticeable win.  */
diff --git a/gcc/profile-count.c b/gcc/profile-count.c
index 6f6c0a9e832..49467481d47 100644
--- a/gcc/profile-count.c
+++ b/gcc/profile-count.c
@@ -105,7 +105,7 @@ profile_count::debug () const
   fprintf (stderr, "\n");
 }
 
-/* Return true if THIS differs from OTHER; tolerate small diferences.  */
+/* Return true if THIS differs from OTHER; tolerate small differences.  */
 
 bool
 profile_count::differs_from_p (profile_count other) const
@@ -186,7 +186,7 @@ profile_probability::debug () const
   fprintf (stderr, "\n");
 }
 
-/* Return true if THIS differs from OTHER; tolerate small diferences.  */
+/* Return true if THIS differs from OTHER; tolerate small differences.  */
 
 bool
 profile_probability::differs_from_p (profile_probability other) const
@@ -388,7 +388,7 @@ profile_count::from_gcov_type (gcov_type v, profile_quality quality)
   }
 
 /* COUNT1 times event happens with *THIS probability, COUNT2 times OTHER
-   happens with COUNT2 probablity.  Return probablity that either *THIS or
+   happens with COUNT2 probability.  Return probability that either *THIS or
   OTHER happens.  */
 
 profile_probability
@@ -398,7 +398,7 @@ profile_probability::combine_with_count (profile_count count1,
 {
   /* If probabilities are same, we are done.
     If counts are nonzero we can distribute accordingly. In remaining
-     cases just avreage the values and hope for the best.  */
+     cases just average the values and hope for the best.  */
   if (*this == other || count1 == count2
       || (count2 == profile_count::zero ()
	  && !(count1 == profile_count::zero ())))
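The "distribute accordingly" case of combine_with_count above is a count-weighted average of the two probabilities, with a plain average as the fallback the comment mentions. A sketch in doubles rather than GCC's saturating fixed point; names are illustrative:

#include <cstdio>

/* Toy model of profile_probability::combine_with_count: an event has
   probability p1 on a path executed count1 times and p2 on a path
   executed count2 times; the combined probability is the
   count-weighted average, or the plain average when no counts are
   available.  */
static double
combine_with_count (double p1, long count1, double p2, long count2)
{
  if (count1 + count2 == 0)
    return (p1 + p2) / 2;  /* No counts: just average and hope.  */
  return (p1 * count1 + p2 * count2) / (count1 + count2);
}

int
main ()
{
  /* 0.9 weighted 300:100 against 0.1 gives 0.7.  */
  printf ("%g\n", combine_with_count (0.9, 300, 0.1, 100));
  return 0;
}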
diff --git a/gcc/profile-count.h b/gcc/profile-count.h
index ef84ddcc535..3d6e388f7fe 100644
--- a/gcc/profile-count.h
+++ b/gcc/profile-count.h
@@ -37,7 +37,7 @@ enum profile_quality {
   GUESSED_LOCAL,
 
   /* Profile was read by feedback and was 0, we used local heuristics to guess
-     better.  This is the case of functions not run in profile fedback.
+     better.  This is the case of functions not run in profile feedback.
     Never used by probabilities.  */
   GUESSED_GLOBAL0,
 
@@ -48,7 +48,7 @@ enum profile_quality {
     not reflect the reality but it can be compared interprocedurally
     (for example, we inlined function w/o profile feedback into function
     with feedback and propagated from that).
-     Never used by probablities.  */
+     Never used by probabilities.  */
   GUESSED,
 
   /* Profile was determined by autofdo.  */
@@ -111,7 +111,7 @@ safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
 
   In addition to actual value the quality of profile is tracked and
   propagated through all operations.  Special value UNINITIALIZED_PROFILE
   is used for probabilities
-   that has not been determined yet (for example bacause of
+   that has not been determined yet (for example because of
   -fno-guess-branch-probability)
 
   Typically probabilities are derived from profile feedback (via
@@ -122,7 +122,7 @@ safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
   - never           (0 probability)
   - guessed_never
   - very_unlikely   (1/2000 probability)
-   - unlikely        (1/5 probablity)
+   - unlikely        (1/5 probability)
   - even            (1/2 probability)
   - likely          (4/5 probability)
   - very_likely     (1999/2000 probability)
@@ -479,7 +479,7 @@ public:
       /* The following is equivalent to:
	 *this = cprob.invert () * *this / ret.invert ();
	 Avoid scaling when overall outcome is supposed to be always.
-	 Without knowing that one is inverse of toher, the result would be
+	 Without knowing that one is inverse of other, the result would be
	 conservative.  */
       if (!(*this == always ()))
	*this = (*this - ret) / ret.invert ();
@@ -532,7 +532,7 @@ public:
 
   /* Return true when the probability of edge is reliable.
 
-     The profile guessing code is good at predicting branch outcome (ie.
+     The profile guessing code is good at predicting branch outcome (i.e.
     taken/not taken), that is predicted right slightly over 75% of time.
     It is however notoriously poor on predicting the probability itself.
     In general the profile appear a lot flatter (with probabilities closer
@@ -567,7 +567,7 @@ public:
     return m_val <= max_probability;
   }
 
-  /* Comparsions are three-state and conservative.  False is returned if
+  /* Comparisons are three-state and conservative.  False is returned if
     the inequality cannot be decided.  */
   bool operator< (const profile_probability &other) const
     {
@@ -608,7 +608,7 @@ public:
   bool differs_lot_from_p (profile_probability other) const;
 
   /* COUNT1 times event happens with *THIS probability, COUNT2 times OTHER
-     happens with COUNT2 probablity.  Return probablity that either *THIS or
+     happens with COUNT2 probability.  Return probability that either *THIS or
     OTHER happens.  */
   profile_probability combine_with_count (profile_count count1,
					  profile_probability other,
@@ -631,7 +631,7 @@ public:
     estimation.
     2) ipa counters which are result of profile feedback or special case
     of static profile estimation (such as in function main).
-     3) counters which counts as 0 inter-procedurally (beause given function
+     3) counters which counts as 0 inter-procedurally (because given function
     was never run in train feedback) but they hold local static profile
     estimate.
 
@@ -641,7 +641,7 @@ public:
     well defined.
 
     To take local counter and use it inter-procedurally use ipa member function
-     which strips information irelevant at the inter-procedural level.
+     which strips information irrelevant at the inter-procedural level.
 
     Counters are 61bit integers representing number of executions during
     the train run or normalized frequency within the function.
@@ -660,7 +660,7 @@ public:
     and they do end up in uninitialized scale if any of the parameters is
     uninitialized.
 
-     All comparsions that are three state and handling of probabilities.  Thus
+     All comparisons that are three state and handling of probabilities.  Thus
     a < b is not equal to !(a >= b).
 
     The following pre-defined counts are available:
@@ -770,7 +770,7 @@ public:
       return m_quality >= ADJUSTED;
     }
 
-  /* Return true if vlaue can be operated inter-procedurally.  */
+  /* Return true if value can be operated inter-procedurally.  */
   bool ipa_p () const
     {
       return !initialized_p () || m_quality >= GUESSED_GLOBAL0;
@@ -890,7 +890,7 @@ public:
       return m_val != uninitialized_count || m_quality == GUESSED_LOCAL;
     }
 
-  /* Comparsions are three-state and conservative.  False is returned if
+  /* Comparisons are three-state and conservative.  False is returned if
     the inequality cannot be decided.  */
   bool operator< (const profile_count &other) const
     {
@@ -976,7 +976,7 @@ public:
       return initialized_p () && m_val != 0;
     }
 
-  /* Make counter forcingly nonzero.  */
+  /* Make counter forcibly nonzero.  */
   profile_count force_nonzero () const
     {
       if (!initialized_p ())
@@ -1119,8 +1119,8 @@ public:
       return ret;
     }
 
-  /* Return variant of profile counte which is always safe to compare
-     acorss functions.  */
+  /* Return variant of profile count which is always safe to compare
+     across functions.  */
   profile_count ipa () const
     {
       if (m_quality > GUESSED_GLOBAL0_ADJUSTED)
-- 
2.30.2