+2004-09-05 Kazu Hirata <kazu@cs.umass.edu>
+
+ * c-common.c, cfgexpand.c, cgraphunit.c, defaults.h,
+ et-forest.c, expr.c, gimplify.c, global.c, gthr-lynx.h,
+ hard-reg-set.h, modulo-sched.c, optabs.c, postreload-gcse.c,
+ tree-data-ref.c, tree-flow.h, tree-if-conv.c, tree-inline.c,
+ tree-sra.c, tree-ssa-loop-im.c, tree-ssa-loop-ivopts.c,
+ tree-ssa-loop-niter.c, tree-ssa-operands.c,
+ tree-ssa-operands.h, tree-ssa-propagate.c,
+ tree-ssa-propagate.h, tree-ssa-threadupdate.c, value-prof.c,
+ vec.c, vec.h: Fix comment typos. Follow spelling conventions.
+
2004-09-05 Diego Novillo <dnovillo@redhat.com>
* tree-if-conv.c (gate_tree_if_conversion): Enable only if the
type (ie. before the default conversion to int) of the switch testing
expression.
TYPE is the promoted type of the testing expression, and ORIG_TYPE is
- the type before promiting it. CASE_LOW_P is a pointer to the lower
+ the type before promoting it. CASE_LOW_P is a pointer to the lower
bound of the case label, and CASE_HIGH_P is the upper bound or NULL
if the case is not a case range.
The caller has to make sure that we are not called with NULL for
}
/* A subroutine of expand_used_vars. Expand one variable according to
- its flavour. Variables to be placed on the stack are not actually
+ its flavor. Variables to be placed on the stack are not actually
expanded yet, merely recorded. */
static void
max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
}
- /* Make sure that function is small enought to be considered for inlining. */
+ /* Make sure that function is small enough to be considered for inlining. */
if (!max_depth
|| cgraph_estimate_size_after_inlining (1, node, node) >= limit)
return;
#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) false
#endif
-/* True if the targets integer-comparision fucntions return { 0, 1, 2
+/* True if the targets integer-comparison functions return { 0, 1, 2
} to indicate { <, ==, > }. False if { -1, 0, 1 } is used
instead. The libgcc routines are biased. */
#ifndef TARGET_LIB_INT_CMP_BIASED
}
#ifdef DEBUG_ET
-/* Checks whether neighbourhood of OCC seems sane. */
+/* Checks whether neighborhood of OCC seems sane. */
static void
et_check_occ_sanity (struct et_occ *occ)
if (bitsize >= GET_MODE_BITSIZE (GET_MODE (str_rtx)))
break;
- /* We can't handle fields split accross multiple entities. */
+ /* We can't handle fields split across multiple entities. */
if (bitpos1 + bitsize > GET_MODE_BITSIZE (GET_MODE (str_rtx)))
break;
case IMAGPART_EXPR:
/* The imaginary part of the complex number is always second.
- The expresion is therefore always offset by the size of the
+ The expression is therefore always offset by the size of the
scalar type. */
offset = 0;
bitpos = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)));
gimplify_one_sizepos (&DECL_SIZE (decl), stmt_p);
gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), stmt_p);
- /* All occurences of this decl in final gimplified code will be
+ /* All occurrences of this decl in final gimplified code will be
replaced by indirection. Setting DECL_VALUE_EXPR does two
things: First, it lets the rest of the gimplifier know what
replacement to use. Second, it lets the debug info know
case CONST_DECL:
/* If we require an lvalue, such as for ADDR_EXPR, retain the
- CONST_DECL node. Otherwise the decl is replacable by its
+ CONST_DECL node. Otherwise the decl is replaceable by its
value. */
/* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. */
if (fallback & fb_lvalue)
}
}
-/* The function returns true if register classes C1 and C2 inetrsect. */
+/* The function returns true if register classes C1 and C2 intersect. */
static bool
regclass_intersect (enum reg_class c1, enum reg_class c2)
weak. If the multi-threaded application includes iostream.h,
gthr-posix.h is included and pthread_create will be defined weak.
If pthead_create is weak its defining module in libc is not
- necessarly included in the link and the symbol is resolved to zero.
+ necessarily included in the link and the symbol is resolved to zero.
Therefore the first call to it will crash.
Since -mthreads is a multilib switch on LynxOS we know that at this
the same format as a HARD_REG_SET. To help make sure this is true,
we only try the widest fast integer mode (HOST_WIDEST_FAST_INT)
instead of all the smaller types. This approach loses only if
- there are avery few registers and then only in the few cases where
+ there are very few registers and then only in the few cases where
we have an array of HARD_REG_SETs, so it needn't be as complex as
it used to be. */
row = SMODULO (ps_i->cycle, ps->ii);
/* Find the first must follow and the last must precede
- and insert the node immediatly after the must precede
+ and insert the node immediately after the must precede
but make sure that it there is no must follow after it. */
for (next_ps_i = ps->rows[row];
next_ps_i;
if (TEST_BIT (must_follow, next_node->cuid))
return false;
- /* Advace PS_I over its next_in_row in the doubly linked list. */
+ /* Advance PS_I over its next_in_row in the doubly linked list. */
prev = ps_i->prev_in_row;
next = ps_i->next_in_row;
masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
fill with zeros or sign bits as appropriate.
- If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesise
+ If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
/* We need to keep a hash table of expressions. The table entries are of
type 'struct expr', and for each expression there is a single linked
- list of occurences. */
+ list of occurrences. */
/* The table itself. */
static htab_t expr_table;
static struct obstack expr_obstack;
/* Occurrence of an expression.
- There is at most one occurence per basic block. If a pattern appears
+ There is at most one occurrence per basic block. If a pattern appears
more than once, the last appearance is used. */
struct occr
}
\f
-/* Dump all expressions and occurences that are currently in the
+/* Dump all expressions and occurrences that are currently in the
expression hash table to FILE. */
/* This helper is called via htab_traverse. */
- polyhedron dependence
or with the chains of recurrences based representation,
- - to define a knowledge base for storing the data dependeces
+ - to define a knowledge base for storing the data dependences
information,
- to define an interface to access this data.
to false, then the other fields in this structure
should not be used; there is no guarantee that they
will be correct. */
- tree may_be_zero; /* The booleand expression. If it evaluates to true,
+ tree may_be_zero; /* The boolean expression. If it evaluates to true,
the loop will exit in the first iteration (i.e.
its latch will not be executed), even if the niter
field says otherwise. */
var = create_tmp_var (type, name);
add_referenced_tmp_var (var);
- /* Build new statement to assigne EXP to new variable. */
+ /* Build new statement to assign EXP to new variable. */
stmt = build (MODIFY_EXPR, type, var, exp);
/* Get SSA name for the new variable and set make new statement
case POINTER_TYPE:
case REFERENCE_TYPE:
/* We have to worry about mutually recursive pointers. These can't
- be written in C. They can in Ada. It's pathlogical, but
+ be written in C. They can in Ada. It's pathological, but
there's an ACATS test (c38102a) that checks it. Deal with this
by checking if we're pointing to another pointer, that one
points to another pointer, that one does too, and we have no htab.
/* Take into account everything back up the chain. Given that chain
lengths are rarely very long, this should be acceptable. If we
- truely identify this as a performance problem, it should work to
+ truly identify this as a performance problem, it should work to
hash the pointer value "e->parent". */
for (p = e->parent; p ; p = p->parent)
h = (h * 65521) ^ sra_hash_tree (p->element);
}
/* Suppose that operand DEF is used inside the LOOP. Returns the outermost
- loop to that we could move the expresion using DEF if it did not have
+ loop to that we could move the expression using DEF if it did not have
other operands, i.e. the outermost loop enclosing LOOP in that the value
of DEF is invariant. */
}
/* Hoist the statements in basic block BB out of the loops prescribed by
- data stored in LIM_DATA structres associated with each statement. Callback
+ data stored in LIM_DATA structures associated with each statement. Callback
for walk_dominator_tree. */
static void
}
/* Hoist the statements out of the loops prescribed by data stored in
- LIM_DATA structres associated with each statement.*/
+ LIM_DATA structures associated with each statement.*/
static void
move_computations (void)
return true;
}
-/* Forces statements definining (invariant) SSA names in expression EXPR to be
+/* Forces statements defining (invariant) SSA names in expression EXPR to be
moved out of the LOOP. ORIG_LOOP is the loop in that EXPR is used. */
static void
}
/* Records request for store motion of memory reference REF from LOOP.
- MEM_REFS is the list of occurences of the reference REF inside LOOP;
+ MEM_REFS is the list of occurrences of the reference REF inside LOOP;
these references are rewritten by a new temporary variable.
Exits from the LOOP are stored in EXITS, there are N_EXITS of them.
The initialization of the temporary variable is put to the preheader
#define CONSIDER_ALL_CANDIDATES_BOUND \
((unsigned) PARAM_VALUE (PARAM_IV_CONSIDER_ALL_CANDIDATES_BOUND))
-/* If there are more iv occurences, we just give up (it is quite unlikely that
+/* If there are more iv occurrences, we just give up (it is quite unlikely that
optimizing such a loop would help, and it would take ages). */
#define MAX_CONSIDERED_USES \
/* We want to take care only of <=; this is easy,
as in cases the overflow would make the transformation unsafe the loop
does not roll. Seemingly it would make more sense to want to take
- care of <, as NE is more simmilar to it, but the problem is that here
+ care of <, as NE is more similar to it, but the problem is that here
the transformation would be more difficult due to possibly infinite
loops. */
if (zero_p (step0))
obviously if the test for overflow during that transformation
passed, we cannot overflow here. Most importantly any
loop with sharp end condition and step 1 falls into this
- cathegory, so handling this case specially is definitely
+ category, so handling this case specially is definitely
worth the troubles. */
may_xform = boolean_true_node;
}
get_stmt_operands() in the primary entry point.
The operand tree is the parsed by the various get_* routines which look
- through the stmt tree for the occurence of operands which may be of
+ through the stmt tree for the occurrence of operands which may be of
interest, and calls are made to the append_* routines whenever one is
found. There are 5 of these routines, each representing one of the
5 types of operands. Defs, Uses, Virtual Uses, Virtual May Defs, and
/* Specifically for use in DOM's expression analysis. Given a store, we
- create an artifical stmt which looks like a load from the store, this can
+ create an artificial stmt which looks like a load from the store, this can
be used to eliminate redundant loads. OLD_OPS are the operands from the
- store stmt, and NEW_STMT is the new load which reperesent a load of the
+ store stmt, and NEW_STMT is the new load which represents a load of the
values stored. */
void
/* This structure is used in the operand iterator loops. It contains the
- items required to determine which operand is retreived next. During
+ items required to determine which operand is retrieved next. During
optimization, this structure is scalarized, and any unused fields are
optimized away, resulting in little overhead. */
#define SSA_OP_VUSE 0x04 /* VUSE operands. */
#define SSA_OP_VMAYUSE 0x08 /* USE portion of V_MAY_DEFS. */
#define SSA_OP_VMAYDEF 0x10 /* DEF portion of V_MAY_DEFS. */
-#define SSA_OP_VMUSTDEF 0x20 /* V_MUST_DEF defintions. */
+#define SSA_OP_VMUSTDEF 0x20 /* V_MUST_DEF definitions. */
/* These are commonly grouped operand flags. */
#define SSA_OP_VIRTUAL_USES (SSA_OP_VUSE | SSA_OP_VMAYUSE)
SSA_PROP_INTERESTING: S produces a value that can be computed
at compile time. Its result can be propagated into the
- statements that feed from S. Furhtermore, if S is a
+ statements that feed from S. Furthermore, if S is a
conditional jump, only the edge known to be taken is added
to the work list. Edges that are known not to execute are
never simulated.
returned by SSA_PROP_VISIT_STMT should be added to
INTERESTING_SSA_EDGES. If the statement being visited is a
conditional jump, SSA_PROP_VISIT_STMT should indicate which edge
- out of the basic block should be marked exectuable. */
+ out of the basic block should be marked executable. */
SSA_PROP_INTERESTING,
/* The statement produces a varying (i.e., useless) value and
to update dominator tree and SSA graph after such changes.
The key to keeping the SSA graph update managable is to duplicate
- the side effects occuring in BB so that those side effects still
+ the side effects occurring in BB so that those side effects still
occur on the paths which bypass BB after redirecting edges.
We accomplish this by creating duplicates of BB and arranging for
-- list of counters starting from the first one. */
/* For speculative prefetching, the range in that we do not prefetch (because
- we assume that it will be in cache anyway). The assymetry between min and
+ we assume that it will be in cache anyway). The asymmetry between min and
max range is trying to reflect the fact that the sequential prefetching
of the data is commonly done directly by hardware. Nevertheless, these
values are just a guess and should of course be target-specific. */
/* We require that count is at least half of all; this means
that for the transformation to fire the value must be constant
- at least 50% of time (and 75% gives the garantee of usage). */
+ at least 50% of time (and 75% gives the guarantee of usage). */
if (!rtx_equal_p (address, value) || 2 * count < all)
return false;
/* Ensure there are at least RESERVE free slots in VEC, if RESERVE >=
0. If RESERVE < 0, increase the current allocation exponentially.
VEC can be NULL, in which case a new vector is created. The
- vector's trailing array is at VEC_OFFSET offset and consistes of
+ vector's trailing array is at VEC_OFFSET offset and consists of
ELT_SIZE sized elements. */
void *
interoperate with the GTY machinery.
Because of the different behaviour of objects and of pointers to
- objects, there are two flavours. One to deal with a vector of
+ objects, there are two flavors. One to deal with a vector of
pointers to objects, and one to deal with a vector of objects
themselves. Both of these pass pointers to objects around -- in
the former case the pointers are stored into the vector and in the
void VEC_T_ordered_remove (VEC(T) *v, unsigned ix); // Object
Remove an element from the IXth position of V. Ordering of
- remaining elements is preserverd. For pointer vectors returns the
+ remaining elements is preserved. For pointer vectors returns the
removed object. This is an O(N) operation due to a memmove. */
#define VEC_ordered_remove(TDEF,V,I) \