+2004-05-30 Kazu Hirata <kazu@cs.umass.edu>
+
+ * c-common.c, calls.c, cfgcleanup.c, cgraph.c, cgraphunit.c,
+ ddg.c, ddg.h, df.c, df.h, except.c, expr.c, flags.h,
+ fold-const.c, gcc.c, gimplify.c, haifa-sched.c,
+ modulo-sched.c, tree-inline.c, tree-into-ssa.c, tree-nested.c,
+ tree-nrv.c, tree-ssa-ccp.c, tree-ssa-dom.c, tree-ssa-live.c,
+ tree-ssa-loop.c, tree-ssa-pre.c, tree-tailcall.c, tree.h: Fix
+ comment typos. Follow spelling conventions.
+
2004-05-29 Geoffrey Keating <geoffk@apple.com>
* gengtype-yacc.y: Add NESTED_PTR token.
/* Hook used by expand_expr to expand language-specific tree codes. */
/* The only things that should go here are bits needed to expand
- constant initalizers. Everything else should be handled by the
+ constant initializers. Everything else should be handled by the
gimplification routines. */
rtx
break;
}
- /* If tail call production suceeded, we need to remove REG_EQUIV notes on
+ /* If tail call production succeeded, we need to remove REG_EQUIV notes on
arguments too, as argument area is now clobbered by the call. */
if (tail_call_insns)
{
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA. */
-/* This file contains optimizer of the control flow. The main entrypoint is
+/* This file contains optimizer of the control flow. The main entry point is
cleanup_cfg. Following optimizations are performed:
- Unreachable blocks removal
The function inlining information is decided in advance and maintained
in the callgraph as so called inline plan.
For each inlined call, the callee's node is cloned to represent the
- new function copy produced by inlininer.
+ new function copy produced by inliner.
Each inlined call gets a unique corresponding clone node of the callee
and the data structure is updated while inlining is performed, so
the clones are eliminated and their callee edges redirected to the
/* E is expected to be an edge being inlined. Clone destination node of
the edge and redirect it to the new clone.
- DUPLICATE is used for bookeeping on whether we are actually creating new
+ DUPLICATE is used for bookkeeping on whether we are actually creating new
clones or re-using node originally representing out-of-line function call.
*/
void
recursive = what->decl == to->global.inlined_to->decl;
else
recursive = what->decl == to->decl;
- /* Marking recursive function inlinine has sane semantic and thus we should
+ /* Marking recursive function inline has sane semantic and thus we should
not warn on it. */
if (recursive && reason)
*reason = (what->local.disregard_inline_limits
for_each_rtx (x, mark_mem_use, data);
}
-/* Returns non-zero if INSN reads from memory. */
+/* Returns nonzero if INSN reads from memory. */
static bool
mem_read_insn_p (rtx insn)
{
mem_ref_p = true;
}
-/* Returns non-zero if INSN writes to memory. */
+/* Returns nonzero if INSN writes to memory. */
static bool
mem_write_insn_p (rtx insn)
{
return mem_ref_p;
}
-/* Returns non-zero if X has access to memory. */
+/* Returns nonzero if X has access to memory. */
static bool
rtx_mem_access_p (rtx x)
{
return false;
}
-/* Returns non-zero if INSN reads to or writes from memory. */
+/* Returns nonzero if INSN reads from or writes to memory. */
static bool
mem_access_insn_p (rtx insn)
{
get_block_head_tail (g->bb->index, &head, &tail);
sched_analyze (&tmp_deps, head, tail);
- /* Build intra-loop data dependecies using the schedular dependecy
+ /* Build intra-loop data dependencies using the scheduler dependency
analysis. */
for (i = 0; i < g->num_nodes; i++)
{
/* Updates the counts of U_NODE's successors (that belong to NODES) to be
at-least as large as the count of U_NODE plus the latency between them.
Sets a bit in TMP for each successor whose count was changed (increased).
- Returns non-zero if any count was changed. */
+ Returns nonzero if any count was changed. */
static int
update_dist_to_successors (ddg_node_ptr u_node, sbitmap nodes, sbitmap tmp)
{
/* The insn represented by the node. */
rtx insn;
- /* A note preceeding INSN (or INSN itself), such that all insns linked
+ /* A note preceding INSN (or INSN itself), such that all insns linked
from FIRST_NOTE until INSN (inclusive of both) are moved together
when reordering the insns. This takes care of notes that should
- continue to preceed INSN. */
+ continue to precede INSN. */
rtx first_note;
/* Incoming and outgoing dependency edges. */
df_init simply creates a poor man's object (df) that needs to be
passed to all the dataflow routines. df_finish destroys this
-object and frees up any allocated memory. DF_ALL says to analyse
+object and frees up any allocated memory. DF_ALL says to analyze
everything.
df_analyze performs the following:
#define DF_INSN_USES(DF, INSN) ((DF)->insns[INSN_UID (INSN)].uses)
-/* Functions to build and analyse dataflow information. */
+/* Functions to build and analyze dataflow information. */
extern struct df *df_init (void);
case ERT_MUST_NOT_THROW:
/* MUST_NOT_THROW regions are implementable solely in the
- runtime, but their existance continues to affect calls
+ runtime, but their existence continues to affect calls
within that region. Never delete them here. */
kill_it = false;
break;
return target;
}
\f
-/* Examine CTOR. Discover how many scalar fields are set to non-zero
+/* Examine CTOR. Discover how many scalar fields are set to nonzero
values and place it in *P_NZ_ELTS. Discover how many scalar fields
are set to non-constant values and place it in *P_NC_ELTS. */
extern int flag_shared_data;
-/* Controls the activiation of SMS modulo scheduling. */
+/* Controls the activation of SMS modulo scheduling. */
extern int flag_modulo_sched;
/* flag_schedule_insns means schedule insns within basic blocks (before
\f
/* Return nonzero if two operands (typically of the same tree node)
are necessarily equal. If either argument has side-effects this
- function returns zero. FLAGS modifies behaviour as follows:
+ function returns zero. FLAGS modifies behavior as follows:
If OEP_ONLY_CONST is set, only return nonzero for constants.
This function tests whether the operands are indistinguishable;
}
/* Subroutine of fold() that optimizes comparisons of a division by
- a non-zero integer constant against an integer constant, i.e.
+ a nonzero integer constant against an integer constant, i.e.
X/C1 op C2.
CODE is the comparison operator: EQ_EXPR, NE_EXPR, GT_EXPR, LT_EXPR,
const char *cpp_spec; /* If non-NULL, substitute this spec
for `%C', rather than the usual
cpp_spec. */
- const int combinable; /* If non-zero, compiler can deal with
+ const int combinable; /* If nonzero, compiler can deal with
multiple source files at once (IMA). */
- const int needs_preprocessing; /* If non-zero, source files need to
+ const int needs_preprocessing; /* If nonzero, source files need to
be run through a preprocessor. */
};
return lab;
}
-/* Create a new temporary name with PREFIX. Returns an indentifier. */
+/* Create a new temporary name with PREFIX. Returns an identifier. */
static GTY(()) unsigned int tmp_var_id_num;
}
/* Prepare calls to builtins to SAVE and RESTORE the stack as well as
- temporary through that they comunicate. */
+ a temporary through which they communicate. */
static void
build_stack_save_restore (tree *save, tree *restore)
/* ??? This bit ought not be needed. For any element not present
in the initializer, we should simply set them to zero. Except
we'd need to *find* the elements that are not present, and that
- requires trickery to avoid quadratic compile-time behaviour in
+ requires trickery to avoid quadratic compile-time behavior in
large cases or excessive memory use in small cases. */
else
{
}
/* Gimplifies a statement list. These may be created either by an
- enlightend front-end, or by shortcut_cond_expr. */
+ enlightened front-end, or by shortcut_cond_expr. */
static enum gimplify_status
gimplify_statement_list (tree *expr_p)
description interface, MAX_INSN_QUEUE_INDEX is a power of two minus
one which is larger than maximal time of instruction execution
computed by genattr.c on the base maximal time of functional unit
- reservations and geting a result. This is the longest time an
+ reservations and getting a result. This is the longest time an
insn may be queued. */
#define MAX_INSN_QUEUE_INDEX max_insn_queue_index_macro_value
IEEE Trans. on Comps., 50(3), March 2001
[2] J. Llosa, A. Gonzalez, E. Ayguade, and M. Valero.
Swing Modulo Scheduling: A Lifetime Sensitive Approach.
- PACT '96 , pages 80-87, October 1996 (Boston - Massachussets - USA).
+ PACT '96 , pages 80-87, October 1996 (Boston - Massachusetts - USA).
The basic structure is:
1. Build a data-dependence graph (DDG) for each loop.
void set_row_column_for_ps (partial_schedule_ptr);
\f
-/* This page defines constants and structures for the modulo scheduiing
+/* This page defines constants and structures for the modulo scheduling
driver. */
/* As in haifa-sched.c: */
original register defined by the node. */
rtx first_reg_move;
- /* The number of register-move instructions added, immediately preceeding
+ /* The number of register-move instructions added, immediately preceding
first_reg_move. */
int nreg_moves;
int stage; /* Holds time / ii. */
/* The column of a node inside the ps. If nodes u, v are on the same row,
- u will preceed v if column (u) < column (v). */
+ u will precede v if column (u) < column (v). */
int column;
} *node_sched_params_ptr;
{
int nreg_moves4e = (SCHED_TIME (e->dest) - SCHED_TIME (e->src)) / ii;
- /* If dest preceeds src in the schedule of the kernel, then dest
+ /* If dest precedes src in the schedule of the kernel, then dest
will read before src writes and we can save one reg_copy. */
if (SCHED_ROW (e->dest) == SCHED_ROW (e->src)
&& SCHED_COLUMN (e->dest) < SCHED_COLUMN (e->src))
if (for_prolog)
{
/* SCHED_STAGE (u_node) >= from_stage == 0. Generate increasing
- number of reg_moves starting with the second occurance of
+ number of reg_moves starting with the second occurrence of
u_node, which is generated if its SCHED_STAGE <= to_stage. */
i_reg_moves = to_stage - SCHED_STAGE (u_node);
i_reg_moves = MAX (i_reg_moves, 0);
*count++;
break;
- /* Few special cases of expensive operations. This is usefull
+ /* Few special cases of expensive operations. This is useful
to avoid inlining on functions having too many of these. */
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
/* We get cleared memory from the allocator, so if the memory is
not cleared, then we are re-using a previously allocated entry. In
- that case, we can also re-use the underlying virtal arrays. Just
+ that case, we can also re-use the underlying virtual arrays. Just
make sure we clear them before using them! */
if (recycled && bd->block_defs && VARRAY_ACTIVE_SIZE (bd->block_defs) > 0)
abort ();
/* For __builtin_nonlocal_goto, we need N words. The first is the
frame pointer, the rest is for the target's stack pointer save
- area. The number of words is controled by STACK_SAVEAREA_MODE;
+ area. The number of words is controlled by STACK_SAVEAREA_MODE;
not the best interface, but it'll do for now. */
if (Pmode == ptr_mode)
type = ptr_type_node;
/* The original user label may also be use for a normal goto, therefore
we must create a new label that will actually receive the abnormal
control transfer. This new label will be marked LABEL_NONLOCAL; this
- mark will trigger proper behaviour in the cfg, as well as cause the
+ mark will trigger proper behavior in the cfg, as well as cause the
(hairy target-specific) non-local goto receiver code to be generated
when we expand rtl. */
new_label = create_artificial_label ();
sf->has_nonlocal_label = 1;
}
- /* Make sure all new local variables get insertted into the
+ /* Make sure all new local variables get inserted into the
proper BIND_EXPR. */
if (root->new_local_var_chain)
declare_tmp_vars (root->new_local_var_chain,
this function's RETURN_EXPR statements. */
tree var;
- /* This is the function's RESULT_DECL. We will replace all occurences
+ /* This is the function's RESULT_DECL. We will replace all occurrences
of VAR with RESULT_DECL when we apply this optimization. */
tree result;
};
to RESULT. */
else if (TREE_CODE (*tp) == RETURN_EXPR)
TREE_OPERAND (*tp, 0) = dp->result;
- /* Replace all occurences of VAR with RESULT. */
+ /* Replace all occurrences of VAR with RESULT. */
else if (*tp == dp->var)
*tp = dp->result;
found:
/* If we get here, we've got an aggregate field, and a possibly
- non-zero offset into them. Recurse and hope for a valid match. */
+ nonzero offset into them. Recurse and hope for a valid match. */
if (base_is_ptr)
base = build1 (INDIRECT_REF, record_type, base);
base = build (COMPONENT_REF, field_type, base, f);
/* We emptyed the hash table earlier, now delete it completely. */
htab_delete (avail_exprs);
- /* It is not nocessary to clear CURRDEFS, REDIRECTION_EDGES, VRP_DATA,
+ /* It is not necessary to clear CURRDEFS, REDIRECTION_EDGES, VRP_DATA,
CONST_AND_COPIES, and NONZERO_VARS as they all get cleared at the bottom
of the do-while loop above. */
}
/* Use the SSA_NAMES in LOCALS to restore TABLE to its original
- state, stopping when there are LIMIT entires left in LOCALs. */
+ state, stopping when there are LIMIT entries left in LOCALs. */
static void
restore_nonzero_vars_to_original_value (varray_type locals,
}
/* Use the source/dest pairs in LOCALS to restore TABLE to its original
- state, stopping when there are LIMIT entires left in LOCALs. */
+ state, stopping when there are LIMIT entries left in LOCALs. */
static void
restore_vars_to_original_value (varray_type locals,
breaking out of the loop, then we have a PHI which may create
a useful equivalence. We do not need to record unwind data for
this, since this is a true assignment and not an equivalence
- infered from a comparison. All uses of this ssa name are dominated
+ inferred from a comparison. All uses of this ssa name are dominated
by this assignment, so unwinding just costs time and space. */
if (i == PHI_NUM_ARGS (phi)
&& may_propagate_copy (lhs, rhs))
/* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
variable compared against zero. If we're honoring signed zeros,
then we cannot record this value unless we know that the value is
- non-zero. */
+ nonzero. */
if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
&& (TREE_CODE (y) != REAL_CST
|| REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
/* If the RHS of the assignment is a constant or another variable that
may be propagated, register it in the CONST_AND_COPIES table. We
do not need to record unwind data for this, since this is a true
- assignment and not an equivalence infered from a comparison. All
+ assignment and not an equivalence inferred from a comparison. All
uses of this ssa name are dominated by this assignment, so unwinding
just costs time and space. */
if (may_optimize_p
}
-/* This function will remove any tree entires from TPA which have only a single
+/* This function will remove any tree entries from TPA which have only a single
element. This will help keep the size of the conflict graph down. The
function returns the number of remaining tree lists. */
/* Anything which is still live at this point interferes.
In order to implement this efficiently, only conflicts between
partitions which have the same TPA root need be added.
- TPA roots which have been seen are tracked in 'tpa_nodes'. A non-zero
+ TPA roots which have been seen are tracked in 'tpa_nodes'. A nonzero
entry points to an index into 'partition_link', which then indexes
into itself forming a linked list of partitions sharing a tpa root
which have been seen as live up to this point. Since partitions start
create_preheaders (loops, CP_SIMPLE_PREHEADERS);
- /* We do not try to keep the information about irreductible regions
+ /* We do not try to keep the information about irreducible regions
up-to-date. */
loops->state &= ~LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS;
/* For the uninitiated, the algorithm is a modified SSA renaming
algorithm (working on expressions rather than variables) . We
attempt to determine which expression occurrences have the same
- ESSA version (we call it class, for equivalence/redunancy class,
+ ESSA version (we call it class, for equivalence/redundancy class,
which is what the papers call it. Open64 calls it e-version), and
which occurrences are actually operands for an EPHI (since this has
to be discovered from the program).
/* First step of finalization. Determine which expressions are being
saved and which are being deleted.
- This is done as a simple dominator based availabilty calculation,
+ This is done as a simple dominator based availability calculation,
using the e-versions/redundancy classes. */
static bool
#include "langhooks.h"
/* The file implements the tail recursion elimination. It is also used to
- analyse the tail calls in general, passing the results to the rtl level
+ analyze the tail calls in general, passing the results to the rtl level
where they are used for sibcall optimization.
In addition to the standard tail recursion elimination, we handle the most
We rewrite this to a gimple equivalent of return m_acc * x + a_acc.
2) return f (...), where f is the current function, is rewritten in a
- clasical tail-recursion elimination way, into assignment of arguments
+ classical tail-recursion elimination way, into assignment of arguments
and jump to the start of the function. Values of the accumulators
are unchanged.
}
}
-/* Adjust value of the return at the end of BB accodring to M and A
+/* Adjust value of the return at the end of BB according to M and A
accumulators. */
static void
/* Define fields and accessors for some nodes that represent expressions. */
-/* Non-zero if NODE is an emtpy statement (NOP_EXPR <0>). */
+/* Nonzero if NODE is an empty statement (NOP_EXPR <0>). */
#define IS_EMPTY_STMT(NODE) (TREE_CODE (NODE) == NOP_EXPR \
&& VOID_TYPE_P (TREE_TYPE (NODE)) \
&& integer_zerop (TREE_OPERAND (NODE, 0)))