+2007-09-01 Kazu Hirata <kazu@codesourcery.com>
+
+ * config/arm/arm.c, config/rs6000/ppu_intrinsics.h,
+ config/spu/spu.c, df-scan.c, fixed-value.c, fold-const.c,
+ ginclude/tgmath.h, haifa-sched.c, optabs.c, recog.c,
+ sched-deps.c, sched-int.h, system.h, target.h,
+ tree-ssa-live.c, tree-vect-transform.c, tree-vectorizer.c,
+ tree.def: Fix comment typos.
+
2007-09-01 Kazu Hirata <kazu@codesourcery.com>
* config/m68k/m68k.c (TARGET_DEFAULT_TARGET_FLAGS): Remove.
return -1;
/* Sign, mantissa and exponent are now in the correct form to plug into the
- formulae described in the comment above. */
+ formula described in the comment above. */
return (sign << 7) | ((exponent ^ 3) << 4) | (mantissa - 16);
}
#endif /* __powerpc64__ */
#ifdef __powerpc64__
-/* Work around the hadware bug in the current Cell implemention. */
+/* Work around the hardware bug in the current Cell implementation. */
#define __mftb() __extension__ \
({ unsigned long long result; \
__asm__ volatile ("1: mftb %[current_tb]\n" \
/* If the branch of the runtime test is taken - i.e. - the vectorized
version is skipped - this incurs a misprediction cost (because the
vectorized version is expected to be the fall-through). So we subtract
- the latency of a mispredicted branch from the costs that are incured
+ the latency of a mispredicted branch from the costs that are incurred
when the vectorized version is executed. */
if (runtime_test)
return -19;
}
-/* Update the defs in the entry bolck. */
+/* Update the defs in the entry block. */
void
df_update_entry_block_defs (void)
&r.low, &r.high, 0);
}
- /* Divide r by pos_b to quo_r. The remanider is in mod. */
+ /* Divide r by pos_b to quo_r. The remainder is in mod. */
div_and_round_double (TRUNC_DIV_EXPR, 1, r.low, r.high, pos_b.low,
pos_b.high, &quo_r.low, &quo_r.high, &mod.low,
&mod.high);
return overflow_p;
}
-/* Calculate F = A << B if LEFT_P. Otherwies, F = A >> B.
+/* Calculate F = A << B if LEFT_P. Otherwise, F = A >> B.
If SAT_P, saturate the result to the max or the min.
Return true, if !SAT_P and overflow. */
{
tree t = fold_convert (type, result);
- /* If the resulting operand is an empty statement, just return the ommited
+ /* If the resulting operand is an empty statement, just return the omitted
statement casted to void. */
if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
return build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
{
tree t = fold_convert (type, result);
- /* If the resulting operand is an empty statement, just return the ommited
+ /* If the resulting operand is an empty statement, just return the omitted
statement casted to void. */
if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
return build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
If any generic parameter is complex, we use a complex version. Otherwise
we use a real version. If the real part of any generic parameter is long
double, we use the long double version. Otherwise if the real part of any
- generic paramter is double or of integer type, we use the double version.
+ generic parameter is double or of integer type, we use the double version.
Otherwise we use the float version. */
#define __tg_cplx(expr) \
else
{
/* One of the NEXT's dependencies has been resolved.
- Recalcute NEXT's status. */
+ Recalculate NEXT's status. */
*ts &= ~SPECULATIVE & ~HARD_DEP;
DONE_SPEC (insn) = ts & BEGIN_SPEC;
CHECK_SPEC (check) = ts & BEGIN_SPEC;
- /* Luckyness of future speculations solely depends upon initial
+ /* Luckiness of future speculations solely depends upon initial
BEGIN speculation. */
if (ts & BEGIN_DATA)
fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
}
}
-/* Return true if BINOPTAB implements a commutatative binary operation. */
+/* Return true if BINOPTAB implements a commutative binary operation. */
static bool
commutative_optab_p (optab binoptab)
if (changes[i].unshare)
*changes[i].loc = copy_rtx (*changes[i].loc);
- /* Avoid unnecesary rescaning when multiple changes to same instruction
+ /* Avoid unnecessary rescanning when multiple changes to same instruction
are made. */
if (object)
{
}
/* Return true if there is no dep_nodes and deps_lists out there.
- After the region is scheduled all the depedency nodes and lists
+ After the region is scheduled all the dependency nodes and lists
should [generally] be returned to pool. */
bool
deps_pools_are_empty_p (void)
/* Find a dependency between producer PRO and consumer CON.
Search through resolved dependency lists if RESOLVED_P is true.
If no such dependency is found return NULL,
- overwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
+ otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
with an iterator pointing to it. */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
simply a pointer to the next element to allow easy deletion from the
list. When a dep is being removed from the list the iterator
will automatically advance because the value in *linkp will start
- reffering to the next element. */
+ referring to the next element. */
dep_link_t *linkp;
/* True if the current list is a resolved one. */
#define CONST_CAST(X) ((void*)(X))
#endif
-/* Acivate -Wcast-qual as a warning (not an error/-Werror). */
+/* Activate -Wcast-qual as a warning (not an error/-Werror). */
#if GCC_VERSION >= 4003
#pragma GCC diagnostic warning "-Wcast-qual"
#endif
/* The values of the following two members are pointers to
functions used to simplify the automaton descriptions.
dfa_pre_advance_cycle and dfa_post_advance_cycle are getting called
- immediatelly before and after cycle is advanced. */
+ immediately before and after cycle is advanced. */
void (* dfa_pre_advance_cycle) (void);
void (* dfa_post_advance_cycle) (void);
{
next = &TREE_CHAIN (*t);
- /* Debug info of nested function reffers to the block of the
+ /* Debug info of nested function refers to the block of the
function. */
if (TREE_CODE (*t) == FUNCTION_DECL)
unused = false;
The problem arises only if the memory access is in an inner-loop nested
inside LOOP, which is now being vectorized using outer-loop vectorization.
This is the only case when the misalignment of the memory access may not
- remain fixed thtoughout the iterations of the inner-loop (as exaplained in
+ remain fixed throughout the iterations of the inner-loop (as explained in
detail in vect_supportable_dr_alignment). In this case, not only is the
optimized realignment scheme not applicable, but also the misalignment
computation (and generation of the realignment token that is passed to
DR: The data reference.
VECT_FACTOR: vectorization factor.
- Return an exrpession whose value is the size of segment which will be
+ Return an expression whose value is the size of segment which will be
accessed by DR. */
static tree
iterations, it is *not* guaranteed that is will remain the same throughout
the execution of the inner-loop. This is because the inner-loop advances
with the original scalar step (and not in steps of VS). If the inner-loop
- step happens to be a multiple of VS, then the misalignment remaines fixed
+ step happens to be a multiple of VS, then the misalignment remains fixed
and we can use the optimized realignment scheme. For example:
for (i=0; i<N; i++)
which of the sections to execute. */
DEFTREECODE (OMP_SECTIONS, "omp_sections", tcc_statement, 3)
-/* This tree immediatelly follows OMP_SECTIONS, and represents the switch
+/* This tree immediately follows OMP_SECTIONS, and represents the switch
used to decide which branch is taken. */
DEFTREECODE (OMP_SECTIONS_SWITCH, "omp_sections_switch", tcc_statement, 0)