+2007-02-18 Kazu Hirata <kazu@codesourcery.com>
+
+ * cfgloop.c, config/alpha/alpha.c, config/bfin/bfin.c,
+ config/i386/athlon.md, config/ia64/ia64.md,
+ config/rs6000/rs6000.c, config/s390/s390.c, config/spu/spu.md,
+ df-problems.c, df.h, fold-const.c, ipa-cp.c, ipa-inline.c,
+ ipa-prop.h, see.c, struct-equiv.c, tree-inline.c,
+ tree-ssa-loop-niter.c, tree-vect-analyze.c,
+ tree-vect-transform.c: Fix comment typos.
+
2007-02-17 Kazu Hirata <kazu@codesourcery.com>
* sched-deps.c (find_insn_list): Remove.
{
edge e;
- /* We eliminate the mutiple latches by splitting the header to the forwarder
+ /* We eliminate the multiple latches by splitting the header to the forwarder
block F and the rest R, and redirecting the edges. There are two cases:
1) If there is a latch edge E that corresponds to a subloop (we guess
return ret;
}
-/* Expand an an atomic fetch-and-operate pattern. CODE is the binary operation
+/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
to perform. MEM is the memory on which to operate. VAL is the second
operand of the binary operator. BEFORE and AFTER are optional locations to
 return the value of MEM either before or after the operation. SCRATCH is
For args passed entirely in registers or entirely in memory, zero.
Refer VDSP C Compiler manual, our ABI.
- First 3 words are in registers. So, if a an argument is larger
+ First 3 words are in registers. So, if an argument is larger
than the registers available, it will span the register and
stack. */
"athlon-direct,athlon-fploadk8,athlon-fstore")
;; On AMDFAM10 all double, single and integer packed and scalar SSEx data
;; loads generated are direct path, latency of 2 and do not use any FP
-;; executions units. No seperate entries for movlpx/movhpx loads, which
+;; execution units. No separate entries for movlpx/movhpx loads, which
;; are direct path, latency of 4 and use the FADD/FMUL FP execution units,
;; as they will not be generated.
(define_insn_reservation "athlon_sseld_amdfam10" 2
"athlon-direct,(athlon-fpsched+athlon-agu),(athlon-fstore+athlon-store)")
;; On AMDFAM10 all double, single and integer packed SSEx data stores
;; generated are all double path, latency of 2 and use the FSTORE FP
-;; execution unit. No entries seperate for movupx/movdqu, which are
+;; execution unit. No separate entries for movupx/movdqu, which are
;; vector path, latency of 3 and use the FSTORE*2 FP execution unit,
;; as they will not be generated.
(define_insn_reservation "athlon_ssest_amdfam10" 2
;; Define register predicate prefix.
;; We can generate speculative loads only for general and fp registers - this
-;; is constrainted in ia64.c: ia64_speculate_insn ().
+;; is constrained in ia64.c: ia64_speculate_insn ().
(define_mode_attr reg_pred_prefix [(BI "gr") (QI "gr") (HI "gr") (SI "gr") (DI "grfr") (SF "grfr") (DF "grfr") (XF "fr") (TI "fr")])
(define_mode_attr ld_class [(BI "ld") (QI "ld") (HI "ld") (SI "ld") (DI "ld,fld") (SF "fld,ld") (DF "fld,ld") (XF "fld") (TI "fldp")])
emit_insn (fn (res, mem, val));
}
-/* Expand an an atomic fetch-and-operate pattern. CODE is the binary operation
+/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
to perform. MEM is the memory on which to operate. VAL is the second
operand of the binary operator. BEFORE and AFTER are optional locations to
 return the value of MEM either before or after the operation. SCRATCH is
}
/* Expand an atomic operation CODE of mode MODE. MEM is the memory location
- and VAL the value to play with. If AFTER is true then store the the value
+ and VAL the value to play with. If AFTER is true then store the value
MEM holds after the operation, if AFTER is false then store the value MEM
holds before the operation. If TARGET is zero then discard that value, else
store it to TARGET. */
"pipe0, fp, nothing*5")
;; The behavior of the double precision is that both pipes stall
-;; for 6 cycles and the the rest of the operation pipelines for
+;; for 6 cycles and the rest of the operation pipelines for
;; 7 cycles. The simplest way to model this is to simply ignore
;; the 6 cycle stall.
(define_insn_reservation "FPD" 7 (eq_attr "type" "fpd")
}
-/* Or in the stack regs, hard regs and early clobber regs into the the
+/* Or in the stack regs, hard regs and early clobber regs into the
ur_in sets of all of the blocks. */
static void
}
-/* Or in the stack regs, hard regs and early clobber regs into the the
+/* Or in the stack regs, hard regs and early clobber regs into the
ur_in sets of all of the blocks. */
static void
{
/* Local sets to describe the basic blocks. */
bitmap earlyclobber; /* The set of registers that are referenced
- with an an early clobber mode. */
+ with an early clobber mode. */
/* Kill and gen are defined as in the UR problem. */
bitmap kill;
bitmap gen;
}
/* If this is a comparison of complex values and both sides
- are COMPLEX_CST, do the comparision by parts to fold the
- comparision. */
+ are COMPLEX_CST, do the comparison by parts to fold the
+ comparison. */
if ((code == EQ_EXPR || code == NE_EXPR)
&& TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
&& TREE_CODE (arg0) == COMPLEX_CST
arguments
of the callsite. There are three types of values :
Formal - the caller's formal parameter is passed as an actual argument.
- Constant - a constant is passed as a an actual argument.
+ Constant - a constant is passed as an actual argument.
Unknown - neither of the above.
In order to compute the jump functions, we need the modify information for
continue;
}
/* When the function body would grow and inlining the function won't
- elliminate the need for offline copy of the function, don't inline.
+ eliminate the need for offline copy of the function, don't inline.
*/
if (mode == INLINE_SIZE
&& (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
/* A jump function for a callsite represents the values passed as actual
arguments of the callsite. There are three main types of values :
Formal - the caller's formal parameter is passed as an actual argument.
- Constant - a constant is passed as a an actual argument.
+ Constant - a constant is passed as an actual argument.
Unknown - neither of the above.
Integer and real constants are represented as CONST_IPATYPE and Fortran
constants are represented as CONST_IPATYPE_REF. */
A definition is relevant if its root has
((entry_type == SIGN_EXTENDED_DEF) || (entry_type == ZERO_EXTENDED_DEF)) and
- his source_mode is not narrower then the the roots source_mode.
+ its source_mode is not narrower than the root's source_mode.
Return the number of relevant defs or negative number if something bad had
happened and the optimization should be aborted. */
return x_change;
}
-/* Check if *XP is equivalent to Y. Until an an unreconcilable difference is
+/* Check if *XP is equivalent to Y. Until an unreconcilable difference is
found, use in-group changes with validate_change on *XP to make register
assignments agree. It is the (not necessarily direct) callers
responsibility to verify / confirm / cancel these changes, as appropriate.
return false;
x_dest1 = XEXP (x, 0);
/* validate_change might have changed the destination. Put it back
- so that we can do a proper match for its role a an input. */
+ so that we can do a proper match for its role as an input. */
XEXP (x, 0) = x_dest0;
if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 1, info))
return false;
/* Estimating time for call is difficult, since we have no idea what the
called function does. In the current uses of eni_time_weights,
underestimating the cost does less harm than overestimating it, so
- we choose a rather small walue here. */
+ we choose a rather small value here. */
eni_time_weights.call_cost = 10;
eni_time_weights.div_mod_cost = 10;
eni_time_weights.switch_cost = 4;
return false;
/* We don't want to see undefined signed overflow warnings while
- computing the nmber of iterations. */
+ computing the number of iterations. */
fold_defer_overflow_warnings ();
iv0.base = expand_simple_operations (iv0.base);
can make all data references satisfy vect_supportable_dr_alignment.
If so, update data structures as needed and return true. Note that
at this time vect_supportable_dr_alignment is known to return false
- for a a misaligned write.
+ for a misaligned write.
B) If peeling wasn't possible and there is a data reference with an
unknown misalignment that does not satisfy vect_supportable_dr_alignment
{
/* Skip same data-refs. In case that two or more stmts share data-ref
(supported only for loads), we vectorize only the first stmt, and
- the rest get their vectorized loads from the the first one. */
+ the rest get their vectorized loads from the first one. */
if (!tree_int_cst_compare (DR_INIT (data_ref),
DR_INIT (STMT_VINFO_DATA_REF (
vinfo_for_stmt (next)))))
REDUCTION_PHI is the phi-node that carries the reduction computation.
This function:
- 1. Creates the reduction def-use cycle: sets the the arguments for
+ 1. Creates the reduction def-use cycle: sets the arguments for
REDUCTION_PHI:
The loop-entry argument is the vectorized initial-value of the reduction.
The loop-latch argument is VECT_DEF - the vector of partial sums.