cfgloop.c, [...]: Fix comment typos.

* cfgloop.c, config/alpha/alpha.c, config/bfin/bfin.c,
	config/i386/athlon.md, config/ia64/ia64.md,
	config/rs6000/rs6000.c, config/s390/s390.c, config/spu/spu.md,
	df-problems.c, df.h, fold-const.c, ipa-cp.c, ipa-inline.c,
	ipa-prop.h, see.c, struct-equiv.c, tree-inline.c,
	tree-ssa-loop-niter.c, tree-vect-analyze.c,
	tree-vect-transform.c: Fix comment typos.

From-SVN: r122080
Author: Kazu Hirata <kazu@codesourcery.com>
Date:   2007-02-18 00:52:51 +0000
parent aabd86cb15
commit ea2c620c64
21 changed files with 35 additions and 25 deletions

ChangeLog

@@ -1,3 +1,13 @@
+2007-02-18  Kazu Hirata  <kazu@codesourcery.com>
+
+	* cfgloop.c, config/alpha/alpha.c, config/bfin/bfin.c,
+	config/i386/athlon.md, config/ia64/ia64.md,
+	config/rs6000/rs6000.c, config/s390/s390.c, config/spu/spu.md,
+	df-problems.c, df.h, fold-const.c, ipa-cp.c, ipa-inline.c,
+	ipa-prop.h, see.c, struct-equiv.c, tree-inline.c,
+	tree-ssa-loop-niter.c, tree-vect-analyze.c,
+	tree-vect-transform.c: Fix comment typos.
+
 2007-02-17  Kazu Hirata  <kazu@codesourcery.com>
 
 	* sched-deps.c (find_insn_list): Remove.

cfgloop.c

@@ -701,7 +701,7 @@ disambiguate_multiple_latches (struct loop *loop)
 {
   edge e;
 
-  /* We eliminate the mutiple latches by splitting the header to the forwarder
+  /* We eliminate the multiple latches by splitting the header to the forwarder
      block F and the rest R, and redirecting the edges. There are two cases:
 
      1) If there is a latch edge E that corresponds to a subloop (we guess

config/alpha/alpha.c

@@ -4434,7 +4434,7 @@ emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
   return ret;
 }
 
-/* Expand an an atomic fetch-and-operate pattern. CODE is the binary operation
+/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
    to perform. MEM is the memory on which to operate. VAL is the second
    operand of the binary operator. BEFORE and AFTER are optional locations to
    return the value of MEM either before of after the operation. SCRATCH is
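
The fetch-and-operate shape this comment documents can be pictured in plain C11. Below is an illustrative sketch with a hypothetical helper name, not the alpha.c emit code:

    #include <stdatomic.h>

    /* Illustrative sketch: apply a binary operation (here +) to *mem and
       val atomically, optionally reporting the value *mem held before
       and after the operation.  Hypothetical helper, not GCC's.  */
    static void
    atomic_fetch_op_sketch (atomic_long *mem, long val,
                            long *before, long *after)
    {
      long old = atomic_fetch_add (mem, val);  /* *mem before the op */
      if (before)
        *before = old;
      if (after)
        *after = old + val;                    /* *mem after the op */
    }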

config/bfin/bfin.c

@@ -1537,7 +1537,7 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
    For args passed entirely in registers or entirely in memory, zero.
 
    Refer VDSP C Compiler manual, our ABI.
-   First 3 words are in registers. So, if a an argument is larger
+   First 3 words are in registers. So, if an argument is larger
    than the registers available, it will span the register and
    stack. */

config/i386/athlon.md

@@ -603,7 +603,7 @@
 			 "athlon-direct,athlon-fploadk8,athlon-fstore")
 ;; On AMDFAM10 all double, single and integer packed and scalar SSEx data
 ;; loads generated are direct path, latency of 2 and do not use any FP
-;; executions units. No seperate entries for movlpx/movhpx loads, which
+;; executions units. No separate entries for movlpx/movhpx loads, which
 ;; are direct path, latency of 4 and use the FADD/FMUL FP execution units,
 ;; as they will not be generated.
 (define_insn_reservation "athlon_sseld_amdfam10" 2
@@ -637,7 +637,7 @@
 			 "athlon-direct,(athlon-fpsched+athlon-agu),(athlon-fstore+athlon-store)")
 ;; On AMDFAM10 all double, single and integer packed SSEx data stores
 ;; generated are all double path, latency of 2 and use the FSTORE FP
-;; execution unit. No entries seperate for movupx/movdqu, which are
+;; execution unit. No entries separate for movupx/movdqu, which are
 ;; vector path, latency of 3 and use the FSTORE*2 FP execution unit,
 ;; as they will not be generated.
 (define_insn_reservation "athlon_ssest_amdfam10" 2

config/ia64/ia64.md

@@ -476,7 +476,7 @@
 ;; Define register predicate prefix.
 ;; We can generate speculative loads only for general and fp registers - this
-;; is constrainted in ia64.c: ia64_speculate_insn ().
+;; is constrained in ia64.c: ia64_speculate_insn ().
 
 (define_mode_attr reg_pred_prefix [(BI "gr") (QI "gr") (HI "gr") (SI "gr") (DI "grfr") (SF "grfr") (DF "grfr") (XF "fr") (TI "fr")])
 
 (define_mode_attr ld_class [(BI "ld") (QI "ld") (HI "ld") (SI "ld") (DI "ld,fld") (SF "fld,ld") (DF "fld,ld") (XF "fld") (TI "fldp")])

config/rs6000/rs6000.c

@@ -12705,7 +12705,7 @@ emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
   emit_insn (fn (res, mem, val));
 }
 
-/* Expand an an atomic fetch-and-operate pattern. CODE is the binary operation
+/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
    to perform. MEM is the memory on which to operate. VAL is the second
    operand of the binary operator. BEFORE and AFTER are optional locations to
    return the value of MEM either before of after the operation. SCRATCH is

config/s390/s390.c

@@ -4228,7 +4228,7 @@ s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx ne
 }
 
 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
-   and VAL the value to play with. If AFTER is true then store the the value
+   and VAL the value to play with. If AFTER is true then store the value
    MEM holds after the operation, if AFTER is false then store the value MEM
    holds before the operation. If TARGET is zero then discard that value, else
    store it to TARGET. */
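
The AFTER flag described above distinguishes the pre- and post-operation value of MEM. As a rough illustration using GCC's __sync builtins (the mapping below is for exposition only, not code from this commit):

    /* Sketch: AFTER selects between the post-operation value of *mem
       (like __sync_add_and_fetch) and the pre-operation value
       (like __sync_fetch_and_add).  Illustrative helper only.  */
    long
    atomic_add_sketch (long *mem, long val, int after)
    {
      return after ? __sync_add_and_fetch (mem, val)
                   : __sync_fetch_and_add (mem, val);
    }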

config/spu/spu.md

@@ -56,7 +56,7 @@
 		     "pipe0, fp, nothing*5")
 
 ;; The behavior of the double precision is that both pipes stall
-;; for 6 cycles and the the rest of the operation pipelines for
+;; for 6 cycles and the rest of the operation pipelines for
 ;; 7 cycles. The simplest way to model this is to simply ignore
 ;; the 6 cyle stall.
 (define_insn_reservation "FPD" 7 (eq_attr "type" "fpd")

df-problems.c

@@ -1968,7 +1968,7 @@ df_ur_init (struct dataflow *dflow, bitmap all_blocks)
 }
 
-/* Or in the stack regs, hard regs and early clobber regs into the the
+/* Or in the stack regs, hard regs and early clobber regs into the
    ur_in sets of all of the blocks. */
 
 static void
@@ -2550,7 +2550,7 @@ df_urec_init (struct dataflow *dflow, bitmap all_blocks)
 }
 
-/* Or in the stack regs, hard regs and early clobber regs into the the
+/* Or in the stack regs, hard regs and early clobber regs into the
    ur_in sets of all of the blocks. */
 
 static void

df.h

@@ -562,7 +562,7 @@ struct df_urec_bb_info
 {
   /* Local sets to describe the basic blocks. */
   bitmap earlyclobber;   /* The set of registers that are referenced
-                            with an an early clobber mode. */
+                            with an early clobber mode. */
   /* Kill and gen are defined as in the UR problem. */
   bitmap kill;
   bitmap gen;

fold-const.c

@@ -8860,8 +8860,8 @@ fold_comparison (enum tree_code code, tree type, tree op0, tree op1)
     }
 
   /* If this is a comparison of complex values and both sides
-     are COMPLEX_CST, do the comparision by parts to fold the
-     comparision. */
+     are COMPLEX_CST, do the comparison by parts to fold the
+     comparison. */
   if ((code == EQ_EXPR || code == NE_EXPR)
       && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
       && TREE_CODE (arg0) == COMPLEX_CST
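
Comparing "by parts" means comparing the real and imaginary components separately. An illustrative sketch with a hypothetical struct, not GCC's COMPLEX_CST trees:

    #include <stdbool.h>

    /* Sketch: folding an equality comparison of two complex constants
       by parts.  EQ holds iff both parts compare equal; NE is the
       negation.  Hypothetical type, not a GCC tree node.  */
    struct cplx { double re, im; };

    static bool
    complex_eq_by_parts (struct cplx a, struct cplx b)
    {
      return a.re == b.re && a.im == b.im;
    }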

ipa-cp.c

@@ -65,7 +65,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
    arguments
    of the callsite. There are three types of values :
    Formal - the caller's formal parameter is passed as an actual argument.
-   Constant - a constant is passed as a an actual argument.
+   Constant - a constant is passed as an actual argument.
    Unknown - neither of the above.
 
    In order to compute the jump functions, we need the modify information for
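
The three value kinds listed in this comment could be modeled as a tagged union. The sketch below uses made-up names for illustration; it is not the actual ipa-prop.h definition:

    /* Sketch: the three jump-function value kinds the comment lists.
       Illustrative names, not the real ipa-prop.h declarations.  */
    enum jump_func_kind { JF_FORMAL, JF_CONSTANT, JF_UNKNOWN };

    struct jump_func_sketch
    {
      enum jump_func_kind kind;
      union
      {
        int formal_id;   /* JF_FORMAL: index of the caller's parameter.  */
        long constant;   /* JF_CONSTANT: the constant being passed.  */
      } u;               /* JF_UNKNOWN carries no payload.  */
    };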

ipa-inline.c

@@ -1345,7 +1345,7 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
 	      continue;
 	    }
 	  /* When the function body would grow and inlining the function won't
-	     elliminate the need for offline copy of the function, don't inline.
+	     eliminate the need for offline copy of the function, don't inline.
 	     */
 	  if (mode == INLINE_SIZE
 	      && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)

ipa-prop.h

@@ -29,7 +29,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
 /* A jump function for a callsite represents the values passed as actual
    arguments of the callsite. There are three main types of values :
    Formal - the caller's formal parameter is passed as an actual argument.
-   Constant - a constant is passed as a an actual argument.
+   Constant - a constant is passed as an actual argument.
    Unknown - neither of the above.
    Integer and real constants are represented as CONST_IPATYPE and Fortran
    constants are represented as CONST_IPATYPE_REF. */

see.c

@@ -3170,7 +3170,7 @@ see_store_reference_and_extension (rtx ref_insn, rtx se_insn,
 
    A definition is relevant if its root has
    ((entry_type == SIGN_EXTENDED_DEF) || (entry_type == ZERO_EXTENDED_DEF)) and
-   his source_mode is not narrower then the the roots source_mode.
+   his source_mode is not narrower then the roots source_mode.
 
    Return the number of relevant defs or negative number if something bad had
    happened and the optimization should be aborted. */

struct-equiv.c

@@ -344,7 +344,7 @@ note_local_live (struct equiv_info *info, rtx x, rtx y, int rvalue)
   return x_change;
 }
 
-/* Check if *XP is equivalent to Y. Until an an unreconcilable difference is
+/* Check if *XP is equivalent to Y. Until an unreconcilable difference is
    found, use in-group changes with validate_change on *XP to make register
    assignments agree. It is the (not necessarily direct) callers
    responsibility to verify / confirm / cancel these changes, as appropriate.
@@ -570,7 +570,7 @@ rtx_equiv_p (rtx *xp, rtx y, int rvalue, struct equiv_info *info)
 	return false;
       x_dest1 = XEXP (x, 0);
       /* validate_change might have changed the destination. Put it back
-	 so that we can do a proper match for its role a an input. */
+	 so that we can do a proper match for its role as an input. */
       XEXP (x, 0) = x_dest0;
       if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 1, info))
 	return false;

tree-inline.c

@@ -2288,7 +2288,7 @@ init_inline_once (void)
   /* Estimating time for call is difficult, since we have no idea what the
      called function does. In the current uses of eni_time_weights,
      underestimating the cost does less harm than overestimating it, so
-     we choose a rather small walue here. */
+     we choose a rather small value here. */
   eni_time_weights.call_cost = 10;
   eni_time_weights.div_mod_cost = 10;
   eni_time_weights.switch_cost = 4;

tree-ssa-loop-niter.c

@@ -1060,7 +1060,7 @@ number_of_iterations_exit (struct loop *loop, edge exit,
     return false;
 
   /* We don't want to see undefined signed overflow warnings while
-     computing the nmber of iterations. */
+     computing the number of iterations. */
   fold_defer_overflow_warnings ();
 
   iv0.base = expand_simple_operations (iv0.base);

tree-vect-analyze.c

@@ -1438,7 +1438,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
      can make all data references satisfy vect_supportable_dr_alignment.
      If so, update data structures as needed and return true. Note that
      at this time vect_supportable_dr_alignment is known to return false
-     for a a misaligned write.
+     for a misaligned write.
 
      B) If peeling wasn't possible and there is a data reference with an
      unknown misalignment that does not satisfy vect_supportable_dr_alignment
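
The peeling in part A can be pictured as follows; an illustrative C sketch under an assumed vector width VF, not the vectorizer's actual transformation:

    #include <stdint.h>

    /* Sketch: peel scalar iterations until the access is aligned, so the
       remaining loop only sees aligned references.  VF is an assumed
       vector width in elements; illustrative only.  */
    #define VF 4
    void
    peel_for_alignment_sketch (float *a, int n)
    {
      int i = 0;
      /* Peeled prologue: handle the misaligned head scalarly.  */
      while (i < n && ((uintptr_t) &a[i] % (VF * sizeof (float))) != 0)
        {
          a[i] = a[i] * 2.0f;
          i++;
        }
      /* Main loop: &a[i] is now vector-aligned and vectorizable.  */
      for (; i < n; i++)
        a[i] = a[i] * 2.0f;
    }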
@@ -1812,7 +1812,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
 	{
 	  /* Skip same data-refs. In case that two or more stmts share data-ref
 	     (supported only for loads), we vectorize only the first stmt, and
-	     the rest get their vectorized loads from the the first one. */
+	     the rest get their vectorized loads from the first one. */
 	  if (!tree_int_cst_compare (DR_INIT (data_ref),
 				     DR_INIT (STMT_VINFO_DATA_REF (
 						   vinfo_for_stmt (next)))))

tree-vect-transform.c

@@ -1073,7 +1073,7 @@ get_initial_def_for_reduction (tree stmt, tree init_val, tree *scalar_def)
    REDUCTION_PHI is the phi-node that carries the reduction computation.
 
    This function:
-   1. Creates the reduction def-use cycle: sets the the arguments for
+   1. Creates the reduction def-use cycle: sets the arguments for
       REDUCTION_PHI:
       The loop-entry argument is the vectorized initial-value of the reduction.
      The loop-latch argument is VECT_DEF - the vector of partial sums.
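
In scalar terms, the reduction cycle this comment describes looks like the sketch below (illustrative C only; 'sum' stands in for REDUCTION_PHI, and the latch-side value for VECT_DEF):

    /* Sketch: the reduction cycle in scalar form.  On loop entry the
       accumulator holds the initial value; on each latch iteration it
       holds the partial sum.  Illustrative only, not vectorizer output.  */
    double
    reduction_sketch (const double *a, int n, double init_val)
    {
      double sum = init_val;   /* loop-entry argument of the phi */
      for (int i = 0; i < n; i++)
        sum = sum + a[i];      /* loop-latch argument: the partial sum */
      return sum;
    }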