c-common.c, [...]: Fix comment typos.
* c-common.c, c-parser.c, cfgbuild.c, cfghooks.c, cfghooks.h, cfgrtl.c, cgraphunit.c, ddg.c, expr.h, gcse.c, ggc-page.c, ggc-zone.c, gimplify.c, ipa-inline.c, longlong.h, targhooks.c, tree-flow-inline.h, tree-pass.h, tree-ssa-dse.c, tree-ssa-loop-im.c, tree-ssa-loop-ivopts.c, tree-ssa-operands.c, tree-vect-analyze.c, tree-vect-transform.c, tree-vectorizer.c, tree.c, config/arm/arm.c, config/bfin/bfin.c, config/frv/frv.c, config/frv/frv.md, config/i386/i386.c, config/i386/sse.md, config/m68hc11/m68hc11.c, config/m68hc11/m68hc11.h, config/mcore/mcore.c, config/mips/mips.c, config/mips/mips.md, config/rs6000/darwin-ldouble.c, config/rs6000/rs6000.c, config/rs6000/rs6000.h, config/sh/sh.c, config/sh/sh.md, config/sh/ushmedia.h, config/sparc/sparc.c, config/sparc/sparc.md, config/stormy16/stormy-abi: Fix comment typos. Follow spelling conventions.

* doc/invoke.texi, doc/tm.texi, doc/tree-ssa.texi: Fix typos. Follow spelling conventions.

From-SVN: r100218
parent e689b87000
commit 0fa2e4df47
50 changed files with 90 additions and 70 deletions
@@ -1,3 +1,24 @@
+2005-05-26 Kazu Hirata <kazu@cs.umass.edu>
+
+* c-common.c, c-parser.c, cfgbuild.c, cfghooks.c, cfghooks.h,
+cfgrtl.c, cgraphunit.c, ddg.c, expr.h, gcse.c, ggc-page.c,
+ggc-zone.c, gimplify.c, ipa-inline.c, longlong.h, targhooks.c,
+tree-flow-inline.h, tree-pass.h, tree-ssa-dse.c,
+tree-ssa-loop-im.c, tree-ssa-loop-ivopts.c,
+tree-ssa-operands.c, tree-vect-analyze.c,
+tree-vect-transform.c, tree-vectorizer.c, tree.c,
+config/arm/arm.c, config/bfin/bfin.c, config/frv/frv.c,
+config/frv/frv.md, config/i386/i386.c, config/i386/sse.md,
+config/m68hc11/m68hc11.c, config/m68hc11/m68hc11.h,
+config/mcore/mcore.c, config/mips/mips.c, config/mips/mips.md,
+config/rs6000/darwin-ldouble.c, config/rs6000/rs6000.c,
+config/rs6000/rs6000.h, config/sh/sh.c, config/sh/sh.md,
+config/sh/ushmedia.h, config/sparc/sparc.c,
+config/sparc/sparc.md, config/stormy16/stormy-abi: Fix comment
+typos. Follow spelling conventions.
+* doc/invoke.texi, doc/tm.texi, doc/tree-ssa.texi: Fix typos.
+Follow spelling conventions.
+
 2005-05-26 David Ung <davidu@mips.com>
 
 * config/mips/mips.c (mips_use_ins_ext_p): New helper function
@@ -1445,7 +1445,7 @@ check_case_value (tree value)
 if the case is not a case range.
 The caller has to make sure that we are not called with NULL for
 CASE_LOW_P (i.e. the default case).
-Returns true if the case label is in range of ORIG_TYPE (satured or
+Returns true if the case label is in range of ORIG_TYPE (saturated or
 untouched) or false if the label is out of range. */
 
 static bool
@@ -2145,7 +2145,7 @@ c_parser_direct_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
 which is resolved in the direction of treating it as a typedef
 name. If a close parenthesis follows, it is also an empty
 parameter list, as the syntax does not permit empty abstract
-declarators. Otherwise, it is a parenthesised declarator (in
+declarators. Otherwise, it is a parenthesized declarator (in
 which case the analysis may be repeated inside it, recursively).
 
 ??? There is an ambiguity in a parameter declaration "int
@@ -2155,7 +2155,7 @@ c_parser_direct_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
 documenting. At present we follow an accident of the old
 parser's implementation, whereby the first parameter must have
 some declaration specifiers other than just attributes. Thus as
-a parameter declaration it is treated as a parenthesised
+a parameter declaration it is treated as a parenthesized
 parameter named x, and as an abstract declarator it is
 rejected.
 
@@ -5325,7 +5325,7 @@ c_parser_expr_list (c_parser *parser)
 
 "@interface identifier (" must start "@interface identifier (
 identifier ) ...": objc-methodprotolist in the first production may
-not start with a parenthesised identifier as a declarator of a data
+not start with a parenthesized identifier as a declarator of a data
 definition with no declaration specifiers if the objc-superclass,
 objc-protocol-refs and objc-class-instance-variables are omitted. */
 
@@ -225,8 +225,7 @@ enum state {
 
 /* Basic blocks that may need splitting (due to a label appearing in
 the middle, etc) belong to this state. After splitting them,
-make_edges will create create edges going out of them as
-needed. */
+make_edges will create edges going out of them as needed. */
 BLOCK_TO_SPLIT
 };
 
@@ -825,7 +825,7 @@ execute_on_shrinking_pred (edge e)
 }
 
 /* This is used inside loop versioning when we want to insert
-stmts/insns on the edges, which have a different behaviour
+stmts/insns on the edges, which have a different behavior
 in tree's and in RTL, so we made a CFG hook. */
 void
 lv_flush_pending_stmts (edge e)
@@ -119,7 +119,7 @@ struct cfg_hooks
 unsigned int *n_to_remove,
 int flags);
 
-/* Add conition to new basic block and update CFG used in loop
+/* Add condition to new basic block and update CFG used in loop
 versioning. */
 void (*lv_add_condition_to_bb) (basic_block, basic_block, basic_block,
 void *);
@@ -1190,7 +1190,7 @@ rtl_tidy_fallthru_edge (edge e)
 
 /* ??? In a late-running flow pass, other folks may have deleted basic
 blocks by nopping out blocks, leaving multiple BARRIERs between here
-and the target label. They ought to be chastized and fixed.
+and the target label. They ought to be chastised and fixed.
 
 We can also wind up with a sequence of undeletable labels between
 one block and the next.
@@ -275,7 +275,7 @@ cgraph_varpool_analyze_pending_decls (void)
 /* Optimization of function bodies might've rendered some variables as
 unnecessary so we want to avoid these from being compiled.
 
-This is done by prunning the queue and keeping only the variables that
+This is done by pruning the queue and keeping only the variables that
 really appear needed (ie they are either externally visible or referenced
 by compiled function). Re-doing the reachability analysis on variables
 brings back the remaining variables referenced by these. */
@@ -6976,7 +6976,7 @@ add_minipool_forward_ref (Mfix *fix)
 /* If this fix's address is greater than the address of the first
 entry, then we can't put the fix in this pool. We subtract the
 size of the current fix to ensure that if the table is fully
-packed we still have enough room to insert this value by suffling
+packed we still have enough room to insert this value by shuffling
 the other fixes forwards. */
 if (minipool_vector_head &&
 fix->address >= minipool_vector_head->max_address - fix->fix_size)
@@ -13492,7 +13492,7 @@ thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
 }
-/* Optimisation: If we are not pushing any low registers but we are going
+/* Optimization: If we are not pushing any low registers but we are going
 to push some high registers then delay our first push. This will just
 be a push of LR and we can combine it with the push of the first high
 register. */
@@ -14436,7 +14436,7 @@ arm_cxx_guard_type (void)
 }
 
 
-/* The EABI says test the least significan bit of a guard variable. */
+/* The EABI says test the least significant bit of a guard variable. */
 
 static bool
 arm_cxx_guard_mask_bit (void)
@@ -2466,7 +2466,7 @@ bfin_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
 which perform the memory reference, are allowed to execute before the
 jump condition is evaluated.
 Therefore, we must insert additional instructions in all places where this
-could lead to incorrect behaviour. The manual recommends CSYNC, while
+could lead to incorrect behavior. The manual recommends CSYNC, while
 VDSP seems to use NOPs (even though its corresponding compiler option is
 named CSYNC).
 
@@ -8197,7 +8197,7 @@ frv_int_to_acc (enum insn_code icode, int opnum, rtx opval)
 rtx reg;
 int i;
 
-/* ACCs and ACCGs are implicity global registers if media intrinsics
+/* ACCs and ACCGs are implicit global registers if media intrinsics
 are being used. We set up this lazily to avoid creating lots of
 unnecessary call_insn rtl in non-media code. */
 for (i = 0; i <= ACC_MASK; i++)
@@ -8292,7 +8292,7 @@ frv_read_iacc_argument (enum machine_mode mode, tree *arglistptr)
 op = const0_rtx;
 }
 
-/* IACCs are implicity global registers. We set up this lazily to
+/* IACCs are implicit global registers. We set up this lazily to
 avoid creating lots of unnecessary call_insn rtl when IACCs aren't
 being used. */
 regno = INTVAL (op) + IACC_FIRST;
@@ -8622,7 +8622,7 @@ frv_expand_mdpackh_builtin (tree arglist, rtx target)
 op0 = gen_reg_rtx (DImode);
 op1 = gen_reg_rtx (DImode);
 
-/* The high half of each word is not explicitly initialised, so indicate
+/* The high half of each word is not explicitly initialized, so indicate
 that the input operands are not live before this point. */
 emit_insn (gen_rtx_CLOBBER (DImode, op0));
 emit_insn (gen_rtx_CLOBBER (DImode, op1));
@@ -1669,7 +1669,7 @@
 ;; Note - it is best to only have one movsi pattern and to handle
 ;; all the various contingencies by the use of alternatives. This
 ;; allows reload the greatest amount of flexibility (since reload will
-;; only choose amoungst alternatives for a selected insn, it will not
+;; only choose amongst alternatives for a selected insn, it will not
 ;; replace the insn with another one).
 
 ;; Unfortunately, we do have to separate out load-type moves from the rest,
@@ -1511,7 +1511,7 @@ override_options (void)
 target_flags &= ~MASK_NO_FANCY_MATH_387;
 
 /* Likewise, if the target doesn't have a 387, or we've specified
-software floating point, don't use 387 inline instrinsics. */
+software floating point, don't use 387 inline intrinsics. */
 if (!TARGET_80387)
 target_flags |= MASK_NO_FANCY_MATH_387;
 
@@ -1847,7 +1847,7 @@ ix86_comp_type_attributes (tree type1, tree type2)
 return 1;
 }
 
-/* Return the regparm value for a fuctio with the indicated TYPE and DECL.
+/* Return the regparm value for a function with the indicated TYPE and DECL.
 DECL may be NULL when calling function indirectly
 or considering a libcall. */
 
@@ -15265,7 +15265,7 @@ ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
 if (from == to)
 return false;
 
-/* x87 registers can't do subreg at all, as all values are reformated
+/* x87 registers can't do subreg at all, as all values are reformatted
 to extended precision. */
 if (MAYBE_FLOAT_CLASS_P (class))
 return true;
@@ -1744,7 +1744,7 @@
 (set_attr "mode" "V2DF")])
 
 ;; Also define scalar versions. These are used for abs, neg, and
-;; conditional move. Using subregs into vector modes causes regiser
+;; conditional move. Using subregs into vector modes causes register
 ;; allocation lossage. These patterns do not allow memory operands
 ;; because the native instructions read the full 128-bits.
 
@@ -5018,7 +5018,7 @@ m68hc11_reorg (void)
 replacement, unshare everything. */
 unshare_all_rtl_again (first);
 
-/* Force a split of all splitable insn. This is necessary for the
+/* Force a split of all splittable insn. This is necessary for the
 Z register replacement mechanism because we end up with basic insns. */
 split_all_insns_noflow ();
 split_done = 1;
@@ -450,7 +450,7 @@ SOFT_REG_FIRST+28, SOFT_REG_FIRST+29,SOFT_REG_FIRST+30,SOFT_REG_FIRST+31
 For any two classes, it is very desirable that there be another
 class that represents their union. */
 
-/* The M68hc11 has so fiew registers that it's not possible for GCC to
+/* The M68hc11 has so few registers that it's not possible for GCC to
 do any register allocation without breaking. We extend the processor
 registers by having soft registers. These registers are treated as
 hard registers by GCC but they are located in memory and accessed by page0
@@ -2117,7 +2117,7 @@ mcore_expand_epilog (void)
 
 /* The MCORE cannot load a large constant into a register, constants have to
 come from a pc relative load. The reference of a pc relative load
-instruction must be less than 1k infront of the instruction. This
+instruction must be less than 1k in front of the instruction. This
 means that we often have to dump a constant inside a function, and
 generate code to branch around it.
 
@@ -4042,7 +4042,7 @@ mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
 left-side instructions (lwl, swl, ldl, sdl).
 
 *RIGHT is a QImode reference to the opposite end of the field and
-can be used in the parterning right-side instruction. */
+can be used in the patterning right-side instruction. */
 
 static bool
 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
@@ -5367,7 +5367,7 @@ beq\t%2,%.,1b\;\
 
 ; Thread-Local Storage
 
-; The TLS base pointer is acessed via "rdhwr $v1, $29". No current
+; The TLS base pointer is accessed via "rdhwr $v1, $29". No current
 ; MIPS architecture defines this register, and no current
 ; implementation provides it; instead, any OS which supports TLS is
 ; expected to trap and emulate this instruction. rdhwr is part of the
@@ -68,7 +68,7 @@ extern long double __gcc_qmul (double, double, double, double);
 extern long double __gcc_qdiv (double, double, double, double);
 
 #if defined __ELF__ && defined SHARED
-/* Provide definitions of the old symbol names to statisfy apps and
+/* Provide definitions of the old symbol names to satisfy apps and
 shared libs built against an older libgcc. To access the _xlq
 symbols an explicit version reference is needed, so these won't
 satisfy an unadorned reference like _xlqadd. If dot symbols are
@@ -16120,7 +16120,7 @@ force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
 between the insns.
 
 The function estimates the group boundaries that the processor will form as
-folllows: It keeps track of how many vacant issue slots are available after
+follows: It keeps track of how many vacant issue slots are available after
 each insn. A subsequent insn will start a new group if one of the following
 4 cases applies:
 - no more vacant issue slots remain in the current dispatch group.
@@ -136,7 +136,7 @@
 #define TARGET_MFCRF 0
 #endif
 
-/* Define TARGET_POPCNTB if the target assembler does not suppport the
+/* Define TARGET_POPCNTB if the target assembler does not support the
 popcount byte instruction. */
 
 #ifndef HAVE_AS_POPCNTB
@@ -2909,7 +2909,7 @@ gen_datalabel_ref (rtx sym)
 
 /* The SH cannot load a large constant into a register, constants have to
 come from a pc relative load. The reference of a pc relative load
-instruction must be less than 1k infront of the instruction. This
+instruction must be less than 1k in front of the instruction. This
 means that we often have to dump a constant inside a function, and
 generate code to branch around it.
 
@@ -1106,7 +1106,7 @@
 rtx set1, set2;
 rtx replacements[4];
 
-/* We want to replace occurences of operands[0] with operands[1] and
+/* We want to replace occurrences of operands[0] with operands[1] and
 operands[2] with operands[0] in operands[4]/operands[5].
 Doing just two replace_rtx calls naively would result in the second
 replacement undoing all that the first did if operands[1] and operands[2]
@@ -720,14 +720,14 @@ sh_media_ADDZ_L (unsigned int mm, unsigned int mn)
 return mm + mn;
 }
 
-/* NOP and Synchronization instrinsics not implemented here. */
+/* NOP and Synchronization intrinsics not implemented here. */
 
 static __inline__ void sh_media_PREFO(void *mm, int s)
 {
 __builtin_sh_media_PREFO (mm + s, 0, 0);
 }
 
-/* Event Handling instrinsics not implemented here. */
+/* Event Handling intrinsics not implemented here. */
 
 /* Old asm stuff */
 
@@ -7839,7 +7839,7 @@ sparc_vis_init_builtins (void)
 }
 
 /* Handle TARGET_EXPAND_BUILTIN target hook.
-Expand builtin functions for sparc instrinsics. */
+Expand builtin functions for sparc intrinsics. */
 
 static rtx
 sparc_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
@@ -854,7 +854,7 @@
 
 ;; The SEQ and SNE patterns are special because they can be done
 ;; without any branching and do not involve a COMPARE. We want
-;; them to always use the splitz below so the results can be
+;; them to always use the splits below so the results can be
 ;; scheduled.
 
 (define_insn_and_split "*snesi_zero"
@@ -8363,7 +8363,7 @@
 (set_attr "fptype" "double")])
 
 ;; Using faligndata only makes sense after an alignaddr since the choice of
-;; bytes to take out of each operand is dependant on the results of the last
+;; bytes to take out of each operand is dependent on the results of the last
 ;; alignaddr.
 (define_insn "faligndata<V64I:mode>_vis"
 [(set (match_operand:V64I 0 "register_operand" "=e")
@@ -159,7 +159,7 @@ the reloc refers, 'A' is the addend, and 'P' represents the place of
 the storage unit being relocated.
 
 In the 'Overflow' column, 'none' means that any overflow of the
-computation perfomed in the 'Calculation' column is ignored.
+computation performed in the 'Calculation' column is ignored.
 'signed' means that the overflow is only reported if it happens when
 the values are treated as signed quantities. 'unsigned' is the same,
 except that the values are treated as unsigned quantities. 'either'
@@ -302,7 +302,7 @@ add_deps_for_use (ddg_ptr g, struct df *df, struct ref *use)
 if (df_find_def (df, g->nodes[i].insn, use->reg))
 return;
 /* We must not add ANTI dep when there is an intra-loop TRUE dep in
-the opozite direction. If the first_def reaches the USE then there is
+the opposite direction. If the first_def reaches the USE then there is
 such a dep. */
 if (! bitmap_bit_p (bb_info->rd_gen, first_def->id))
 create_ddg_dep_no_link (g, use_node, def_node, ANTI_DEP, REG_DEP, 1);
@@ -4470,7 +4470,7 @@ See below for a documentation of the individual
 parameters controlling inlining.
 
 @emph{Note:} pseudo instruction represents, in this particular context, an
-abstract measurement of function's size. In no way, it represents a count
+abstract measurement of function's size. In no way does it represent a count
 of assembly instructions and as such its exact meaning might change from one
 release to an another.
 
@@ -9519,7 +9519,7 @@ low-overhead loop.
 Many targets use special registers for low-overhead looping. This function
 should return false for any instruction that clobbers these.
 By default, the RTL loop optimizer does not use a present doloop pattern for
-loops containing function calls or brach on table instructions.
+loops containing function calls or branch on table instructions.
 @end deftypefn
 
 @defmac MD_CAN_REDIRECT_BRANCH (@var{branch1}, @var{branch2})
@@ -1282,7 +1282,7 @@ After the replacement mappings have been registered and new symbols
 marked for renaming, a call to @code{update_ssa} makes the registered
 changes. This can be done with an explicit call or by creating
 @code{TODO} flags in the @code{tree_opt_pass} structure for your pass.
-There are several @code{TODO} flags that control the behaviour of
+There are several @code{TODO} flags that control the behavior of
 @code{update_ssa}:
 
 @itemize @bullet
@@ -1304,7 +1304,7 @@ There are several @code{TODO} flags that control the behaviour of
 
 
 @item @code{TODO_update_ssa_full_phi}. Insert PHI nodes everywhere
-they are needed. No prunning of the IDF is done. This is used
+they are needed. No pruning of the IDF is done. This is used
 by passes that need the PHI nodes for @code{O_j} even if it
 means that some arguments will come from the default definition
 of @code{O_j}'s symbol (e.g., @code{pass_linear_transform})@.
@@ -588,7 +588,7 @@ extern rtx eliminate_constant_term (rtx, rtx *);
 by emitting insns to perform arithmetic if nec. */
 extern rtx memory_address (enum machine_mode, rtx);
 
-/* Like `memory_address' but pretent `flag_force_addr' is 0. */
+/* Like `memory_address' but pretend `flag_force_addr' is 0. */
 extern rtx memory_address_noforce (enum machine_mode, rtx);
 
 /* Return a memory reference like MEMREF, but with its mode changed
@@ -507,11 +507,11 @@ static int gcse_subst_count;
 static int gcse_create_count;
 /* Number of local constants propagated. */
 static int local_const_prop_count;
-/* Number of local copys propagated. */
+/* Number of local copies propagated. */
 static int local_copy_prop_count;
 /* Number of global constants propagated. */
 static int global_const_prop_count;
-/* Number of global copys propagated. */
+/* Number of global copies propagated. */
 static int global_copy_prop_count;
 
 /* For available exprs */
@@ -75,7 +75,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #define USING_MALLOC_PAGE_GROUPS
 #endif
 
-/* Stategy:
+/* Strategy:
 
 This garbage-collecting allocator allocates objects on one of a set
 of pages. Each page can allocate objects of a single size only;
@@ -100,7 +100,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 ggc_set_mark for any object in the garbage zone, which cuts off
 marking quickly. */
 
-/* Stategy:
+/* Strategy:
 
 This garbage-collecting allocator segregates objects into zones.
 It also segregates objects into "large" and "small" bins. Large
@@ -3017,7 +3017,7 @@ gimplify_modify_expr (tree *expr_p, tree *pre_p, tree *post_p, bool want_value)
 if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
 {
 /* If we've somehow already got an SSA_NAME on the LHS, then
-we're probably modifying it twice. Not good. */
+we're probably modified it twice. Not good. */
 gcc_assert (TREE_CODE (*to_p) != SSA_NAME);
 *to_p = make_ssa_name (*to_p, *expr_p);
 }
@@ -320,13 +320,13 @@ cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
 
 /* A cost model driving the inlining heuristics in a way so the edges with
 smallest badness are inlined first. After each inlining is performed
-the costs of all caller edges of nodes affected are recompted so the
+the costs of all caller edges of nodes affected are recomputed so the
 metrics may accurately depend on values such as number of inlinable callers
 of the function or function body size.
 
 For the moment we use estimated growth caused by inlining callee into all
 it's callers for driving the inlining but once we have loop depth or
-frequency information readilly available we should do better.
+frequency information readily available we should do better.
 
 With profiling we use number of executions of each edge to drive the cost.
 We also should distinguish hot and cold calls where the cold calls are
@@ -344,7 +344,7 @@ cgraph_edge_badness (struct cgraph_edge *edge)
 cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
 growth -= edge->caller->global.insns;
 
-/* Always preffer inlining saving code size. */
+/* Always prefer inlining saving code size. */
 if (growth <= 0)
 return INT_MIN - growth;
 return ((int)((double)edge->count * INT_MIN / max_count)) / growth;
@@ -416,7 +416,7 @@ update_callee_keys (fibheap_t heap, struct cgraph_node *node,
 }
 
 /* Enqueue all recursive calls from NODE into priority queue depending on
-how likely we want to recursivly inline the call. */
+how likely we want to recursively inline the call. */
 
 static void
 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
@@ -608,7 +608,7 @@ cgraph_decide_inlining_of_small_functions (void)
 continue;
 
 /* When not having profile info ready we don't weight by any way the
-possition of call in procedure itself. This means if call of
+position of call in procedure itself. This means if call of
 function A from function B seems profitable to inline, the recursive
 call of function A in inline copy of A in B will look profitable too
 and we end up inlining until reaching maximal function growth. This
@@ -46,8 +46,8 @@
 
 /* Define auxiliary asm macros.
 
-1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two
-UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype
+1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
 word product in HIGH_PROD and LOW_PROD.
 
 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
@@ -264,7 +264,7 @@ default_scalar_mode_supported_p (enum machine_mode mode)
 
 /* TRUE if INSN insn is valid within a low-overhead loop.
 
-This function checks wheter a given INSN is valid within a low-overhead
+This function checks whether a given INSN is valid within a low-overhead
 loop. A called function may clobber any special registers required for
 low-overhead looping. Additionally, some targets (eg, PPC) use the count
 register for branch on table instructions. We reject the doloop pattern in
@@ -238,7 +238,7 @@ set_ssa_use_from_ptr (use_operand_p use, tree val)
 link_imm_use (use, val);
 }
 
-/* Link ssa_imm_use node LINKNODE into the chain for DEF, with use occuring
+/* Link ssa_imm_use node LINKNODE into the chain for DEF, with use occurring
 in STMT. */
 static inline void
 link_imm_use_stmt (ssa_use_operand_t *linknode, tree def, tree stmt)
@@ -267,7 +267,7 @@ relink_imm_use (ssa_use_operand_t *node, ssa_use_operand_t *old)
 }
 }
 
-/* Relink ssa_imm_use node LINKNODE into the chain for OLD, with use occuring
+/* Relink ssa_imm_use node LINKNODE into the chain for OLD, with use occurring
 in STMT. */
 static inline void
 relink_imm_use_stmt (ssa_use_operand_t *linknode, ssa_use_operand_t *old, tree stmt)
@@ -126,7 +126,7 @@ struct dump_file_info
 chains for virtuals (e.g., DCE). */
 #define TODO_update_ssa_no_phi (1 << 8)
 
-/* Insert PHI nodes everywhere they are needed. No prunning of the
+/* Insert PHI nodes everywhere they are needed. No pruning of the
 IDF is done. This is used by passes that need the PHI nodes for
 O_j even if it means that some arguments will come from the default
 definition of O_j's symbol (e.g., pass_linear_transform).
@@ -195,7 +195,7 @@ dse_optimize_stmt (struct dom_walk_data *walk_data,
 /* We want to verify that each virtual definition in STMT has
 precisely one use and that all the virtual definitions are
 used by the same single statement. When complete, we
-want USE_STMT to refer to the one statment which uses
+want USE_STMT to refer to the one statement which uses
 all of the virtual definitions from STMT. */
 use_stmt = NULL;
 FOR_EACH_SSA_MUST_AND_MAY_DEF_OPERAND (var1, var2, stmt, op_iter)
@@ -129,7 +129,7 @@ struct mem_ref
 table, but the hash function depends
 on values of pointers. Thus we cannot use
 htab_traverse, since then we would get
-misscompares during bootstrap (although the
+miscompares during bootstrap (although the
 produced code would be correct). */
 };
 
@@ -627,7 +627,7 @@ determine_invariantness_stmt (struct dom_walk_data *dw_data ATTRIBUTE_UNUSED,
 bsi_insert_after (&bsi, stmt2, BSI_NEW_STMT);
 SSA_NAME_DEF_STMT (lhs) = stmt2;
 
-/* Continue processing with invariant reciprocal statment. */
+/* Continue processing with invariant reciprocal statement. */
 stmt = stmt1;
 }
 
@@ -1886,7 +1886,7 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
 TREE_OPERAND (expr, 1) = op1;
 
 /* Inside address, we might strip the top level component references,
-thus changing type of the expresion. Handling of ADDR_EXPR
+thus changing type of the expression. Handling of ADDR_EXPR
 will fix that. */
 expr = fold_convert (orig_type, expr);
 
@@ -1759,7 +1759,7 @@ add_stmt_operand (tree *var_p, stmt_ann_t s_ann, int flags)
 
 /* If the variable cannot be modified and this is a V_MAY_DEF change
 it into a VUSE. This happens when read-only variables are marked
-call-clobbered and/or aliased to writeable variables. So we only
+call-clobbered and/or aliased to writable variables. So we only
 check that this only happens on non-specific stores.
 
 Note that if this is a specific store, i.e. associated with a
@@ -1246,7 +1246,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
 /* Function vect_analyze_data_refs_alignment
 
 Analyze the alignment of the data-references in the loop.
-FOR NOW: Until support for misliagned accesses is in place, only if all
+FOR NOW: Until support for misaligned accesses is in place, only if all
 accesses are aligned can the loop be vectorized. This restriction will be
 relaxed. */
 
@@ -1908,7 +1908,7 @@ vect_object_analysis (tree memref, tree stmt, bool is_read,
 foreach ref
 base_address = vect_object_analysis(ref)
 1.1- vect_object_analysis(ref):
-Analyze ref, and build a DR (data_referece struct) for it;
+Analyze ref, and build a DR (data_reference struct) for it;
 compute base, initial_offset, step and alignment.
 Call get_inner_reference for refs handled in this function.
 Call vect_addr_analysis(addr) to analyze pointer type expressions.
@@ -231,7 +231,7 @@ vect_create_addr_base_for_vector_ref (tree stmt,
 
 /* Function vect_align_data_ref.
 
-Handle mislignment of a memory accesses.
+Handle misalignment of a memory accesses.
 
 FORNOW: Can't handle misaligned accesses.
 Make sure that the dataref is aligned. */
@@ -987,7 +987,7 @@ slpeel_verify_cfg_after_peeling (struct loop *first_loop,
 /* 1. Verify that one of the successors of first_loopt->exit is the preheader
 of second_loop. */
 
-/* The preheader of new_loop is expected to have two predessors:
+/* The preheader of new_loop is expected to have two predecessors:
 first_loop->exit and the block that precedes first_loop. */
 
 gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
@@ -6600,7 +6600,7 @@ walk_tree (tree *tp, walk_tree_fn func, void *data, struct pointer_set_t *pset)
 case SSA_NAME:
 case FIELD_DECL:
 case RESULT_DECL:
-/* None of thse have subtrees other than those already walked
+/* None of these have subtrees other than those already walked
 above. */
 break;
 