gcc: Remove trailing whitespace

I've tried to build stage3 with
-Wleading-whitespace=blanks -Wtrailing-whitespace=blank -Wno-error=leading-whitespace=blanks -Wno-error=trailing-whitespace=blank
added to STRICT_WARN, and that, as expected, resulted in about
2744 unique trailing whitespace warnings and 124837 leading whitespace
warnings when excluding *.md files (which is obviously in large part a
generator issue).  Some of the remaining warnings are generator related
as well; I think those need to be solved later.
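
The counts above can be approximated from a captured build log, e.g. along
these lines (build.log is just a placeholder name for the saved stage3 build
output, not something the patch adds):
grep 'warning:' build.log | grep 'Wtrailing-whitespace' | grep -v '\.md:' | sort -u | wc -l   # trailing
grep 'warning:' build.log | grep 'Wleading-whitespace' | grep -v '\.md:' | sort -u | wc -l    # leading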

The following patch just fixes up the easy case (trailing whitespace),
which could be easily automated:
for i in `find . -name \*.h -o -name \*.cc -o -name \*.c | xargs grep -l '[ 	]$' | grep -v testsuite/`; do sed -i -e 's/[ 	]*$//' $i; done
I've excluded files which I knew are obviously generated or belong to the Go front end.
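
A quick illustrative re-check after running the loop above (same file
selection; [[:blank:]] matches a space or tab) would be something like:
find . \( -name \*.h -o -name \*.cc -o -name \*.c \) | grep -v testsuite/ | xargs grep -l '[[:blank:]]$'
which should print nothing once the cleanup is complete.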

Is there anything else we'd want to exclude from the changes?

Due to the patch size, I've split it between the gcc/ part (this patch)
and the rest (include/, libiberty/, libgcc/, libcpp/, libstdc++-v3/).

2024-10-24  Jakub Jelinek  <jakub@redhat.com>

gcc/
	* lra-assigns.cc: Remove trailing whitespace.
	* symtab.cc: Likewise.
	* stmt.cc: Likewise.
	* cgraphbuild.cc: Likewise.
	* cfgcleanup.cc: Likewise.
	* loop-init.cc: Likewise.
	* df-problems.cc: Likewise.
	* diagnostic-macro-unwinding.cc: Likewise.
	* langhooks.h: Likewise.
	* except.cc: Likewise.
	* tree-vect-loop.cc: Likewise.
	* coverage.cc: Likewise.
	* hash-table.cc: Likewise.
	* ggc-page.cc: Likewise.
	* gimple-ssa-strength-reduction.cc: Likewise.
	* tree-parloops.cc: Likewise.
	* internal-fn.cc: Likewise.
	* ipa-split.cc: Likewise.
	* calls.cc: Likewise.
	* reorg.cc: Likewise.
	* sbitmap.h: Likewise.
	* omp-offload.cc: Likewise.
	* cfgrtl.cc: Likewise.
	* reginfo.cc: Likewise.
	* gengtype.h: Likewise.
	* omp-general.h: Likewise.
	* ipa-comdats.cc: Likewise.
	* gimple-range-edge.h: Likewise.
	* tree-ssa-structalias.cc: Likewise.
	* target.def: Likewise.
	* basic-block.h: Likewise.
	* graphite-isl-ast-to-gimple.cc: Likewise.
	* auto-profile.cc: Likewise.
	* optabs.cc: Likewise.
	* gengtype-lex.l: Likewise.
	* optabs.def: Likewise.
	* ira-build.cc: Likewise.
	* ira.cc: Likewise.
	* function.h: Likewise.
	* tree-ssa-propagate.cc: Likewise.
	* gcov-io.cc: Likewise.
	* builtin-types.def: Likewise.
	* ddg.cc: Likewise.
	* lra-spills.cc: Likewise.
	* cfg.cc: Likewise.
	* bitmap.cc: Likewise.
	* gimple-range-gori.h: Likewise.
	* tree-ssa-loop-im.cc: Likewise.
	* cfghooks.h: Likewise.
	* genmatch.cc: Likewise.
	* explow.cc: Likewise.
	* lto-streamer-in.cc: Likewise.
	* graphite-scop-detection.cc: Likewise.
	* ipa-prop.cc: Likewise.
	* gcc.cc: Likewise.
	* vec.h: Likewise.
	* cfgexpand.cc: Likewise.
	* config/alpha/vms.h: Likewise.
	* config/alpha/alpha.cc: Likewise.
	* config/alpha/driver-alpha.cc: Likewise.
	* config/alpha/elf.h: Likewise.
	* config/iq2000/iq2000.h: Likewise.
	* config/iq2000/iq2000.cc: Likewise.
	* config/pa/pa-64.h: Likewise.
	* config/pa/som.h: Likewise.
	* config/pa/pa.cc: Likewise.
	* config/pa/pa.h: Likewise.
	* config/pa/pa32-regs.h: Likewise.
	* config/c6x/c6x.cc: Likewise.
	* config/openbsd-stdint.h: Likewise.
	* config/elfos.h: Likewise.
	* config/lm32/lm32.cc: Likewise.
	* config/lm32/lm32.h: Likewise.
	* config/lm32/lm32-protos.h: Likewise.
	* config/darwin-c.cc: Likewise.
	* config/rx/rx.cc: Likewise.
	* config/host-darwin.h: Likewise.
	* config/netbsd.h: Likewise.
	* config/ia64/ia64.cc: Likewise.
	* config/ia64/freebsd.h: Likewise.
	* config/avr/avr-c.cc: Likewise.
	* config/avr/avr.cc: Likewise.
	* config/avr/avr-arch.h: Likewise.
	* config/avr/avr.h: Likewise.
	* config/avr/stdfix.h: Likewise.
	* config/avr/gen-avr-mmcu-specs.cc: Likewise.
	* config/avr/avr-log.cc: Likewise.
	* config/avr/elf.h: Likewise.
	* config/avr/gen-avr-mmcu-texi.cc: Likewise.
	* config/avr/avr-devices.cc: Likewise.
	* config/nvptx/nvptx.cc: Likewise.
	* config/vx-common.h: Likewise.
	* config/sol2.cc: Likewise.
	* config/rl78/rl78.cc: Likewise.
	* config/cris/cris.cc: Likewise.
	* config/arm/symbian.h: Likewise.
	* config/arm/unknown-elf.h: Likewise.
	* config/arm/linux-eabi.h: Likewise.
	* config/arm/arm.cc: Likewise.
	* config/arm/arm-mve-builtins.h: Likewise.
	* config/arm/bpabi.h: Likewise.
	* config/arm/vxworks.h: Likewise.
	* config/arm/arm.h: Likewise.
	* config/arm/aout.h: Likewise.
	* config/arm/elf.h: Likewise.
	* config/host-linux.cc: Likewise.
	* config/sh/sh_treg_combine.cc: Likewise.
	* config/sh/vxworks.h: Likewise.
	* config/sh/elf.h: Likewise.
	* config/sh/netbsd-elf.h: Likewise.
	* config/sh/sh.cc: Likewise.
	* config/sh/embed-elf.h: Likewise.
	* config/sh/sh.h: Likewise.
	* config/darwin-driver.cc: Likewise.
	* config/m32c/m32c.cc: Likewise.
	* config/frv/frv.cc: Likewise.
	* config/openbsd.h: Likewise.
	* config/aarch64/aarch64-protos.h: Likewise.
	* config/aarch64/aarch64-builtins.cc: Likewise.
	* config/aarch64/aarch64-cost-tables.h: Likewise.
	* config/aarch64/aarch64.cc: Likewise.
	* config/bfin/bfin.cc: Likewise.
	* config/bfin/bfin.h: Likewise.
	* config/bfin/bfin-protos.h: Likewise.
	* config/i386/gmm_malloc.h: Likewise.
	* config/i386/djgpp.h: Likewise.
	* config/i386/sol2.h: Likewise.
	* config/i386/stringop.def: Likewise.
	* config/i386/i386-features.cc: Likewise.
	* config/i386/openbsdelf.h: Likewise.
	* config/i386/cpuid.h: Likewise.
	* config/i386/i386.h: Likewise.
	* config/i386/smmintrin.h: Likewise.
	* config/i386/avx10_2-512convertintrin.h: Likewise.
	* config/i386/i386-options.cc: Likewise.
	* config/i386/i386-opts.h: Likewise.
	* config/i386/i386-expand.cc: Likewise.
	* config/i386/avx512dqintrin.h: Likewise.
	* config/i386/wmmintrin.h: Likewise.
	* config/i386/gnu-user.h: Likewise.
	* config/i386/host-mingw32.cc: Likewise.
	* config/i386/avx10_2bf16intrin.h: Likewise.
	* config/i386/cygwin.h: Likewise.
	* config/i386/driver-i386.cc: Likewise.
	* config/i386/biarch64.h: Likewise.
	* config/i386/host-cygwin.cc: Likewise.
	* config/i386/cygming.h: Likewise.
	* config/i386/i386-builtins.cc: Likewise.
	* config/i386/avx10_2convertintrin.h: Likewise.
	* config/i386/i386.cc: Likewise.
	* config/i386/gas.h: Likewise.
	* config/i386/freebsd.h: Likewise.
	* config/mingw/winnt-cxx.cc: Likewise.
	* config/mingw/winnt.cc: Likewise.
	* config/h8300/h8300.cc: Likewise.
	* config/host-solaris.cc: Likewise.
	* config/m32r/m32r.h: Likewise.
	* config/m32r/m32r.cc: Likewise.
	* config/darwin.h: Likewise.
	* config/sparc/linux64.h: Likewise.
	* config/sparc/sparc-protos.h: Likewise.
	* config/sparc/sysv4.h: Likewise.
	* config/sparc/sparc.h: Likewise.
	* config/sparc/linux.h: Likewise.
	* config/sparc/freebsd.h: Likewise.
	* config/sparc/sparc.cc: Likewise.
	* config/gcn/gcn-run.cc: Likewise.
	* config/gcn/gcn.cc: Likewise.
	* config/gcn/gcn-tree.cc: Likewise.
	* config/kopensolaris-gnu.h: Likewise.
	* config/nios2/nios2.h: Likewise.
	* config/nios2/elf.h: Likewise.
	* config/nios2/nios2.cc: Likewise.
	* config/host-netbsd.cc: Likewise.
	* config/rtems.h: Likewise.
	* config/pdp11/pdp11.cc: Likewise.
	* config/pdp11/pdp11.h: Likewise.
	* config/mn10300/mn10300.cc: Likewise.
	* config/mn10300/linux.h: Likewise.
	* config/moxie/moxie.h: Likewise.
	* config/moxie/moxie.cc: Likewise.
	* config/rs6000/aix71.h: Likewise.
	* config/rs6000/vec_types.h: Likewise.
	* config/rs6000/xcoff.h: Likewise.
	* config/rs6000/rs6000.cc: Likewise.
	* config/rs6000/rs6000-internal.h: Likewise.
	* config/rs6000/rs6000-p8swap.cc: Likewise.
	* config/rs6000/rs6000-c.cc: Likewise.
	* config/rs6000/aix.h: Likewise.
	* config/rs6000/rs6000-logue.cc: Likewise.
	* config/rs6000/rs6000-string.cc: Likewise.
	* config/rs6000/rs6000-call.cc: Likewise.
	* config/rs6000/ppu_intrinsics.h: Likewise.
	* config/rs6000/altivec.h: Likewise.
	* config/rs6000/darwin.h: Likewise.
	* config/rs6000/host-darwin.cc: Likewise.
	* config/rs6000/freebsd64.h: Likewise.
	* config/rs6000/spu2vmx.h: Likewise.
	* config/rs6000/linux.h: Likewise.
	* config/rs6000/si2vmx.h: Likewise.
	* config/rs6000/driver-rs6000.cc: Likewise.
	* config/rs6000/freebsd.h: Likewise.
	* config/vxworksae.h: Likewise.
	* config/mips/frame-header-opt.cc: Likewise.
	* config/mips/mips.h: Likewise.
	* config/mips/mips.cc: Likewise.
	* config/mips/sde.h: Likewise.
	* config/darwin-protos.h: Likewise.
	* config/mcore/mcore-elf.h: Likewise.
	* config/mcore/mcore.h: Likewise.
	* config/mcore/mcore.cc: Likewise.
	* config/epiphany/epiphany.cc: Likewise.
	* config/fr30/fr30.h: Likewise.
	* config/fr30/fr30.cc: Likewise.
	* config/riscv/riscv-vector-builtins-shapes.cc: Likewise.
	* config/riscv/riscv-vector-builtins-bases.cc: Likewise.
	* config/visium/visium.h: Likewise.
	* config/mmix/mmix.cc: Likewise.
	* config/v850/v850.cc: Likewise.
	* config/v850/v850-c.cc: Likewise.
	* config/v850/v850.h: Likewise.
	* config/stormy16/stormy16.cc: Likewise.
	* config/stormy16/stormy16-protos.h: Likewise.
	* config/stormy16/stormy16.h: Likewise.
	* config/arc/arc.cc: Likewise.
	* config/vxworks.cc: Likewise.
	* config/microblaze/microblaze-c.cc: Likewise.
	* config/microblaze/microblaze-protos.h: Likewise.
	* config/microblaze/microblaze.h: Likewise.
	* config/microblaze/microblaze.cc: Likewise.
	* config/freebsd-spec.h: Likewise.
	* config/m68k/m68kelf.h: Likewise.
	* config/m68k/m68k.cc: Likewise.
	* config/m68k/netbsd-elf.h: Likewise.
	* config/m68k/linux.h: Likewise.
	* config/freebsd.h: Likewise.
	* config/host-openbsd.cc: Likewise.
	* regcprop.cc: Likewise.
	* dumpfile.cc: Likewise.
	* combine.cc: Likewise.
	* tree-ssa-forwprop.cc: Likewise.
	* ipa-profile.cc: Likewise.
	* hw-doloop.cc: Likewise.
	* opts.cc: Likewise.
	* gcc-ar.cc: Likewise.
	* tree-cfg.cc: Likewise.
	* incpath.cc: Likewise.
	* tree-ssa-sccvn.cc: Likewise.
	* function.cc: Likewise.
	* genattrtab.cc: Likewise.
	* rtl.def: Likewise.
	* genchecksum.cc: Likewise.
	* profile.cc: Likewise.
	* df-core.cc: Likewise.
	* tree-pretty-print.cc: Likewise.
	* tree.h: Likewise.
	* plugin.cc: Likewise.
	* tree-ssa-loop-ch.cc: Likewise.
	* emit-rtl.cc: Likewise.
	* haifa-sched.cc: Likewise.
	* gimple-range-edge.cc: Likewise.
	* range-op.cc: Likewise.
	* tree-ssa-ccp.cc: Likewise.
	* dwarf2cfi.cc: Likewise.
	* recog.cc: Likewise.
	* vtable-verify.cc: Likewise.
	* system.h: Likewise.
	* regrename.cc: Likewise.
	* tree-ssa-dom.cc: Likewise.
	* loop-unroll.cc: Likewise.
	* lra-constraints.cc: Likewise.
	* pretty-print.cc: Likewise.
	* ifcvt.cc: Likewise.
	* ipa.cc: Likewise.
	* alloc-pool.h: Likewise.
	* collect2.cc: Likewise.
	* pointer-query.cc: Likewise.
	* cfgloop.cc: Likewise.
	* toplev.cc: Likewise.
	* sese.cc: Likewise.
	* gengtype.cc: Likewise.
	* gimplify-me.cc: Likewise.
	* double-int.cc: Likewise.
	* bb-reorder.cc: Likewise.
	* dwarf2out.cc: Likewise.
	* tree-ssa-loop-ivcanon.cc: Likewise.
	* tree-ssa-reassoc.cc: Likewise.
	* cgraph.cc: Likewise.
	* sel-sched.cc: Likewise.
	* attribs.cc: Likewise.
	* expr.cc: Likewise.
	* tree-ssa-scopedtables.h: Likewise.
	* gimple-range-cache.cc: Likewise.
	* ipa-pure-const.cc: Likewise.
	* tree-inline.cc: Likewise.
	* genhooks.cc: Likewise.
	* gimple-range-phi.h: Likewise.
	* shrink-wrap.cc: Likewise.
	* tree.cc: Likewise.
	* gimple.cc: Likewise.
	* backend.h: Likewise.
	* opts-common.cc: Likewise.
	* cfg-flags.def: Likewise.
	* gcse-common.cc: Likewise.
	* tree-ssa-scopedtables.cc: Likewise.
	* ccmp.cc: Likewise.
	* builtins.def: Likewise.
	* builtin-attrs.def: Likewise.
	* postreload.cc: Likewise.
	* sched-deps.cc: Likewise.
	* ipa-inline-transform.cc: Likewise.
	* tree-vect-generic.cc: Likewise.
	* ipa-polymorphic-call.cc: Likewise.
	* builtins.cc: Likewise.
	* sel-sched-ir.cc: Likewise.
	* trans-mem.cc: Likewise.
	* ipa-visibility.cc: Likewise.
	* cgraph.h: Likewise.
	* tree-ssa-phiopt.cc: Likewise.
	* genopinit.cc: Likewise.
	* ipa-inline.cc: Likewise.
	* omp-low.cc: Likewise.
	* ipa-utils.cc: Likewise.
	* tree-ssa-math-opts.cc: Likewise.
	* tree-ssa-ifcombine.cc: Likewise.
	* gimple-range.cc: Likewise.
	* ipa-fnsummary.cc: Likewise.
	* ira-color.cc: Likewise.
	* value-prof.cc: Likewise.
	* varasm.cc: Likewise.
	* ipa-icf.cc: Likewise.
	* ira-emit.cc: Likewise.
	* lto-streamer.h: Likewise.
	* lto-wrapper.cc: Likewise.
	* regs.h: Likewise.
	* gengtype-parse.cc: Likewise.
	* alias.cc: Likewise.
	* lto-streamer.cc: Likewise.
	* real.h: Likewise.
	* wide-int.h: Likewise.
	* targhooks.cc: Likewise.
	* gimple-ssa-warn-access.cc: Likewise.
	* real.cc: Likewise.
	* ipa-reference.cc: Likewise.
	* bitmap.h: Likewise.
	* ginclude/float.h: Likewise.
	* ginclude/stddef.h: Likewise.
	* ginclude/stdarg.h: Likewise.
	* ginclude/stdatomic.h: Likewise.
	* optabs.h: Likewise.
	* sel-sched-ir.h: Likewise.
	* convert.cc: Likewise.
	* cgraphunit.cc: Likewise.
	* lra-remat.cc: Likewise.
	* tree-if-conv.cc: Likewise.
	* gcov-dump.cc: Likewise.
	* tree-predcom.cc: Likewise.
	* dominance.cc: Likewise.
	* gimple-range-cache.h: Likewise.
	* ipa-devirt.cc: Likewise.
	* rtl.h: Likewise.
	* ubsan.cc: Likewise.
	* tree-ssa.cc: Likewise.
	* ssa.h: Likewise.
	* cse.cc: Likewise.
	* jump.cc: Likewise.
	* hwint.h: Likewise.
	* caller-save.cc: Likewise.
	* coretypes.h: Likewise.
	* ipa-fnsummary.h: Likewise.
	* tree-ssa-strlen.cc: Likewise.
	* modulo-sched.cc: Likewise.
	* cgraphclones.cc: Likewise.
	* lto-cgraph.cc: Likewise.
	* hw-doloop.h: Likewise.
	* data-streamer.h: Likewise.
	* compare-elim.cc: Likewise.
	* profile-count.h: Likewise.
	* tree-vect-loop-manip.cc: Likewise.
	* ree.cc: Likewise.
	* reload.cc: Likewise.
	* tree-ssa-loop-split.cc: Likewise.
	* tree-into-ssa.cc: Likewise.
	* gcse.cc: Likewise.
	* cfgloopmanip.cc: Likewise.
	* df.h: Likewise.
	* fold-const.cc: Likewise.
	* wide-int.cc: Likewise.
	* gengtype-state.cc: Likewise.
	* sanitizer.def: Likewise.
	* tree-ssa-sink.cc: Likewise.
	* target-hooks-macros.h: Likewise.
	* tree-ssa-pre.cc: Likewise.
	* gimple-pretty-print.cc: Likewise.
	* ipa-utils.h: Likewise.
	* tree-outof-ssa.cc: Likewise.
	* tree-ssa-coalesce.cc: Likewise.
	* gimple-match.h: Likewise.
	* tree-ssa-loop-niter.cc: Likewise.
	* tree-loop-distribution.cc: Likewise.
	* tree-emutls.cc: Likewise.
	* tree-eh.cc: Likewise.
	* varpool.cc: Likewise.
	* ssa-iterators.h: Likewise.
	* asan.cc: Likewise.
	* reload1.cc: Likewise.
	* cfgloopanal.cc: Likewise.
	* tree-vectorizer.cc: Likewise.
	* simplify-rtx.cc: Likewise.
	* opts-global.cc: Likewise.
	* gimple-ssa-store-merging.cc: Likewise.
	* expmed.cc: Likewise.
	* tree-ssa-loop-prefetch.cc: Likewise.
	* tree-ssa-dse.h: Likewise.
	* tree-vect-stmts.cc: Likewise.
	* gimple-fold.cc: Likewise.
	* lra-coalesce.cc: Likewise.
	* data-streamer-out.cc: Likewise.
	* diagnostic.cc: Likewise.
	* tree-ssa-alias.cc: Likewise.
	* tree-vect-patterns.cc: Likewise.
	* common/common-target.def: Likewise.
	* common/config/rx/rx-common.cc: Likewise.
	* common/config/msp430/msp430-common.cc: Likewise.
	* common/config/avr/avr-common.cc: Likewise.
	* common/config/i386/i386-common.cc: Likewise.
	* common/config/pdp11/pdp11-common.cc: Likewise.
	* common/config/rs6000/rs6000-common.cc: Likewise.
	* common/config/mcore/mcore-common.cc: Likewise.
	* graphite.cc: Likewise.
	* gimple-low.cc: Likewise.
	* genmodes.cc: Likewise.
	* gimple-loop-jam.cc: Likewise.
	* lto-streamer-out.cc: Likewise.
	* predict.cc: Likewise.
	* omp-expand.cc: Likewise.
	* gimple-array-bounds.cc: Likewise.
	* predict.def: Likewise.
	* opts.h: Likewise.
	* tree-stdarg.cc: Likewise.
	* gimplify.cc: Likewise.
	* ira-lives.cc: Likewise.
	* loop-doloop.cc: Likewise.
	* lra.cc: Likewise.
	* gimple-iterator.h: Likewise.
	* tree-sra.cc: Likewise.
gcc/fortran/
	* trans-openmp.cc: Remove trailing whitespace.
	* trans-common.cc: Likewise.
	* match.h: Likewise.
	* scanner.cc: Likewise.
	* gfortranspec.cc: Likewise.
	* io.cc: Likewise.
	* iso-c-binding.def: Likewise.
	* iso-fortran-env.def: Likewise.
	* types.def: Likewise.
	* openmp.cc: Likewise.
	* f95-lang.cc: Likewise.
gcc/analyzer/
	* state-purge.cc: Remove trailing whitespace.
	* region-model.h: Likewise.
	* region-model.cc: Likewise.
	* program-point.cc: Likewise.
	* exploded-graph.h: Likewise.
	* program-state.cc: Likewise.
	* supergraph.cc: Likewise.
gcc/c-family/
	* c-ubsan.cc: Remove trailing whitespace.
	* stub-objc.cc: Likewise.
	* c-pragma.cc: Likewise.
	* c-ppoutput.cc: Likewise.
	* c-indentation.cc: Likewise.
	* c-ada-spec.cc: Likewise.
	* c-opts.cc: Likewise.
	* c-common.cc: Likewise.
	* c-format.cc: Likewise.
	* c-omp.cc: Likewise.
	* c-objc.h: Likewise.
	* c-cppbuiltin.cc: Likewise.
	* c-attribs.cc: Likewise.
	* c-target.def: Likewise.
	* c-common.h: Likewise.
gcc/c/
	* c-typeck.cc: Remove trailing whitespace.
	* gimple-parser.cc: Likewise.
	* c-parser.cc: Likewise.
	* c-decl.cc: Likewise.
gcc/cp/
	* vtable-class-hierarchy.cc: Remove trailing whitespace.
	* typeck2.cc: Likewise.
	* decl.cc: Likewise.
	* init.cc: Likewise.
	* semantics.cc: Likewise.
	* module.cc: Likewise.
	* rtti.cc: Likewise.
	* cxx-pretty-print.cc: Likewise.
	* cvt.cc: Likewise.
	* mangle.cc: Likewise.
	* name-lookup.h: Likewise.
	* coroutines.cc: Likewise.
	* error.cc: Likewise.
	* lambda.cc: Likewise.
	* tree.cc: Likewise.
	* g++spec.cc: Likewise.
	* decl2.cc: Likewise.
	* cp-tree.h: Likewise.
	* parser.cc: Likewise.
	* pt.cc: Likewise.
	* call.cc: Likewise.
	* lex.cc: Likewise.
	* cp-lang.cc: Likewise.
	* cp-tree.def: Likewise.
	* constexpr.cc: Likewise.
	* typeck.cc: Likewise.
	* name-lookup.cc: Likewise.
	* optimize.cc: Likewise.
	* search.cc: Likewise.
	* mapper-client.cc: Likewise.
	* ptree.cc: Likewise.
	* class.cc: Likewise.
gcc/jit/
	* docs/examples/tut04-toyvm/toyvm.cc: Remove trailing whitespace.
gcc/lto/
	* lto-object.cc: Remove trailing whitespace.
	* lto-symtab.cc: Likewise.
	* lto-partition.cc: Likewise.
	* lang-specs.h: Likewise.
	* lto-lang.cc: Likewise.
gcc/objc/
	* objc-encoding.cc: Remove trailing whitespace.
	* objc-map.h: Likewise.
	* objc-next-runtime-abi-01.cc: Likewise.
	* objc-act.cc: Likewise.
	* objc-map.cc: Likewise.
gcc/objcp/
	* objcp-decl.cc: Remove trailing whitespace.
	* objcp-lang.cc: Likewise.
	* objcp-decl.h: Likewise.
gcc/rust/
	* util/optional.h: Remove trailing whitespace.
	* util/expected.h: Likewise.
	* util/rust-unicode-data.h: Likewise.
gcc/m2/
	* mc-boot/GFpuIO.cc: Remove trailing whitespace.
	* mc-boot/GFIO.cc: Likewise.
	* mc-boot/GFormatStrings.cc: Likewise.
	* mc-boot/GCmdArgs.cc: Likewise.
	* mc-boot/GDebug.h: Likewise.
	* mc-boot/GM2Dependent.cc: Likewise.
	* mc-boot/GRTint.cc: Likewise.
	* mc-boot/GDebug.cc: Likewise.
	* mc-boot/GmcError.cc: Likewise.
	* mc-boot/Gmcp4.cc: Likewise.
	* mc-boot/GM2RTS.cc: Likewise.
	* mc-boot/GIO.cc: Likewise.
	* mc-boot/Gmcp5.cc: Likewise.
	* mc-boot/GDynamicStrings.cc: Likewise.
	* mc-boot/Gmcp1.cc: Likewise.
	* mc-boot/GFormatStrings.h: Likewise.
	* mc-boot/Gmcp2.cc: Likewise.
	* mc-boot/Gmcp3.cc: Likewise.
	* pge-boot/GFIO.cc: Likewise.
	* pge-boot/GDebug.h: Likewise.
	* pge-boot/GM2Dependent.cc: Likewise.
	* pge-boot/GDebug.cc: Likewise.
	* pge-boot/GM2RTS.cc: Likewise.
	* pge-boot/GSymbolKey.cc: Likewise.
	* pge-boot/GIO.cc: Likewise.
	* pge-boot/GIndexing.cc: Likewise.
	* pge-boot/GDynamicStrings.cc: Likewise.
	* pge-boot/GFormatStrings.h: Likewise.
gcc/go/
	* go-gcc.cc: Remove trailing whitespace.
	* gospec.cc: Likewise.
Jakub Jelinek, 2024-10-25 09:41:46 +02:00 (committed by Jakub Jelinek)
commit 50332a4fdd, parent 0b22f05853
562 changed files with 8993 additions and 8993 deletions

@ -671,7 +671,7 @@ component_uses_parent_alias_set_from (const_tree t)
t = TREE_OPERAND (t, 0);
}
if (found)
return TREE_OPERAND (found, 0);
@ -738,7 +738,7 @@ get_deref_alias_set (tree t)
memory reference tree *T or NULL_TREE in which case *T is
adjusted to point to the outermost component reference that
can be used for assigning an alias set. */
tree
reference_alias_ptr_type_1 (tree *t)
{
@ -1258,12 +1258,12 @@ record_component_aliases (tree type, alias_set_type superset)
case UNION_TYPE:
case QUAL_UNION_TYPE:
{
/* LTO non-ODR type merging does not make any difference between
/* LTO non-ODR type merging does not make any difference between
component pointer types. We may have
struct foo {int *a;};
as TYPE_CANONICAL of
as TYPE_CANONICAL of
struct bar {float *a;};
@ -2113,7 +2113,7 @@ compare_base_decls (tree base1, tree base2)
symtab_node *node2 = symtab_node::get (base2);
if (!node2)
return 0;
ret = node1->equal_address_to (node2, true);
return ret;
}
@ -2753,7 +2753,7 @@ adjust_offset_for_component_ref (tree x, bool *known_p,
}
/* Return true if we can determine the exprs corresponding to memrefs
X and Y and they do not overlap.
X and Y and they do not overlap.
If LOOP_VARIANT is set, skip offset-based disambiguation */
bool

View file

@ -445,7 +445,7 @@ base_pool_allocator <TBlockAllocator>::remove (void *object)
memset (object, 0xaf, size);
}
#if CHECKING_P
#if CHECKING_P
/* Mark the element to be free. */
allocation_object::get_instance (object)->id = 0;
#endif

View file

@ -426,7 +426,7 @@ class dynamic_call_info_t : public custom_edge_info
public:
dynamic_call_info_t (const gcall *dynamic_call,
const bool is_returning_call = false)
: m_dynamic_call (dynamic_call),
: m_dynamic_call (dynamic_call),
m_is_returning_call (is_returning_call)
{}

View file

@ -342,7 +342,7 @@ program_point::to_json () const
/* Update the callstack to represent a call from caller to callee.
Generally used to push a custom call to a perticular program point
Generally used to push a custom call to a perticular program point
where we don't have a superedge representing the call. */
void
program_point::push_to_call_stack (const supernode *caller,

View file

@ -1343,7 +1343,7 @@ program_state::on_edge (exploded_graph &eg,
/* Update this program_state to reflect a call to function
represented by CALL_STMT.
currently used only when the call doesn't have a superedge representing
currently used only when the call doesn't have a superedge representing
the call ( like call via a function pointer ) */
void
program_state::push_call (exploded_graph &eg,
@ -1366,7 +1366,7 @@ program_state::push_call (exploded_graph &eg,
/* Update this program_state to reflect a return from function
call to which is represented by CALL_STMT.
currently used only when the call doesn't have a superedge representing
currently used only when the call doesn't have a superedge representing
the return */
void
program_state::returning_call (exploded_graph &eg,

View file

@ -5930,7 +5930,7 @@ region_model::update_for_return_gcall (const gcall *call_stmt,
pop_frame (lhs, NULL, ctxt, call_stmt);
}
/* Extract calling information from the superedge and update the model for the
/* Extract calling information from the superedge and update the model for the
call */
void
@ -5941,7 +5941,7 @@ region_model::update_for_call_superedge (const call_superedge &call_edge,
update_for_gcall (call_stmt, ctxt, call_edge.get_callee_function ());
}
/* Extract calling information from the return superedge and update the model
/* Extract calling information from the return superedge and update the model
for the returning call */
void

View file

@ -352,7 +352,7 @@ class region_model
void update_for_gcall (const gcall *call_stmt,
region_model_context *ctxt,
function *callee = NULL);
void update_for_return_gcall (const gcall *call_stmt,
region_model_context *ctxt);

View file

@ -599,17 +599,17 @@ state_purge_per_ssa_name::process_point (const function_point &point,
superedge *sedge
= map.get_sg ().get_intraprocedural_edge_for_call (cedge);
gcc_assert (sedge);
add_to_worklist
add_to_worklist
(function_point::after_supernode (sedge->m_src),
worklist, logger);
}
else
{
supernode *callernode
supernode *callernode
= map.get_sg ().get_supernode_for_stmt (returning_call);
gcc_assert (callernode);
add_to_worklist
add_to_worklist
(function_point::after_supernode (callernode),
worklist, logger);
}

View file

@ -201,14 +201,14 @@ supergraph::supergraph (logger *logger)
// maybe call is via a function pointer
if (gcall *call = dyn_cast<gcall *> (stmt))
{
cgraph_edge *edge
cgraph_edge *edge
= cgraph_node::get (fun->decl)->get_edge (stmt);
if (!edge || !edge->callee)
{
supernode *old_node_for_stmts = node_for_stmts;
node_for_stmts = add_node (fun, bb, call, NULL);
superedge *sedge
superedge *sedge
= new callgraph_superedge (old_node_for_stmts,
node_for_stmts,
SUPEREDGE_INTRAPROCEDURAL_CALL,
@ -1266,7 +1266,7 @@ callgraph_superedge::get_call_stmt () const
{
if (m_cedge)
return m_cedge->call_stmt;
return m_src->get_final_call ();
}

View file

@ -2175,7 +2175,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
& ~(ASAN_MIN_RED_ZONE_SIZE - HOST_WIDE_INT_1))
- offset;
/* Unpoison shadow memory that corresponds to a variable that is
/* Unpoison shadow memory that corresponds to a variable that is
is subject of use-after-return sanitization. */
if (l > 2)
{

View file

@ -1258,7 +1258,7 @@ make_dispatcher_decl (const tree decl)
fn_type = TREE_TYPE (decl);
func_type = build_function_type (TREE_TYPE (fn_type),
TYPE_ARG_TYPES (fn_type));
func_decl = build_fn_decl (func_name, func_type);
XDELETEVEC (func_name);
TREE_USED (func_decl) = 1;
@ -1271,7 +1271,7 @@ make_dispatcher_decl (const tree decl)
/* This will be of type IFUNCs have to be externally visible. */
TREE_PUBLIC (func_decl) = 1;
return func_decl;
return func_decl;
}
/* Returns true if DECL is multi-versioned using the target attribute, and this

View file

@ -474,7 +474,7 @@ string_table::get_index (const char *name) const
return iter->second;
}
/* Return the index of a given function DECL. Return -1 if DECL is not
/* Return the index of a given function DECL. Return -1 if DECL is not
found in string table. */
int

View file

@ -1,4 +1,4 @@
/* Common Backend requirements.
/* Common Backend requirements.
Copyright (C) 2015-2024 Free Software Foundation, Inc.
Contributed by Andrew MacLeod <amacleod@redhat.com>

View file

@ -563,7 +563,7 @@ check_probability (int prob)
gcc_checking_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
}
/* Given PROB1 and PROB2, return PROB1*PROB2/REG_BR_PROB_BASE.
/* Given PROB1 and PROB2, return PROB1*PROB2/REG_BR_PROB_BASE.
Used to combine BB probabilities. */
inline int

View file

@ -1207,7 +1207,7 @@ connect_traces (int n_traces, struct trace *traces)
/* If dest has multiple predecessors, skip it. We expect
that one predecessor with smaller index connects with it
later. */
if (count != 1)
if (count != 1)
break;
}
@ -3039,7 +3039,7 @@ pass_partition_blocks::execute (function *fun)
Which means that the bb_has_eh_pred test in df_bb_refs_collect
will *always* fail, because no edges can have been added to the
block yet. Which of course means we don't add the right
block yet. Which of course means we don't add the right
artificial refs, which means we fail df_verify (much) later.
Cleanest solution would seem to make DF_DEFER_INSN_RESCAN imply

View file

@ -340,7 +340,7 @@ bitmap_list_insert_element_after (bitmap head,
}
/* Return the element for INDX, or NULL if the element doesn't exist.
Update the `current' field even if we can't find an element that
Update the `current' field even if we can't find an element that
would hold the bitmap's bit to make eventual allocation
faster. */
@ -418,7 +418,7 @@ bitmap_list_find_element (bitmap head, unsigned int indx)
splay tree in Sleator and Tarjan's "Self-adjusting Binary Search Trees".
It is probably not the most efficient form of splay trees, but it should
be good enough to experiment with this idea of bitmaps-as-trees.
For all functions below, the variable or function argument "t" is a node
in the tree, and "e" is a temporary or new node in the tree. The rest
is sufficiently straigh-forward (and very well explained in the paper)

View file

@ -60,7 +60,7 @@ along with GCC; see the file COPYING3. If not see
sparse sets. The canonical example in GCC is, of course, the "set of
sets" for some CFG-based data flow problems (liveness analysis, dominance
frontiers, etc.).
For random-access sparse sets of unknown universe, the binary tree
representation is likely to be a more suitable choice. Theoretical
access times for the binary tree representation are better than those
@ -99,7 +99,7 @@ along with GCC; see the file COPYING3. If not see
efficiently. The downside is that many random-access operations are
relatively slow, because the linked list has to be traversed to test
membership (i.e. member_p/ add_member/remove_member).
To improve the performance of this set representation, the last
accessed element and its index are cached. For membership tests on
members close to recently accessed members, the cached last element
@ -176,7 +176,7 @@ along with GCC; see the file COPYING3. If not see
of clearing the set, and the relatively large number of operations
necessary to balance the tree. Also, iterating the set members is
not supported.
As for the linked-list representation, the last accessed element and
its index are cached, so that membership tests on the latest accessed
members is a constant-time operation. Other lookups take O(logE)

View file

@ -266,7 +266,7 @@ DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_4, ATTR_NONNULL, ATTR_LIST_1_4, \
are nonnull pointers. */
DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_5, ATTR_NONNULL, ATTR_LIST_1_5, \
ATTR_NOTHROW_LIST)
/* Nothrow leaf functions which are type-generic. */
DEF_ATTR_TREE_LIST (ATTR_NOTHROW_TYPEGENERIC_LEAF, ATTR_TYPEGENERIC, ATTR_NULL, \
ATTR_NOTHROW_LEAF_LIST)

View file

@ -918,19 +918,19 @@ DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG,
BT_ULONGLONG, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I1_BOOL_INT_INT,
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I1_BOOL_INT_INT,
BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I1, BT_BOOL, BT_INT,
BT_INT)
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I2_BOOL_INT_INT,
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I2_BOOL_INT_INT,
BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I2, BT_BOOL, BT_INT,
BT_INT)
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I4_BOOL_INT_INT,
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I4_BOOL_INT_INT,
BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I4, BT_BOOL, BT_INT,
BT_INT)
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I8_BOOL_INT_INT,
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I8_BOOL_INT_INT,
BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I8, BT_BOOL, BT_INT,
BT_INT)
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I16_BOOL_INT_INT,
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I16_BOOL_INT_INT,
BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I16, BT_BOOL, BT_INT,
BT_INT)
DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_SIZE_VPTR_PTR_PTR_INT_INT, BT_BOOL, BT_SIZE,

View file

@ -3537,7 +3537,7 @@ builtin_memcpy_read_str (void *data, void *, HOST_WIDE_INT offset,
}
/* LEN specify length of the block of memcpy/memset operation.
Figure out its range and put it into MIN_SIZE/MAX_SIZE.
Figure out its range and put it into MIN_SIZE/MAX_SIZE.
In some cases we can make very likely guess on max size, then we
set it into PROBABLE_MAX_SIZE. */
@ -6428,7 +6428,7 @@ get_builtin_sync_mem (tree loc, machine_mode mode)
}
/* Make sure an argument is in the right mode.
EXP is the tree argument.
EXP is the tree argument.
MODE is the mode it should be in. */
static rtx
@ -6653,15 +6653,15 @@ expand_builtin_atomic_exchange (machine_mode mode, tree exp, rtx target)
}
/* Expand the __atomic_compare_exchange intrinsic:
bool __atomic_compare_exchange (TYPE *object, TYPE *expect,
TYPE desired, BOOL weak,
bool __atomic_compare_exchange (TYPE *object, TYPE *expect,
TYPE desired, BOOL weak,
enum memmodel success,
enum memmodel failure)
EXP is the CALL_EXPR.
TARGET is an optional place for us to store the results. */
static rtx
expand_builtin_atomic_compare_exchange (machine_mode mode, tree exp,
expand_builtin_atomic_compare_exchange (machine_mode mode, tree exp,
rtx target)
{
rtx expect, desired, mem, oldval;
@ -6674,14 +6674,14 @@ expand_builtin_atomic_compare_exchange (machine_mode mode, tree exp,
if (failure > success)
success = MEMMODEL_SEQ_CST;
if (is_mm_release (failure) || is_mm_acq_rel (failure))
{
failure = MEMMODEL_SEQ_CST;
success = MEMMODEL_SEQ_CST;
}
if (!flag_inline_atomics)
return NULL_RTX;
@ -7160,7 +7160,7 @@ expand_ifn_atomic_op_fetch_cmp_0 (gcall *call)
EXP is the call expression. */
static rtx
expand_builtin_atomic_clear (tree exp)
expand_builtin_atomic_clear (tree exp)
{
machine_mode mode = int_mode_for_size (BOOL_TYPE_SIZE, 0).require ();
rtx mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
@ -7272,9 +7272,9 @@ fold_builtin_atomic_always_lock_free (tree arg0, tree arg1)
/* Return true if the parameters to call EXP represent an object which will
always generate lock free instructions. The first argument represents the
size of the object, and the second parameter is a pointer to the object
itself. If NULL is passed for the object, then the result is based on
typical alignment for an object of the specified size. Otherwise return
size of the object, and the second parameter is a pointer to the object
itself. If NULL is passed for the object, then the result is based on
typical alignment for an object of the specified size. Otherwise return
false. */
static rtx
@ -7296,7 +7296,7 @@ expand_builtin_atomic_always_lock_free (tree exp)
return const0_rtx;
}
/* Return a one or zero if it can be determined that object ARG1 of size ARG
/* Return a one or zero if it can be determined that object ARG1 of size ARG
is lock free on this architecture. */
static tree
@ -7304,7 +7304,7 @@ fold_builtin_atomic_is_lock_free (tree arg0, tree arg1)
{
if (!flag_inline_atomics)
return NULL_TREE;
/* If it isn't always lock free, don't generate a result. */
if (fold_builtin_atomic_always_lock_free (arg0, arg1) == boolean_true_node)
return boolean_true_node;
@ -7314,9 +7314,9 @@ fold_builtin_atomic_is_lock_free (tree arg0, tree arg1)
/* Return true if the parameters to call EXP represent an object which will
always generate lock free instructions. The first argument represents the
size of the object, and the second parameter is a pointer to the object
itself. If NULL is passed for the object, then the result is based on
typical alignment for an object of the specified size. Otherwise return
size of the object, and the second parameter is a pointer to the object
itself. If NULL is passed for the object, then the result is based on
typical alignment for an object of the specified size. Otherwise return
NULL*/
static rtx
@ -7333,7 +7333,7 @@ expand_builtin_atomic_is_lock_free (tree exp)
}
if (!flag_inline_atomics)
return NULL_RTX;
return NULL_RTX;
/* If the value is known at compile time, return the RTX for it. */
size = fold_builtin_atomic_is_lock_free (arg0, arg1);
@ -7407,7 +7407,7 @@ expand_builtin_set_thread_pointer (tree exp)
{
class expand_operand op;
rtx val = expand_expr (CALL_EXPR_ARG (exp, 0), NULL_RTX,
Pmode, EXPAND_NORMAL);
Pmode, EXPAND_NORMAL);
create_input_operand (&op, val, Pmode);
expand_insn (icode, 1, &op);
return;
@ -7523,7 +7523,7 @@ expand_builtin_goacc_parlevel_id_size (tree exp, rtx target, int ignore)
LENGTH is the number of chars to compare;
CONST_STR_N indicates which source string is the constant string;
IS_MEMCMP indicates whether it's a memcmp or strcmp.
to: (assume const_str_n is 2, i.e., arg2 is a constant string)
target = (int) (unsigned char) var_str[0]
@ -8568,7 +8568,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
if (!target || !register_operand (target, mode))
target = gen_reg_rtx (mode);
mode = get_builtin_sync_mode
mode = get_builtin_sync_mode
(fcode - BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1);
target = expand_builtin_compare_and_swap (mode, exp, true, target);
if (target)
@ -8580,7 +8580,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4:
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8:
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16:
mode = get_builtin_sync_mode
mode = get_builtin_sync_mode
(fcode - BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1);
target = expand_builtin_compare_and_swap (mode, exp, false, target);
if (target)
@ -8631,7 +8631,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
unsigned int nargs, z;
vec<tree, va_gc> *vec;
mode =
mode =
get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1);
target = expand_builtin_atomic_compare_exchange (mode, exp, target);
if (target)
@ -8680,7 +8680,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
{
enum built_in_function lib;
mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_ADD_FETCH_1);
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_ADD_1 +
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_ADD_1 +
(fcode - BUILT_IN_ATOMIC_ADD_FETCH_1));
target = expand_builtin_atomic_fetch_op (mode, exp, target, PLUS, true,
ignore, lib);
@ -8696,7 +8696,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
{
enum built_in_function lib;
mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_SUB_FETCH_1);
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_SUB_1 +
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_SUB_1 +
(fcode - BUILT_IN_ATOMIC_SUB_FETCH_1));
target = expand_builtin_atomic_fetch_op (mode, exp, target, MINUS, true,
ignore, lib);
@ -8712,7 +8712,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
{
enum built_in_function lib;
mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_AND_FETCH_1);
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_AND_1 +
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_AND_1 +
(fcode - BUILT_IN_ATOMIC_AND_FETCH_1));
target = expand_builtin_atomic_fetch_op (mode, exp, target, AND, true,
ignore, lib);
@ -8728,7 +8728,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
{
enum built_in_function lib;
mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_NAND_FETCH_1);
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_NAND_1 +
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_NAND_1 +
(fcode - BUILT_IN_ATOMIC_NAND_FETCH_1));
target = expand_builtin_atomic_fetch_op (mode, exp, target, NOT, true,
ignore, lib);
@ -8744,7 +8744,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
{
enum built_in_function lib;
mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_XOR_FETCH_1);
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_XOR_1 +
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_XOR_1 +
(fcode - BUILT_IN_ATOMIC_XOR_FETCH_1));
target = expand_builtin_atomic_fetch_op (mode, exp, target, XOR, true,
ignore, lib);
@ -8760,7 +8760,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
{
enum built_in_function lib;
mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_OR_FETCH_1);
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_OR_1 +
lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_OR_1 +
(fcode - BUILT_IN_ATOMIC_OR_FETCH_1));
target = expand_builtin_atomic_fetch_op (mode, exp, target, IOR, true,
ignore, lib);
@ -8779,7 +8779,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
if (target)
return target;
break;
case BUILT_IN_ATOMIC_FETCH_SUB_1:
case BUILT_IN_ATOMIC_FETCH_SUB_2:
case BUILT_IN_ATOMIC_FETCH_SUB_4:
@ -8803,7 +8803,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
if (target)
return target;
break;
case BUILT_IN_ATOMIC_FETCH_NAND_1:
case BUILT_IN_ATOMIC_FETCH_NAND_2:
case BUILT_IN_ATOMIC_FETCH_NAND_4:
@ -8815,7 +8815,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
if (target)
return target;
break;
case BUILT_IN_ATOMIC_FETCH_XOR_1:
case BUILT_IN_ATOMIC_FETCH_XOR_2:
case BUILT_IN_ATOMIC_FETCH_XOR_4:
@ -8827,7 +8827,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
if (target)
return target;
break;
case BUILT_IN_ATOMIC_FETCH_OR_1:
case BUILT_IN_ATOMIC_FETCH_OR_2:
case BUILT_IN_ATOMIC_FETCH_OR_4:
@ -8848,7 +8848,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
case BUILT_IN_ATOMIC_CLEAR:
return expand_builtin_atomic_clear (exp);
case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE:
return expand_builtin_atomic_always_lock_free (exp);

View file

@ -237,7 +237,7 @@ along with GCC; see the file COPYING3. If not see
false, true, true, ATTRS, false, flag_tm)
/* Builtin used by the implementation of libsanitizer. These
functions are mapped to the actual implementation of the
functions are mapped to the actual implementation of the
libtsan library. */
#undef DEF_SANITIZER_BUILTIN
#define DEF_SANITIZER_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
@ -1105,7 +1105,7 @@ DEF_BUILTIN_STUB (BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX, "__builtin_alloca_with_ali
equality with zero. */
DEF_BUILTIN_STUB (BUILT_IN_MEMCMP_EQ, "__builtin_memcmp_eq")
/* An internal version of strcmp/strncmp, used when the result is only
/* An internal version of strcmp/strncmp, used when the result is only
tested for equality with zero. */
DEF_BUILTIN_STUB (BUILT_IN_STRCMP_EQ, "__builtin_strcmp_eq")
DEF_BUILTIN_STUB (BUILT_IN_STRNCMP_EQ, "__builtin_strncmp_eq")

View file

@ -3288,7 +3288,7 @@ dump_ada_declaration (pretty_printer *pp, tree t, tree type, int spc)
}
}
TREE_VISITED (t) = 1;
TREE_VISITED (t) = 1;
if (is_interface)
{
pp_string (pp, "limited interface -- ");

View file

@ -628,11 +628,11 @@ const struct attribute_spec c_common_gnu_attributes[] =
{ "tainted_args", 0, 0, true, false, false, false,
handle_tainted_args_attribute, NULL },
{ "fd_arg", 1, 1, false, true, true, false,
handle_fd_arg_attribute, NULL},
handle_fd_arg_attribute, NULL},
{ "fd_arg_read", 1, 1, false, true, true, false,
handle_fd_arg_attribute, NULL},
{ "fd_arg_write", 1, 1, false, true, true, false,
handle_fd_arg_attribute, NULL},
handle_fd_arg_attribute, NULL},
{ "flag_enum", 0, 0, false, true, false, false,
handle_flag_enum_attribute, NULL },
{ "null_terminated_string_arg", 1, 1, false, true, true, false,
@ -5036,7 +5036,7 @@ handle_fd_arg_attribute (tree *node, tree name, tree args,
if (positional_argument (*node, name, TREE_VALUE (args), INTEGER_TYPE))
return NULL_TREE;
*no_add_attrs = true;
*no_add_attrs = true;
return NULL_TREE;
}

View file

@ -3308,14 +3308,14 @@ shorten_compare (location_t loc, tree *op0_ptr, tree *op1_ptr,
the comparison isn't an issue, so suppress the
warning. */
tree folded_op0 = fold_for_warn (op0);
bool warn =
bool warn =
warn_type_limits && !in_system_header_at (loc)
&& !(TREE_CODE (folded_op0) == INTEGER_CST
&& !TREE_OVERFLOW (convert (c_common_signed_type (type),
folded_op0)))
/* Do not warn for enumeration types. */
&& (TREE_CODE (expr_original_type (folded_op0)) != ENUMERAL_TYPE);
switch (code)
{
case GE_EXPR:
@ -7196,9 +7196,9 @@ complete_flexible_array_elts (tree init)
}
/* Like c_mark_addressable but don't check register qualifier. */
void
void
c_common_mark_addressable_vec (tree t)
{
{
while (handled_component_p (t) || TREE_CODE (t) == C_MAYBE_CONST_EXPR)
{
if (TREE_CODE (t) == C_MAYBE_CONST_EXPR)
@ -7586,7 +7586,7 @@ get_atomic_generic_size (location_t loc, tree function,
/* Types must be compile time constant sizes. */
if (!tree_fits_uhwi_p ((TYPE_SIZE_UNIT (TREE_TYPE (type_0)))))
{
error_at (loc,
error_at (loc,
"argument 1 of %qE must be a pointer to a constant size type",
function);
return 0;
@ -7597,7 +7597,7 @@ get_atomic_generic_size (location_t loc, tree function,
/* Zero size objects are not allowed. */
if (size_0 == 0)
{
error_at (loc,
error_at (loc,
"argument 1 of %qE must be a pointer to a nonzero size object",
function);
return 0;
@ -7711,12 +7711,12 @@ get_atomic_generic_size (location_t loc, tree function,
/* This will take an __atomic_ generic FUNCTION call, and add a size parameter N
at the beginning of the parameter list PARAMS representing the size of the
objects. This is to match the library ABI requirement. LOC is the location
of the function call.
of the function call.
The new function is returned if it needed rebuilding, otherwise NULL_TREE is
returned to allow the external call to be constructed. */
static tree
add_atomic_size_parameter (unsigned n, location_t loc, tree function,
add_atomic_size_parameter (unsigned n, location_t loc, tree function,
vec<tree, va_gc> *params)
{
tree size_node;
@ -7774,12 +7774,12 @@ atomic_size_supported_p (int n)
PARAMS is the argument list for the call. The return value is non-null
TRUE is returned if it is translated into the proper format for a call to the
external library, and NEW_RETURN is set the tree for that function.
FALSE is returned if processing for the _N variation is required, and
FALSE is returned if processing for the _N variation is required, and
NEW_RETURN is set to the return value the result is copied into. */
static bool
resolve_overloaded_atomic_exchange (location_t loc, tree function,
resolve_overloaded_atomic_exchange (location_t loc, tree function,
vec<tree, va_gc> *params, tree *new_return)
{
{
tree p0, p1, p2, p3;
tree I_type, I_type_ptr;
int n = get_atomic_generic_size (loc, function, params);
@ -7807,14 +7807,14 @@ resolve_overloaded_atomic_exchange (location_t loc, tree function,
p1 = (*params)[1];
p2 = (*params)[2];
p3 = (*params)[3];
/* Create pointer to appropriate size. */
I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
I_type_ptr = build_pointer_type (I_type);
/* Convert object pointer to required type. */
p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
(*params)[0] = p0;
(*params)[0] = p0;
/* Convert new value to required type, and dereference it.
If *p1 type can have padding or may involve floating point which
could e.g. be promoted to wider precision and demoted afterwards,
@ -7836,7 +7836,7 @@ resolve_overloaded_atomic_exchange (location_t loc, tree function,
}
/* This will process an __atomic_compare_exchange function call, determine
/* This will process an __atomic_compare_exchange function call, determine
whether it needs to be mapped to the _N variation, or turned into a lib call.
LOC is the location of the builtin call.
FUNCTION is the DECL that has been invoked;
@ -7846,10 +7846,10 @@ resolve_overloaded_atomic_exchange (location_t loc, tree function,
FALSE is returned if processing for the _N variation is required. */
static bool
resolve_overloaded_atomic_compare_exchange (location_t loc, tree function,
vec<tree, va_gc> *params,
resolve_overloaded_atomic_compare_exchange (location_t loc, tree function,
vec<tree, va_gc> *params,
tree *new_return)
{
{
tree p0, p1, p2;
tree I_type, I_type_ptr;
int n = get_atomic_generic_size (loc, function, params);
@ -7864,7 +7864,7 @@ resolve_overloaded_atomic_compare_exchange (location_t loc, tree function,
/* If not a lock-free size, change to the library generic format. */
if (!atomic_size_supported_p (n))
{
/* The library generic format does not have the weak parameter, so
/* The library generic format does not have the weak parameter, so
remove it from the param list. Since a parameter has been removed,
we can be sure that there is room for the SIZE_T parameter, meaning
there will not be a recursive rebuilding of the parameter list, so
@ -7887,7 +7887,7 @@ resolve_overloaded_atomic_compare_exchange (location_t loc, tree function,
p0 = (*params)[0];
p1 = (*params)[1];
p2 = (*params)[2];
/* Create pointer to appropriate size. */
I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
I_type_ptr = build_pointer_type (I_type);
@ -7924,13 +7924,13 @@ resolve_overloaded_atomic_compare_exchange (location_t loc, tree function,
PARAMS is the argument list for the call. The return value is non-null
TRUE is returned if it is translated into the proper format for a call to the
external library, and NEW_RETURN is set the tree for that function.
FALSE is returned if processing for the _N variation is required, and
FALSE is returned if processing for the _N variation is required, and
NEW_RETURN is set to the return value the result is copied into. */
static bool
resolve_overloaded_atomic_load (location_t loc, tree function,
resolve_overloaded_atomic_load (location_t loc, tree function,
vec<tree, va_gc> *params, tree *new_return)
{
{
tree p0, p1, p2;
tree I_type, I_type_ptr;
int n = get_atomic_generic_size (loc, function, params);
@ -7957,7 +7957,7 @@ resolve_overloaded_atomic_load (location_t loc, tree function,
p0 = (*params)[0];
p1 = (*params)[1];
p2 = (*params)[2];
/* Create pointer to appropriate size. */
I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
I_type_ptr = build_pointer_type (I_type);
@ -7984,13 +7984,13 @@ resolve_overloaded_atomic_load (location_t loc, tree function,
PARAMS is the argument list for the call. The return value is non-null
TRUE is returned if it is translated into the proper format for a call to the
external library, and NEW_RETURN is set the tree for that function.
FALSE is returned if processing for the _N variation is required, and
FALSE is returned if processing for the _N variation is required, and
NEW_RETURN is set to the return value the result is copied into. */
static bool
resolve_overloaded_atomic_store (location_t loc, tree function,
resolve_overloaded_atomic_store (location_t loc, tree function,
vec<tree, va_gc> *params, tree *new_return)
{
{
tree p0, p1;
tree I_type, I_type_ptr;
int n = get_atomic_generic_size (loc, function, params);
@ -8016,7 +8016,7 @@ resolve_overloaded_atomic_store (location_t loc, tree function,
p0 = (*params)[0];
p1 = (*params)[1];
/* Create pointer to appropriate size. */
I_type = builtin_type_for_size (BITS_PER_UNIT * n, 1);
I_type_ptr = build_pointer_type (I_type);
@ -8029,7 +8029,7 @@ resolve_overloaded_atomic_store (location_t loc, tree function,
p1 = build_indirect_ref (loc, p1, RO_UNARY_STAR);
p1 = build1 (VIEW_CONVERT_EXPR, I_type, p1);
(*params)[1] = p1;
/* The memory model is in the right spot already. Return is void. */
*new_return = NULL_TREE;
@ -9610,7 +9610,7 @@ c_family_tests (void)
#endif /* #if CHECKING_P */
/* Attempt to locate a suitable location within FILE for a
#include directive to be inserted before.
#include directive to be inserted before.
LOC is the location of the relevant diagnostic.
Attempt to return the location within FILE immediately

View file

@ -80,7 +80,7 @@ enum rid
are keywords only in specific contexts) */
RID_IN, RID_OUT, RID_INOUT, RID_BYCOPY, RID_BYREF, RID_ONEWAY,
/* ObjC ("PATTR" reserved words - they do not appear after a '@'
/* ObjC ("PATTR" reserved words - they do not appear after a '@'
and are keywords only as property attributes) */
RID_GETTER, RID_SETTER,
RID_READONLY, RID_READWRITE,
@ -197,7 +197,7 @@ enum rid
RID_AT_PRIVATE, RID_AT_PROTECTED, RID_AT_PUBLIC, RID_AT_PACKAGE,
RID_AT_PROTOCOL, RID_AT_SELECTOR,
RID_AT_THROW, RID_AT_TRY, RID_AT_CATCH,
RID_AT_FINALLY, RID_AT_SYNCHRONIZED,
RID_AT_FINALLY, RID_AT_SYNCHRONIZED,
RID_AT_OPTIONAL, RID_AT_REQUIRED, RID_AT_PROPERTY,
RID_AT_SYNTHESIZE, RID_AT_DYNAMIC,
RID_AT_INTERFACE,

View file

@ -660,7 +660,7 @@ c_cpp_builtins_optimize_pragma (cpp_reader *pfile, tree prev_tree,
/* This function will emit cpp macros to indicate the presence of various lock
free atomic operations. */
static void
cpp_atomic_builtins (cpp_reader *pfile)
{
@ -741,26 +741,26 @@ cpp_atomic_builtins (cpp_reader *pfile)
lock free. */
#define SIZEOF_NODE(T) (tree_to_uhwi (TYPE_SIZE_UNIT (T)))
#define SWAP_INDEX(T) ((SIZEOF_NODE (T) < SWAP_LIMIT) ? SIZEOF_NODE (T) : 0)
builtin_define_with_int_value ("__GCC_ATOMIC_BOOL_LOCK_FREE",
builtin_define_with_int_value ("__GCC_ATOMIC_BOOL_LOCK_FREE",
(have_swap[SWAP_INDEX (boolean_type_node)]? 2 : 1));
builtin_define_with_int_value ("__GCC_ATOMIC_CHAR_LOCK_FREE",
builtin_define_with_int_value ("__GCC_ATOMIC_CHAR_LOCK_FREE",
(have_swap[SWAP_INDEX (signed_char_type_node)]? 2 : 1));
if (flag_char8_t)
builtin_define_with_int_value ("__GCC_ATOMIC_CHAR8_T_LOCK_FREE",
(have_swap[SWAP_INDEX (char8_type_node)]? 2 : 1));
builtin_define_with_int_value ("__GCC_ATOMIC_CHAR16_T_LOCK_FREE",
builtin_define_with_int_value ("__GCC_ATOMIC_CHAR16_T_LOCK_FREE",
(have_swap[SWAP_INDEX (char16_type_node)]? 2 : 1));
builtin_define_with_int_value ("__GCC_ATOMIC_CHAR32_T_LOCK_FREE",
builtin_define_with_int_value ("__GCC_ATOMIC_CHAR32_T_LOCK_FREE",
(have_swap[SWAP_INDEX (char32_type_node)]? 2 : 1));
builtin_define_with_int_value ("__GCC_ATOMIC_WCHAR_T_LOCK_FREE",
builtin_define_with_int_value ("__GCC_ATOMIC_WCHAR_T_LOCK_FREE",
(have_swap[SWAP_INDEX (wchar_type_node)]? 2 : 1));
builtin_define_with_int_value ("__GCC_ATOMIC_SHORT_LOCK_FREE",
builtin_define_with_int_value ("__GCC_ATOMIC_SHORT_LOCK_FREE",
(have_swap[SWAP_INDEX (short_integer_type_node)]? 2 : 1));
builtin_define_with_int_value ("__GCC_ATOMIC_INT_LOCK_FREE",
builtin_define_with_int_value ("__GCC_ATOMIC_INT_LOCK_FREE",
(have_swap[SWAP_INDEX (integer_type_node)]? 2 : 1));
builtin_define_with_int_value ("__GCC_ATOMIC_LONG_LOCK_FREE",
builtin_define_with_int_value ("__GCC_ATOMIC_LONG_LOCK_FREE",
(have_swap[SWAP_INDEX (long_integer_type_node)]? 2 : 1));
builtin_define_with_int_value ("__GCC_ATOMIC_LLONG_LOCK_FREE",
builtin_define_with_int_value ("__GCC_ATOMIC_LLONG_LOCK_FREE",
(have_swap[SWAP_INDEX (long_long_integer_type_node)]? 2 : 1));
/* If we're dealing with a "set" value that doesn't exactly correspond
@ -787,7 +787,7 @@ cpp_atomic_builtins (cpp_reader *pfile)
psize = POINTER_SIZE_UNITS;
if (psize >= SWAP_LIMIT)
psize = 0;
builtin_define_with_int_value ("__GCC_ATOMIC_POINTER_LOCK_FREE",
builtin_define_with_int_value ("__GCC_ATOMIC_POINTER_LOCK_FREE",
(have_swap[psize]? 2 : 1));
}
@ -1822,7 +1822,7 @@ struct GTY(()) lazy_hex_fp_value_struct
Each builtin_define_float_constants invocation calls
builtin_define_with_hex_fp_value 5 times and builtin_define_float_constants
is called for FLT, DBL, LDBL and up to NUM_FLOATN_NX_TYPES times for
FLTNN*. */
FLTNN*. */
#define LAZY_HEX_FP_VALUES_CNT (5 * (3 + NUM_FLOATN_NX_TYPES))
static GTY(()) struct lazy_hex_fp_value_struct
lazy_hex_fp_values[LAZY_HEX_FP_VALUES_CNT];

View file

@ -150,12 +150,12 @@ format_warning_substr (location_t fmt_string_loc, tree format_string_cst,
/* Check that we have a pointer to a string suitable for use as a format.
The default is to check for a char type.
For objective-c dialects, this is extended to include references to string
objects validated by objc_string_ref_type_p ().
Targets may also provide a string object type that can be used within c and
objects validated by objc_string_ref_type_p ().
Targets may also provide a string object type that can be used within c and
c++ and shared with their respective objective-c dialects. In this case the
reference to a format string is checked for validity via a hook.
The function returns true if strref points to any string type valid for the
The function returns true if strref points to any string type valid for the
language dialect and target. */
bool
@ -206,8 +206,8 @@ handle_format_arg_attribute (tree *node, tree atname,
}
/* Verify that the format_num argument is actually a string reference suitable,
for the language dialect and target (in case the format attribute is in
error). When we know the specific reference type expected, this is also
for the language dialect and target (in case the format attribute is in
error). When we know the specific reference type expected, this is also
checked. */
static bool
check_format_string (const_tree fntype, unsigned HOST_WIDE_INT format_num,
@ -241,7 +241,7 @@ check_format_string (const_tree fntype, unsigned HOST_WIDE_INT format_num,
return true;
/* Now check that the arg matches the expected type. */
is_char_ref =
is_char_ref =
(TYPE_MAIN_VARIANT (TREE_TYPE (ref)) == char_type_node);
fmt_flags = format_flags (expected_format_type);
@ -275,18 +275,18 @@ check_format_string (const_tree fntype, unsigned HOST_WIDE_INT format_num,
*no_add_attrs = true;
return false;
}
/* We will assert that objective-c will support either its own string type
or the target-supplied variant. */
if (!is_objc_sref)
is_target_sref = (*targetcm.string_object_ref_type_p) ((const_tree) ref);
if (expected_format_type == (int) gcc_objc_string_format_type
if (expected_format_type == (int) gcc_objc_string_format_type
&& (is_objc_sref || is_target_sref))
return true;
/* We will allow a target string ref to match only itself. */
if (first_target_format_type
if (first_target_format_type
&& expected_format_type >= first_target_format_type
&& is_target_sref)
return true;
@ -1719,7 +1719,7 @@ check_format_arg (void *ctx, tree format_tree,
if (params == 0)
res->number_other++;
else
else
{
if (res->number_extra_args == 0)
res->extra_arg_loc = EXPR_LOC_OR_LOC (TREE_VALUE (params),
@ -1760,7 +1760,7 @@ check_format_arg (void *ctx, tree format_tree,
}
res->format_string_loc = EXPR_LOC_OR_LOC (format_tree, input_location);
format_tree = TREE_OPERAND (format_tree, 0);
if (format_types[info->format_type].flags
if (format_types[info->format_type].flags
& (int) FMT_FLAG_PARSE_ARG_CONVERT_EXTERNAL)
{
bool objc_str = (info->format_type == gcc_objc_string_format_type);
@ -1768,7 +1768,7 @@ check_format_arg (void *ctx, tree format_tree,
a valid type. */
if (TREE_CODE (format_tree) != CONST_DECL
|| !((objc_str && objc_string_ref_type_p (TREE_TYPE (format_tree)))
|| (*targetcm.string_object_ref_type_p)
|| (*targetcm.string_object_ref_type_p)
((const_tree) TREE_TYPE (format_tree))))
{
res->number_non_literal++;

@ -324,7 +324,7 @@ should_warn_for_misleading_indentation (const token_indent_info &guard_tinfo,
" the size of the code/headers");
if (!flag_large_source_files)
inform (guard_loc,
"adding %<-flarge-source-files%> will allow for more"
"adding %<-flarge-source-files%> will allow for more"
" column-tracking support, at the expense of compilation"
" time and memory");
}

@ -163,7 +163,7 @@ extern void objc_add_property_declaration (location_t, tree,
extern tree objc_maybe_build_component_ref (tree, tree);
extern tree objc_build_class_component_ref (tree, tree);
extern tree objc_maybe_build_modify_expr (tree, tree);
extern tree objc_build_incr_expr_for_property_ref (location_t, enum tree_code,
extern tree objc_build_incr_expr_for_property_ref (location_t, enum tree_code,
tree, tree);
extern void objc_add_synthesize_declaration (location_t, tree);
extern void objc_add_dynamic_declaration (location_t, tree);

@ -204,7 +204,7 @@ c_finish_omp_taskyield (location_t loc)
/* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
the expression to be implemented atomically is LHS opcode= RHS.
the expression to be implemented atomically is LHS opcode= RHS.
For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
opcode= RHS with the new or old content of LHS returned.
LOC is the location of the atomic statement. The value returned
@ -1337,7 +1337,7 @@ c_omp_is_loop_iterator (tree decl, struct c_omp_check_loop_iv_data *d)
/* Helper function called via walk_tree, to diagnose uses
of associated loop IVs inside of lb, b and incr expressions
of OpenMP loops. */
static tree
c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
{

@ -147,7 +147,7 @@ static struct deferred_opt
} *deferred_opts;
extern const unsigned int
extern const unsigned int
c_family_lang_mask = (CL_C | CL_CXX | CL_ObjC | CL_ObjCXX);
/* Defer option CODE with argument ARG. */
@ -778,15 +778,15 @@ c_common_handle_option (size_t scode, const char *arg, HOST_WIDE_INT value,
switch (c_language)
{
case clk_c:
C_handle_option_auto (&global_options, &global_options_set,
scode, arg, value,
C_handle_option_auto (&global_options, &global_options_set,
scode, arg, value,
c_family_lang_mask, kind,
loc, handlers, global_dc);
break;
case clk_objc:
ObjC_handle_option_auto (&global_options, &global_options_set,
scode, arg, value,
scode, arg, value,
c_family_lang_mask, kind,
loc, handlers, global_dc);
break;
@ -1541,7 +1541,7 @@ sanitize_cpp_opts (void)
/* Wlong-long is disabled by default. It is enabled by:
[-Wpedantic | -Wtraditional] -std=[gnu|c]++98 ; or
[-Wpedantic | -Wtraditional] -std=non-c99
[-Wpedantic | -Wtraditional] -std=non-c99
Either -Wlong-long or -Wno-long-long override any other settings.
??? These conditions should be handled in c.opt. */
@ -1784,13 +1784,13 @@ cb_file_change (cpp_reader *reader, const line_map_ordinary *new_map)
/* We're starting the main file. Inform the FE of that. */
lang_hooks.preprocess_main_file (reader, line_table, new_map);
if (new_map
if (new_map
&& (new_map->reason == LC_ENTER || new_map->reason == LC_RENAME))
{
/* Signal to plugins that a file is included. This could happen
several times with the same file path, e.g. because of
several '#include' or '#line' directives... */
invoke_plugin_callbacks
invoke_plugin_callbacks
(PLUGIN_INCLUDE_FILE,
const_cast<char*> (ORDINARY_MAP_FILE_NAME (new_map)));
}

@ -207,7 +207,7 @@ class token_streamer
print.streamer = this;
}
void begin_pragma ()
void begin_pragma ()
{
in_pragma = true;
}

@ -1880,7 +1880,7 @@ init_pragma (void)
#endif
global_sso = default_sso;
c_register_pragma (0, "scalar_storage_order",
c_register_pragma (0, "scalar_storage_order",
handle_pragma_scalar_storage_order);
/* Allow plugins to register their own pragmas. */

@ -112,5 +112,5 @@ linkage the declaration would normally have. An example of such function\n\
is WinMain on Win32 targets.",
bool, (const char*),
NULL)
HOOK_VECTOR_END (C90_EMPTY_HACK)

@ -664,7 +664,7 @@ ubsan_maybe_instrument_reference (tree *stmt_p)
UBSAN_REF_BINDING);
if (op)
{
if (TREE_CODE (stmt) == NOP_EXPR)
if (TREE_CODE (stmt) == NOP_EXPR)
TREE_OPERAND (stmt, 0) = op;
else
*stmt_p = op;

@ -127,7 +127,7 @@ objc_start_protocol (tree ARG_UNUSED (proto),
{
}
void
void
objc_set_method_opt (bool ARG_UNUSED (optional))
{
}
@ -217,7 +217,7 @@ objc_finish_method_definition (tree ARG_UNUSED (fndecl))
{
}
bool
bool
objc_method_decl (enum tree_code ARG_UNUSED(opcode))
{
return false;
@ -316,7 +316,7 @@ objc_get_class_ivars (tree ARG_UNUSED (name))
}
void
objc_add_property_declaration (location_t ARG_UNUSED (location),
objc_add_property_declaration (location_t ARG_UNUSED (location),
tree ARG_UNUSED (decl),
vec<property_attribute_info *>&
/*prop_attr_list*/)
@ -363,19 +363,19 @@ objc_build_incr_expr_for_property_ref (location_t ARG_UNUSED (location),
}
void
objc_add_synthesize_declaration (location_t ARG_UNUSED (start_locus),
objc_add_synthesize_declaration (location_t ARG_UNUSED (start_locus),
tree ARG_UNUSED (property_and_ivar_list))
{
}
void
objc_add_dynamic_declaration (location_t ARG_UNUSED (start_locus),
objc_add_dynamic_declaration (location_t ARG_UNUSED (start_locus),
tree ARG_UNUSED (property_list))
{
}
const char *
objc_maybe_printable_name (tree ARG_UNUSED (decl),
objc_maybe_printable_name (tree ARG_UNUSED (decl),
int ARG_UNUSED (v))
{
return NULL;
@ -449,7 +449,7 @@ objc_string_ref_type_p (tree ARG_UNUSED (strp))
}
void
objc_check_format_arg (tree ARG_UNUSED (format_arg),
objc_check_format_arg (tree ARG_UNUSED (format_arg),
tree ARG_UNUSED (args_list))
{
}

@ -2791,7 +2791,7 @@ merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype)
= TYPE_NEXT_VARIANT (TYPE_NEXT_VARIANT (t));
break;
}
}
}
else
for (tree t = TYPE_MAIN_VARIANT (remove); ;
t = TYPE_NEXT_VARIANT (t))
@ -11432,7 +11432,7 @@ void
finish_function (location_t end_loc)
{
tree fndecl = current_function_decl;
if (c_dialect_objc ())
objc_finish_function ();
@ -13621,7 +13621,7 @@ collect_source_refs (void)
unsigned i;
FOR_EACH_VEC_ELT (*all_translation_units, i, t)
{
{
decls = DECL_INITIAL (t);
for (decl = BLOCK_VARS (decls); decl; decl = TREE_CHAIN (decl))
if (!DECL_IS_UNDECLARED_BUILTIN (decl))

@ -80,7 +80,7 @@ along with GCC; see the file COPYING3. If not see
In finish_decl(), if the decl is static, has incomplete
struct/union/enum type, it is appended to incomplete_record_decls.
In c_parser_translation_unit(), we iterate over incomplete_record_decls
and report error if any of the decls are still incomplete. */
and report error if any of the decls are still incomplete. */
vec<tree> incomplete_record_decls;
@ -412,7 +412,7 @@ c_lex_one_token (c_parser *parser, c_token *token, bool raw = false)
/* Else they are not special keywords.
*/
}
else if (c_dialect_objc ()
else if (c_dialect_objc ()
&& (OBJC_IS_AT_KEYWORD (rid_code)
|| OBJC_IS_CXX_KEYWORD (rid_code)))
{
@ -863,9 +863,9 @@ c_parser_next_token_starts_declspecs (c_parser *parser)
setter/getter on the class. c_token_starts_declspecs() can't
differentiate between the two cases because it only checks the
current token, so we have a special check here. */
if (c_dialect_objc ()
if (c_dialect_objc ()
&& token->type == CPP_NAME
&& token->id_kind == C_ID_CLASSNAME
&& token->id_kind == C_ID_CLASSNAME
&& c_parser_peek_2nd_token (parser)->type == CPP_DOT)
return false;
@ -881,9 +881,9 @@ c_parser_next_tokens_start_declaration (c_parser *parser)
c_token *token = c_parser_peek_token (parser);
/* Same as above. */
if (c_dialect_objc ()
if (c_dialect_objc ()
&& token->type == CPP_NAME
&& token->id_kind == C_ID_CLASSNAME
&& token->id_kind == C_ID_CLASSNAME
&& c_parser_peek_2nd_token (parser)->type == CPP_DOT)
return false;
@ -2449,7 +2449,7 @@ c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok,
return;
if (specs->attrs)
{
warning_at (c_parser_peek_token (parser)->location,
warning_at (c_parser_peek_token (parser)->location,
OPT_Wattributes,
"prefix attributes are ignored for methods");
specs->attrs = NULL_TREE;
@ -2484,12 +2484,12 @@ c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok,
return;
if (specs->attrs)
{
warning_at (c_parser_peek_token (parser)->location,
warning_at (c_parser_peek_token (parser)->location,
OPT_Wattributes,
"prefix attributes are ignored for implementations");
specs->attrs = NULL_TREE;
}
c_parser_objc_class_definition (parser, NULL_TREE);
c_parser_objc_class_definition (parser, NULL_TREE);
return;
}
break;
@ -2542,7 +2542,7 @@ c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok,
should diagnose if there were no declaration specifiers) or a
function definition (in which case the diagnostic for
implicit int suffices). */
declarator = c_parser_declarator (parser,
declarator = c_parser_declarator (parser,
specs->typespec_kind != ctsk_none,
C_DTR_NORMAL, &dummy);
if (declarator == NULL)
@ -2862,7 +2862,7 @@ c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok,
if (d)
*objc_foreach_object_declaration = d;
else
*objc_foreach_object_declaration = error_mark_node;
*objc_foreach_object_declaration = error_mark_node;
}
}
if (c_parser_next_token_is (parser, CPP_COMMA))
@ -4848,7 +4848,7 @@ c_parser_parms_declarator (c_parser *parser, bool id_list_ok, tree attrs,
&& !attrs
&& c_parser_next_token_is (parser, CPP_NAME)
&& c_parser_peek_token (parser)->id_kind == C_ID_ID
/* Look ahead to detect typos in type names. */
&& c_parser_peek_2nd_token (parser)->type != CPP_NAME
&& c_parser_peek_2nd_token (parser)->type != CPP_MULT
@ -8583,7 +8583,7 @@ c_parser_do_statement (c_parser *parser, bool ivdep, unsigned short unroll,
Here is the canonical example of the first variant:
for (object in array) { do something with object }
we call the first expression ("object") the "object_expression" and
we call the first expression ("object") the "object_expression" and
the second expression ("array") the "collection_expression".
object_expression must be an lvalue of type "id" (a generic Objective-C
object) because the loop works by assigning to object_expression the
@ -8664,10 +8664,10 @@ c_parser_for_statement (c_parser *parser, bool ivdep, unsigned short unroll,
else if (c_parser_next_tokens_start_declaration (parser)
|| c_parser_nth_token_starts_std_attributes (parser, 1))
{
c_parser_declaration_or_fndef (parser, true, true, true, true, true,
c_parser_declaration_or_fndef (parser, true, true, true, true, true,
&object_expression);
parser->objc_could_be_foreach_context = false;
if (c_parser_next_token_is_keyword (parser, RID_IN))
{
c_parser_consume_token (parser);
@ -8698,7 +8698,7 @@ c_parser_for_statement (c_parser *parser, bool ivdep, unsigned short unroll,
c_parser_declaration_or_fndef (parser, true, true, true, true,
true, &object_expression);
parser->objc_could_be_foreach_context = false;
restore_extension_diagnostics (ext);
if (c_parser_next_token_is_keyword (parser, RID_IN))
{
@ -9477,7 +9477,7 @@ c_parser_expr_no_commas (c_parser *parser, struct c_expr *after,
exp_location = c_parser_peek_token (parser)->location;
rhs = c_parser_expr_no_commas (parser, NULL);
rhs = convert_lvalue_to_rvalue (exp_location, rhs, true, true);
ret.value = build_modify_expr (op_location, lhs.value, lhs.original_type,
code, exp_location, rhs.value,
rhs.original_type);
@ -10116,7 +10116,7 @@ c_parser_unary_expression (c_parser *parser)
c_parser_consume_token (parser);
exp_loc = c_parser_peek_token (parser)->location;
op = c_parser_cast_expression (parser, NULL);
op = default_function_array_read_conversion (exp_loc, op);
return parser_build_unary_op (op_loc, PREDECREMENT_EXPR, op);
case CPP_AND:
@ -10614,7 +10614,7 @@ struct c_generic_association
};
/* Parse a generic-selection. (C11 6.5.1.1).
generic-selection:
_Generic ( generic-controlling-operand , generic-assoc-list )
@ -10627,7 +10627,7 @@ struct c_generic_association
generic-assoc-list:
generic-association
generic-assoc-list , generic-association
generic-association:
type-name : assignment-expression
default : assignment-expression
@ -11127,7 +11127,7 @@ c_parser_postfix_expression (c_parser *parser)
component = component_tok->value;
location_t end_loc = component_tok->get_finish ();
c_parser_consume_token (parser);
expr.value = objc_build_class_component_ref (class_name,
expr.value = objc_build_class_component_ref (class_name,
component);
set_c_expr_source_range (&expr, loc, end_loc);
break;
@ -13678,7 +13678,7 @@ c_parser_objc_class_instance_variables (c_parser *parser)
/* There is a syntax error. We want to skip the offending
tokens up to the next ';' (included) or '}'
(excluded). */
/* First, skip manually a ')' or ']'. This is because they
reduce the nesting level, so c_parser_skip_until_found()
wouldn't be able to skip past them. */
@ -13984,32 +13984,32 @@ c_parser_objc_methodproto (c_parser *parser)
/* Forget protocol qualifiers now. */
parser->objc_pq_context = false;
/* Do not allow the presence of attributes to hide an erroneous
/* Do not allow the presence of attributes to hide an erroneous
method implementation in the interface section. */
if (!c_parser_next_token_is (parser, CPP_SEMICOLON))
{
c_parser_error (parser, "expected %<;%>");
return;
}
if (decl != error_mark_node)
objc_add_method_declaration (is_class_method, decl, attributes);
c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* If we are at a position that method attributes may be present, check that
there are not any parsed already (a syntax error) and then collect any
/* If we are at a position that method attributes may be present, check that
there are not any parsed already (a syntax error) and then collect any
specified at the current location. Finally, if new attributes were present,
check that the next token is legal ( ';' for decls and '{' for defs). */
static bool
static bool
c_parser_objc_maybe_method_attributes (c_parser* parser, tree* attributes)
{
bool bad = false;
if (*attributes)
{
c_parser_error (parser,
c_parser_error (parser,
"method attributes must be specified at the end only");
*attributes = NULL_TREE;
bad = true;
@ -14029,7 +14029,7 @@ c_parser_objc_maybe_method_attributes (c_parser* parser, tree* attributes)
return bad;
/* We've got attributes, but not at the end. */
c_parser_error (parser,
c_parser_error (parser,
"expected %<;%> or %<{%> after method attribute definition");
return true;
}
@ -14135,7 +14135,7 @@ c_parser_objc_method_decl (c_parser *parser, bool is_class_method,
{
ellipsis = true;
c_parser_consume_token (parser);
attr_err |= c_parser_objc_maybe_method_attributes
attr_err |= c_parser_objc_maybe_method_attributes
(parser, attributes) ;
break;
}
@ -14265,7 +14265,7 @@ c_parser_objc_protocol_refs (c_parser *parser)
where '...' is to be interpreted literally, that is, it means CPP_ELLIPSIS.
PS: This function is identical to cp_parser_objc_try_catch_finally_statement
for C++. Keep them in sync. */
for C++. Keep them in sync. */
static void
c_parser_objc_try_catch_finally_statement (c_parser *parser)
@ -14324,7 +14324,7 @@ c_parser_objc_try_catch_finally_statement (c_parser *parser)
going. */
if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
c_parser_consume_token (parser);
/* If these is no immediate closing parenthesis, the user
probably doesn't know that parenthesis are required at
all (ie, they typed "@catch NSException *e"). So, just
@ -14597,13 +14597,13 @@ c_parser_objc_keywordexpr (c_parser *parser)
/* A check, needed in several places, that ObjC interface, implementation or
method definitions are not prefixed by incorrect items. */
static bool
c_parser_objc_diagnose_bad_element_prefix (c_parser *parser,
c_parser_objc_diagnose_bad_element_prefix (c_parser *parser,
struct c_declspecs *specs)
{
if (!specs->declspecs_seen_p || specs->non_sc_seen_p
|| specs->typespec_kind != ctsk_none)
{
c_parser_error (parser,
c_parser_error (parser,
"no type or storage class may be specified here,");
c_parser_skip_to_end_of_block_or_statement (parser);
return true;
@ -17825,7 +17825,7 @@ c_parser_omp_clause_private (c_parser *parser, tree list)
One of: + * - & ^ | && ||
OpenMP 3.1:
reduction-operator:
One of: + * - & ^ | && || max min
@ -20321,8 +20321,8 @@ c_parser_omp_all_clauses (c_parser *parser, omp_clause_mask mask,
clauses = c_parser_omp_clause_allocate (parser, clauses);
c_name = "allocate";
break;
case PRAGMA_OMP_CLAUSE_LINEAR:
clauses = c_parser_omp_clause_linear (parser, clauses);
case PRAGMA_OMP_CLAUSE_LINEAR:
clauses = c_parser_omp_clause_linear (parser, clauses);
c_name = "linear";
break;
case PRAGMA_OMP_CLAUSE_AFFINITY:
@ -25786,7 +25786,7 @@ c_finish_omp_declare_simd (c_parser *parser, tree fndecl, tree parms,
parser->tokens = clauses.address ();
parser->tokens_avail = clauses.length ();
/* c_parser_omp_declare_simd pushed 2 extra CPP_EOF tokens at the end. */
while (parser->tokens_avail > 3)
{

@ -3545,7 +3545,7 @@ build_function_call_vec (location_t loc, vec<location_t> arg_loc,
fundecl = function;
if (!orig_fundecl)
orig_fundecl = fundecl;
/* Atomic functions have type checking/casting already done. They are
/* Atomic functions have type checking/casting already done. They are
often rewritten and don't match the original parameter list. */
if (name && startswith (IDENTIFIER_POINTER (name), "__atomic_"))
origtypes = NULL;

@ -700,7 +700,7 @@ expr_stmt:
gimple-assign-statement:
gimple-unary-expression = gimple-assign-rhs
gimple-assign-rhs:
gimple-cast-expression
gimple-unary-expression
@ -1324,7 +1324,7 @@ c_parser_parse_ssa_name_id (tree id, unsigned *version, unsigned *ver_offset)
/* Get at the actual SSA name ID with VERSION starting at VER_OFFSET.
TYPE is the type if the SSA name is being declared. */
static tree
static tree
c_parser_parse_ssa_name (gimple_parser &parser,
tree id, tree type, unsigned version,
unsigned ver_offset)
@ -1341,7 +1341,7 @@ c_parser_parse_ssa_name (gimple_parser &parser,
{
if (! type)
{
c_parser_error (parser, "SSA name undeclared");
c_parser_error (parser, "SSA name undeclared");
return error_mark_node;
}
name = make_ssa_name_fn (cfun, type, NULL, version);
@ -1363,7 +1363,7 @@ c_parser_parse_ssa_name (gimple_parser &parser,
XDELETEVEC (var_name);
if (! parent || parent == error_mark_node)
{
c_parser_error (parser, "base variable or SSA name undeclared");
c_parser_error (parser, "base variable or SSA name undeclared");
return error_mark_node;
}
if (!(VAR_P (parent)

@ -654,7 +654,7 @@ setup_save_areas (void)
}
else
{
/* We are not sharing slots.
/* We are not sharing slots.
Run through all the call-used hard-registers and allocate
space for each in the caller-save area. Try to allocate space
@ -851,7 +851,7 @@ save_call_clobbered_regs (void)
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
if (TEST_HARD_REG_BIT (hard_regs_saved, regno))
n_regs_saved++;
if (cheap
&& HARD_REGISTER_P (cheap)
&& callee_abi.clobbers_reg_p (GET_MODE (cheap),

@ -4695,7 +4695,7 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
rtx val = argvec[argnum].value;
rtx reg = argvec[argnum].reg;
int partial = argvec[argnum].partial;
/* Handle calls that pass values in multiple non-contiguous
locations. The PA64 has examples of this for library calls. */
if (reg != 0 && GET_CODE (reg) == PARALLEL)

@ -52,7 +52,7 @@ ccmp_tree_comparison_p (tree t, basic_block bb)
return (TREE_CODE (TREE_TYPE (t)) == BOOLEAN_TYPE);
/* Check to see if SSA name is set by a comparison operator in
the same basic block. */
the same basic block. */
if (!is_gimple_assign (g))
return false;
if (bb != gimple_bb (g))

@ -71,7 +71,7 @@ DEF_BASIC_BLOCK_FLAG(DUPLICATED, 7)
DEF_BASIC_BLOCK_FLAG(NON_LOCAL_GOTO_TARGET, 8)
/* Set on blocks that are in RTL format. */
DEF_BASIC_BLOCK_FLAG(RTL, 9)
DEF_BASIC_BLOCK_FLAG(RTL, 9)
/* Set on blocks that are forwarder blocks.
Only used in cfgcleanup.cc. */

@ -126,7 +126,7 @@ free_cfg (struct function *fn)
gcc_assert (!n_edges_for_fn (fn));
/* Sanity check that dominance tree is freed. */
gcc_assert (!fn->cfg->x_dom_computed[0] && !fn->cfg->x_dom_computed[1]);
vec_free (fn->cfg->x_label_to_block_map);
vec_free (basic_block_info_for_fn (fn));
ggc_free (fn->cfg);
@ -504,7 +504,7 @@ dump_edge_info (FILE *file, edge e, dump_flags_t flags, int do_succ)
{
basic_block side = (do_succ ? e->dest : e->src);
bool do_details = false;
if ((flags & TDF_DETAILS) != 0
&& (flags & TDF_SLIM) == 0)
do_details = true;
@ -971,7 +971,7 @@ set_edge_probability_and_rescale_others (edge e, profile_probability new_prob)
frequency or count is believed to be lower than COUNT
respectively. */
void
update_bb_profile_for_threading (basic_block bb,
update_bb_profile_for_threading (basic_block bb,
profile_count count, edge taken_edge)
{
gcc_assert (bb == taken_edge->src);

@ -973,7 +973,7 @@ equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
that is a single_set with a SET_SRC of SRC1. Similarly
for NOTE2/SRC2.
So effectively NOTE1/NOTE2 are an alternate form of
So effectively NOTE1/NOTE2 are an alternate form of
SRC1/SRC2 respectively.
Return nonzero if SRC1 or NOTE1 has the same constant

@ -2188,7 +2188,7 @@ static bool
stack_protect_return_slot_p ()
{
basic_block bb;
FOR_ALL_BB_FN (bb, cfun)
for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
!gsi_end_p (gsi); gsi_next (&gsi))
@ -3613,7 +3613,7 @@ expand_asm_stmt (gasm *stmt)
ASM_OPERANDS_OUTPUT_CONSTRAINT (body) = constraints[0];
if (nlabels > 0)
emit_jump_insn (gen_rtx_SET (output_rvec[0], body));
else
else
emit_insn (gen_rtx_SET (output_rvec[0], body));
}
else

@ -31,7 +31,7 @@ along with GCC; see the file COPYING3. If not see
For every field[2], field[0] is the count before the pass runs, and
field[1] is the post-pass count. This allows us to monitor the effect
of each individual pass on the profile consistency.
This structure is not supposed to be used by anything other than passes.cc
and one CFG hook per CFG mode. */
struct profile_record
@ -189,7 +189,7 @@ struct cfg_hooks
/* Add PHI arguments queued in PENDINT_STMT list on edge E to edge
E->dest (only in tree-ssa loop versioning. */
void (*flush_pending_stmts) (edge);
/* True if a block contains no executable instructions. */
bool (*empty_block_p) (basic_block);

@ -89,7 +89,7 @@ superloop_at_depth (class loop *loop, unsigned depth)
/* Returns the list of the latch edges of LOOP. */
static vec<edge>
static vec<edge>
get_loop_latch_edges (const class loop *loop)
{
edge_iterator ei;

@ -383,7 +383,7 @@ expected_loop_iterations_unbounded (const class loop *loop,
bool *read_profile_p)
{
gcov_type expected = -1;
if (read_profile_p)
*read_profile_p = false;
@ -397,7 +397,7 @@ expected_loop_iterations_unbounded (const class loop *loop,
HOST_WIDE_INT max = get_max_loop_iterations_int (loop);
if (max != -1 && max < expected)
return max;
return expected;
}

@ -171,7 +171,7 @@ fix_loop_placement (class loop *loop, bool *irred_invalidated)
successors we consider edges coming out of the loops.
If the changes may invalidate the information about irreducible regions,
IRRED_INVALIDATED is set to true.
IRRED_INVALIDATED is set to true.
If LOOP_CLOSED_SSA_INVLIDATED is non-zero then all basic blocks with
changed loop_father are collected there. */
@ -1110,7 +1110,7 @@ duplicate_loop (class loop *loop, class loop *target, class loop *after)
class loop *cloop;
cloop = alloc_loop ();
place_new_loop (cfun, cloop);
copy_loop_info (loop, cloop);
/* Mark the new loop as copy of LOOP. */

@ -4115,7 +4115,7 @@ fixup_reorder_chain (void)
dest = e_fall->dest;
}
/* We got here if we need to add a new jump insn.
/* We got here if we need to add a new jump insn.
Note force_nonfallthru can delete E_FALL and thus we have to
save E_FALL->src prior to the call to force_nonfallthru. */
nb = force_nonfallthru_and_redirect (e_fall, dest, ret_label);
@ -4946,7 +4946,7 @@ cfg_layout_merge_blocks (basic_block a, basic_block b)
else
{
rtx_insn *last = BB_HEADER (b);
while (NEXT_INSN (last))
last = NEXT_INSN (last);
SET_NEXT_INSN (last) = BB_FOOTER (a);
@ -5057,10 +5057,10 @@ rtl_split_block_before_cond_jump (basic_block bb)
last = insn;
}
/* Did not find everything. */
/* Did not find everything. */
if (found_code && split_point)
return split_block (bb, split_point)->dest;
else
else
return NULL;
}
@ -5455,7 +5455,7 @@ struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
NULL, /* lv_adjust_loop_header_phi*/
rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
NULL, /* flush_pending_stmts */
NULL, /* flush_pending_stmts */
rtl_block_empty_p, /* block_empty_p */
rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
rtl_account_profile_record,

@ -1085,7 +1085,7 @@ cgraph_edge::remove (cgraph_edge *edge)
/* Turn edge into speculative call calling N2. Update
the profile so the direct call is taken COUNT times
with FREQUENCY.
with FREQUENCY.
At clone materialization time, the indirect call E will
be expanded as:
@ -1097,7 +1097,7 @@ cgraph_edge::remove (cgraph_edge *edge)
At this time the function just creates the direct call,
the reference representing the if conditional and attaches
them all to the original indirect call statement.
them all to the original indirect call statement.
speculative_id is used to link direct calls with their corresponding
IPA_REF_ADDR references when representing speculative calls.
@ -2243,7 +2243,7 @@ cgraph_node::dump (FILE *f)
thunk_info::get (this)->dump (f);
}
else gcc_checking_assert (!thunk_info::get (this));
fprintf (f, " Called by: ");
profile_count sum = profile_count::zero ();
@ -2848,7 +2848,7 @@ set_const_flag_1 (cgraph_node *node, bool set_const, bool looping,
When setting the flag be careful about possible interposition and
do not set the flag for functions that can be interposed and set pure
flag for functions that can bind to other definition.
flag for functions that can bind to other definition.
Return true if any change was done. */
@ -3033,7 +3033,7 @@ cgraph_node::can_remove_if_no_direct_calls_p (bool will_inline)
{
struct ipa_ref *ref;
/* For local symbols or non-comdat group it is the same as
/* For local symbols or non-comdat group it is the same as
can_remove_if_no_direct_calls_p. */
if (!externally_visible || !same_comdat_group)
{
@ -4006,7 +4006,7 @@ cgraph_node::function_symbol (enum availability *availability,
/* Walk the alias chain to return the function cgraph_node is alias of.
Walk through non virtual thunks, too. Thus we return either a function
or a virtual thunk node.
When AVAILABILITY is non-NULL, get minimal availability in the chain.
When AVAILABILITY is non-NULL, get minimal availability in the chain.
When REF is non-NULL, assume that reference happens in symbol REF
when determining the availability. */
@ -4098,7 +4098,7 @@ cgraph_node::get_untransformed_body ()
return true;
}
/* Prepare function body. When doing LTO, read cgraph_node's body from disk
/* Prepare function body. When doing LTO, read cgraph_node's body from disk
if it is not already present. When some IPA transformations are scheduled,
apply them. */

@ -436,7 +436,7 @@ public:
/* Return 0 if symbol is known to have different address than S2,
Return 1 if symbol is known to have same address as S2,
return 2 otherwise.
return 2 otherwise.
If MEMORY_ACCESSED is true, assume that both memory pointer to THIS
and S2 is going to be accessed. This eliminates the situations when
@ -923,7 +923,7 @@ struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
/* Walk the alias chain to return the function cgraph_node is alias of.
Walk through thunk, too.
When AVAILABILITY is non-NULL, get minimal availability in the chain.
When AVAILABILITY is non-NULL, get minimal availability in the chain.
When REF is non-NULL, assume that reference happens in symbol REF
when determining the availability. */
cgraph_node *function_symbol (enum availability *avail = NULL,
@ -932,7 +932,7 @@ struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
/* Walk the alias chain to return the function cgraph_node is alias of.
Walk through non virtual thunks, too. Thus we return either a function
or a virtual thunk node.
When AVAILABILITY is non-NULL, get minimal availability in the chain.
When AVAILABILITY is non-NULL, get minimal availability in the chain.
When REF is non-NULL, assume that reference happens in symbol REF
when determining the availability. */
cgraph_node *function_or_virtual_thunk_symbol
@ -1097,7 +1097,7 @@ struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
present. */
bool get_untransformed_body ();
/* Prepare function body. When doing LTO, read cgraph_node's body from disk
/* Prepare function body. When doing LTO, read cgraph_node's body from disk
if it is not already present. When some IPA transformations are scheduled,
apply them. */
bool get_body ();
@ -1180,7 +1180,7 @@ struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
When setting the flag be careful about possible interposition and
do not set the flag for functions that can be interposed and set pure
flag for functions that can bind to other definition.
flag for functions that can bind to other definition.
Return true if any change was done. */
@ -1256,7 +1256,7 @@ struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
all uses of COMDAT function does not make it necessarily disappear from
the program unless we are compiling whole program or we do LTO. In this
case we know we win since dynamic linking will not really discard the
linkonce section.
linkonce section.
If WILL_INLINE is true, assume that function will be inlined into all the
direct calls. */
@ -1269,7 +1269,7 @@ struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
bool can_remove_if_no_direct_calls_and_refs_p (void);
/* Return true when function cgraph_node and its aliases can be removed from
callgraph if all direct calls are eliminated.
callgraph if all direct calls are eliminated.
If WILL_INLINE is true, assume that function will be inlined into all the
direct calls. */
bool can_remove_if_no_direct_calls_p (bool will_inline = false);
@ -2229,7 +2229,7 @@ public:
friend struct cgraph_node;
friend struct cgraph_edge;
symbol_table ():
symbol_table ():
cgraph_count (0), cgraph_max_uid (1), cgraph_max_summary_id (0),
edges_count (0), edges_max_uid (1), edges_max_summary_id (0),
cgraph_released_summary_ids (), edge_released_summary_ids (),

@ -119,7 +119,7 @@ record_type_list (cgraph_node *node, tree list)
for (; list; list = TREE_CHAIN (list))
{
tree type = TREE_VALUE (list);
if (TYPE_P (type))
type = lookup_type_for_runtime (type);
STRIP_NOPS (type);

@ -109,7 +109,7 @@ cgraph_edge::clone (cgraph_node *n, gcall *call_stmt, unsigned stmt_uid,
tree decl;
if (call_stmt && (decl = gimple_call_fndecl (call_stmt))
/* When the call is speculative, we need to resolve it
/* When the call is speculative, we need to resolve it
via cgraph_resolve_speculation and not here. */
&& !speculative)
{
@ -149,7 +149,7 @@ cgraph_edge::clone (cgraph_node *n, gcall *call_stmt, unsigned stmt_uid,
/* Update IPA profile. Local profiles need no updating in original. */
if (update_original)
count = count.combine_with_ipa_count_within (count.ipa ()
count = count.combine_with_ipa_count_within (count.ipa ()
- new_edge->count.ipa (),
caller->count);
symtab->call_edge_duplication_hooks (this, new_edge);
@ -353,7 +353,7 @@ localize_profile (cgraph_node *n)
When UPDATE_ORIGINAL is true, the counts are subtracted from the original
function's profile to reflect the fact that part of execution is handled
by node.
by node.
When CALL_DUPLICATION_HOOK is true, the ipa passes are acknowledged about
the new clone. Otherwise the caller is responsible for doing so later.
@ -612,7 +612,7 @@ cgraph_node::create_virtual_clone (const vec<cgraph_edge *> &redirect_callers,
DECL_STRUCT_FUNCTION (new_decl) = NULL;
DECL_ARGUMENTS (new_decl) = NULL;
DECL_INITIAL (new_decl) = NULL;
DECL_RESULT (new_decl) = NULL;
DECL_RESULT (new_decl) = NULL;
/* We cannot do DECL_RESULT (new_decl) = NULL; here because of LTO partitioning
sometimes storing only clone decl instead of original. */
@ -671,7 +671,7 @@ cgraph_node::create_virtual_clone (const vec<cgraph_edge *> &redirect_callers,
}
/* callgraph node being removed from symbol table; see if its entry can be
replaced by other inline clone.
replaced by other inline clone.
INFO is clone info to attach to the new root. */
cgraph_node *
cgraph_node::find_replacement (clone_info *info)
@ -763,7 +763,7 @@ cgraph_node::find_replacement (clone_info *info)
}
/* Like cgraph_set_call_stmt but walk the clone tree and update all
clones sharing the same function body.
clones sharing the same function body.
When WHOLE_SPECULATIVE_EDGES is true, all three components of
speculative edge gets updated. Otherwise we update only direct
call. */
@ -928,9 +928,9 @@ update_call_expr (cgraph_node *new_version)
edges which should be redirected to point to
NEW_VERSION. ALL the callees edges of the node
are cloned to the new version node. Return the new
version node.
version node.
If non-NULL BLOCK_TO_COPY determine what basic blocks
If non-NULL BLOCK_TO_COPY determine what basic blocks
was copied to prevent duplications of calls that are dead
in the clone. */

@ -92,7 +92,7 @@ along with GCC; see the file COPYING3. If not see
Interprocedural passes differ from small interprocedural
passes by their ability to operate across whole program
at linktime. Their analysis stage is performed early to
both reduce linking times and linktime memory usage by
both reduce linking times and linktime memory usage by
not having to represent whole program in memory.
d) LTO streaming. When doing LTO, everything important gets
@ -142,7 +142,7 @@ along with GCC; see the file COPYING3. If not see
out and thus all variables are output to the file.
Note that with -fno-toplevel-reorder passes 5 and 6
are combined together in cgraph_output_in_order.
are combined together in cgraph_output_in_order.
Finally there are functions to manipulate the callgraph from
backend.
@ -280,7 +280,7 @@ symtab_node::needed_p (void)
static symtab_node symtab_terminator (SYMTAB_SYMBOL);
static symtab_node *queued_nodes = &symtab_terminator;
/* Add NODE to queue starting at QUEUED_NODES.
/* Add NODE to queue starting at QUEUED_NODES.
The queue is linked via AUX pointers and terminated by pointer to 1. */
static void
@ -998,7 +998,7 @@ varpool_node::finalize_decl (tree decl)
}
/* EDGE is an polymorphic call. Mark all possible targets as reachable
and if there is only one target, perform trivial devirtualization.
and if there is only one target, perform trivial devirtualization.
REACHABLE_CALL_TARGETS collects target lists we already walked to
avoid duplicate work. */
@ -1016,7 +1016,7 @@ walk_polymorphic_call_targets (hash_set<void *> *reachable_call_targets,
if (cache_token != NULL && !reachable_call_targets->add (cache_token))
{
if (symtab->dump_file)
dump_possible_polymorphic_call_targets
dump_possible_polymorphic_call_targets
(symtab->dump_file, edge);
for (i = 0; i < targets.length (); i++)
@ -1702,7 +1702,7 @@ mark_functions_to_output (void)
/* DECL is FUNCTION_DECL. Initialize datastructures so DECL is a function
in lowered gimple form. IN_SSA is true if the gimple is in SSA.
Set current_function_decl and cfun to newly constructed empty function body.
return basic block in the function body. */

@ -3073,7 +3073,7 @@ static void
post_ld_pass (bool temp_file) {
if (!(temp_file && flag_idsym) && !flag_dsym)
return;
do_dsymutil (output_file);
}
#else

@ -1754,7 +1754,7 @@ can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
}
else if (next_active_insn (insn) != i3)
all_adjacent = false;
/* Can combine only if previous insn is a SET of a REG or a SUBREG,
or a PARALLEL consisting of such a SET and CLOBBERs.
@ -2614,7 +2614,7 @@ try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
/* If I0 loads a memory and I3 sets the same memory, then I1 and I2
are likely manipulating its value. Ideally we'll be able to combine
all four insns into a bitfield insertion of some kind.
all four insns into a bitfield insertion of some kind.
Note the source in I0 might be inside a sign/zero extension and the
memory modes in I0 and I3 might be different. So extract the address
@ -9835,7 +9835,7 @@ make_field_assignment (rtx x)
&& rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
/* The second SUBREG that might get in the way is a paradoxical
SUBREG around the first operand of the AND. We want to
SUBREG around the first operand of the AND. We want to
pretend the operand is as wide as the destination here. We
do this by adjusting the MEM to wider mode for the sole
purpose of the call to rtx_equal_for_field_assignment_p. Also
@ -9852,7 +9852,7 @@ make_field_assignment (rtx x)
&& rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
/* The second SUBREG that might get in the way is a paradoxical
SUBREG around the first operand of the AND. We want to
SUBREG around the first operand of the AND. We want to
pretend the operand is as wide as the destination here. We
do this by adjusting the MEM to wider mode for the sole
purpose of the call to rtx_equal_for_field_assignment_p. Also

@ -130,7 +130,7 @@ computed relative to GCC's internal directories, false (default) if such\n\
components should be preserved and directory names containing them passed\n\
to other tools such as the linker.",
bool, false)
HOOK_VECTOR_END (C90_EMPTY_HACK)
#undef HOOK_PREFIX

@ -7,12 +7,12 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */

@ -826,7 +826,7 @@ ix86_handle_option (struct gcc_options *opts,
opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_UINTR_UNSET;
}
return true;
case OPT_mhreset:
if (value)
{

@ -26,7 +26,7 @@
/* What options are we going to default to specific settings when
-O* happens; the user can subsequently override these settings.
Omitting the frame pointer is a very good idea on the MCore.
Scheduling isn't worth anything on the current MCore implementation. */

@ -57,7 +57,7 @@ msp430_handle_option (struct gcc_options *opts ATTRIBUTE_UNUSED,
}
break;
}
return true;
}

@ -50,7 +50,7 @@ pdp11_handle_option (struct gcc_options *opts,
opts->x_target_flags &= ~MASK_40;
opts->x_target_flags |= MASK_45;
return true;
case OPT_msoft_float:
opts->x_target_flags &= ~MASK_AC0;
return true;

@ -176,7 +176,7 @@ rs6000_handle_option (struct gcc_options *opts, struct gcc_options *opts_set,
if (invert)
opts->x_rs6000_debug &= ~mask;
else
else
opts->x_rs6000_debug |= mask;
}
break;

@ -51,11 +51,11 @@ rx_handle_option (struct gcc_options *opts,
return value >= 0 && value <= 4;
case OPT_mcpu_:
if ((enum rx_cpu_types) value == RX200 ||
if ((enum rx_cpu_types) value == RX200 ||
(enum rx_cpu_types) value == RX100)
opts->x_target_flags |= MASK_NO_USE_FPU;
break;
case OPT_fpu:
if (opts->x_rx_cpu_type == RX200)
error_at (loc, "the RX200 cpu does not have FPU hardware");

@ -45,7 +45,7 @@ along with GCC; see the file COPYING3. If not see
[(set (reg:CCM) (compare:CCM (operation) (immediate)))
(set (reg) (operation)]
The mode CCM will be chosen as if by SELECT_CC_MODE.
Note that unlike NOTICE_UPDATE_CC, we do not handle memory operands.
@ -125,7 +125,7 @@ struct comparison
/* Whether IN_A is wrapped in a NOT before being compared. */
bool not_in_a;
};
static vec<comparison *> all_compares;
/* Return whether X is a NOT unary expression. */
@ -875,13 +875,13 @@ try_eliminate_compare (struct comparison *cmp)
rtvec v = rtvec_alloc (2);
RTVEC_ELT (v, 0) = y;
RTVEC_ELT (v, 1) = x;
rtx pat = gen_rtx_PARALLEL (VOIDmode, v);
/* Succeed if the new instruction is valid. Note that we may have started
a change group within maybe_select_cc_mode, therefore we must continue. */
validate_change (insn, &PATTERN (insn), pat, true);
if (!apply_change_group ())
return false;

@ -1161,7 +1161,7 @@ aarch64_simd_builtin_type (machine_mode mode,
return type;
}
static void
aarch64_init_simd_builtin_types (void)
{

@ -346,7 +346,7 @@ const struct cpu_cost_table thunderx2t99_extra_costs =
}
};
const struct cpu_cost_table thunderx3t110_extra_costs =
const struct cpu_cost_table thunderx3t110_extra_costs =
{
/* ALU */
{

@ -627,7 +627,7 @@ struct aarch64_address_info {
};
#define AARCH64_FUSION_PAIR(x, name) \
AARCH64_FUSE_##name##_index,
AARCH64_FUSE_##name##_index,
/* Supported fusion operations. */
enum aarch64_fusion_pairs_index
{

@ -9105,7 +9105,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
{
bool frame_related_p = aarch64_emit_cfi_for_reg_p (regno);
machine_mode mode = aarch64_reg_save_mode (regno);
rtx reg = gen_rtx_REG (mode, regno);
poly_int64 offset = frame.reg_offset[regno];
if (frame_pointer_needed)
@ -10290,7 +10290,7 @@ aarch64_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
/* Implement TARGET_CASE_VALUES_THRESHOLD.
The expansion for a table switch is quite expensive due to the number
of instructions, the table lookup and hard to predict indirect jump.
When optimizing for speed, and -O3 enabled, use the per-core tuning if
When optimizing for speed, and -O3 enabled, use the per-core tuning if
set, otherwise use tables for >= 11 cases as a tradeoff between size and
performance. When optimizing for size, use 8 for smallest codesize. */

@ -3269,7 +3269,7 @@ alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
set (OP[1] OP[3])
is valid. Naturally, output operand ordering is little-endian.
This is used by *movtf_internal and *movti_internal. */
void
alpha_split_tmode_pair (rtx operands[4], machine_mode mode,
bool fixup_overlap)
@ -4410,7 +4410,7 @@ emit_insxl (machine_mode mode, rtx op1, rtx op2)
}
/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
to perform. MEM is the memory on which to operate. VAL is the second
to perform. MEM is the memory on which to operate. VAL is the second
operand of the binary operator. BEFORE and AFTER are optional locations to
return the value of MEM either before of after the operation. SCRATCH is
a scratch register. */
@ -4594,7 +4594,7 @@ alpha_split_compare_and_swap_12 (rtx operands[])
label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
emit_insn (gen_load_locked (DImode, scratch, mem));
width = GEN_INT (GET_MODE_BITSIZE (mode));
mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
emit_insn (gen_extxl (dest, scratch, width, addr));
@ -4725,7 +4725,7 @@ alpha_split_atomic_exchange_12 (rtx operands[])
emit_label (XEXP (label, 0));
emit_insn (gen_load_locked (DImode, scratch, mem));
width = GEN_INT (GET_MODE_BITSIZE (mode));
mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
emit_insn (gen_extxl (dest, scratch, width, addr));
@ -5019,7 +5019,7 @@ get_trap_mode_suffix (void)
gcc_unreachable ();
}
break;
default:
gcc_unreachable ();
}
@ -5056,7 +5056,7 @@ get_round_mode_suffix (void)
case ROUND_SUFFIX_C:
return "c";
default:
gcc_unreachable ();
}
@ -6151,7 +6151,7 @@ alpha_setup_incoming_varargs (cumulative_args_t pcum,
/* Detect whether integer registers or floating-point registers
are needed by the detected va_arg statements. See above for
how these values are computed. Note that the "escape" value
is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
these bits set. */
gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
@ -6754,7 +6754,7 @@ alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
return NULL;
}
/* Fold the builtin for the ZAPNOT instruction. This is essentially a
/* Fold the builtin for the ZAPNOT instruction. This is essentially a
specialized form of an AND operation. Other byte manipulation instructions
are defined in terms of this instruction, so this is also used as a
subroutine for other builtins.
@ -6821,7 +6821,7 @@ alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
else
zap_op = op;
}
opint[1] = bytemask;
return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
}
@ -7422,7 +7422,7 @@ alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
HOST_WIDE_INT
alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
{
{
/* The only possible attempts we ever expect are ARG or FRAME_PTR to
HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
on the proper computations and will need the register save area size
@ -7433,7 +7433,7 @@ alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
/* PT_NULL procedures have no frame of their own and we only allow
elimination to the stack pointer. This is the argument pointer and we
resolve the soft frame pointer to that as well. */
if (alpha_procedure_type == PT_NULL)
return 0;
@ -7448,13 +7448,13 @@ alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
^ ^ ^ ^
ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR
PT_REGISTER procedures are similar in that they may have a frame of their
own. They have no regs-sa/pv/outgoing-args area.
We first compute offset to HARD_FRAME_PTR, then add what we need to get
to STACK_PTR if need be. */
{
HOST_WIDE_INT offset;
HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
@ -7473,10 +7473,10 @@ alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
default:
gcc_unreachable ();
}
if (to == STACK_POINTER_REGNUM)
offset += ALPHA_ROUND (crtl->outgoing_args_size);
return offset;
}
}
@ -8828,7 +8828,7 @@ alpha_handle_trap_shadows (void)
suitably aligned. This is very processor-specific. */
/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
that are marked "fake". These instructions do not exist on that target,
but it is possible to see these insns with deranged combinations of
but it is possible to see these insns with deranged combinations of
command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
choose a result at random. */
@ -9465,7 +9465,7 @@ And in the noreturn case:
after the insn. In case trap is the last insn in the function,
emit NOP to guarantee that PC remains inside function boundaries.
This workaround is needed to get reliable backtraces. */
rtx_insn *insn = prev_active_insn (get_last_insn ());
if (insn && NONJUMP_INSN_P (insn))
@ -9725,7 +9725,7 @@ alpha_write_linkage (FILE *stream, const char *funname)
the section; 0 if the default should be used. */
static void
vms_asm_named_section (const char *name, unsigned int flags,
vms_asm_named_section (const char *name, unsigned int flags,
tree decl ATTRIBUTE_UNUSED)
{
fputc ('\n', asm_out_file);

@ -33,7 +33,7 @@ along with GCC; see the file COPYING3. If not see
/* Bit defines for amask instruction. */
#define AMASK_BWX 0x1 /* byte/word extension. */
#define AMASK_FIX 0x2 /* sqrt and f <-> i conversions
#define AMASK_FIX 0x2 /* sqrt and f <-> i conversions
extension. */
#define AMASK_CIX 0x4 /* count extension. */
#define AMASK_MVI 0x100 /* multimedia extension. */

@ -25,7 +25,7 @@ along with GCC; see the file COPYING3. If not see
#define ASM_SPEC "%{G*} %{relax:-relax} %{mcpu=*:-m%*}"
/* Do not output a .file directive at the beginning of the input file. */
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE false

@ -188,8 +188,8 @@ typedef struct {int num_args; enum avms_arg_type atypes[6];} avms_arg_info;
#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,TABLEINSN) \
{ ASM_OUTPUT_ALIGN (FILE, 3); (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); }
/* This says how to output assembler code to declare an
uninitialized external linkage data object. */
/* This says how to output assembler code to declare an
uninitialized external linkage data object. */
#define COMMON_ASM_OP "\t.comm\t"

@ -4229,7 +4229,7 @@ enum arc_shift_alg
{
SHIFT_MOVE, /* Register-to-register move. */
SHIFT_LOOP, /* Zero-overhead loop implementation. */
SHIFT_INLINE, /* Mmultiple LSHIFTs and LSHIFT-PLUSs. */
SHIFT_INLINE, /* Mmultiple LSHIFTs and LSHIFT-PLUSs. */
SHIFT_AND_ROT, /* Bitwise AND, then ROTATERTs. */
SHIFT_SWAP, /* SWAP then multiple LSHIFTs/LSHIFT-PLUSs. */
SHIFT_AND_SWAP_ROT /* Bitwise AND, then SWAP, then ROTATERTs. */

@ -1,7 +1,7 @@
/* Definitions of target machine for GNU compiler, for ARM with a.out
Copyright (C) 1995-2024 Free Software Foundation, Inc.
Contributed by Richard Earnshaw (rearnsha@armltd.co.uk).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
@ -165,7 +165,7 @@
#define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM) \
sprintf (STRING, "*%s%s%u", LOCAL_LABEL_PREFIX, PREFIX, (unsigned int)(NUM))
#endif
/* Output an element of a dispatch table. */
#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
do \
@ -174,7 +174,7 @@
asm_fprintf (STREAM, "\t.word\t%LL%d\n", VALUE); \
} \
while (0)
/* Thumb-2 always uses addr_diff_elf so that the Table Branch instructions
can be used. For non-pic code where the offsets do not suitable for
@ -266,7 +266,7 @@
fprintf (STREAM, "\t.space\t%d\n", (int) (NBYTES))
/* Align output to a power of two. Horrible /bin/as. */
#ifndef ASM_OUTPUT_ALIGN
#ifndef ASM_OUTPUT_ALIGN
#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
do \
{ \
@ -292,7 +292,7 @@
} \
while (0)
#endif
/* Output a local common block. /bin/as can't do this, so hack a
`.space' into the bss segment. Note that this is *bad* practice,
which is guaranteed NOT to work since it doesn't define STATIC
@ -308,7 +308,7 @@
} \
while (0)
#endif
/* Output a zero-initialized block. */
#ifndef ASM_OUTPUT_ALIGNED_BSS
#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGN) \

@ -401,7 +401,7 @@ public:
bool require_integer_immediate (unsigned int);
bool require_derived_scalar_type (unsigned int, type_class_index,
unsigned int = SAME_SIZE);
bool check_num_arguments (unsigned int);
bool check_gp_argument (unsigned int, unsigned int &, unsigned int &);
tree resolve_unary (type_class_index = SAME_TYPE_CLASS,

@ -15367,9 +15367,9 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
HOST_WIDE_INT srcoffset, dstoffset;
HOST_WIDE_INT src_autoinc, dst_autoinc;
rtx mem, addr;
gcc_assert (interleave_factor >= 1 && interleave_factor <= 4);
/* Use hard registers if we have aligned source or destination so we can use
load/store multiple with contiguous registers. */
if (dst_aligned || src_aligned)
@ -15383,7 +15383,7 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
src = copy_addr_to_reg (XEXP (srcbase, 0));
srcoffset = dstoffset = 0;
/* Calls to arm_gen_load_multiple and arm_gen_store_multiple update SRC/DST.
For copying the last bytes we want to subtract this offset again. */
src_autoinc = dst_autoinc = 0;
@ -15437,14 +15437,14 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
remaining -= block_size_bytes;
}
/* Copy any whole words left (note these aren't interleaved with any
subsequent halfword/byte load/stores in the interests of simplicity). */
words = remaining / UNITS_PER_WORD;
gcc_assert (words < interleave_factor);
if (src_aligned && words > 1)
{
emit_insn (arm_gen_load_multiple (regnos, words, src, TRUE, srcbase,
@ -15490,11 +15490,11 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
}
remaining -= words * UNITS_PER_WORD;
gcc_assert (remaining < 4);
/* Copy a halfword if necessary. */
if (remaining >= 2)
{
halfword_tmp = gen_reg_rtx (SImode);
@ -15518,11 +15518,11 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
remaining -= 2;
srcoffset += 2;
}
gcc_assert (remaining < 2);
/* Copy last byte. */
if ((remaining & 1) != 0)
{
byte_tmp = gen_reg_rtx (SImode);
@ -15543,9 +15543,9 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
remaining--;
srcoffset++;
}
/* Store last halfword if we haven't done so already. */
if (halfword_tmp)
{
addr = plus_constant (Pmode, dst, dstoffset - dst_autoinc);
@ -15564,7 +15564,7 @@ arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
emit_move_insn (mem, gen_lowpart (QImode, byte_tmp));
dstoffset++;
}
gcc_assert (remaining == 0 && srcoffset == dstoffset);
}
@ -15583,7 +15583,7 @@ arm_adjust_block_mem (rtx mem, HOST_WIDE_INT length, rtx *loop_reg,
rtx *loop_mem)
{
*loop_reg = copy_addr_to_reg (XEXP (mem, 0));
/* Although the new mem does not refer to a known location,
it does keep up to LENGTH bytes of alignment. */
*loop_mem = change_address (mem, BLKmode, *loop_reg);
@ -15603,14 +15603,14 @@ arm_block_move_unaligned_loop (rtx dest, rtx src, HOST_WIDE_INT length,
{
rtx src_reg, dest_reg, final_src, test;
HOST_WIDE_INT leftover;
leftover = length % bytes_per_iter;
length -= leftover;
/* Create registers and memory references for use within the loop. */
arm_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
arm_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
/* Calculate the value that SRC_REG should have after the last iteration of
the loop. */
final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
@ -15619,7 +15619,7 @@ arm_block_move_unaligned_loop (rtx dest, rtx src, HOST_WIDE_INT length,
/* Emit the start of the loop. */
rtx_code_label *label = gen_label_rtx ();
emit_label (label);
/* Emit the loop body. */
arm_block_move_unaligned_straight (dest, src, bytes_per_iter,
interleave_factor);
@ -15627,11 +15627,11 @@ arm_block_move_unaligned_loop (rtx dest, rtx src, HOST_WIDE_INT length,
/* Move on to the next block. */
emit_move_insn (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
emit_move_insn (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
/* Emit the loop condition. */
test = gen_rtx_NE (VOIDmode, src_reg, final_src);
emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
/* Mop up any left-over bytes. */
if (leftover)
arm_block_move_unaligned_straight (dest, src, leftover, interleave_factor);
@ -15645,7 +15645,7 @@ static int
arm_cpymemqi_unaligned (rtx *operands)
{
HOST_WIDE_INT length = INTVAL (operands[2]);
if (optimize_size)
{
bool src_aligned = MEM_ALIGN (operands[1]) >= BITS_PER_WORD;
@ -15656,7 +15656,7 @@ arm_cpymemqi_unaligned (rtx *operands)
resulting code can be smaller. */
unsigned int interleave_factor = (src_aligned || dst_aligned) ? 2 : 1;
HOST_WIDE_INT bytes_per_iter = (src_aligned || dst_aligned) ? 8 : 4;
if (length > 12)
arm_block_move_unaligned_loop (operands[0], operands[1], length,
interleave_factor, bytes_per_iter);
@ -15674,7 +15674,7 @@ arm_cpymemqi_unaligned (rtx *operands)
else
arm_block_move_unaligned_straight (operands[0], operands[1], length, 4);
}
return 1;
}
@ -31165,10 +31165,10 @@ int
vfp3_const_double_for_fract_bits (rtx operand)
{
REAL_VALUE_TYPE r0;
if (!CONST_DOUBLE_P (operand))
return 0;
r0 = *CONST_DOUBLE_REAL_VALUE (operand);
if (exact_real_inverse (DFmode, &r0)
&& !REAL_VALUE_NEGATIVE (r0))
@ -32430,7 +32430,7 @@ arm_autoinc_modes_ok_p (machine_mode mode, enum arm_auto_incmodes code)
else
return false;
}
return true;
case ARM_POST_DEC:
@ -32447,10 +32447,10 @@ arm_autoinc_modes_ok_p (machine_mode mode, enum arm_auto_incmodes code)
return false;
return true;
default:
return false;
}
return false;
@ -32461,7 +32461,7 @@ arm_autoinc_modes_ok_p (machine_mode mode, enum arm_auto_incmodes code)
Additionally, the default expansion code is not available or suitable
for post-reload insn splits (this can occur when the register allocator
chooses not to do a shift in NEON).
This function is used in both initial expand and post-reload splits, and
handles all kinds of 64-bit shifts.
@ -33531,7 +33531,7 @@ arm_asan_shadow_offset (void)
/* This is a temporary fix for PR60655. Ideally we need
to handle most of these cases in the generic part but
currently we reject minus (..) (sym_ref). We try to
currently we reject minus (..) (sym_ref). We try to
ameliorate the case with minus (sym_ref1) (sym_ref2)
where they are in the same section. */
@ -33854,7 +33854,7 @@ arm_valid_target_attribute_tree (tree args, struct gcc_options *opts,
return build_target_option_node (opts, opts_set);
}
static void
static void
add_attribute (const char * mode, tree *attributes)
{
size_t len = strlen (mode);
@@ -33885,7 +33885,7 @@ arm_insert_attributes (tree fndecl, tree * attributes)
/* Nested definitions must inherit mode. */
if (current_function_decl)
{
mode = TARGET_THUMB ? "thumb" : "arm";
mode = TARGET_THUMB ? "thumb" : "arm";
add_attribute (mode, attributes);
return;
}


@@ -1426,7 +1426,7 @@ extern const char *fp_sysreg_names[NB_FP_SYSREGS];
but prevents the compiler from extending the lifetime of these
registers. */
#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
arm_small_register_classes_for_mode_p
arm_small_register_classes_for_mode_p
/* Must leave BASE_REGS reloads alone */
#define THUMB_SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
@@ -1479,7 +1479,7 @@ extern const char *fp_sysreg_names[NB_FP_SYSREGS];
/* Return the maximum number of consecutive registers
needed to represent mode MODE in a register of class CLASS.
ARM regs are UNITS_PER_WORD bits.
ARM regs are UNITS_PER_WORD bits.
FIXME: Is this true for iWMMX? */
#define CLASS_MAX_NREGS(CLASS, MODE) \
(CLASS == VPR_REG) \
@@ -1647,14 +1647,14 @@ machine_function;
#define ARM_Q_BIT_READ (arm_q_bit_access ())
#define ARM_GE_BITS_READ (arm_ge_bits_access ())
/* As in the machine_function, a global set of call-via labels, for code
/* As in the machine_function, a global set of call-via labels, for code
that is in text_section. */
extern GTY(()) rtx thumb_call_via_label[14];
/* The number of potential ways of assigning to a co-processor. */
#define ARM_NUM_COPROC_SLOTS 1
/* Enumeration of procedure calling standard variants. We don't really
/* Enumeration of procedure calling standard variants. We don't really
support all of these yet. */
enum arm_pcs
{


@@ -1,6 +1,6 @@
/* Configuration file for ARM BPABI targets.
Copyright (C) 2004-2024 Free Software Foundation, Inc.
Contributed by CodeSourcery, LLC
Contributed by CodeSourcery, LLC
This file is part of GCC.


@@ -3,7 +3,7 @@
Copyright (C) 1995-2024 Free Software Foundation, Inc.
Contributed by Philip Blundell <philb@gnu.org> and
Catherine Moore <clm@cygnus.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
@@ -111,7 +111,7 @@
#ifndef LINK_SPEC
#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X"
#endif
/* Run-time Target Specification. */
#ifndef TARGET_DEFAULT
#define TARGET_DEFAULT (MASK_APCS_FRAME)


@@ -1,6 +1,6 @@
/* Configuration file for ARM GNU/Linux EABI targets.
Copyright (C) 2004-2024 Free Software Foundation, Inc.
Contributed by CodeSourcery, LLC
Contributed by CodeSourcery, LLC
This file is part of GCC.


@@ -1,6 +1,6 @@
/* Configuration file for Symbian OS on ARM processors.
Copyright (C) 2004-2024 Free Software Foundation, Inc.
Contributed by CodeSourcery, LLC
Contributed by CodeSourcery, LLC
This file is part of GCC.
@@ -29,7 +29,7 @@
Make all symbols hidden by default. Symbian OS expects that all
exported symbols will be explicitly marked with
"__declspec(dllexport)".
"__declspec(dllexport)".
Enumeration types use 4 bytes, even if the enumerals are small,
unless explicitly overridden.
@@ -63,7 +63,7 @@
#undef SUBTARGET_ASM_FLOAT_SPEC
#define SUBTARGET_ASM_FLOAT_SPEC \
"%{!mfpu=*:-mfpu=vfp} %{!mcpu=*:%{!march=*:-march=armv5t}}"
/* Define the __symbian__ macro. */
#undef TARGET_OS_CPP_BUILTINS
#define TARGET_OS_CPP_BUILTINS() \


@@ -91,6 +91,6 @@
/* The libgcc udivmod functions may throw exceptions. If newlib is
configured to support long longs in I/O, then printf will depend on
udivmoddi4, which will depend on the exception unwind routines,
which will depend on abort, which is defined in libc. */
which will depend on abort, which is defined in libc. */
#undef LINK_GCC_C_SEQUENCE_SPEC
#define LINK_GCC_C_SEQUENCE_SPEC "--start-group %G %{!nolibc:%L} --end-group"


@@ -1,10 +1,10 @@
/* Definitions of target machine for GCC,
for ARM with targeting the VXWorks run time environment.
for ARM with targeting the VXWorks run time environment.
Copyright (C) 1999-2024 Free Software Foundation, Inc.
Contributed by: Mike Stump <mrs@wrs.com>
Brought up to date by CodeSourcery, LLC.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify


@@ -13,7 +13,7 @@ GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */


@@ -8,12 +8,12 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */


@@ -7,12 +7,12 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */


@@ -8,12 +8,12 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */


@@ -8,12 +8,12 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */


@@ -326,7 +326,7 @@ enum reg_class {
#define RETURN_ADDR_RTX(count, tem) avr_return_addr_rtx (count, tem)
/* Don't use Push rounding. expr.cc: emit_single_push_insn is broken
/* Don't use Push rounding. expr.cc: emit_single_push_insn is broken
for POST_DEC targets (PR27386). */
/*#define PUSH_ROUNDING(NPUSHED) (NPUSHED)*/
@@ -485,7 +485,7 @@ typedef struct avr_args
/* Set MOVE_RATIO to 3 to allow memory moves upto 4 bytes to happen
by pieces when optimizing for speed, like it did when MOVE_MAX_PIECES
was 4. When optimizing for size, allow memory moves upto 2 bytes.
was 4. When optimizing for size, allow memory moves upto 2 bytes.
Also see avr_use_by_pieces_infrastructure_p. */
#define MOVE_RATIO(speed) ((speed) ? 3 : 2)
@@ -568,19 +568,19 @@ struct GTY(()) machine_function
-1 when "signal" attribute(s) with arguments are present but none
without argument. */
int is_signal;
/* 'true' - if current function is a non-blocking interrupt service
routine as specified by the "isr_noblock" attribute. */
int is_noblock;
/* 'true' - if current function is a 'task' function
/* 'true' - if current function is a 'task' function
as specified by the "OS_task" attribute. */
int is_OS_task;
/* 'true' - if current function is a 'main' function
/* 'true' - if current function is a 'main' function
as specified by the "OS_main" attribute. */
int is_OS_main;
/* Current function stack size. */
int stack_usage;


@@ -8,12 +8,12 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */


@@ -8,12 +8,12 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */


@@ -8,12 +8,12 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */


@@ -108,12 +108,12 @@ typedef long long unsigned int uint_uk_t;
/* The Embedded-C paper specifies results only for rounding points
0 < RP < FBIT
As an extension, the following functions work as expected
with rounding points
-IBIT < RP < FBIT
For example, rounding an accum with a rounding point of -1 will
result in an even integer value. */


@@ -71,7 +71,7 @@ extern char *bfin_asm_long (void);
extern char *bfin_asm_short (void);
extern int log2constp (unsigned HOST_WIDE_INT);
extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx);
extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx);
extern HOST_WIDE_INT bfin_initial_elimination_offset (int, int);
extern int effective_address_32bit_p (rtx, machine_mode);


@@ -97,14 +97,14 @@ bfin_globalize_label (FILE *stream, const char *name)
fputc ('\n',stream);
}
static void
output_file_start (void)
static void
output_file_start (void)
{
FILE *file = asm_out_file;
int i;
fprintf (file, ".file \"%s\";\n", LOCATION_FILE (input_location));
for (i = 0; arg_regs[i] >= 0; i++)
;
max_arg_registers = i; /* how many arg reg used */
@@ -417,7 +417,7 @@ expand_prologue_reg_save (rtx spreg, int saveall, bool is_inthandler)
}
}
for (i = REG_P7 + 1; i < REG_CC; i++)
if (saveall
if (saveall
|| (is_inthandler
&& (df_regs_ever_live_p (i)
|| (!leaf_function_p () && call_used_or_fixed_reg_p (i)))))
@@ -548,7 +548,7 @@ expand_epilogue_reg_restore (rtx spreg, bool saveall, bool is_inthandler)
it.
Normally, this macro will push all remaining incoming registers on the
stack and set PRETEND_SIZE to the length of the registers pushed.
stack and set PRETEND_SIZE to the length of the registers pushed.
Blackfin specific :
- VDSP C compiler manual (our ABI) says that a variable args function
@@ -590,7 +590,7 @@ setup_incoming_varargs (cumulative_args_t cum,
be accessed via the stack pointer) in functions that seem suitable. */
static bool
bfin_frame_pointer_required (void)
bfin_frame_pointer_required (void)
{
e_funkind fkind = funkind (TREE_TYPE (current_function_decl));
@@ -906,7 +906,7 @@ do_unlink (rtx spreg, HOST_WIDE_INT frame_size, bool all, int epilogue_p)
if (stack_frame_needed_p ())
emit_insn (gen_unlink ());
else
else
{
rtx postinc = gen_rtx_MEM (Pmode, gen_rtx_POST_INC (Pmode, spreg));
@@ -968,7 +968,7 @@ expand_interrupt_handler_prologue (rtx spreg, e_funkind fkind, bool all)
emit_insn (gen_movsi_low (p5reg, p5reg, chipid));
emit_insn (gen_dummy_load (p5reg, bfin_cc_rtx));
}
if (lookup_attribute ("nesting", attrs))
{
rtx srcreg = gen_rtx_REG (Pmode, ret_regs[fkind]);
@@ -1046,7 +1046,7 @@ bfin_load_pic_reg (rtx dest)
pic reg, since the caller always passes a usable one. */
if (local_info_node && local_info_node->local)
return pic_offset_table_rtx;
if (OPTION_SET_P (bfin_library_id))
addr = plus_constant (Pmode, pic_offset_table_rtx,
-4 - bfin_library_id * 4);
@@ -1236,7 +1236,7 @@ bfin_delegitimize_address (rtx orig_x)
32-bit instruction. */
int
effective_address_32bit_p (rtx op, machine_mode mode)
effective_address_32bit_p (rtx op, machine_mode mode)
{
HOST_WIDE_INT offset;
@@ -1312,7 +1312,7 @@ print_address_operand (FILE *file, rtx x)
case PRE_DEC:
fprintf (file, "--");
output_address (VOIDmode, XEXP (x, 0));
output_address (VOIDmode, XEXP (x, 0));
break;
case POST_INC:
output_address (VOIDmode, XEXP (x, 0));
@@ -1390,7 +1390,7 @@ print_operand (FILE *file, rtx x, char code)
output_operand_lossage ("invalid %%j value");
}
break;
case 'J': /* reverse logic */
switch (GET_CODE(x))
{
@@ -1491,7 +1491,7 @@ print_operand (FILE *file, rtx x, char code)
else
output_operand_lossage ("invalid operand for code '%c'", code);
}
else
else
fprintf (file, "%s", reg_names[REGNO (x)]);
break;
@@ -1620,7 +1620,7 @@ print_operand (FILE *file, rtx x, char code)
/* Initialize a variable CUM of type CUMULATIVE_ARGS
for a call to a function whose data type is FNTYPE.
For a library call, FNTYPE is 0.
For a library call, FNTYPE is 0.
VDSP C Compiler manual, our ABI says that
first 3 words of arguments will use R0, R1 and R2.
*/
@@ -1718,7 +1718,7 @@ bfin_arg_partial_bytes (cumulative_args_t cum, const function_arg_info &arg)
{
int bytes = arg.promoted_size_in_bytes ();
int bytes_left = get_cumulative_args (cum)->nregs * UNITS_PER_WORD;
if (bytes == -1)
return 0;
@@ -1759,7 +1759,7 @@ bfin_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
/* Return true when register may be used to pass function parameters. */
bool
bool
function_arg_regno_p (int n)
{
int i;
@@ -2701,7 +2701,7 @@ bfin_valid_reg_p (unsigned int regno, int strict, machine_mode mode,
/* Recognize an RTL expression that is a valid memory address for an
instruction. The MODE argument is the machine mode for the MEM expression
that wants to use this address.
that wants to use this address.
Blackfin addressing modes are as follows:
@@ -2710,7 +2710,7 @@ bfin_valid_reg_p (unsigned int regno, int strict, machine_mode mode,
B [ Preg + uimm15 ]
W [ Preg + uimm16m2 ]
[ Preg + uimm17m4 ]
[ Preg + uimm17m4 ]
[preg++]
[preg--]
@@ -2888,8 +2888,8 @@ bfin_rtx_costs (rtx x, machine_mode mode, int outer_code_i, int opno,
else
*total = cost2;
return true;
case ASHIFT:
case ASHIFT:
case ASHIFTRT:
case LSHIFTRT:
if (mode == DImode)
@@ -2904,7 +2904,7 @@ bfin_rtx_costs (rtx x, machine_mode mode, int outer_code_i, int opno,
*total += rtx_cost (op0, mode, code, 0, speed);
return true;
case IOR:
case AND:
case XOR:
@@ -3152,11 +3152,11 @@ output_push_multiple (rtx insn, rtx *operands)
{
char buf[80];
int ok;
/* Validate the insn again, and compute first_[dp]reg_to_save. */
ok = analyze_push_multiple_operation (PATTERN (insn));
gcc_assert (ok);
if (first_dreg_to_save == 8)
sprintf (buf, "[--sp] = ( p5:%d );\n", first_preg_to_save);
else if (first_preg_to_save == 6)
@@ -3176,7 +3176,7 @@ output_pop_multiple (rtx insn, rtx *operands)
{
char buf[80];
int ok;
/* Validate the insn again, and compute first_[dp]reg_to_save. */
ok = analyze_pop_multiple_operation (PATTERN (insn));
gcc_assert (ok);
@@ -3856,7 +3856,7 @@ static void
hwloop_fail (hwloop_info loop)
{
rtx insn = loop->loop_end;
if (DPREG_P (loop->iter_reg))
{
/* If loop->iter_reg is a DREG or PREG, we can split it here
@@ -3880,7 +3880,7 @@ hwloop_fail (hwloop_info loop)
}
else
{
splitting_loops = 1;
splitting_loops = 1;
try_split (PATTERN (insn), safe_as_a <rtx_insn *> (insn), 1);
splitting_loops = 0;
}
@@ -4132,7 +4132,7 @@ workaround_rts_anomaly (void)
if (BARRIER_P (insn))
return;
if (NOTE_P (insn) || LABEL_P (insn))
continue;
@@ -4286,7 +4286,7 @@ indirect_call_p (rtx pat)
pat = XEXP (pat, 0);
gcc_assert (GET_CODE (pat) == MEM);
pat = XEXP (pat, 0);
return REG_P (pat);
}
@@ -4329,7 +4329,7 @@ workaround_speculation (void)
int delay_needed = 0;
next = find_next_insn_start (insn);
if (NOTE_P (insn) || BARRIER_P (insn))
continue;
if (JUMP_TABLE_DATA_P (insn))
@@ -4344,7 +4344,7 @@ workaround_speculation (void)
pat = PATTERN (insn);
if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
continue;
if (GET_CODE (pat) == ASM_INPUT || asm_noperands (pat) >= 0)
{
np_check_regno = -1;
@@ -4603,7 +4603,7 @@ add_sched_insns_for_speculation (void)
if (GET_CODE (PATTERN (next)) == UNSPEC_VOLATILE
&& get_attr_type (next) == TYPE_STALL)
continue;
emit_insn_before (gen_stall (GEN_INT (1)), next);
emit_insn_before (gen_stall (GEN_INT (1)), next);
}
}
}
@@ -4719,7 +4719,7 @@ bfin_comp_type_attributes (const_tree type1, const_tree type2)
if (kind1 != kind2)
return 0;
/* Check for mismatched modifiers */
if (!lookup_attribute ("nesting", TYPE_ATTRIBUTES (type1))
!= !lookup_attribute ("nesting", TYPE_ATTRIBUTES (type2)))
@@ -4744,9 +4744,9 @@ bfin_comp_type_attributes (const_tree type1, const_tree type2)
struct attribute_spec.handler. */
static tree
bfin_handle_longcall_attribute (tree *node, tree name,
tree args ATTRIBUTE_UNUSED,
int flags ATTRIBUTE_UNUSED,
bfin_handle_longcall_attribute (tree *node, tree name,
tree args ATTRIBUTE_UNUSED,
int flags ATTRIBUTE_UNUSED,
bool *no_add_attrs)
{
if (TREE_CODE (*node) != FUNCTION_TYPE
@@ -5154,7 +5154,7 @@ bfin_init_builtins (void)
= build_function_type_list (integer_type_node,
build_pointer_type (integer_type_node),
NULL_TREE);
/* Add the remaining MMX insns with somewhat more complicated types. */
def_builtin ("__builtin_bfin_csync", void_ftype_void, BFIN_BUILTIN_CSYNC);
def_builtin ("__builtin_bfin_ssync", void_ftype_void, BFIN_BUILTIN_SSYNC);
@@ -5746,7 +5746,7 @@ bfin_conditional_register_usage (void)
#define TARGET_EXPAND_BUILTIN bfin_expand_builtin
#undef TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL bfin_globalize_label
#define TARGET_ASM_GLOBALIZE_LABEL bfin_globalize_label
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START output_file_start


@@ -295,10 +295,10 @@ extern const char *bfin_library_id_string;
/* Define this if the above stack space is to be considered part of the
* space allocated by the caller. */
#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
/* Define this if the maximum size of all the outgoing args is to be
accumulated and pushed during the prologue. The amount can be
found in the variable crtl->outgoing_args_size. */
found in the variable crtl->outgoing_args_size. */
#define ACCUMULATE_OUTGOING_ARGS 1
/*#define DATA_ALIGNMENT(TYPE, BASIC-ALIGN) for arrays.. */
@@ -876,11 +876,11 @@ typedef struct {
#define DEFAULT_SIGNED_CHAR 1
/* FLOAT_TYPE_SIZE get poisoned, so add BFIN_ prefix. */
#define BFIN_FLOAT_TYPE_SIZE BITS_PER_WORD
#define SHORT_TYPE_SIZE 16
#define SHORT_TYPE_SIZE 16
#define CHAR_TYPE_SIZE 8
#define INT_TYPE_SIZE 32
#define LONG_TYPE_SIZE 32
#define LONG_LONG_TYPE_SIZE 64
#define LONG_LONG_TYPE_SIZE 64
/* Note: Fix this to depend on target switch. -- lev */
@@ -943,7 +943,7 @@ typedef struct {
#define JUMP_TABLES_IN_TEXT_SECTION flag_pic
/* Define if operations between registers always perform the operation
on the full register even if a narrower mode is specified.
on the full register even if a narrower mode is specified.
#define WORD_REGISTER_OPERATIONS 1
*/
@@ -1095,7 +1095,7 @@ extern rtx bfin_cc_rtx, bfin_rets_rtx;
#define SET_ASM_OP ".set "
/* Debugger register number for a given compiler register number */
#define DEBUGGER_REGNO(REGNO) (REGNO)
#define DEBUGGER_REGNO(REGNO) (REGNO)
#define SIZE_ASM_OP "\t.size\t"
