lib1funcs.asm: Avoid use of .Lfe* in .size directives.
        * config/xtensa/lib1funcs.asm: Avoid use of .Lfe* in .size directives.
        (do_abs, do_addx2, do_addx4, do_addx8): New assembler macros.
        (__mulsi3): Use do_addx* instead of ADDX* instructions.  Formatting.
        (nsau): Rename to do_nsau.  Provide alternate version for use when
        the NSAU instruction is available.
        (__udivsi3, __divsi3, __umodsi3, __modsi3): Use do_nsau macro.
        (__divsi3, __modsi3): Use do_abs macro instead of ABS instruction.
        * config/xtensa/xtensa-config.h: Update comments to match binutils.
        (XCHAL_HAVE_ABS, XCHAL_HAVE_ADDX): Define.
        * config/xtensa/xtensa.h (MASK_ABS, MASK_ADDX): Define.
        (TARGET_ABS, TARGET_ADDX): Define.
        (TARGET_DEFAULT): Conditionally add MASK_ABS and MASK_ADDX.
        (TARGET_SWITCHES): Add "abs", "no-abs", "addx", and "no-addx".
        * config/xtensa/xtensa.md (*addx2, *addx4, *addx8, *subx2, *subx4,
        *subx8): Set predicate condition to TARGET_ADDX.
        (abssi2): Set predicate condition to TARGET_ABS.
        * doc/invoke.texi (Option Summary): Document new "-mabs", "-mno-abs",
        "-maddx", and "-mno-addx" options.
        (Xtensa Options): Likewise.  Also tag some opcode names with @code.

From-SVN: r67044
parent 69cf7a5532
commit 6c2e8d1cf9
6 changed files with 196 additions and 130 deletions
ChangeLog
@@ -1,3 +1,25 @@
+2003-05-20  Bob Wilson  <bob.wilson@acm.org>
+
+        * config/xtensa/lib1funcs.asm: Avoid use of .Lfe* in .size directives.
+        (do_abs, do_addx2, do_addx4, do_addx8): New assembler macros.
+        (__mulsi3): Use do_addx* instead of ADDX* instructions.  Formatting.
+        (nsau): Rename to do_nsau.  Provide alternate version for use when
+        the NSAU instruction is available.
+        (__udivsi3, __divsi3, __umodsi3, __modsi3): Use do_nsau macro.
+        (__divsi3, __modsi3): Use do_abs macro instead of ABS instruction.
+        * config/xtensa/xtensa-config.h: Update comments to match binutils.
+        (XCHAL_HAVE_ABS, XCHAL_HAVE_ADDX): Define.
+        * config/xtensa/xtensa.h (MASK_ABS, MASK_ADDX): Define.
+        (TARGET_ABS, TARGET_ADDX): Define.
+        (TARGET_DEFAULT): Conditionally add MASK_ABS and MASK_ADDX.
+        (TARGET_SWITCHES): Add "abs", "no-abs", "addx", and "no-addx".
+        * config/xtensa/xtensa.md (*addx2, *addx4, *addx8, *subx2, *subx4,
+        *subx8): Set predicate condition to TARGET_ADDX.
+        (abssi2): Set predicate condition to TARGET_ABS.
+        * doc/invoke.texi (Option Summary): Document new "-mabs", "-mno-abs",
+        "-maddx", and "-mno-addx" options.
+        (Xtensa Options): Likewise.  Also tag some opcode names with @code.
+
 2003-05-20  Kevin Ryde  <user42@zip.com.au>
             Wolfgang Bangerth <bangerth@dealii.org>
 
config/xtensa/lib1funcs.asm
@@ -1,5 +1,5 @@
 /* Assembly functions for the Xtensa version of libgcc1.
-   Copyright (C) 2001,2002 Free Software Foundation, Inc.
+   Copyright (C) 2001,2002,2003 Free Software Foundation, Inc.
    Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
 
 This file is part of GCC.
@@ -30,6 +30,46 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 
 #include "xtensa/xtensa-config.h"
 
+# Define macros for the ABS and ADDX* instructions to handle cases
+# where they are not included in the Xtensa processor configuration.
+
+        .macro  do_abs dst, src, tmp
+#if XCHAL_HAVE_ABS
+        abs     \dst, \src
+#else
+        neg     \tmp, \src
+        movgez  \tmp, \src, \src
+        mov     \dst, \tmp
+#endif
+        .endm
+
+        .macro  do_addx2 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+        addx2   \dst, \as, \at
+#else
+        slli    \tmp, \as, 1
+        add     \dst, \tmp, \at
+#endif
+        .endm
+
+        .macro  do_addx4 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+        addx4   \dst, \as, \at
+#else
+        slli    \tmp, \as, 2
+        add     \dst, \tmp, \at
+#endif
+        .endm
+
+        .macro  do_addx8 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+        addx8   \dst, \as, \at
+#else
+        slli    \tmp, \as, 3
+        add     \dst, \tmp, \at
+#endif
+        .endm
+
 #ifdef L_mulsi3
         .align  4
         .global __mulsi3
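The fallback bodies above rely on two simple identities: ABS can be synthesized from a negate plus a conditional move on the sign (MOVGEZ), and ADDXn folds a left shift by log2(n) into an add. A minimal C sketch of what the fallback sequences compute (illustrative only; the helper names are invented and are not part of the patch):

    #include <stdint.h>

    /* What the do_abs fallback computes: negate into a temporary, then
       keep the original value when it is already non-negative.  */
    static int32_t fallback_abs(int32_t src)
    {
        uint32_t tmp = 0u - (uint32_t)src;  /* neg    \tmp, \src        */
        if (src >= 0)
            tmp = (uint32_t)src;            /* movgez \tmp, \src, \src  */
        return (int32_t)tmp;                /* mov    \dst, \tmp        */
    }

    /* What the do_addx2/do_addx4/do_addx8 fallbacks compute:
       shift the first operand left by 1, 2, or 3 and add the second.  */
    static uint32_t fallback_addx(uint32_t as, uint32_t at, unsigned shift)
    {
        return (as << shift) + at;          /* slli \tmp, \as, n ; add \dst, \tmp, \at */
    }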
@@ -64,88 +104,85 @@ __mulsi3:
 
 #else /* !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MAC16 */
 
-        # Multiply one bit at a time, but unroll the loop 4x to better
-        # exploit the addx instructions.
-
-        # Peel the first iteration to save a cycle on init
-
-        # avoid negative numbers
+        # Multiply one bit at a time, but unroll the loop 4x to better
+        # exploit the addx instructions and avoid overhead.
+        # Peel the first iteration to save a cycle on init.
+
+        # Avoid negative numbers.
         xor     a5, a2, a3      # top bit is 1 iff one of the inputs is negative
-        abs     a3, a3
-        abs     a2, a2
+        do_abs  a3, a3, a6
+        do_abs  a2, a2, a6
 
-        # swap so that second argument is smaller
-        sub     a7, a2, a3
-        mov     a4, a3
-        movgez  a4, a2, a7      # a4 = max(a2, a3)
-        movltz  a3, a2, a7      # a3 = min(a2, a3)
+        # Swap so the second argument is smaller.
+        sub     a7, a2, a3
+        mov     a4, a3
+        movgez  a4, a2, a7      # a4 = max(a2, a3)
+        movltz  a3, a2, a7      # a3 = min(a2, a3)
 
-        movi    a2, 0
-        extui   a6, a3, 0, 1
-        movnez  a2, a4, a6
+        movi    a2, 0
+        extui   a6, a3, 0, 1
+        movnez  a2, a4, a6
 
-        addx2   a7, a4, a2
-        extui   a6, a3, 1, 1
-        movnez  a2, a7, a6
+        do_addx2 a7, a4, a2, a7
+        extui   a6, a3, 1, 1
+        movnez  a2, a7, a6
 
-        addx4   a7, a4, a2
-        extui   a6, a3, 2, 1
-        movnez  a2, a7, a6
+        do_addx4 a7, a4, a2, a7
+        extui   a6, a3, 2, 1
+        movnez  a2, a7, a6
 
-        addx8   a7, a4, a2
-        extui   a6, a3, 3, 1
-        movnez  a2, a7, a6
+        do_addx8 a7, a4, a2, a7
+        extui   a6, a3, 3, 1
+        movnez  a2, a7, a6
 
-        bgeui   a3, 16, .Lmult_main_loop
-        neg     a3, a2
-        movltz  a2, a3, a5
-        retw
+        bgeui   a3, 16, .Lmult_main_loop
+        neg     a3, a2
+        movltz  a2, a3, a5
+        retw
 
 
-        .align  4
+        .align  4
 .Lmult_main_loop:
-        srli    a3, a3, 4
-        slli    a4, a4, 4
+        srli    a3, a3, 4
+        slli    a4, a4, 4
 
-        add     a7, a4, a2
-        extui   a6, a3, 0, 1
-        movnez  a2, a7, a6
+        add     a7, a4, a2
+        extui   a6, a3, 0, 1
+        movnez  a2, a7, a6
 
-        addx2   a7, a4, a2
-        extui   a6, a3, 1, 1
-        movnez  a2, a7, a6
+        do_addx2 a7, a4, a2, a7
+        extui   a6, a3, 1, 1
+        movnez  a2, a7, a6
 
-        addx4   a7, a4, a2
-        extui   a6, a3, 2, 1
-        movnez  a2, a7, a6
+        do_addx4 a7, a4, a2, a7
+        extui   a6, a3, 2, 1
+        movnez  a2, a7, a6
 
-        addx8   a7, a4, a2
-        extui   a6, a3, 3, 1
-        movnez  a2, a7, a6
+        do_addx8 a7, a4, a2, a7
+        extui   a6, a3, 3, 1
+        movnez  a2, a7, a6
 
-        bgeui   a3, 16, .Lmult_main_loop
+        bgeui   a3, 16, .Lmult_main_loop
 
-        neg     a3, a2
-        movltz  a2, a3, a5
+        neg     a3, a2
+        movltz  a2, a3, a5
 
 #endif /* !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MAC16 */
 
         retw
-.Lfe0:
-        .size   __mulsi3,.Lfe0-__mulsi3
+        .size   __mulsi3,.-__mulsi3
 
 #endif /* L_mulsi3 */
 
 
-# Some Xtensa configurations include the NSAU (unsigned
-# normalize shift amount) instruction which computes the number
-# of leading zero bits.  For other configurations, the "nsau"
-# operation is implemented as a macro.
+# Define a macro for the NSAU (unsigned normalize shift amount)
+# instruction, which computes the number of leading zero bits,
+# to handle cases where it is not included in the Xtensa processor
+# configuration.
 
-#if !XCHAL_HAVE_NSA
-        .macro  nsau cnt, val, tmp, a
+        .macro  do_nsau cnt, val, tmp, a
+#if XCHAL_HAVE_NSA
+        nsau    \cnt, \val
+#else
         mov     \a, \val
         movi    \cnt, 0
         extui   \tmp, \a, 16, 16
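For reference, the multiply above is a shift-and-add loop: four bits of the smaller operand are consumed per iteration, and each set bit conditionally accumulates a4, 2*a4, 4*a4, or 8*a4 into the product, which is exactly where the do_addx2/do_addx4/do_addx8 invocations come in. A rough C model of that scheme (illustrative sketch, not a transcription of the assembly; 32-bit wrap-around is assumed):

    #include <stdint.h>

    /* Rough C model of __mulsi3's shift-and-add multiply.  The asm unrolls
       the loop body 4x and peels the first iteration; here it is a plain loop.  */
    static int32_t mulsi3_sketch(int32_t a, int32_t b)
    {
        uint32_t sign = (uint32_t)a ^ (uint32_t)b;              /* top bit: result sign */
        uint32_t ua = a < 0 ? 0u - (uint32_t)a : (uint32_t)a;   /* do_abs a2 */
        uint32_t ub = b < 0 ? 0u - (uint32_t)b : (uint32_t)b;   /* do_abs a3 */

        /* Swap so the second operand is the smaller one (fewer iterations).  */
        if (ua < ub) { uint32_t t = ua; ua = ub; ub = t; }

        uint32_t prod = 0;
        while (ub != 0) {
            if (ub & 1) prod += ua;          /* add                  */
            if (ub & 2) prod += ua << 1;     /* do_addx2 a7, a4, a2  */
            if (ub & 4) prod += ua << 2;     /* do_addx4 a7, a4, a2  */
            if (ub & 8) prod += ua << 3;     /* do_addx8 a7, a4, a2  */
            ub >>= 4;                        /* srli a3, a3, 4       */
            ua <<= 4;                        /* slli a4, a4, 4       */
        }

        /* Negate the product when the input signs differed.  */
        if (sign >> 31)
            prod = 0u - prod;
        return (int32_t)prod;
    }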
@@ -163,8 +200,8 @@ __mulsi3:
         add     \tmp, \tmp, \a
         l8ui    \tmp, \tmp, 0
         add     \cnt, \cnt, \tmp
-        .endm
 #endif /* !XCHAL_HAVE_NSA */
+        .endm
 
 #ifdef L_nsau
         .section .rodata
@@ -190,8 +227,7 @@ __nsau_data:
         .byte   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
         .byte   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
 #endif /* !XCHAL_HAVE_NSA */
-.Lfe1:
-        .size   __nsau_data,.Lfe1-__nsau_data
+        .size   __nsau_data,.-__nsau_data
         .hidden __nsau_data
 #endif /* L_nsau */
 
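The do_nsau fallback (only its tail is visible in the hunks above) counts leading zeros by narrowing the value 16 bits, then 8 bits at a time, and finishing with a lookup in the 256-byte __nsau_data table. A self-contained C sketch of the same counting scheme, with the final table lookup replaced by a small loop to keep it short (illustrative only, not the macro body):

    #include <stdint.h>

    /* Count leading zeros the way a do_nsau-style fallback does: narrow to
       the byte holding the highest set bit, then resolve within that byte.
       The real macro finishes with the __nsau_data table; a loop is used
       here instead.  nsau(0) is 32.  */
    static unsigned nsau_sketch(uint32_t val)
    {
        unsigned cnt = 0;
        if ((val >> 16) == 0) { cnt += 16; val <<= 16; }   /* high half empty  */
        if ((val >> 24) == 0) { cnt += 8;  val <<= 8;  }   /* next byte empty  */

        uint32_t top = val >> 24;       /* byte containing the highest set bit */
        unsigned lead = 8;              /* __nsau_data[0] == 8                 */
        while (top) { lead--; top >>= 1; }
        return cnt + lead;
    }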
@@ -205,13 +241,8 @@ __udivsi3:
         bltui   a3, 2, .Lle_one # check if the divisor <= 1
 
         mov     a6, a2          # keep dividend in a6
-#if XCHAL_HAVE_NSA
-        nsau    a5, a6          # dividend_shift = nsau(dividend)
-        nsau    a4, a3          # divisor_shift = nsau(divisor)
-#else /* !XCHAL_HAVE_NSA */
-        nsau    a5, a6, a2, a7  # dividend_shift = nsau(dividend)
-        nsau    a4, a3, a2, a7  # divisor_shift = nsau(divisor)
-#endif /* !XCHAL_HAVE_NSA */
+        do_nsau a5, a6, a2, a7  # dividend_shift = nsau(dividend)
+        do_nsau a4, a3, a2, a7  # divisor_shift = nsau(divisor)
         bgeu    a5, a4, .Lspecial
 
         sub     a4, a4, a5      # count = divisor_shift - dividend_shift
@@ -255,8 +286,7 @@ __udivsi3:
 .Lerror:
         movi    a2, 0           # just return 0; could throw an exception
         retw
-.Lfe2:
-        .size   __udivsi3,.Lfe2-__udivsi3
+        .size   __udivsi3,.-__udivsi3
 
 #endif /* L_udivsi3 */
 
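The comments in the hunk above describe the setup for the divide loop: nsau gives the leading-zero counts of the operands, their difference is the loop count, and the divisor is left-aligned against the dividend before a shift-and-subtract loop runs count + 1 times. The loop itself lies outside the lines changed here, so the following C model is only a sketch of the overall scheme under those assumptions, not a copy of the routine:

    #include <stdint.h>

    static unsigned clz32(uint32_t v)      /* stand-in for nsau; 32 for v == 0 */
    {
        unsigned n = 32;
        while (v) { n--; v >>= 1; }
        return n;
    }

    /* Sketch of the __udivsi3 flow: shift counts from nsau, then restoring
       shift-and-subtract division.  */
    static uint32_t udivsi3_sketch(uint32_t dividend, uint32_t divisor)
    {
        if (divisor == 0)
            return 0;                                     /* .Lerror just returns 0 */
        unsigned dividend_shift = clz32(dividend);        /* do_nsau a5, a6, ...    */
        unsigned divisor_shift  = clz32(divisor);         /* do_nsau a4, a3, ...    */
        if (dividend_shift >= divisor_shift)              /* bgeu a5, a4, .Lspecial */
            return dividend >= divisor ? 1 : 0;           /* quotient is 0 or 1     */

        unsigned count = divisor_shift - dividend_shift;  /* sub a4, a4, a5         */
        divisor <<= count;                                /* left-align the divisor */
        uint32_t quotient = 0;
        for (unsigned i = 0; i <= count; i++) {           /* count + 1 iterations   */
            quotient <<= 1;
            if (dividend >= divisor) { dividend -= divisor; quotient |= 1; }
            divisor >>= 1;
        }
        return quotient;
    }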
@@ -268,16 +298,11 @@ __udivsi3:
 __divsi3:
         entry   sp, 16
         xor     a7, a2, a3      # sign = dividend ^ divisor
-        abs     a6, a2          # udividend = abs(dividend)
-        abs     a3, a3          # udivisor = abs(divisor)
+        do_abs  a6, a2, a4      # udividend = abs(dividend)
+        do_abs  a3, a3, a4      # udivisor = abs(divisor)
         bltui   a3, 2, .Lle_one # check if udivisor <= 1
-#if XCHAL_HAVE_NSA
-        nsau    a5, a6          # udividend_shift = nsau(udividend)
-        nsau    a4, a3          # udivisor_shift = nsau(udivisor)
-#else /* !XCHAL_HAVE_NSA */
-        nsau    a5, a6, a2, a8  # udividend_shift = nsau(udividend)
-        nsau    a4, a3, a2, a8  # udivisor_shift = nsau(udivisor)
-#endif /* !XCHAL_HAVE_NSA */
+        do_nsau a5, a6, a2, a8  # udividend_shift = nsau(udividend)
+        do_nsau a4, a3, a2, a8  # udivisor_shift = nsau(udivisor)
         bgeu    a5, a4, .Lspecial
 
         sub     a4, a4, a5      # count = udivisor_shift - udividend_shift
@@ -326,8 +351,7 @@ __divsi3:
 .Lerror:
         movi    a2, 0           # just return 0; could throw an exception
         retw
-.Lfe3:
-        .size   __divsi3,.Lfe3-__divsi3
+        .size   __divsi3,.-__divsi3
 
 #endif /* L_divsi3 */
 
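As in __mulsi3, the sign of the quotient comes from the XOR of the operands, taken before the do_abs calls; the division itself runs on magnitudes and the result is negated at the end when the signs differed. A one-function C sketch of that convention (illustrative; C's own division operator stands in for the unsigned divide loop):

    #include <stdint.h>

    /* Quotient sign handling as in __divsi3: divide magnitudes, then negate
       the result exactly when the operand signs differ.  */
    static int32_t divsi3_sign_sketch(int32_t a, int32_t b)
    {
        uint32_t sign = (uint32_t)a ^ (uint32_t)b;               /* xor a7, a2, a3   */
        uint32_t ua = a < 0 ? 0u - (uint32_t)a : (uint32_t)a;    /* do_abs a6, a2    */
        uint32_t ub = b < 0 ? 0u - (uint32_t)b : (uint32_t)b;    /* do_abs a3, a3    */
        uint32_t uq = ub ? ua / ub : 0;                          /* magnitude divide */
        if (sign >> 31)                                          /* negate on sign   */
            uq = 0u - uq;
        return (int32_t)uq;
    }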
@@ -340,13 +364,8 @@ __umodsi3:
         entry   sp, 16
         bltui   a3, 2, .Lle_one # check if the divisor is <= 1
 
-#if XCHAL_HAVE_NSA
-        nsau    a5, a2          # dividend_shift = nsau(dividend)
-        nsau    a4, a3          # divisor_shift = nsau(divisor)
-#else /* !XCHAL_HAVE_NSA */
-        nsau    a5, a2, a6, a7  # dividend_shift = nsau(dividend)
-        nsau    a4, a3, a6, a7  # divisor_shift = nsau(divisor)
-#endif /* !XCHAL_HAVE_NSA */
+        do_nsau a5, a2, a6, a7  # dividend_shift = nsau(dividend)
+        do_nsau a4, a3, a6, a7  # divisor_shift = nsau(divisor)
         bgeu    a5, a4, .Lspecial
 
         sub     a4, a4, a5      # count = divisor_shift - dividend_shift
@@ -384,8 +403,7 @@ __umodsi3:
         # someday we may want to throw an exception if the divisor is 0.
         movi    a2, 0
         retw
-.Lfe4:
-        .size   __umodsi3,.Lfe4-__umodsi3
+        .size   __umodsi3,.-__umodsi3
 
 #endif /* L_umodsi3 */
 
@@ -397,16 +415,11 @@ __umodsi3:
 __modsi3:
         entry   sp, 16
         mov     a7, a2          # save original (signed) dividend
-        abs     a2, a2          # udividend = abs(dividend)
-        abs     a3, a3          # udivisor = abs(divisor)
+        do_abs  a2, a2, a4      # udividend = abs(dividend)
+        do_abs  a3, a3, a4      # udivisor = abs(divisor)
         bltui   a3, 2, .Lle_one # check if udivisor <= 1
-#if XCHAL_HAVE_NSA
-        nsau    a5, a2          # udividend_shift = nsau(udividend)
-        nsau    a4, a3          # udivisor_shift = nsau(udivisor)
-#else /* !XCHAL_HAVE_NSA */
-        nsau    a5, a2, a6, a8  # udividend_shift = nsau(udividend)
-        nsau    a4, a3, a6, a8  # udivisor_shift = nsau(udivisor)
-#endif /* !XCHAL_HAVE_NSA */
+        do_nsau a5, a2, a6, a8  # udividend_shift = nsau(udividend)
+        do_nsau a4, a3, a6, a8  # udivisor_shift = nsau(udivisor)
         bgeu    a5, a4, .Lspecial
 
         sub     a4, a4, a5      # count = udivisor_shift - udividend_shift
@@ -450,7 +463,6 @@ __modsi3:
         # someday we may want to throw an exception if udivisor is 0.
         movi    a2, 0
         retw
-.Lfe5:
-        .size   __modsi3,.Lfe5-__modsi3
+        .size   __modsi3,.-__modsi3
 
 #endif /* L_modsi3 */
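__modsi3 follows a different sign rule than __divsi3: the remainder takes the sign of the original dividend, which is why the prologue saves the signed dividend in a7 before the do_abs calls. The epilogue is not part of the lines changed here, so the following is only a sketch of that rule, using C's operators for the unsigned work:

    #include <stdint.h>

    /* Remainder sign handling as in __modsi3: the result carries the sign
       of the original (signed) dividend stashed by the prologue.  */
    static int32_t modsi3_sign_sketch(int32_t a, int32_t b)
    {
        int32_t saved_dividend = a;                              /* mov a7, a2          */
        uint32_t ua = a < 0 ? 0u - (uint32_t)a : (uint32_t)a;    /* do_abs a2, a2       */
        uint32_t ub = b < 0 ? 0u - (uint32_t)b : (uint32_t)b;    /* do_abs a3, a3       */
        uint32_t ur = ub ? ua % ub : 0;                          /* magnitude remainder */
        if (saved_dividend < 0)                                  /* sign follows dividend */
            ur = 0u - ur;
        return (int32_t)ur;
    }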
config/xtensa/xtensa-config.h
@@ -2,32 +2,33 @@
    Copyright (C) 2001,2002,2003 Free Software Foundation, Inc.
    Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
 
-** NOTE: This file was automatically generated by the Xtensa Processor
-** Generator.  Changes made here will be lost when this file is
-** updated or replaced with the settings for a different Xtensa
-** processor configuration.  DO NOT EDIT!
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
 
-This program is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-General Public License for more details.
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+General Public License for more details.
 
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-*/
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
 
 #ifndef XTENSA_CONFIG_H
 #define XTENSA_CONFIG_H
 
+/* The macros defined here match those with the same names in the Xtensa
+   compile-time HAL (Hardware Abstraction Layer).  Please refer to the
+   Xtensa System Software Reference Manual for documentation of these
+   macros.  */
+
 #define XCHAL_HAVE_BE           1
 #define XCHAL_HAVE_DENSITY      1
 #define XCHAL_HAVE_CONST16      0
+#define XCHAL_HAVE_ABS          1
+#define XCHAL_HAVE_ADDX         1
 #define XCHAL_HAVE_L32R         1
 #define XCHAL_HAVE_MAC16        0
 #define XCHAL_HAVE_MUL16        0
config/xtensa/xtensa.h
@@ -62,6 +62,8 @@ extern unsigned xtensa_current_frame_size;
 #define MASK_NO_FUSED_MADD      0x00008000      /* avoid f-p mul/add */
 #define MASK_SERIALIZE_VOLATILE 0x00010000      /* serialize volatile refs */
 #define MASK_CONST16            0x00020000      /* use CONST16 instruction */
+#define MASK_ABS                0x00040000      /* use ABS instruction */
+#define MASK_ADDX               0x00080000      /* use ADDX* and SUBX* */
 
 /* Macros used in the machine description to test the flags.  */
 
@@ -83,6 +85,8 @@ extern unsigned xtensa_current_frame_size;
 #define TARGET_NO_FUSED_MADD    (target_flags & MASK_NO_FUSED_MADD)
 #define TARGET_SERIALIZE_VOLATILE (target_flags & MASK_SERIALIZE_VOLATILE)
 #define TARGET_CONST16          (target_flags & MASK_CONST16)
+#define TARGET_ABS              (target_flags & MASK_ABS)
+#define TARGET_ADDX             (target_flags & MASK_ADDX)
 
 /* Default target_flags if no switches are specified  */
 
@@ -90,6 +94,8 @@ extern unsigned xtensa_current_frame_size;
   (XCHAL_HAVE_BE      ? MASK_BIG_ENDIAN : 0) |  \
   (XCHAL_HAVE_DENSITY ? MASK_DENSITY : 0) |     \
   (XCHAL_HAVE_L32R    ? 0 : MASK_CONST16) |     \
+  (XCHAL_HAVE_ABS     ? MASK_ABS : 0) |         \
+  (XCHAL_HAVE_ADDX    ? MASK_ADDX : 0) |        \
   (XCHAL_HAVE_MAC16   ? MASK_MAC16 : 0) |       \
   (XCHAL_HAVE_MUL16   ? MASK_MUL16 : 0) |       \
   (XCHAL_HAVE_MUL32   ? MASK_MUL32 : 0) |       \
@@ -121,6 +127,14 @@ extern unsigned xtensa_current_frame_size;
     N_("Use CONST16 instruction to load constants")},              \
   {"no-const16",        -MASK_CONST16,                             \
     N_("Use PC-relative L32R instruction to load constants")},     \
+  {"abs",               MASK_ABS,                                  \
+    N_("Use the Xtensa ABS instruction")},                         \
+  {"no-abs",            -MASK_ABS,                                 \
+    N_("Do not use the Xtensa ABS instruction")},                  \
+  {"addx",              MASK_ADDX,                                 \
+    N_("Use the Xtensa ADDX and SUBX instructions")},              \
+  {"no-addx",           -MASK_ADDX,                                \
+    N_("Do not use the Xtensa ADDX and SUBX instructions")},       \
   {"mac16",             MASK_MAC16,                                \
     N_("Use the Xtensa MAC16 option")},                            \
   {"no-mac16",          -MASK_MAC16,                               \
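The xtensa.h changes are the usual target_flags plumbing: each new -m switch sets or clears one mask bit, TARGET_DEFAULT seeds the bits from the XCHAL_HAVE_* configuration macros, and the backend then tests TARGET_ABS and TARGET_ADDX, as the xtensa.md conditions below do. A standalone C mock of that bit-mask mechanics follows; it is not GCC code, and the _demo names are invented to make that clear:

    #include <stdio.h>

    #define MASK_ABS_DEMO   0x00040000
    #define MASK_ADDX_DEMO  0x00080000

    /* Seeded the way TARGET_DEFAULT seeds target_flags from XCHAL_HAVE_*.  */
    static int target_flags_demo = MASK_ABS_DEMO | MASK_ADDX_DEMO;

    /* A TARGET_SWITCHES-style entry pairs a name with a mask; negated
       entries ("no-abs", "no-addx") clear the bit instead of setting it.  */
    static void apply_switch_demo(int mask)
    {
        if (mask < 0)
            target_flags_demo &= ~(-mask);
        else
            target_flags_demo |= mask;
    }

    int main(void)
    {
        apply_switch_demo(-MASK_ADDX_DEMO);              /* like -mno-addx */
        if (target_flags_demo & MASK_ADDX_DEMO)          /* TARGET_ADDX    */
            puts("addx2/addx4/addx8 patterns available");
        else
            puts("fall back to slli + add sequences");
        return 0;
    }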
config/xtensa/xtensa.md
@@ -163,7 +163,7 @@
         (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
                           (const_int 2))
                  (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "addx2\\t%0, %1, %2"
   [(set_attr "type"     "arith")
    (set_attr "mode"     "SI")
@@ -174,7 +174,7 @@
         (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
                           (const_int 4))
                  (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "addx4\\t%0, %1, %2"
   [(set_attr "type"     "arith")
    (set_attr "mode"     "SI")
@@ -185,7 +185,7 @@
         (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
                           (const_int 8))
                  (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "addx8\\t%0, %1, %2"
   [(set_attr "type"     "arith")
    (set_attr "mode"     "SI")
@@ -257,7 +257,7 @@
         (minus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
                            (const_int 2))
                   (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "subx2\\t%0, %1, %2"
   [(set_attr "type"     "arith")
    (set_attr "mode"     "SI")
@@ -268,7 +268,7 @@
         (minus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
                            (const_int 4))
                   (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "subx4\\t%0, %1, %2"
   [(set_attr "type"     "arith")
    (set_attr "mode"     "SI")
@@ -279,7 +279,7 @@
         (minus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
                            (const_int 8))
                   (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "subx8\\t%0, %1, %2"
   [(set_attr "type"     "arith")
    (set_attr "mode"     "SI")
@@ -518,7 +518,7 @@
 (define_insn "abssi2"
   [(set (match_operand:SI 0 "register_operand" "=a")
         (abs:SI (match_operand:SI 1 "register_operand" "r")))]
-  ""
+  "TARGET_ABS"
   "abs\\t%0, %1"
   [(set_attr "type"     "arith")
    (set_attr "mode"     "SI")
doc/invoke.texi
@@ -634,6 +634,8 @@ in the following sections.
 @gccoptlist{-mbig-endian  -mlittle-endian @gol
 -mdensity  -mno-density @gol
 -mconst16  -mno-const16 @gol
+-mabs  -mno-abs @gol
+-maddx  -mno-addx @gol
 -mmac16  -mno-mac16 @gol
 -mmul16  -mno-mul16 @gol
 -mmul32  -mno-mul32 @gol
@@ -10649,11 +10651,26 @@ Enable or disable use of the optional Xtensa code density instructions.
 @itemx -mno-const16
 @opindex mconst16
 @opindex mno-const16
-Enable or disable use of CONST16 instructions for loading constant values.
-The CONST16 instruction is currently not a standard option from Tensilica.
-When enabled, CONST16 instructions are always used in place of the standard
-L32R instructions.  The use of CONST16 is enabled by default only if the
-L32R instruction is not available.
+Enable or disable use of @code{CONST16} instructions for loading
+constant values.  The @code{CONST16} instruction is currently not a
+standard option from Tensilica.  When enabled, @code{CONST16}
+instructions are always used in place of the standard @code{L32R}
+instructions.  The use of @code{CONST16} is enabled by default only if
+the @code{L32R} instruction is not available.
 
+@item -mabs
+@itemx -mno-abs
+@opindex mabs
+@opindex mno-abs
+Enable or disable use of the Xtensa @code{ABS} instruction for absolute
+value operations.
+
+@item -maddx
+@itemx -mno-addx
+@opindex maddx
+@opindex mno-addx
+Enable or disable use of the Xtensa @code{ADDX} and @code{SUBX}
+instructions.
+
 @item -mmac16
 @itemx -mno-mac16