Revert my previous commit.
From-SVN: r133167
parent 8a6c843029
commit 8ec3e357d4
11 changed files with 26 additions and 60 deletions

gcc/ChangeLog
@@ -1,19 +1,3 @@
-2008-03-13  Uros Bizjak  <ubizjak@gmail.com>
-
-        PR target/35553
-        * config/i386/i386.h (TARGET_CPU_CPP_BUILTINS): Define
-        __SSE_USE_INLINED_FUNC__ when using -fkeep-inline-functions
-        together with optimization.
-        * config/i386/xmmintrin.h: Use __SSE_USE_INLINED_FUNC__ instead of
-        __OPTIMIZE__ to choose between inlined intrinsic SSE function having
-        immediate arguments and its equivalent macro definition.
-        * config/i386/bmmintrin.h: Ditto.
-        * config/i386/smmintrin.h: Ditto.
-        * config/i386/tmmintrin.h: Ditto.
-        * config/i386/mmintrin-common.h: Ditto.
-        * config/i386/ammintrin.h: Ditto.
-        * config/i386/emmintrin.h: Ditto.
-
 2008-03-13  Jakub Jelinek  <jakub@redhat.com>
 
         PR middle-end/35185
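
For orientation (not part of the diff): each header touched below pairs an always_inline wrapper with an equivalent macro for every intrinsic that takes an immediate operand, and the #ifdef being flipped back here is what chooses between the two forms. A schematic sketch of that pattern, using a made-up intrinsic and builtin name (the real headers follow the same shape):

/* Hypothetical illustration only; _mm_example_epi32 and
   __builtin_ia32_example are not real GCC identifiers.  */
#ifdef __OPTIMIZE__   /* the reverted patch keyed this on __SSE_USE_INLINED_FUNC__ */
static __inline __m128i __attribute__((__always_inline__, __artificial__))
_mm_example_epi32 (__m128i __X, const int __N)
{
  return (__m128i) __builtin_ia32_example ((__v4si) __X, __N);
}
#else
#define _mm_example_epi32(X, N) \
  ((__m128i) __builtin_ia32_example ((__v4si)(X), (int)(N)))
#endif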

gcc/config/i386/ammintrin.h
@@ -55,7 +55,7 @@ _mm_extract_si64 (__m128i __X, __m128i __Y)
   return (__m128i) __builtin_ia32_extrq ((__v2di) __X, (__v16qi) __Y);
 }
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
 _mm_extracti_si64 (__m128i __X, unsigned const int __I, unsigned const int __L)
 {
@@ -73,7 +73,7 @@ _mm_insert_si64 (__m128i __X,__m128i __Y)
   return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y);
 }
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
 _mm_inserti_si64(__m128i __X, __m128i __Y, unsigned const int __I, unsigned const int __L)
 {

gcc/config/i386/bmmintrin.h
@@ -350,7 +350,7 @@ _mm_rot_epi64(__m128i __A, __m128i __B)
 
 
 /* Rotates - Immediate form */
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
 _mm_roti_epi8(__m128i __A, const int __B)
 {

gcc/config/i386/emmintrin.h
@@ -880,7 +880,7 @@ _mm_cvtss_sd (__m128d __A, __m128 __B)
   return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B);
 }
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128d __attribute__((__always_inline__, __artificial__))
 _mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask)
 {
@@ -1144,7 +1144,7 @@ _mm_srai_epi32 (__m128i __A, int __B)
   return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B);
 }
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
 _mm_srli_si128 (__m128i __A, const int __N)
 {
@@ -1307,7 +1307,7 @@ _mm_cmpgt_epi32 (__m128i __A, __m128i __B)
   return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B);
 }
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline int __attribute__((__always_inline__, __artificial__))
 _mm_extract_epi16 (__m128i const __A, int const __N)
 {
@@ -1363,7 +1363,7 @@ _mm_mulhi_epu16 (__m128i __A, __m128i __B)
   return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B);
 }
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
 _mm_shufflehi_epi16 (__m128i __A, const int __mask)
 {

gcc/config/i386/i386.h
@@ -691,11 +691,6 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
         builtin_define ("__SSE_MATH__");                        \
       if (TARGET_SSE_MATH && TARGET_SSE2)                       \
         builtin_define ("__SSE2_MATH__");                       \
-                                                                \
-      /* Use inlined intrinsic SSE function having immediate    \
-         arguments instead of a macro definition.  */           \
-      if (optimize && !flag_keep_inline_functions)              \
-        builtin_define ("__SSE_USE_INLINED_FUNC__");            \
     }                                                           \
   while (0)
 
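
The removed hunk defined __SSE_USE_INLINED_FUNC__ only when optimizing without -fkeep-inline-functions, whereas the stock headers key on __OPTIMIZE__ alone. A small probe (illustration only, not part of the commit) shows which guard a given set of options leaves active; compile it with, for example, -O2, -O0, or -O2 -fkeep-inline-functions:

#include <stdio.h>

int
main (void)
{
#if defined (__SSE_USE_INLINED_FUNC__)   /* defined only by the reverted patch */
  puts ("inline-function intrinsics (patched guard)");
#elif defined (__OPTIMIZE__)
  puts ("inline-function intrinsics (stock __OPTIMIZE__ guard)");
#else
  puts ("macro intrinsics");
#endif
  return 0;
}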

gcc/config/i386/mmintrin-common.h
@@ -92,7 +92,7 @@ _mm_testnzc_si128 (__m128i __M, __m128i __V)
 
 /* Packed/scalar double precision floating point rounding. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128d __attribute__((__always_inline__, __artificial__))
 _mm_round_pd (__m128d __V, const int __M)
 {
@@ -117,7 +117,7 @@ _mm_round_sd(__m128d __D, __m128d __V, const int __M)
 
 /* Packed/scalar single precision floating point rounding. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128 __attribute__((__always_inline__, __artificial__))
 _mm_round_ps (__m128 __V, const int __M)
 {

gcc/config/i386/smmintrin.h
@@ -44,7 +44,7 @@
 /* Integer blend instructions - select data from 2 sources using
    constant/variable mask. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
 _mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M)
 {
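
Usage note (not part of the diff): the blend mask is an integer constant expression; when bit i is set, 16-bit word i is taken from the second operand. A minimal example, assuming SSE4.1 is enabled (e.g. via -msse4.1):

#include <smmintrin.h>

__m128i
merge_halves (__m128i x, __m128i y)
{
  /* 0xF0: the low four words come from x, the high four from y.
     The mask must be a compile-time constant.  */
  return _mm_blend_epi16 (x, y, 0xF0);
}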

gcc/config/i386/smmintrin.h (continued)
@@ -69,7 +69,7 @@ _mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M)
 /* Single precision floating point blend instructions - select data
    from 2 sources using constant/variable mask. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128 __attribute__((__always_inline__, __artificial__))
 _mm_blend_ps (__m128 __X, __m128 __Y, const int __M)
 {
@@ -94,7 +94,7 @@ _mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M)
 /* Double precision floating point blend instructions - select data
    from 2 sources using constant/variable mask. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128d __attribute__((__always_inline__, __artificial__))
 _mm_blend_pd (__m128d __X, __m128d __Y, const int __M)
 {
@@ -119,7 +119,7 @@ _mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M)
 /* Dot product instructions with mask-defined summing and zeroing parts
    of result. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128 __attribute__((__always_inline__, __artificial__))
 _mm_dp_ps (__m128 __X, __m128 __Y, const int __M)
 {
@@ -224,7 +224,7 @@ _mm_mul_epi32 (__m128i __X, __m128i __Y)
    index, the bits [5-4] define D index, and bits [3-0] define
    zeroing mask for D. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128 __attribute__((__always_inline__, __artificial__))
 _mm_insert_ps (__m128 __D, __m128 __S, const int __N)
 {
@@ -244,7 +244,7 @@ _mm_insert_ps (__m128 __D, __m128 __S, const int __N)
 /* Extract binary representation of single precision float from packed
    single precision array element of X selected by index N. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline int __attribute__((__always_inline__, __artificial__))
 _mm_extract_ps (__m128 __X, const int __N)
 {
@@ -277,7 +277,7 @@ _mm_extract_ps (__m128 __X, const int __N)
 /* Insert integer, S, into packed integer array element of D
    selected by index N. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
 _mm_insert_epi8 (__m128i __D, int __S, const int __N)
 {
@@ -319,7 +319,7 @@ _mm_insert_epi64 (__m128i __D, long long __S, const int __N)
 /* Extract integer from packed integer array element of X selected by
    index N. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline int __attribute__((__always_inline__, __artificial__))
 _mm_extract_epi8 (__m128i __X, const int __N)
 {
@@ -447,7 +447,7 @@ _mm_packus_epi32 (__m128i __X, __m128i __Y)
    byte integers in the first 2 operands. Starting offsets within
    operands are determined by the 3rd mask operand. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
 _mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M)
 {
@@ -497,7 +497,7 @@ _mm_stream_load_si128 (__m128i *__X)
 
 /* Intrinsics for text/string processing. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
 _mm_cmpistrm (__m128i __X, __m128i __Y, const int __M)
 {
@@ -550,7 +550,7 @@ _mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
 /* Intrinsics for text/string processing and reading values of
    EFlags. */
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline int __attribute__((__always_inline__, __artificial__))
 _mm_cmpistra (__m128i __X, __m128i __Y, const int __M)
 {

gcc/config/i386/tmmintrin.h
@@ -181,7 +181,7 @@ _mm_sign_pi32 (__m64 __X, __m64 __Y)
   return (__m64) __builtin_ia32_psignd ((__v2si)__X, (__v2si)__Y);
 }
 
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128i __attribute__((__always_inline__, __artificial__))
 _mm_alignr_epi8(__m128i __X, __m128i __Y, const int __N)
 {

gcc/config/i386/xmmintrin.h
@@ -716,7 +716,7 @@ _mm_cvtps_pi8(__m128 __A)
 }
 
 /* Selects four specific SPFP values from A and B based on MASK. */
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m128 __attribute__((__always_inline__, __artificial__))
 _mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
 {

gcc/config/i386/xmmintrin.h (continued)
@@ -992,7 +992,7 @@ _mm_move_ss (__m128 __A, __m128 __B)
 }
 
 /* Extracts one of the four words of A. The selector N must be immediate. */
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline int __attribute__((__always_inline__, __artificial__))
 _mm_extract_pi16 (__m64 const __A, int const __N)
 {
@@ -1013,7 +1013,7 @@ _m_pextrw (__m64 const __A, int const __N)
 
 /* Inserts word D into one of four words of A. The selector N must be
    immediate. */
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m64 __attribute__((__always_inline__, __artificial__))
 _mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
 {
@@ -1114,7 +1114,7 @@ _m_pmulhuw (__m64 __A, __m64 __B)
 
 /* Return a combination of the four 16-bit values in A. The selector
    must be an immediate. */
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline __m64 __attribute__((__always_inline__, __artificial__))
 _mm_shuffle_pi16 (__m64 __A, int const __N)
 {

gcc/config/i386/xmmintrin.h (continued)
@@ -1191,7 +1191,7 @@ _m_psadbw (__m64 __A, __m64 __B)
 
 /* Loads one cache line from address P to a location "closer" to the
    processor. The selector I specifies the type of prefetch operation. */
-#ifdef __SSE_USE_INLINED_FUNC__
+#ifdef __OPTIMIZE__
 static __inline void __attribute__((__always_inline__, __artificial__))
 _mm_prefetch (const void *__P, enum _mm_hint __I)
 {
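
Likewise for reference (not part of the diff), the hint passed to _mm_prefetch must be a constant from enum _mm_hint:

#include <xmmintrin.h>

void
warm_cache (const float *p)
{
  /* Prefetch the cache line holding *p into all cache levels.  */
  _mm_prefetch (p, _MM_HINT_T0);
}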

gcc/testsuite/ChangeLog
@@ -1,8 +1,3 @@
-2008-03-13  Uros Bizjak  <ubizjak@gmail.com>
-
-        PR target/35553
-        * g++.dg/other/i386-3.C: New test.
-
 2008-03-13  Paolo Bonzini  <bonzini@gnu.org>
 
         PR tree-opt/35422

gcc/testsuite/g++.dg/other/i386-3.C (deleted)
@@ -1,8 +0,0 @@
-/* Test that {,x,e,p,t,s,a,b}mmintrin.h, mm3dnow.h and mm_malloc.h are
-   usable with -O -fkeep-inline-functions. */
-/* { dg-do compile { target i?86-*-* x86_64-*-* } } */
-/* { dg-options "-O -fkeep-inline-functions -march=k8 -m3dnow -msse4 -msse5" } */
-
-#include <bmmintrin.h>
-#include <smmintrin.h>
-#include <mm3dnow.h>
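
For context, a reduced translation unit in the spirit of the deleted test (hypothetical, not part of the tree); built with the options from the dg-options line above, it exercises one of the immediate-argument intrinsics those headers define:

#include <emmintrin.h>

__m128i
shift_right_4_bytes (__m128i v)
{
  /* Immediate byte count; the deleted test checked that such calls
     still compile when -O is combined with -fkeep-inline-functions.  */
  return _mm_srli_si128 (v, 4);
}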