x86-tune.def: Enable X86_TUNE_SSE_TYPELESS_STORES for generic...

* config/i386/x86-tune.def: Enable X86_TUNE_SSE_TYPELESS_STORES
	for generic, enable X86_TUNE_SSE_LOAD0_BY_PXOR for Bulldozer,
	Bobcat and generic.

	* gcc.target/i386/avx256-unaligned-store-3.c: Update template for
	tuning change.
	* gcc.target/i386/avx256-unaligned-store-1.c: Likewise.
	* gcc.target/i386/pr49168-1.c: Likewise.
	* gcc.target/i386/pr49002-2.c: Likewise.

From-SVN: r203387
Author:    Jan Hubicka
Date:      2013-10-10 19:52:40 +02:00
Committer: Jan Hubicka
Parent:    a1d5038607
Commit:    13ef00fa08
7 changed files with 24 additions and 10 deletions
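
The two tunings change instruction selection for 128-bit SSE stores and for
zeroing SSE registers.  As a rough illustration only (this snippet is not taken
from the commit; the function names and intrinsics are illustrative), code like
the following, built with something like -O2 -msse2 -mtune=generic, is the kind
affected: the unaligned 128-bit integer store may now be emitted as movups
instead of movdqu, and the zero load may use pxor instead of xorps/xorpd.

#include <emmintrin.h>

/* Unaligned 128-bit integer store: with sse_typeless_stores enabled for
   generic tuning, this is expected to use the typeless movups form rather
   than movdqu.  */
void
store_unaligned (__m128i *dst, __m128i v)
{
  _mm_storeu_si128 (dst, v);
}

/* Loading a zero vector: with sse_load0_by_pxor, the register may be
   cleared with pxor rather than xorps/xorpd.  */
__m128d
zero_pd (void)
{
  return _mm_setzero_pd ();
}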


@@ -1,3 +1,9 @@
+2013-10-10  Jan Hubicka  <jh@suse.cz>
+
+	* config/i386/x86-tune.def: Enable X86_TUNE_SSE_TYPELESS_STORES
+	for generic, enable X86_TUNE_SSE_LOAD0_BY_PXOR for Bulldozer,
+	Bobcat and generic.
+
 2013-10-10  Jakub Jelinek  <jakub@redhat.com>
 
 	PR middle-end/58670


@@ -221,16 +221,14 @@ DEF_TUNE (X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL, "sse_packed_single_insn_optim
    upper part undefined.  */
 DEF_TUNE (X86_TUNE_SSE_SPLIT_REGS, "sse_split_regs", m_ATHLON_K8)
-/* X86_TUNE_SSE_TYPELESS_STORES: Always movaps/movups for 128bit stores.
-   FIXME: Shall we enable it for generic?  */
+/* X86_TUNE_SSE_TYPELESS_STORES: Always movaps/movups for 128bit stores.  */
 DEF_TUNE (X86_TUNE_SSE_TYPELESS_STORES, "sse_typeless_stores",
-	  m_AMD_MULTIPLE | m_CORE_ALL)
+	  m_AMD_MULTIPLE | m_CORE_ALL | m_GENERIC)
 /* X86_TUNE_SSE_LOAD0_BY_PXOR: Always use pxor to load0 as opposed to
-   xorps/xorpd and other variants.
-   FIXME: Shall we enable it buldozers and for generic?  */
+   xorps/xorpd and other variants.  */
 DEF_TUNE (X86_TUNE_SSE_LOAD0_BY_PXOR, "sse_load0_by_pxor",
-	  m_PPRO | m_P4_NOCONA | m_CORE_ALL)
+	  m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BDVER | m_BTVER | m_GENERIC)
 /* X86_TUNE_MEMORY_MISMATCH_STALL: Avoid partial stores that are followed by
    full sized loads.  */
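
Each DEF_TUNE entry is keyed by a bitmask of processor models; a tuning takes
effect when the bit for the CPU selected by -mtune is set in that mask, so
adding m_GENERIC, m_BDVER and m_BTVER above is what turns the features on for
generic, Bulldozer and Bobcat tuning.  A self-contained sketch of that
mechanism (simplified; the enum and mask names below are stand-ins for the real
definitions in the i386 backend, not copies of them):

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the backend's per-CPU bits.  */
enum cpu { CPU_CORE = 0, CPU_BDVER, CPU_BTVER, CPU_GENERIC };

#define M(c)       (1u << (c))
#define m_CORE_ALL M (CPU_CORE)
#define m_BDVER    M (CPU_BDVER)
#define m_BTVER    M (CPU_BTVER)
#define m_GENERIC  M (CPU_GENERIC)

/* After this commit the sse_load0_by_pxor mask also covers Bulldozer,
   Bobcat and generic (other real members of the mask omitted here).  */
static const unsigned sse_load0_by_pxor_mask
  = m_CORE_ALL | m_BDVER | m_BTVER | m_GENERIC;

int
main (void)
{
  enum cpu tuned_for = CPU_GENERIC;	/* as chosen by -mtune=generic */
  printf ("sse_load0_by_pxor enabled: %s\n",
	  (sse_load0_by_pxor_mask & M (tuned_for)) ? "yes" : "no");
  return 0;
}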


@@ -1,3 +1,11 @@
+2013-10-10  Jan Hubicka  <jh@suse.cz>
+
+	* gcc.target/i386/avx256-unaligned-store-3.c: Update template for
+	tuning change.
+	* gcc.target/i386/avx256-unaligned-store-1.c: Likewise.
+	* gcc.target/i386/pr49168-1.c: Likewise.
+	* gcc.target/i386/pr49002-2.c: Likewise.
+
 2013-10-10  Jakub Jelinek  <jakub@redhat.com>
 
 	PR middle-end/58670


@@ -24,5 +24,5 @@ avx_test (void)
 }
 /* { dg-final { scan-assembler-not "avx_storedqu256" } } */
-/* { dg-final { scan-assembler "vmovdqu.*\\*movv16qi_internal/3" } } */
+/* { dg-final { scan-assembler "vmovups.*\\*movv16qi_internal/3" } } */
 /* { dg-final { scan-assembler "vextract.128" } } */


@@ -18,5 +18,5 @@ avx_test (void)
 }
 /* { dg-final { scan-assembler-not "avx_storeupd256" } } */
-/* { dg-final { scan-assembler "vmovupd.*\\*movv2df_internal/3" } } */
+/* { dg-final { scan-assembler "vmovups.*\\*movv2df_internal/3" } } */
 /* { dg-final { scan-assembler "vextractf128" } } */


@@ -11,4 +11,5 @@ void foo(const __m128d from, __m256d *to)
 /* Ensure we store ymm, not xmm.  */
 /* { dg-final { scan-assembler-not "vmovapd\[\t \]*%xmm\[0-9\]\+,\[^,\]*" } } */
-/* { dg-final { scan-assembler "vmovapd\[\t \]*%ymm\[0-9\]\+,\[^,\]*" } } */
+/* { dg-final { scan-assembler-not "vmovaps\[\t \]*%xmm\[0-9\]\+,\[^,\]*" } } */
+/* { dg-final { scan-assembler "vmovaps\[\t \]*%ymm\[0-9\]\+,\[^,\]*" } } */


@@ -2,7 +2,8 @@
 /* { dg-do compile } */
 /* { dg-options "-O2 -msse2 -mtune=generic" } */
 /* { dg-final { scan-assembler-not "movdqa\[\t \]*%xmm\[0-9\]\+,\[^,\]*" } } */
-/* { dg-final { scan-assembler "movdqu\[\t \]*%xmm\[0-9\]\+,\[^,\]*" } } */
+/* { dg-final { scan-assembler-not "movaps\[\t \]*%xmm\[0-9\]\+,\[^,\]*" } } */
+/* { dg-final { scan-assembler "movups\[\t \]*%xmm\[0-9\]\+,\[^,\]*" } } */
 void
 flt128_va (void *mem, __float128 d)