rs6000: Remove -maltivec={be,le}

This removes the -maltivec=be and -maltivec=le options.  Those were
deprecated in GCC 8.

AltiVec keeps working on both BE and LE; it is only the BE element order
on LE targets that is removed (the other way around was never supported).
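
As an illustration of what is being removed (this is exactly what the
deleted gcc.dg/vmx/extract-be-order.c test below checks), the option made
the intrinsics number vector elements from the other end on LE:

	vector unsigned char va = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};

	vec_extract (va, 5)   /* == 5 with the default element order  */
	vec_extract (va, 5)   /* == 10 on LE with the removed -maltivec=be  */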

The main change is replacing VECTOR_ELT_ORDER_BIG by BYTES_BIG_ENDIAN
(and then simplifying).
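
In rs6000.h this removes

	#define VECTOR_ELT_ORDER_BIG \
	  (BYTES_BIG_ENDIAN || (rs6000_altivec_element_order == 2))

so a typical use site in the backend simplifies from

	if (VECTOR_ELT_ORDER_BIG)

to just

	if (BYTES_BIG_ENDIAN)

as the rs6000.h and *.md hunks below show.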


gcc/
	* config/rs6000/altivec.md (altivec_vmrghb, altivec_vmrghh,
	altivec_vmrghw, altivec_vmrglb, altivec_vmrglh, altivec_vmrglw): Remove
	-maltivec=be support.
	(vec_widen_umult_even_v16qi, vec_widen_smult_even_v16qi,
	vec_widen_umult_even_v8hi, vec_widen_smult_even_v8hi,
	vec_widen_umult_even_v4si, vec_widen_smult_even_v4si,
	vec_widen_umult_odd_v16qi, vec_widen_smult_odd_v16qi,
	vec_widen_umult_odd_v8hi, vec_widen_smult_odd_v8hi,
	vec_widen_umult_odd_v4si, vec_widen_smult_odd_v4si, altivec_vpkpx,
	altivec_vpks<VI_char>ss, altivec_vpks<VI_char>us,
	altivec_vpku<VI_char>us, altivec_vpku<VI_char>um, altivec_vsum2sws,
	altivec_vsumsws): Adjust.
	(altivec_vspltb, *altivec_vspltb_internal, altivec_vsplth,
	*altivec_vsplth_internal, altivec_vspltw, *altivec_vspltw_internal,
	altivec_vspltsf, *altivec_vspltsf_internal): Remove -maltivec=be
	support.
	(altivec_vperm_<mode>, altivec_vperm_<mode>_uns,
	altivec_vupkhs<VU_char>, altivec_vupkls<VU_char>, altivec_vupkhpx,
	altivec_vupklpx, altivec_lvsl, altivec_lvsr): Adjust.
	(altivec_lve<VI_char>x): Delete expand.
	(*altivec_lve<VI_char>x_internal): Rename to...
	(altivec_lve<VI_char>x): ... this.
	(altivec_lvxl_<mode>): Delete expand.
	(*altivec_lvxl_<mode>_internal): Rename to ...
	(altivec_lvxl_<mode>): ... this.
	(altivec_stvxl_<mode>): Delete expand.
	(*altivec_stvxl_<mode>_internal): Rename to ...
	(altivec_stvxl_<mode>): ... this.
	(altivec_stve<VI_char>x): Delete expand.
	(*altivec_stve<VI_char>x_internal): Rename to ...
	(altivec_stve<VI_char>x): ... this.
	(doublee<mode>2, unsdoubleev4si2, doubleo<mode>2, unsdoubleov4si2,
	doubleh<mode>2, unsdoublehv4si2, doublel<mode>2, unsdoublelv4si2,
	reduc_plus_scal_<mode>): Adjust.
	* config/rs6000/rs6000-c.c (rs6000_target_modify_macros): Adjust
	comment.
	(rs6000_cpu_cpp_builtins): Adjust.
	(altivec_resolve_overloaded_builtin): Remove -maltivec=be support.
	* config/rs6000/rs6000-protos.h (altivec_expand_lvx_be,
	altivec_expand_stvx_be, altivec_expand_stvex_be): Delete.
	* config/rs6000/rs6000.c (rs6000_option_override_internal): Remove
	-maltivec=be support.
	(rs6000_split_vec_extract_var): Adjust.
	(rs6000_split_v4si_init): Adjust.
	(swap_selector_for_mode): Delete.
	(altivec_expand_lvx_be, altivec_expand_stvx_be,
	altivec_expand_stvex_be): Delete.
	(altivec_expand_lv_builtin, altivec_expand_stv_builtin): Remove
	-maltivec=be support.
	(rs6000_gimple_fold_builtin): Ditto.
	(rs6000_generate_float2_double_code, rs6000_generate_float2_code):
	Adjust.
	* config/rs6000/rs6000.h (VECTOR_ELT_ORDER_BIG): Delete.
	(TARGET_DIRECT_MOVE_64BIT): Adjust.
	* config/rs6000/rs6000.md (split for extendsidi2 for vectors): Adjust.
	* config/rs6000/rs6000.opt (maltivec=le, maltivec=be): Delete.
	* config/rs6000/vsx.md (floate<mode>, unsfloatev2di, floato<mode>,
	unsfloatov2di, vsignedo_v2df, vsignede_v2df, vunsignedo_v2df,
	vunsignede_v2df, vsx_extract_<mode>_p9, *vsx_extract_si,
	*vsx_extract_<mode>_p8, *vsx_extract_si_<uns>float_df,
	*vsx_extract_si_<uns>float_<mode>, vsx_set_<mode>_p9, vsx_set_v4sf_p9,
	*vsx_insert_extract_v4sf_p9, *vsx_insert_extract_v4sf_p9_2, and an
	anonymous split): Adjust.
	(vsx_mergel_<mode>, vsx_mergeh_<mode>): Remove -maltivec=be support.
	(vsx_xxspltd_<mode>, extract4b, insert4b): Adjust.

gcc/testsuite/
	* gcc.dg/vmx/extract-be-order.c: Delete testcase.
	* gcc.dg/vmx/extract-vsx-be-order.c: Delete testcase.
	* gcc.dg/vmx/insert-be-order.c: Delete testcase.
	* gcc.dg/vmx/insert-vsx-be-order.c: Delete testcase.
	* gcc.dg/vmx/ld-be-order.c: Delete testcase.
	* gcc.dg/vmx/ld-vsx-be-order.c: Delete testcase.
	* gcc.dg/vmx/lde-be-order.c: Delete testcase.
	* gcc.dg/vmx/ldl-be-order.c: Delete testcase.
	* gcc.dg/vmx/ldl-vsx-be-order.c: Delete testcase.
	* gcc.dg/vmx/merge-be-order.c: Delete testcase.
	* gcc.dg/vmx/merge-vsx-be-order.c: Delete testcase.
	* gcc.dg/vmx/mult-even-odd-be-order.c: Delete testcase.
	* gcc.dg/vmx/pack-be-order.c: Delete testcase.
	* gcc.dg/vmx/perm-be-order.c: Delete testcase.
	* gcc.dg/vmx/splat-be-order.c: Delete testcase.
	* gcc.dg/vmx/splat-vsx-be-order.c: Delete testcase.
	* gcc.dg/vmx/st-be-order.c: Delete testcase.
	* gcc.dg/vmx/st-vsx-be-order.c: Delete testcase.
	* gcc.dg/vmx/ste-be-order.c: Delete testcase.
	* gcc.dg/vmx/stl-be-order.c: Delete testcase.
	* gcc.dg/vmx/stl-vsx-be-order.c: Delete testcase.
	* gcc.dg/vmx/sum2s-be-order.c: Delete testcase.
	* gcc.dg/vmx/unpack-be-order.c: Delete testcase.
	* gcc.dg/vmx/vsums-be-order.c: Delete testcase.
	* gcc.target/powerpc/vec-setup-be-double.c: Delete testcase.
	* gcc.target/powerpc/vec-setup-be-long.c: Delete testcase.
	* gcc.target/powerpc/vec-setup.h: Remove -maltivec=be support.

From-SVN: r260109

gcc/config/rs6000/altivec.md

@ -945,27 +945,11 @@
(use (match_operand:V16QI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
rtvec v;
rtx x;
/* Special handling for LE with -maltivec=be. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25),
GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27),
GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29),
GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31));
x = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]);
}
else
{
v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17),
GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19),
GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21),
GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23));
x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
}
rtvec v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17),
GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19),
GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21),
GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23));
rtx x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
x = gen_rtx_VEC_SELECT (V16QImode, x, gen_rtx_PARALLEL (VOIDmode, v));
emit_insn (gen_rtx_SET (operands[0], x));
DONE;
@ -1009,22 +993,9 @@
(use (match_operand:V8HI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
rtvec v;
rtx x;
/* Special handling for LE with -maltivec=be. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13),
GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15));
x = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]);
}
else
{
v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9),
GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11));
x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
}
rtvec v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9),
GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11));
rtx x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
x = gen_rtx_VEC_SELECT (V8HImode, x, gen_rtx_PARALLEL (VOIDmode, v));
emit_insn (gen_rtx_SET (operands[0], x));
@ -1065,21 +1036,8 @@
(use (match_operand:V4SI 2 "register_operand"))]
"VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
rtvec v;
rtx x;
/* Special handling for LE with -maltivec=be. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7));
x = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]);
}
else
{
v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5));
x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
}
rtvec v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5));
rtx x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
x = gen_rtx_VEC_SELECT (V4SImode, x, gen_rtx_PARALLEL (VOIDmode, v));
emit_insn (gen_rtx_SET (operands[0], x));
DONE;
@ -1136,27 +1094,11 @@
(use (match_operand:V16QI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
rtvec v;
rtx x;
/* Special handling for LE with -maltivec=be. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17),
GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19),
GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21),
GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23));
x = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]);
}
else
{
v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25),
GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27),
GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29),
GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31));
x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
}
rtvec v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25),
GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27),
GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29),
GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31));
rtx x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]);
x = gen_rtx_VEC_SELECT (V16QImode, x, gen_rtx_PARALLEL (VOIDmode, v));
emit_insn (gen_rtx_SET (operands[0], x));
DONE;
@ -1200,23 +1142,9 @@
(use (match_operand:V8HI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
rtvec v;
rtx x;
/* Special handling for LE with -maltivec=be. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9),
GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11));
x = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]);
}
else
{
v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13),
GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15));
x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
}
rtvec v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13),
GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15));
rtx x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]);
x = gen_rtx_VEC_SELECT (V8HImode, x, gen_rtx_PARALLEL (VOIDmode, v));
emit_insn (gen_rtx_SET (operands[0], x));
DONE;
@ -1256,21 +1184,8 @@
(use (match_operand:V4SI 2 "register_operand"))]
"VECTOR_MEM_ALTIVEC_P (V4SImode)"
{
rtvec v;
rtx x;
/* Special handling for LE with -maltivec=be. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5));
x = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]);
}
else
{
v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7));
x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
}
rtvec v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7));
rtx x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]);
x = gen_rtx_VEC_SELECT (V4SImode, x, gen_rtx_PARALLEL (VOIDmode, v));
emit_insn (gen_rtx_SET (operands[0], x));
DONE;
@ -1415,7 +1330,7 @@
(use (match_operand:V16QI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
@ -1428,7 +1343,7 @@
(use (match_operand:V16QI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
@ -1441,7 +1356,7 @@
(use (match_operand:V8HI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
@ -1454,7 +1369,7 @@
(use (match_operand:V8HI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
@ -1467,7 +1382,7 @@
(use (match_operand:V4SI 2 "register_operand"))]
"TARGET_P8_VECTOR"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
@ -1480,7 +1395,7 @@
(use (match_operand:V4SI 2 "register_operand"))]
"TARGET_P8_VECTOR"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
@ -1493,7 +1408,7 @@
(use (match_operand:V16QI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2]));
@ -1506,7 +1421,7 @@
(use (match_operand:V16QI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2]));
@ -1519,7 +1434,7 @@
(use (match_operand:V8HI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2]));
@ -1532,7 +1447,7 @@
(use (match_operand:V8HI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2]));
@ -1545,7 +1460,7 @@
(use (match_operand:V4SI 2 "register_operand"))]
"TARGET_P8_VECTOR"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2]));
@ -1558,7 +1473,7 @@
(use (match_operand:V4SI 2 "register_operand"))]
"TARGET_P8_VECTOR"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2]));
else
emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2]));
@ -1681,7 +1596,7 @@
UNSPEC_VPKPX))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
return "vpkpx %0,%1,%2";
else
return "vpkpx %0,%2,%1";
@ -1695,7 +1610,7 @@
UNSPEC_VPACK_SIGN_SIGN_SAT))]
"<VI_unit>"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
return "vpks<VI_char>ss %0,%1,%2";
else
return "vpks<VI_char>ss %0,%2,%1";
@ -1709,7 +1624,7 @@
UNSPEC_VPACK_SIGN_UNS_SAT))]
"<VI_unit>"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
return "vpks<VI_char>us %0,%1,%2";
else
return "vpks<VI_char>us %0,%2,%1";
@ -1723,7 +1638,7 @@
UNSPEC_VPACK_UNS_UNS_SAT))]
"<VI_unit>"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
return "vpku<VI_char>us %0,%1,%2";
else
return "vpku<VI_char>us %0,%2,%1";
@ -1737,7 +1652,7 @@
UNSPEC_VPACK_UNS_UNS_MOD))]
"<VI_unit>"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
return "vpku<VI_char>um %0,%1,%2";
else
return "vpku<VI_char>um %0,%2,%1";
@ -1889,7 +1804,7 @@
(use (match_operand:V4SI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vsum2sws_direct (operands[0], operands[1],
operands[2]));
else
@ -1922,7 +1837,7 @@
(use (match_operand:V4SI 2 "register_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_vsumsws_direct (operands[0], operands[1],
operands[2]));
else
@ -1954,15 +1869,8 @@
(use (match_operand:QI 2 "u5bit_cint_operand"))]
"TARGET_ALTIVEC"
{
rtvec v;
rtvec v = gen_rtvec (1, operands[2]);
rtx x;
/* Special handling for LE with -maltivec=be. We have to reflect
the actual selected index for the splat in the RTL. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
operands[2] = GEN_INT (15 - INTVAL (operands[2]));
v = gen_rtvec (1, operands[2]);
x = gen_rtx_VEC_SELECT (QImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
x = gen_rtx_VEC_DUPLICATE (V16QImode, x);
emit_insn (gen_rtx_SET (operands[0], x));
@ -1977,9 +1885,6 @@
[(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
"TARGET_ALTIVEC"
{
/* For true LE, this adjusts the selected index. For LE with
-maltivec=be, this reverses what was done in the define_expand
because the instruction already has big-endian bias. */
if (!BYTES_BIG_ENDIAN)
operands[2] = GEN_INT (15 - INTVAL (operands[2]));
@ -2002,15 +1907,8 @@
(use (match_operand:QI 2 "u5bit_cint_operand"))]
"TARGET_ALTIVEC"
{
rtvec v;
rtvec v = gen_rtvec (1, operands[2]);
rtx x;
/* Special handling for LE with -maltivec=be. We have to reflect
the actual selected index for the splat in the RTL. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
operands[2] = GEN_INT (7 - INTVAL (operands[2]));
v = gen_rtvec (1, operands[2]);
x = gen_rtx_VEC_SELECT (HImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
x = gen_rtx_VEC_DUPLICATE (V8HImode, x);
emit_insn (gen_rtx_SET (operands[0], x));
@ -2025,9 +1923,6 @@
[(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
"TARGET_ALTIVEC"
{
/* For true LE, this adjusts the selected index. For LE with
-maltivec=be, this reverses what was done in the define_expand
because the instruction already has big-endian bias. */
if (!BYTES_BIG_ENDIAN)
operands[2] = GEN_INT (7 - INTVAL (operands[2]));
@ -2050,15 +1945,8 @@
(use (match_operand:QI 2 "u5bit_cint_operand"))]
"TARGET_ALTIVEC"
{
rtvec v;
rtvec v = gen_rtvec (1, operands[2]);
rtx x;
/* Special handling for LE with -maltivec=be. We have to reflect
the actual selected index for the splat in the RTL. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
operands[2] = GEN_INT (3 - INTVAL (operands[2]));
v = gen_rtvec (1, operands[2]);
x = gen_rtx_VEC_SELECT (SImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
x = gen_rtx_VEC_DUPLICATE (V4SImode, x);
emit_insn (gen_rtx_SET (operands[0], x));
@ -2073,9 +1961,6 @@
[(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
"TARGET_ALTIVEC"
{
/* For true LE, this adjusts the selected index. For LE with
-maltivec=be, this reverses what was done in the define_expand
because the instruction already has big-endian bias. */
if (!BYTES_BIG_ENDIAN)
operands[2] = GEN_INT (3 - INTVAL (operands[2]));
@ -2098,15 +1983,8 @@
(use (match_operand:QI 2 "u5bit_cint_operand"))]
"TARGET_ALTIVEC"
{
rtvec v;
rtvec v = gen_rtvec (1, operands[2]);
rtx x;
/* Special handling for LE with -maltivec=be. We have to reflect
the actual selected index for the splat in the RTL. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
operands[2] = GEN_INT (3 - INTVAL (operands[2]));
v = gen_rtvec (1, operands[2]);
x = gen_rtx_VEC_SELECT (SFmode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
x = gen_rtx_VEC_DUPLICATE (V4SFmode, x);
emit_insn (gen_rtx_SET (operands[0], x));
@ -2121,9 +1999,6 @@
[(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
"VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
{
/* For true LE, this adjusts the selected index. For LE with
-maltivec=be, this reverses what was done in the define_expand
because the instruction already has big-endian bias. */
if (!BYTES_BIG_ENDIAN)
operands[2] = GEN_INT (3 - INTVAL (operands[2]));
@ -2154,7 +2029,7 @@
UNSPEC_VPERM))]
"TARGET_ALTIVEC"
{
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
{
altivec_expand_vec_perm_le (operands);
DONE;
@ -2196,7 +2071,7 @@
UNSPEC_VPERM_UNS))]
"TARGET_ALTIVEC"
{
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
{
altivec_expand_vec_perm_le (operands);
DONE;
@ -2374,7 +2249,7 @@
UNSPEC_VUNPACK_HI_SIGN))]
"<VI_unit>"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
return "vupkhs<VU_char> %0,%1";
else
return "vupkls<VU_char> %0,%1";
@ -2395,7 +2270,7 @@
UNSPEC_VUNPACK_LO_SIGN))]
"<VI_unit>"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
return "vupkls<VU_char> %0,%1";
else
return "vupkhs<VU_char> %0,%1";
@ -2416,7 +2291,7 @@
UNSPEC_VUPKHPX))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
return "vupkhpx %0,%1";
else
return "vupklpx %0,%1";
@ -2429,7 +2304,7 @@
UNSPEC_VUPKLPX))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
return "vupklpx %0,%1";
else
return "vupkhpx %0,%1";
@ -2588,7 +2463,7 @@
(use (match_operand:V16QI 1 "memory_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_lvsl_direct (operands[0], operands[1]));
else
{
@ -2626,7 +2501,7 @@
(use (match_operand:V16QI 1 "memory_operand"))]
"TARGET_ALTIVEC"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_altivec_lvsr_direct (operands[0], operands[1]));
else
{
@ -2680,21 +2555,7 @@
;; Parallel some of the LVE* and STV*'s with unspecs because some have
;; identical rtl but different instructions-- and gcc gets confused.
(define_expand "altivec_lve<VI_char>x"
[(parallel
[(set (match_operand:VI 0 "register_operand" "=v")
(match_operand:VI 1 "memory_operand" "Z"))
(unspec [(const_int 0)] UNSPEC_LVE)])]
"TARGET_ALTIVEC"
{
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_LVE);
DONE;
}
})
(define_insn "*altivec_lve<VI_char>x_internal"
(define_insn "altivec_lve<VI_char>x"
[(parallel
[(set (match_operand:VI 0 "register_operand" "=v")
(match_operand:VI 1 "memory_operand" "Z"))
@ -2712,21 +2573,7 @@
"lvewx %0,%y1"
[(set_attr "type" "vecload")])
(define_expand "altivec_lvxl_<mode>"
[(parallel
[(set (match_operand:VM2 0 "register_operand" "=v")
(match_operand:VM2 1 "memory_operand" "Z"))
(unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
"TARGET_ALTIVEC"
{
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_SET_VSCR);
DONE;
}
})
(define_insn "*altivec_lvxl_<mode>_internal"
(define_insn "altivec_lvxl_<mode>"
[(parallel
[(set (match_operand:VM2 0 "register_operand" "=v")
(match_operand:VM2 1 "memory_operand" "Z"))
@ -2847,21 +2694,7 @@
"stvx %0,0,%1"
[(set_attr "type" "vecstore")])
(define_expand "altivec_stvxl_<mode>"
[(parallel
[(set (match_operand:VM2 0 "memory_operand" "=Z")
(match_operand:VM2 1 "register_operand" "v"))
(unspec [(const_int 0)] UNSPEC_STVXL)])]
"TARGET_ALTIVEC"
{
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
altivec_expand_stvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVXL);
DONE;
}
})
(define_insn "*altivec_stvxl_<mode>_internal"
(define_insn "altivec_stvxl_<mode>"
[(parallel
[(set (match_operand:VM2 0 "memory_operand" "=Z")
(match_operand:VM2 1 "register_operand" "v"))
@ -2870,19 +2703,7 @@
"stvxl %1,%y0"
[(set_attr "type" "vecstore")])
(define_expand "altivec_stve<VI_char>x"
[(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
(unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
"TARGET_ALTIVEC"
{
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
altivec_expand_stvex_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVE);
DONE;
}
})
(define_insn "*altivec_stve<VI_char>x_internal"
(define_insn "altivec_stve<VI_char>x"
[(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
(unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))]
"TARGET_ALTIVEC"
@ -2905,7 +2726,7 @@
{
machine_mode op_mode = GET_MODE (operands[1]);
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
/* Big endian word numbering for words in operand is 0 1 2 3.
Input words 0 and 2 are where they need to be. */
@ -2937,7 +2758,7 @@
(match_operand:V4SI 1 "register_operand" "v"))]
"TARGET_VSX"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
/* Big endian word numbering for words in operand is 0 1 2 3.
Input words 0 and 2 are where they need to be. */
@ -2971,7 +2792,7 @@
{
machine_mode op_mode = GET_MODE (operands[1]);
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
/* Big endian word numbering for words in operand is 0 1 2 3.
take (operand[1] operand[1]) and shift left one word
@ -3003,7 +2824,7 @@
(match_operand:V4SI 1 "register_operand" "v"))]
"TARGET_VSX"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
/* Big endian word numbering for words in operand is 0 1 2 3.
take (operand[1] operand[1]) and shift left one word
@ -3042,7 +2863,7 @@
machine_mode op_mode = GET_MODE (operands[1]);
rtx_tmp = gen_reg_rtx (op_mode);
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
/* Big endian word numbering for words in operand is 0 1 2 3.
Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
@ -3090,7 +2911,7 @@
rtx rtx_tmp = gen_reg_rtx (V4SImode);
rtx rtx_val = GEN_INT (12);
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
/* Big endian word numbering for words in operand is 0 1 2 3.
Shift operand left one word, rtx_tmp word order is now 1 2 3 0.
@ -3143,7 +2964,7 @@
machine_mode op_mode = GET_MODE (operands[1]);
rtx_tmp = gen_reg_rtx (op_mode);
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
/* Big endian word numbering for operand is 0 1 2 3.
Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
@ -3191,7 +3012,7 @@
rtx rtx_tmp = gen_reg_rtx (V4SImode);
rtx rtx_val = GEN_INT (12);
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
/* Big endian word numbering for operand is 0 1 2 3.
Shift operand left three words, rtx_tmp word order is now 3 0 1 2.
@ -3330,7 +3151,7 @@
rtx vtmp1 = gen_reg_rtx (V4SImode);
rtx vtmp2 = gen_reg_rtx (<MODE>mode);
rtx dest = gen_lowpart (V4SImode, vtmp2);
int elt = VECTOR_ELT_ORDER_BIG ? GET_MODE_NUNITS (<MODE>mode) - 1 : 0;
int elt = BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (<MODE>mode) - 1 : 0;
emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero));

gcc/config/rs6000/rs6000-c.c

@ -452,10 +452,9 @@ rs6000_target_modify_macros (bool define_p, HOST_WIDE_INT flags,
rs6000_define_or_undefine_macro (define_p, "__RECIP_PRECISION__");
/* Note that the OPTION_MASK_ALTIVEC flag is automatically turned on
in any of the following conditions:
1. The command line specifies either -maltivec=le or -maltivec=be.
2. The operating system is Darwin and it is configured for 64
1. The operating system is Darwin and it is configured for 64
bit. (See darwin_rs6000_override_options.)
3. The operating system is Darwin and the operating system
2. The operating system is Darwin and the operating system
version is 10.5 or higher and the user has not explicitly
disabled ALTIVEC by specifying -mcpu=G3 or -mno-altivec and
the compiler is not producing code for integration within the
@ -750,7 +749,7 @@ rs6000_cpu_cpp_builtins (cpp_reader *pfile)
}
/* Vector element order. */
if (BYTES_BIG_ENDIAN || (rs6000_altivec_element_order == 2))
if (BYTES_BIG_ENDIAN)
builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_BIG_ENDIAN__");
else
builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_LITTLE_ENDIAN__");
@ -6121,11 +6120,11 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
(int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));
/* vec_lvsl and vec_lvsr are deprecated for use with LE element order. */
if (fcode == ALTIVEC_BUILTIN_VEC_LVSL && !VECTOR_ELT_ORDER_BIG)
if (fcode == ALTIVEC_BUILTIN_VEC_LVSL && !BYTES_BIG_ENDIAN)
warning (OPT_Wdeprecated,
"vec_lvsl is deprecated for little endian; use "
"assignment for unaligned loads and stores");
else if (fcode == ALTIVEC_BUILTIN_VEC_LVSR && !VECTOR_ELT_ORDER_BIG)
else if (fcode == ALTIVEC_BUILTIN_VEC_LVSR && !BYTES_BIG_ENDIAN)
warning (OPT_Wdeprecated,
"vec_lvsr is deprecated for little endian; use "
"assignment for unaligned loads and stores");
@ -6537,17 +6536,6 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
goto bad;
/* If we are targeting little-endian, but -maltivec=be has been
specified to override the element order, adjust the element
number accordingly. */
if (!BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 2)
{
unsigned int last_elem = TYPE_VECTOR_SUBPARTS (arg1_type) - 1;
arg2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (arg2),
build_int_cstu (TREE_TYPE (arg2), last_elem),
arg2);
}
/* See if we can optimize vec_extracts with the current VSX instruction
set. */
mode = TYPE_MODE (arg1_type);
@ -6719,17 +6707,6 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
goto bad;
/* If we are targeting little-endian, but -maltivec=be has been
specified to override the element order, adjust the element
number accordingly. */
if (!BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 2)
{
unsigned int last_elem = TYPE_VECTOR_SUBPARTS (arg1_type) - 1;
arg2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (arg2),
build_int_cstu (TREE_TYPE (arg2), last_elem),
arg2);
}
/* If we can use the VSX xxpermdi instruction, use that for insert. */
mode = TYPE_MODE (arg1_type);
if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)

gcc/config/rs6000/rs6000-protos.h

@ -63,9 +63,6 @@ extern void rs6000_split_vec_extract_var (rtx, rtx, rtx, rtx, rtx);
extern rtx rs6000_adjust_vec_address (rtx, rtx, rtx, rtx, machine_mode);
extern void rs6000_split_v4si_init (rtx []);
extern void altivec_expand_vec_perm_le (rtx op[4]);
extern void altivec_expand_lvx_be (rtx, rtx, machine_mode, unsigned);
extern void altivec_expand_stvx_be (rtx, rtx, machine_mode, unsigned);
extern void altivec_expand_stvex_be (rtx, rtx, machine_mode, unsigned);
extern void rs6000_expand_extract_even (rtx, rtx, rtx);
extern void rs6000_expand_interleave (rtx, rtx, rtx, bool);
extern void rs6000_scale_v2df (rtx, rtx, int);

gcc/config/rs6000/rs6000.c

@ -4000,13 +4000,6 @@ rs6000_option_override_internal (bool global_init_p)
if (global_init_p)
rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
/* We plan to deprecate the -maltivec=be option. For now, just
issue a warning message. */
if (global_init_p
&& rs6000_altivec_element_order == 2)
warning (0, "%qs command-line option is deprecated",
"-maltivec=be");
/* On 64-bit Darwin, power alignment is ABI-incompatible with some C
library functions, so warn about it. The flag may be useful for
performance studies from time to time though, so don't disable it
@ -4217,18 +4210,6 @@ rs6000_option_override_internal (bool global_init_p)
&& !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
/* -maltivec={le,be} implies -maltivec. */
if (rs6000_altivec_element_order != 0)
rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
/* Disallow -maltivec=le in big endian mode for now. This is not
known to be useful for anyone. */
if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
{
warning (0, N_("-maltivec=le not allowed for big-endian targets"));
rs6000_altivec_element_order = 0;
}
if (!rs6000_fold_gimple)
fprintf (stderr,
"gimple folding of rs6000 builtins has been disabled.\n");
@ -7442,7 +7423,7 @@ rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
rtx element_si = gen_rtx_REG (SImode, element_regno);
if (mode == V16QImode)
emit_insn (VECTOR_ELT_ORDER_BIG
emit_insn (BYTES_BIG_ENDIAN
? gen_vextublx (dest_si, element_si, src)
: gen_vextubrx (dest_si, element_si, src));
@ -7450,7 +7431,7 @@ rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
{
rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
emit_insn (VECTOR_ELT_ORDER_BIG
emit_insn (BYTES_BIG_ENDIAN
? gen_vextuhlx (dest_si, tmp_gpr_si, src)
: gen_vextuhrx (dest_si, tmp_gpr_si, src));
}
@ -7460,7 +7441,7 @@ rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
{
rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
emit_insn (VECTOR_ELT_ORDER_BIG
emit_insn (BYTES_BIG_ENDIAN
? gen_vextuwlx (dest_si, tmp_gpr_si, src)
: gen_vextuwrx (dest_si, tmp_gpr_si, src));
}
@ -7477,7 +7458,7 @@ rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
byte shift into a bit shift). */
if (scalar_size == 8)
{
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
{
emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
element2 = tmp_gpr;
@ -7496,7 +7477,7 @@ rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
}
else
{
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
{
rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
@ -7647,7 +7628,6 @@ rs6000_split_v4si_init (rtx operands[])
{
rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
rtx di_hi = gen_rtx_REG (DImode, d_regno);
gcc_assert (!VECTOR_ELT_ORDER_BIG);
rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
}
@ -13940,46 +13920,6 @@ altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
return target;
}
/* Return a constant vector for use as a little-endian permute control vector
to reverse the order of elements of the given vector mode. */
static rtx
swap_selector_for_mode (machine_mode mode)
{
/* These are little endian vectors, so their elements are reversed
from what you would normally expect for a permute control vector. */
unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
unsigned int *swaparray, i;
rtx perm[16];
switch (mode)
{
case E_V2DFmode:
case E_V2DImode:
swaparray = swap2;
break;
case E_V4SFmode:
case E_V4SImode:
swaparray = swap4;
break;
case E_V8HImode:
swaparray = swap8;
break;
case E_V16QImode:
swaparray = swap16;
break;
default:
gcc_unreachable ();
}
for (i = 0; i < 16; ++i)
perm[i] = GEN_INT (swaparray[i]);
return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
}
rtx
swap_endian_selector_for_mode (machine_mode mode)
{
@ -14018,60 +13958,6 @@ swap_endian_selector_for_mode (machine_mode mode)
gen_rtvec_v (16, perm)));
}
/* Generate code for an "lvxl", or "lve*x" built-in for a little endian target
with -maltivec=be specified. Issue the load followed by an element-
reversing permute. */
void
altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
rtx tmp = gen_reg_rtx (mode);
rtx load = gen_rtx_SET (tmp, op1);
rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
rtx sel = swap_selector_for_mode (mode);
rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
gcc_assert (REG_P (op0));
emit_insn (par);
emit_insn (gen_rtx_SET (op0, vperm));
}
/* Generate code for a "stvxl" built-in for a little endian target with
-maltivec=be specified. Issue the store preceded by an element-reversing
permute. */
void
altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
rtx tmp = gen_reg_rtx (mode);
rtx store = gen_rtx_SET (op0, tmp);
rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
rtx sel = swap_selector_for_mode (mode);
rtx vperm;
gcc_assert (REG_P (op1));
vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
emit_insn (gen_rtx_SET (tmp, vperm));
emit_insn (par);
}
/* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
specified. Issue the store preceded by an element-reversing permute. */
void
altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
machine_mode inner_mode = GET_MODE_INNER (mode);
rtx tmp = gen_reg_rtx (mode);
rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
rtx sel = swap_selector_for_mode (mode);
rtx vperm;
gcc_assert (REG_P (op1));
vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
emit_insn (gen_rtx_SET (tmp, vperm));
emit_insn (gen_rtx_SET (op0, stvx));
}
static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
{
@ -14121,20 +14007,7 @@ altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
/* For -maltivec=be, emit the load and follow it up with a
permute to swap the elements. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
rtx temp = gen_reg_rtx (tmode);
emit_insn (gen_rtx_SET (temp, addr));
rtx sel = swap_selector_for_mode (tmode);
rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
UNSPEC_VPERM);
emit_insn (gen_rtx_SET (target, vperm));
}
else
emit_insn (gen_rtx_SET (target, addr));
emit_insn (gen_rtx_SET (target, addr));
}
else
{
@ -14240,19 +14113,7 @@ altivec_expand_stv_builtin (enum insn_code icode, tree exp)
op0 = copy_to_mode_reg (tmode, op0);
/* For -maltivec=be, emit a permute to swap the elements, followed
by the store. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
rtx temp = gen_reg_rtx (tmode);
rtx sel = swap_selector_for_mode (tmode);
rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
UNSPEC_VPERM);
emit_insn (gen_rtx_SET (temp, vperm));
emit_insn (gen_rtx_SET (addr, temp));
}
else
emit_insn (gen_rtx_SET (addr, op0));
emit_insn (gen_rtx_SET (addr, op0));
}
else
{
@ -15937,9 +15798,6 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
arg0 = gimple_call_arg (stmt, 0); // offset
arg1 = gimple_call_arg (stmt, 1); // address
/* Do not fold for -maltivec=be on LE targets. */
if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
return false;
lhs = gimple_call_lhs (stmt);
location_t loc = gimple_location (stmt);
/* Since arg1 may be cast to a different type, just use ptr_type_node
@ -15976,9 +15834,6 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
case ALTIVEC_BUILTIN_STVX_V2DI:
case ALTIVEC_BUILTIN_STVX_V2DF:
{
/* Do not fold for -maltivec=be on LE targets. */
if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
return false;
arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
arg1 = gimple_call_arg (stmt, 1); /* Offset. */
tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
@ -16119,9 +15974,6 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
case VSX_BUILTIN_XXMRGLW_4SI:
case ALTIVEC_BUILTIN_VMRGLB:
case VSX_BUILTIN_VEC_MERGEL_V2DI:
/* Do not fold for -maltivec=be on LE targets. */
if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
return false;
fold_mergehl_helper (gsi, stmt, 1);
return true;
/* vec_mergeh (integrals). */
@ -16130,9 +15982,6 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
case VSX_BUILTIN_XXMRGHW_4SI:
case ALTIVEC_BUILTIN_VMRGHB:
case VSX_BUILTIN_VEC_MERGEH_V2DI:
/* Do not fold for -maltivec=be on LE targets. */
if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
return false;
fold_mergehl_helper (gsi, stmt, 0);
return true;
default:
@ -38768,7 +38617,7 @@ rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[0].
Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
vmrgew instruction will be correct. */
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
GEN_INT (0)));
@ -38787,7 +38636,7 @@ rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
else
emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
@ -38805,7 +38654,7 @@ rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[0].
Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
vmrgew instruction will be correct. */
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
@ -38830,7 +38679,7 @@ rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
}
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
else
emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));

gcc/config/rs6000/rs6000.h

@ -512,15 +512,6 @@ extern int rs6000_vector_align[];
? rs6000_vector_align[(MODE)] \
: (int)GET_MODE_BITSIZE ((MODE)))
/* Determine the element order to use for vector instructions. By
default we use big-endian element order when targeting big-endian,
and little-endian element order when targeting little-endian. For
programs being ported from BE Power to LE Power, it can sometimes
be useful to use big-endian element order when targeting little-endian.
This is set via -maltivec=be, for example. */
#define VECTOR_ELT_ORDER_BIG \
(BYTES_BIG_ENDIAN || (rs6000_altivec_element_order == 2))
/* Element number of the 64-bit value in a 128-bit vector that can be accessed
with scalar instructions. */
#define VECTOR_ELEMENT_SCALAR_64BIT ((BYTES_BIG_ENDIAN) ? 0 : 1)
@ -720,12 +711,10 @@ extern int rs6000_vector_align[];
/* Macro to say whether we can do optimizations where we need to do parts of
the calculation in 64-bit GPRs and then is transfered to the vector
registers. Do not allow -maltivec=be for these optimizations, because it
adds to the complexity of the code. */
registers. */
#define TARGET_DIRECT_MOVE_64BIT (TARGET_DIRECT_MOVE \
&& TARGET_P8_VECTOR \
&& TARGET_POWERPC64 \
&& (rs6000_altivec_element_order != 2))
&& TARGET_POWERPC64)
/* Whether the various reciprocal divide/square root estimate instructions
exist, and whether we should automatically generate code for the instruction

gcc/config/rs6000/rs6000.md

@ -1033,7 +1033,7 @@
rtx dest_v2di = gen_rtx_REG (V2DImode, dest_regno);
rtx src_v4si = gen_rtx_REG (V4SImode, src_regno);
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
emit_insn (gen_altivec_vupkhsw (dest_v2di, src_v4si));
emit_insn (gen_vsx_xxspltd_v2di (dest_v2di, dest_v2di, const1_rtx));

gcc/config/rs6000/rs6000.opt

@ -152,14 +152,6 @@ maltivec
Target Report Mask(ALTIVEC) Var(rs6000_isa_flags)
Use AltiVec instructions.
maltivec=le
Target Report RejectNegative Var(rs6000_altivec_element_order, 1) Save
Generate AltiVec instructions using little-endian element order.
maltivec=be
Target Report RejectNegative Var(rs6000_altivec_element_order, 2)
Generate AltiVec instructions using big-endian element order.
mfold-gimple
Target Report Var(rs6000_fold_gimple) Init(1)
Enable early gimple folding of builtins.

gcc/config/rs6000/vsx.md

@ -2557,7 +2557,7 @@
(use (match_operand:VSX_D 1 "register_operand" "wa"))]
"VECTOR_UNIT_VSX_P (V4SFmode)"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
/* Shift left one word to put even word correct location */
rtx rtx_tmp;
@ -2582,7 +2582,7 @@
(use (match_operand:V2DI 1 "register_operand" "wa"))]
"VECTOR_UNIT_VSX_P (V4SFmode)"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
/* Shift left one word to put even word correct location */
rtx rtx_tmp;
@ -2607,7 +2607,7 @@
(use (match_operand:VSX_D 1 "register_operand" "wa"))]
"VECTOR_UNIT_VSX_P (V4SFmode)"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_vsx_xvcv<VF_sxddp>sp (operands[0], operands[1]));
else
{
@ -2631,7 +2631,7 @@
(use (match_operand:V2DI 1 "register_operand" "wa"))]
"VECTOR_UNIT_VSX_P (V4SFmode)"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_vsx_xvcvuxdsp (operands[0], operands[1]));
else
{
@ -2674,7 +2674,7 @@
(match_operand:V2DF 1 "register_operand" "wa"))]
"TARGET_VSX"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
rtx rtx_tmp;
rtx rtx_val = GEN_INT (12);
@ -2706,7 +2706,7 @@
(match_operand:V2DF 1 "register_operand" "v"))]
"TARGET_VSX"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
/* Big endian word numbering for words in operand is 0 1
Result words 0 is where they need to be. */
emit_insn (gen_vsx_xvcvdpsxws (operands[0], operands[1]));
@ -2758,7 +2758,7 @@
(match_operand:V2DF 1 "register_operand" "v"))]
"TARGET_VSX"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
{
rtx rtx_tmp;
rtx rtx_val = GEN_INT (12);
@ -2790,7 +2790,7 @@
(match_operand:V2DF 1 "register_operand" "v"))]
"TARGET_VSX"
{
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
/* Big endian word numbering for words in operand is 0 1
Result words 0 is where they need to be. */
emit_insn (gen_vsx_xvcvdpuxws (operands[0], operands[1]));
@ -3498,7 +3498,7 @@
else
{
HOST_WIDE_INT elt = INTVAL (operands[2]);
HOST_WIDE_INT elt_adj = (!VECTOR_ELT_ORDER_BIG
HOST_WIDE_INT elt_adj = (!BYTES_BIG_ENDIAN
? GET_MODE_NUNITS (<MODE>mode) - 1 - elt
: elt);
@ -3530,7 +3530,7 @@
HOST_WIDE_INT offset = INTVAL (op2) * GET_MODE_UNIT_SIZE (<MODE>mode);
emit_move_insn (op3, GEN_INT (offset));
if (VECTOR_ELT_ORDER_BIG)
if (BYTES_BIG_ENDIAN)
emit_insn (gen_vextu<wd>lx (op0_si, op3, op1));
else
emit_insn (gen_vextu<wd>rx (op0_si, op3, op1));
@ -3593,7 +3593,7 @@
rtx vec_tmp = operands[3];
int value;
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
element = GEN_INT (GET_MODE_NUNITS (V4SImode) - 1 - INTVAL (element));
/* If the value is in the correct position, we can avoid doing the VSPLT<x>
@ -3644,7 +3644,7 @@
rtx vec_tmp = operands[3];
int value;
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
element = GEN_INT (GET_MODE_NUNITS (<MODE>mode) - 1 - INTVAL (element));
/* If the value is in the correct position, we can avoid doing the VSPLT<x>
@ -3752,7 +3752,7 @@
rtx v4si_tmp = operands[3];
int value;
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
element = GEN_INT (GET_MODE_NUNITS (V4SImode) - 1 - INTVAL (element));
/* If the value is in the correct position, we can avoid doing the VSPLT<x>
@ -3795,7 +3795,7 @@
rtx df_tmp = operands[4];
int value;
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
element = GEN_INT (GET_MODE_NUNITS (V4SImode) - 1 - INTVAL (element));
/* If the value is in the correct position, we can avoid doing the VSPLT<x>
@ -3895,7 +3895,7 @@
int ele = INTVAL (operands[3]);
int nunits = GET_MODE_NUNITS (<MODE>mode);
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
ele = nunits - 1 - ele;
operands[3] = GEN_INT (GET_MODE_SIZE (<VS_scalar>mode) * ele);
@ -3934,7 +3934,7 @@
operands[5] = gen_rtx_REG (V4SFmode, tmp_regno);
operands[6] = gen_rtx_REG (V4SImode, tmp_regno);
operands[7] = GEN_INT (VECTOR_ELT_ORDER_BIG ? 1 : 2);
operands[7] = GEN_INT (BYTES_BIG_ENDIAN ? 1 : 2);
operands[8] = gen_rtx_REG (V4SImode, reg_or_subregno (operands[0]));
}
[(set_attr "type" "vecperm")
@ -3980,11 +3980,11 @@
(match_operand:QI 4 "const_0_to_3_operand" "n")]
UNSPEC_VSX_SET))]
"VECTOR_MEM_VSX_P (V4SFmode) && TARGET_P9_VECTOR && TARGET_POWERPC64
&& (INTVAL (operands[3]) == (VECTOR_ELT_ORDER_BIG ? 1 : 2))"
&& (INTVAL (operands[3]) == (BYTES_BIG_ENDIAN ? 1 : 2))"
{
int ele = INTVAL (operands[4]);
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
ele = GET_MODE_NUNITS (V4SFmode) - 1 - ele;
operands[4] = GEN_INT (GET_MODE_SIZE (SFmode) * ele);
@ -4008,7 +4008,7 @@
(clobber (match_scratch:SI 5 "=&wJwK"))]
"VECTOR_MEM_VSX_P (V4SFmode) && VECTOR_MEM_VSX_P (V4SImode)
&& TARGET_P9_VECTOR && TARGET_POWERPC64
&& (INTVAL (operands[3]) != (VECTOR_ELT_ORDER_BIG ? 1 : 2))"
&& (INTVAL (operands[3]) != (BYTES_BIG_ENDIAN ? 1 : 2))"
"#"
"&& 1"
[(parallel [(set (match_dup 5)
@ -4037,21 +4037,8 @@
(use (match_operand:VSX_D 2 "vsx_register_operand"))]
"VECTOR_MEM_VSX_P (<MODE>mode)"
{
rtvec v;
rtx x;
/* Special handling for LE with -maltivec=be. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
v = gen_rtvec (2, GEN_INT (0), GEN_INT (2));
x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[2], operands[1]);
}
else
{
v = gen_rtvec (2, GEN_INT (1), GEN_INT (3));
x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);
}
rtvec v = gen_rtvec (2, GEN_INT (1), GEN_INT (3));
rtx x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);
x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
emit_insn (gen_rtx_SET (operands[0], x));
DONE;
@ -4063,21 +4050,8 @@
(use (match_operand:VSX_D 2 "vsx_register_operand"))]
"VECTOR_MEM_VSX_P (<MODE>mode)"
{
rtvec v;
rtx x;
/* Special handling for LE with -maltivec=be. */
if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
{
v = gen_rtvec (2, GEN_INT (1), GEN_INT (3));
x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[2], operands[1]);
}
else
{
v = gen_rtvec (2, GEN_INT (0), GEN_INT (2));
x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);
}
rtvec v = gen_rtvec (2, GEN_INT (0), GEN_INT (2));
rtx x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);
x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
emit_insn (gen_rtx_SET (operands[0], x));
DONE;
@ -4209,8 +4183,8 @@
UNSPEC_VSX_XXSPLTD))]
"VECTOR_MEM_VSX_P (<MODE>mode)"
{
if ((VECTOR_ELT_ORDER_BIG && INTVAL (operands[2]) == 0)
|| (!VECTOR_ELT_ORDER_BIG && INTVAL (operands[2]) == 1))
if ((BYTES_BIG_ENDIAN && INTVAL (operands[2]) == 0)
|| (!BYTES_BIG_ENDIAN && INTVAL (operands[2]) == 1))
return "xxpermdi %x0,%x1,%x1,0";
else
return "xxpermdi %x0,%x1,%x1,3";
@ -5161,7 +5135,7 @@
UNSPEC_XXEXTRACTUW))]
"TARGET_P9_VECTOR"
{
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
operands[2] = GEN_INT (12 - INTVAL (operands[2]));
return "xxextractuw %x0,%x1,%2";
@ -5175,7 +5149,7 @@
UNSPEC_XXINSERTW))]
"TARGET_P9_VECTOR"
{
if (!VECTOR_ELT_ORDER_BIG)
if (!BYTES_BIG_ENDIAN)
{
rtx op1 = operands[1];
rtx v4si_tmp = gen_reg_rtx (V4SImode);

gcc/testsuite/gcc.dg/vmx/extract-be-order.c

@ -1,34 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mno-vsx -w" } */
#include "harness.h"
static void test()
{
vector unsigned char va = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector signed char vb = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
vector unsigned short vc = {0,1,2,3,4,5,6,7};
vector signed short vd = {-4,-3,-2,-1,0,1,2,3};
vector unsigned int ve = {0,1,2,3};
vector signed int vf = {-2,-1,0,1};
vector float vg = {-2.0f,-1.0f,0.0f,1.0f};
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
check (vec_extract (va, 5) == 10, "vec_extract (va, 5)");
check (vec_extract (vb, 0) == 7, "vec_extract (vb, 0)");
check (vec_extract (vc, 7) == 0, "vec_extract (vc, 7)");
check (vec_extract (vd, 3) == 0, "vec_extract (vd, 3)");
check (vec_extract (ve, 2) == 1, "vec_extract (ve, 2)");
check (vec_extract (vf, 1) == 0, "vec_extract (vf, 1)");
check (vec_extract (vg, 0) == 1.0f, "vec_extract (vg, 0)");
#else
check (vec_extract (va, 5) == 5, "vec_extract (va, 5)");
check (vec_extract (vb, 0) == -8, "vec_extract (vb, 0)");
check (vec_extract (vc, 7) == 7, "vec_extract (vc, 7)");
check (vec_extract (vd, 3) == -1, "vec_extract (vd, 3)");
check (vec_extract (ve, 2) == 2, "vec_extract (ve, 2)");
check (vec_extract (vf, 1) == -1, "vec_extract (vf, 1)");
check (vec_extract (vg, 0) == -2.0f, "vec_extract (vg, 0)");
#endif
}

gcc/testsuite/gcc.dg/vmx/extract-vsx-be-order.c

@ -1,20 +0,0 @@
/* { dg-skip-if "" { powerpc*-*-darwin* } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mvsx -w" } */
#include "harness.h"
static void test()
{
vector long long vl = {0, 1};
vector double vd = {0.0, 1.0};
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
check (vec_extract (vl, 0) == 1, "vl, 0");
check (vec_extract (vd, 1) == 0.0, "vd, 1");
#else
check (vec_extract (vl, 0) == 0, "vl, 0");
check (vec_extract (vd, 1) == 1.0, "vd, 1");
#endif
}

gcc/testsuite/gcc.dg/vmx/insert-be-order.c

@ -1,66 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static void test()
{
vector unsigned char va = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector signed char vb = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
vector unsigned short vc = {0,1,2,3,4,5,6,7};
vector signed short vd = {-4,-3,-2,-1,0,1,2,3};
vector unsigned int ve = {0,1,2,3};
vector signed int vf = {-2,-1,0,1};
vector float vg = {-2.0f,-1.0f,0.0f,1.0f};
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
check (vec_all_eq (vec_insert (16, va, 5),
((vector unsigned char)
{0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15})),
"vec_insert (va LE)");
check (vec_all_eq (vec_insert (-16, vb, 0),
((vector signed char)
{-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-16})),
"vec_insert (vb LE)");
check (vec_all_eq (vec_insert (16, vc, 7),
((vector unsigned short){16,1,2,3,4,5,6,7})),
"vec_insert (vc LE)");
check (vec_all_eq (vec_insert (-16, vd, 3),
((vector signed short){-4,-3,-2,-1,-16,1,2,3})),
"vec_insert (vd LE)");
check (vec_all_eq (vec_insert (16, ve, 2),
((vector unsigned int){0,16,2,3})),
"vec_insert (ve LE)");
check (vec_all_eq (vec_insert (-16, vf, 1),
((vector signed int){-2,-1,-16,1})),
"vec_insert (vf LE)");
check (vec_all_eq (vec_insert (-16.0f, vg, 0),
((vector float){-2.0f,-1.0f,0.0f,-16.0f})),
"vec_insert (vg LE)");
#else
check (vec_all_eq (vec_insert (16, va, 5),
((vector unsigned char)
{0,1,2,3,4,16,6,7,8,9,10,11,12,13,14,15})),
"vec_insert (va BE)");
check (vec_all_eq (vec_insert (-16, vb, 0),
((vector signed char)
{-16,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7})),
"vec_insert (vb BE)");
check (vec_all_eq (vec_insert (16, vc, 7),
((vector unsigned short){0,1,2,3,4,5,6,16})),
"vec_insert (vc BE)");
check (vec_all_eq (vec_insert (-16, vd, 3),
((vector signed short){-4,-3,-2,-16,0,1,2,3})),
"vec_insert (vd BE)");
check (vec_all_eq (vec_insert (16, ve, 2),
((vector unsigned int){0,1,16,3})),
"vec_insert (ve BE)");
check (vec_all_eq (vec_insert (-16, vf, 1),
((vector signed int){-2,-16,0,1})),
"vec_insert (vf BE)");
check (vec_all_eq (vec_insert (-16.0f, vg, 0),
((vector float){-16.0f,-1.0f,0.0f,1.0f})),
"vec_insert (vg BE)");
#endif
}

gcc/testsuite/gcc.dg/vmx/insert-vsx-be-order.c (deleted):
@@ -1,35 +0,0 @@
/* { dg-skip-if "" { powerpc*-*-darwin* } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
#include "harness.h"
static int vec_long_long_eq (vector long long x, vector long long y)
{
return (x[0] == y[0] && x[1] == y[1]);
}
static int vec_dbl_eq (vector double x, vector double y)
{
return (x[0] == y[0] && x[1] == y[1]);
}
static void test()
{
vector long long vl = {0, 1};
vector double vd = {0.0, 1.0};
vector long long vlr = vec_insert (2, vl, 0);
vector double vdr = vec_insert (2.0, vd, 1);
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector long long vler = {0, 2};
vector double vder = {2.0, 1.0};
#else
vector long long vler = {2, 1};
vector double vder = {0.0, 2.0};
#endif
check (vec_long_long_eq (vlr, vler), "vl");
check (vec_dbl_eq (vdr, vder), "vd");
}

gcc/testsuite/gcc.dg/vmx/ld-be-order.c (deleted):
@@ -1,108 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static unsigned char svuc[16] __attribute__ ((aligned (16)));
static signed char svsc[16] __attribute__ ((aligned (16)));
static unsigned char svbc[16] __attribute__ ((aligned (16)));
static unsigned short svus[8] __attribute__ ((aligned (16)));
static signed short svss[8] __attribute__ ((aligned (16)));
static unsigned short svbs[8] __attribute__ ((aligned (16)));
static unsigned short svp[8] __attribute__ ((aligned (16)));
static unsigned int svui[4] __attribute__ ((aligned (16)));
static signed int svsi[4] __attribute__ ((aligned (16)));
static unsigned int svbi[4] __attribute__ ((aligned (16)));
static float svf[4] __attribute__ ((aligned (16)));
static void init ()
{
unsigned int i;
for (i = 0; i < 16; ++i)
{
svuc[i] = i;
svsc[i] = i - 8;
svbc[i] = (i % 2) ? 0xff : 0;
}
for (i = 0; i < 8; ++i)
{
svus[i] = i;
svss[i] = i - 4;
svbs[i] = (i % 2) ? 0xffff : 0;
svp[i] = i;
}
for (i = 0; i < 4; ++i)
{
svui[i] = i;
svsi[i] = i - 2;
svbi[i] = (i % 2) ? 0xffffffff : 0;
svf[i] = i * 1.0f;
}
}
static void test ()
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned char evuc = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
vector signed char evsc = {7,6,5,4,3,2,1,0,-1,-2,-3,-4,-5,-6,-7,-8};
vector bool char evbc = {255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0};
vector unsigned short evus = {7,6,5,4,3,2,1,0};
vector signed short evss = {3,2,1,0,-1,-2,-3,-4};
vector bool short evbs = {65535,0,65535,0,65535,0,65535,0};
vector pixel evp = {7,6,5,4,3,2,1,0};
vector unsigned int evui = {3,2,1,0};
vector signed int evsi = {1,0,-1,-2};
vector bool int evbi = {0xffffffff,0,0xffffffff,0};
vector float evf = {3.0,2.0,1.0,0.0};
#else
vector unsigned char evuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector signed char evsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
vector bool char evbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
vector unsigned short evus = {0,1,2,3,4,5,6,7};
vector signed short evss = {-4,-3,-2,-1,0,1,2,3};
vector bool short evbs = {0,65535,0,65535,0,65535,0,65535};
vector pixel evp = {0,1,2,3,4,5,6,7};
vector unsigned int evui = {0,1,2,3};
vector signed int evsi = {-2,-1,0,1};
vector bool int evbi = {0,0xffffffff,0,0xffffffff};
vector float evf = {0.0,1.0,2.0,3.0};
#endif
vector unsigned char vuc;
vector signed char vsc;
vector bool char vbc;
vector unsigned short vus;
vector signed short vss;
vector bool short vbs;
vector pixel vp;
vector unsigned int vui;
vector signed int vsi;
vector bool int vbi;
vector float vf;
init ();
vuc = vec_ld (0, (vector unsigned char *)svuc);
vsc = vec_ld (0, (vector signed char *)svsc);
vbc = vec_ld (0, (vector bool char *)svbc);
vus = vec_ld (0, (vector unsigned short *)svus);
vss = vec_ld (0, (vector signed short *)svss);
vbs = vec_ld (0, (vector bool short *)svbs);
vp = vec_ld (0, (vector pixel *)svp);
vui = vec_ld (0, (vector unsigned int *)svui);
vsi = vec_ld (0, (vector signed int *)svsi);
vbi = vec_ld (0, (vector bool int *)svbi);
vf = vec_ld (0, (vector float *)svf);
check (vec_all_eq (vuc, evuc), "vuc");
check (vec_all_eq (vsc, evsc), "vsc");
check (vec_all_eq (vbc, evbc), "vbc");
check (vec_all_eq (vus, evus), "vus");
check (vec_all_eq (vss, evss), "vss");
check (vec_all_eq (vbs, evbs), "vbs");
check (vec_all_eq (vp, evp ), "vp" );
check (vec_all_eq (vui, evui), "vui");
check (vec_all_eq (vsi, evsi), "vsi");
check (vec_all_eq (vbi, evbi), "vbi");
check (vec_all_eq (vf, evf ), "vf" );
}

gcc/testsuite/gcc.dg/vmx/ld-vsx-be-order.c (deleted):
@@ -1,45 +0,0 @@
/* { dg-skip-if "" { powerpc*-*-darwin* } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
#include "harness.h"
static unsigned long long svul[2] __attribute__ ((aligned (16)));
static double svd[2] __attribute__ ((aligned (16)));
static void init ()
{
unsigned int i;
for (i = 0; i < 2; ++i)
{
svul[i] = i;
svd[i] = i * 1.0;
}
}
static void test ()
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned long long evul = {1,0};
vector double evd = {1.0,0.0};
#else
vector unsigned long long evul = {0,1};
vector double evd = {0.0,1.0};
#endif
vector unsigned long long vul;
vector double vd;
unsigned i;
init ();
vul = vec_ld (0, (vector unsigned long long *)svul);
vd = vec_ld (0, (vector double *)svd);
for (i = 0; i < 2; ++i)
{
check (vul[i] == evul[i], "vul");
check (vd[i] == evd[i], "vd" );
}
}

gcc/testsuite/gcc.dg/vmx/lde-be-order.c (deleted):
@@ -1,74 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static unsigned char svuc[16] __attribute__ ((aligned (16)));
static signed char svsc[16] __attribute__ ((aligned (16)));
static unsigned short svus[8] __attribute__ ((aligned (16)));
static signed short svss[8] __attribute__ ((aligned (16)));
static unsigned int svui[4] __attribute__ ((aligned (16)));
static signed int svsi[4] __attribute__ ((aligned (16)));
static float svf[4] __attribute__ ((aligned (16)));
static void init ()
{
int i;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
for (i = 15; i >= 0; --i)
#else
for (i = 0; i < 16; ++i)
#endif
{
svuc[i] = i;
svsc[i] = i - 8;
}
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
for (i = 7; i >= 0; --i)
#else
for (i = 0; i < 8; ++i)
#endif
{
svus[i] = i;
svss[i] = i - 4;
}
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
for (i = 3; i >= 0; --i)
#else
for (i = 0; i < 4; ++i)
#endif
{
svui[i] = i;
svsi[i] = i - 2;
svf[i] = i * 1.0f;
}
}
static void test ()
{
vector unsigned char vuc;
vector signed char vsc;
vector unsigned short vus;
vector signed short vss;
vector unsigned int vui;
vector signed int vsi;
vector float vf;
init ();
vuc = vec_lde (9*1, (unsigned char *)svuc);
vsc = vec_lde (14*1, (signed char *)svsc);
vus = vec_lde (7*2, (unsigned short *)svus);
vss = vec_lde (1*2, (signed short *)svss);
vui = vec_lde (3*4, (unsigned int *)svui);
vsi = vec_lde (2*4, (signed int *)svsi);
vf = vec_lde (0*4, (float *)svf);
check (vec_extract (vuc, 9) == 9, "vuc");
check (vec_extract (vsc, 14) == 6, "vsc");
check (vec_extract (vus, 7) == 7, "vus");
check (vec_extract (vss, 1) == -3, "vss");
check (vec_extract (vui, 3) == 3, "vui");
check (vec_extract (vsi, 2) == 0, "vsi");
check (vec_extract (vf, 0) == 0.0, "vf");
}

gcc/testsuite/gcc.dg/vmx/ldl-be-order.c (deleted):
@@ -1,108 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static unsigned char svuc[16] __attribute__ ((aligned (16)));
static signed char svsc[16] __attribute__ ((aligned (16)));
static unsigned char svbc[16] __attribute__ ((aligned (16)));
static unsigned short svus[8] __attribute__ ((aligned (16)));
static signed short svss[8] __attribute__ ((aligned (16)));
static unsigned short svbs[8] __attribute__ ((aligned (16)));
static unsigned short svp[8] __attribute__ ((aligned (16)));
static unsigned int svui[4] __attribute__ ((aligned (16)));
static signed int svsi[4] __attribute__ ((aligned (16)));
static unsigned int svbi[4] __attribute__ ((aligned (16)));
static float svf[4] __attribute__ ((aligned (16)));
static void init ()
{
unsigned int i;
for (i = 0; i < 16; ++i)
{
svuc[i] = i;
svsc[i] = i - 8;
svbc[i] = (i % 2) ? 0xff : 0;
}
for (i = 0; i < 8; ++i)
{
svus[i] = i;
svss[i] = i - 4;
svbs[i] = (i % 2) ? 0xffff : 0;
svp[i] = i;
}
for (i = 0; i < 4; ++i)
{
svui[i] = i;
svsi[i] = i - 2;
svbi[i] = (i % 2) ? 0xffffffff : 0;
svf[i] = i * 1.0f;
}
}
static void test ()
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned char evuc = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
vector signed char evsc = {7,6,5,4,3,2,1,0,-1,-2,-3,-4,-5,-6,-7,-8};
vector bool char evbc = {255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0};
vector unsigned short evus = {7,6,5,4,3,2,1,0};
vector signed short evss = {3,2,1,0,-1,-2,-3,-4};
vector bool short evbs = {65535,0,65535,0,65535,0,65535,0};
vector pixel evp = {7,6,5,4,3,2,1,0};
vector unsigned int evui = {3,2,1,0};
vector signed int evsi = {1,0,-1,-2};
vector bool int evbi = {0xffffffff,0,0xffffffff,0};
vector float evf = {3.0,2.0,1.0,0.0};
#else
vector unsigned char evuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector signed char evsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
vector bool char evbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
vector unsigned short evus = {0,1,2,3,4,5,6,7};
vector signed short evss = {-4,-3,-2,-1,0,1,2,3};
vector bool short evbs = {0,65535,0,65535,0,65535,0,65535};
vector pixel evp = {0,1,2,3,4,5,6,7};
vector unsigned int evui = {0,1,2,3};
vector signed int evsi = {-2,-1,0,1};
vector bool int evbi = {0,0xffffffff,0,0xffffffff};
vector float evf = {0.0,1.0,2.0,3.0};
#endif
vector unsigned char vuc;
vector signed char vsc;
vector bool char vbc;
vector unsigned short vus;
vector signed short vss;
vector bool short vbs;
vector pixel vp;
vector unsigned int vui;
vector signed int vsi;
vector bool int vbi;
vector float vf;
init ();
vuc = vec_ldl (0, (vector unsigned char *)svuc);
vsc = vec_ldl (0, (vector signed char *)svsc);
vbc = vec_ldl (0, (vector bool char *)svbc);
vus = vec_ldl (0, (vector unsigned short *)svus);
vss = vec_ldl (0, (vector signed short *)svss);
vbs = vec_ldl (0, (vector bool short *)svbs);
vp = vec_ldl (0, (vector pixel *)svp);
vui = vec_ldl (0, (vector unsigned int *)svui);
vsi = vec_ldl (0, (vector signed int *)svsi);
vbi = vec_ldl (0, (vector bool int *)svbi);
vf = vec_ldl (0, (vector float *)svf);
check (vec_all_eq (vuc, evuc), "vuc");
check (vec_all_eq (vsc, evsc), "vsc");
check (vec_all_eq (vbc, evbc), "vbc");
check (vec_all_eq (vus, evus), "vus");
check (vec_all_eq (vss, evss), "vss");
check (vec_all_eq (vbs, evbs), "vbs");
check (vec_all_eq (vp, evp ), "vp" );
check (vec_all_eq (vui, evui), "vui");
check (vec_all_eq (vsi, evsi), "vsi");
check (vec_all_eq (vbi, evbi), "vbi");
check (vec_all_eq (vf, evf ), "vf" );
}

gcc/testsuite/gcc.dg/vmx/ldl-vsx-be-order.c (deleted):
@@ -1,45 +0,0 @@
/* { dg-skip-if "" { powerpc*-*-darwin* } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
#include "harness.h"
static unsigned long long svul[2] __attribute__ ((aligned (16)));
static double svd[2] __attribute__ ((aligned (16)));
static void init ()
{
unsigned int i;
for (i = 0; i < 2; ++i)
{
svul[i] = i;
svd[i] = i * 1.0;
}
}
static void test ()
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned long long evul = {1,0};
vector double evd = {1.0,0.0};
#else
vector unsigned long long evul = {0,1};
vector double evd = {0.0,1.0};
#endif
vector unsigned long long vul;
vector double vd;
unsigned i;
init ();
vul = vec_ldl (0, (vector unsigned long long *)svul);
vd = vec_ldl (0, (vector double *)svd);
for (i = 0; i < 2; ++i)
{
check (vul[i] == evul[i], "vul");
check (vd[i] == evd[i], "vd" );
}
}

gcc/testsuite/gcc.dg/vmx/merge-be-order.c (deleted):
@@ -1,97 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static void test()
{
/* Input vectors. */
vector unsigned char vuca = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector unsigned char vucb
= {16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31};
vector signed char vsca
= {-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1};
vector signed char vscb = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector unsigned short vusa = {0,1,2,3,4,5,6,7};
vector unsigned short vusb = {8,9,10,11,12,13,14,15};
vector signed short vssa = {-8,-7,-6,-5,-4,-3,-2,-1};
vector signed short vssb = {0,1,2,3,4,5,6,7};
vector unsigned int vuia = {0,1,2,3};
vector unsigned int vuib = {4,5,6,7};
vector signed int vsia = {-4,-3,-2,-1};
vector signed int vsib = {0,1,2,3};
vector float vfa = {-4.0,-3.0,-2.0,-1.0};
vector float vfb = {0.0,1.0,2.0,3.0};
/* Result vectors. */
vector unsigned char vuch, vucl;
vector signed char vsch, vscl;
vector unsigned short vush, vusl;
vector signed short vssh, vssl;
vector unsigned int vuih, vuil;
vector signed int vsih, vsil;
vector float vfh, vfl;
/* Expected result vectors. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned char vucrh = {24,8,25,9,26,10,27,11,28,12,29,13,30,14,31,15};
vector unsigned char vucrl = {16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7};
vector signed char vscrh = {8,-8,9,-7,10,-6,11,-5,12,-4,13,-3,14,-2,15,-1};
vector signed char vscrl = {0,-16,1,-15,2,-14,3,-13,4,-12,5,-11,6,-10,7,-9};
vector unsigned short vusrh = {12,4,13,5,14,6,15,7};
vector unsigned short vusrl = {8,0,9,1,10,2,11,3};
vector signed short vssrh = {4,-4,5,-3,6,-2,7,-1};
vector signed short vssrl = {0,-8,1,-7,2,-6,3,-5};
vector unsigned int vuirh = {6,2,7,3};
vector unsigned int vuirl = {4,0,5,1};
vector signed int vsirh = {2,-2,3,-1};
vector signed int vsirl = {0,-4,1,-3};
vector float vfrh = {2.0,-2.0,3.0,-1.0};
vector float vfrl = {0.0,-4.0,1.0,-3.0};
#else
vector unsigned char vucrh = {0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23};
vector unsigned char vucrl = {8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31};
vector signed char vscrh = {-16,0,-15,1,-14,2,-13,3,-12,4,-11,5,-10,6,-9,7};
vector signed char vscrl = {-8,8,-7,9,-6,10,-5,11,-4,12,-3,13,-2,14,-1,15};
vector unsigned short vusrh = {0,8,1,9,2,10,3,11};
vector unsigned short vusrl = {4,12,5,13,6,14,7,15};
vector signed short vssrh = {-8,0,-7,1,-6,2,-5,3};
vector signed short vssrl = {-4,4,-3,5,-2,6,-1,7};
vector unsigned int vuirh = {0,4,1,5};
vector unsigned int vuirl = {2,6,3,7};
vector signed int vsirh = {-4,0,-3,1};
vector signed int vsirl = {-2,2,-1,3};
vector float vfrh = {-4.0,0.0,-3.0,1.0};
vector float vfrl = {-2.0,2.0,-1.0,3.0};
#endif
vuch = vec_mergeh (vuca, vucb);
vucl = vec_mergel (vuca, vucb);
vsch = vec_mergeh (vsca, vscb);
vscl = vec_mergel (vsca, vscb);
vush = vec_mergeh (vusa, vusb);
vusl = vec_mergel (vusa, vusb);
vssh = vec_mergeh (vssa, vssb);
vssl = vec_mergel (vssa, vssb);
vuih = vec_mergeh (vuia, vuib);
vuil = vec_mergel (vuia, vuib);
vsih = vec_mergeh (vsia, vsib);
vsil = vec_mergel (vsia, vsib);
vfh = vec_mergeh (vfa, vfb );
vfl = vec_mergel (vfa, vfb );
check (vec_all_eq (vuch, vucrh), "vuch");
check (vec_all_eq (vucl, vucrl), "vucl");
check (vec_all_eq (vsch, vscrh), "vsch");
check (vec_all_eq (vscl, vscrl), "vscl");
check (vec_all_eq (vush, vusrh), "vush");
check (vec_all_eq (vusl, vusrl), "vusl");
check (vec_all_eq (vssh, vssrh), "vssh");
check (vec_all_eq (vssl, vssrl), "vssl");
check (vec_all_eq (vuih, vuirh), "vuih");
check (vec_all_eq (vuil, vuirl), "vuil");
check (vec_all_eq (vsih, vsirh), "vsih");
check (vec_all_eq (vsil, vsirl), "vsil");
check (vec_all_eq (vfh, vfrh), "vfh");
check (vec_all_eq (vfl, vfrl), "vfl");
}

gcc/testsuite/gcc.dg/vmx/merge-vsx-be-order.c (deleted):
@@ -1,85 +0,0 @@
/* { dg-skip-if "" { powerpc*-*-darwin* } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
#include "harness.h"
static int vec_long_long_eq (vector long long x, vector long long y)
{
return (x[0] == y[0] && x[1] == y[1]);
}
static int vec_double_eq (vector double x, vector double y)
{
return (x[0] == y[0] && x[1] == y[1]);
}
static void test()
{
/* Input vectors. */
vector long long vla = {-2,-1};
vector long long vlb = {0,1};
vector double vda = {-2.0,-1.0};
vector double vdb = {0.0,1.0};
vector unsigned int vuia = {0,1,2,3};
vector unsigned int vuib = {4,5,6,7};
vector signed int vsia = {-4,-3,-2,-1};
vector signed int vsib = {0,1,2,3};
vector float vfa = {-4.0,-3.0,-2.0,-1.0};
vector float vfb = {0.0,1.0,2.0,3.0};
/* Result vectors. */
vector long long vlh, vll;
vector double vdh, vdl;
vector unsigned int vuih, vuil;
vector signed int vsih, vsil;
vector float vfh, vfl;
/* Expected result vectors. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector long long vlrh = {1,-1};
vector long long vlrl = {0,-2};
vector double vdrh = {1.0,-1.0};
vector double vdrl = {0.0,-2.0};
vector unsigned int vuirh = {6,2,7,3};
vector unsigned int vuirl = {4,0,5,1};
vector signed int vsirh = {2,-2,3,-1};
vector signed int vsirl = {0,-4,1,-3};
vector float vfrh = {2.0,-2.0,3.0,-1.0};
vector float vfrl = {0.0,-4.0,1.0,-3.0};
#else
vector long long vlrh = {-2,0};
vector long long vlrl = {-1,1};
vector double vdrh = {-2.0,0.0};
vector double vdrl = {-1.0,1.0};
vector unsigned int vuirh = {0,4,1,5};
vector unsigned int vuirl = {2,6,3,7};
vector signed int vsirh = {-4,0,-3,1};
vector signed int vsirl = {-2,2,-1,3};
vector float vfrh = {-4.0,0.0,-3.0,1.0};
vector float vfrl = {-2.0,2.0,-1.0,3.0};
#endif
vlh = vec_mergeh (vla, vlb);
vll = vec_mergel (vla, vlb);
vdh = vec_mergeh (vda, vdb);
vdl = vec_mergel (vda, vdb);
vuih = vec_mergeh (vuia, vuib);
vuil = vec_mergel (vuia, vuib);
vsih = vec_mergeh (vsia, vsib);
vsil = vec_mergel (vsia, vsib);
vfh = vec_mergeh (vfa, vfb );
vfl = vec_mergel (vfa, vfb );
check (vec_long_long_eq (vlh, vlrh), "vlh");
check (vec_long_long_eq (vll, vlrl), "vll");
check (vec_double_eq (vdh, vdrh), "vdh" );
check (vec_double_eq (vdl, vdrl), "vdl" );
check (vec_all_eq (vuih, vuirh), "vuih");
check (vec_all_eq (vuil, vuirl), "vuil");
check (vec_all_eq (vsih, vsirh), "vsih");
check (vec_all_eq (vsil, vsirl), "vsil");
check (vec_all_eq (vfh, vfrh), "vfh");
check (vec_all_eq (vfl, vfrl), "vfl");
}

gcc/testsuite/gcc.dg/vmx/mult-even-odd-be-order.c (deleted):
@@ -1,65 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static void test()
{
volatile vector unsigned char vuca = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
volatile vector unsigned char vucb = {2,3,2,3,2,3,2,3,2,3,2,3,2,3,2,3};
volatile vector signed char vsca = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
volatile vector signed char vscb = {2,-3,2,-3,2,-3,2,-3,2,-3,2,-3,2,-3,2,-3};
volatile vector unsigned short vusa = {0,1,2,3,4,5,6,7};
volatile vector unsigned short vusb = {2,3,2,3,2,3,2,3};
volatile vector signed short vssa = {-4,-3,-2,-1,0,1,2,3};
volatile vector signed short vssb = {2,-3,2,-3,2,-3,2,-3};
volatile vector unsigned short vuse, vuso;
volatile vector signed short vsse, vsso;
volatile vector unsigned int vuie, vuio;
volatile vector signed int vsie, vsio;
vuse = vec_mule (vuca, vucb);
vuso = vec_mulo (vuca, vucb);
vsse = vec_mule (vsca, vscb);
vsso = vec_mulo (vsca, vscb);
vuie = vec_mule (vusa, vusb);
vuio = vec_mulo (vusa, vusb);
vsie = vec_mule (vssa, vssb);
vsio = vec_mulo (vssa, vssb);
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
check (vec_all_eq (vuse,
((vector unsigned short){3,9,15,21,27,33,39,45})),
"vuse");
check (vec_all_eq (vuso,
((vector unsigned short){0,4,8,12,16,20,24,28})),
"vuso");
check (vec_all_eq (vsse,
((vector signed short){21,15,9,3,-3,-9,-15,-21})),
"vsse");
check (vec_all_eq (vsso,
((vector signed short){-16,-12,-8,-4,0,4,8,12})),
"vsso");
check (vec_all_eq (vuie, ((vector unsigned int){3,9,15,21})), "vuie");
check (vec_all_eq (vuio, ((vector unsigned int){0,4,8,12})), "vuio");
check (vec_all_eq (vsie, ((vector signed int){9,3,-3,-9})), "vsie");
check (vec_all_eq (vsio, ((vector signed int){-8,-4,0,4})), "vsio");
#else
check (vec_all_eq (vuse,
((vector unsigned short){0,4,8,12,16,20,24,28})),
"vuse");
check (vec_all_eq (vuso,
((vector unsigned short){3,9,15,21,27,33,39,45})),
"vuso");
check (vec_all_eq (vsse,
((vector signed short){-16,-12,-8,-4,0,4,8,12})),
"vsse");
check (vec_all_eq (vsso,
((vector signed short){21,15,9,3,-3,-9,-15,-21})),
"vsso");
check (vec_all_eq (vuie, ((vector unsigned int){0,4,8,12})), "vuie");
check (vec_all_eq (vuio, ((vector unsigned int){3,9,15,21})), "vuio");
check (vec_all_eq (vsie, ((vector signed int){-8,-4,0,4})), "vsie");
check (vec_all_eq (vsio, ((vector signed int){9,3,-3,-9})), "vsio");
#endif
}

gcc/testsuite/gcc.dg/vmx/pack-be-order.c (deleted):
@@ -1,137 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
#define BIG 4294967295
static void test()
{
/* Input vectors. */
vector unsigned short vusa = {0,1,2,3,4,5,6,7};
vector unsigned short vusb = {8,9,10,11,12,13,14,15};
vector signed short vssa = {-8,-7,-6,-5,-4,-3,-2,-1};
vector signed short vssb = {0,1,2,3,4,5,6,7};
vector bool short vbsa = {0,65535,65535,0,0,0,65535,0};
vector bool short vbsb = {65535,0,0,65535,65535,65535,0,65535};
vector unsigned int vuia = {0,1,2,3};
vector unsigned int vuib = {4,5,6,7};
vector signed int vsia = {-4,-3,-2,-1};
vector signed int vsib = {0,1,2,3};
vector bool int vbia = {0,BIG,BIG,BIG};
vector bool int vbib = {BIG,0,0,0};
vector unsigned int vipa = {(0<<24) + (2<<19) + (3<<11) + (4<<3),
(1<<24) + (5<<19) + (6<<11) + (7<<3),
(0<<24) + (8<<19) + (9<<11) + (10<<3),
(1<<24) + (11<<19) + (12<<11) + (13<<3)};
vector unsigned int vipb = {(1<<24) + (14<<19) + (15<<11) + (16<<3),
(0<<24) + (17<<19) + (18<<11) + (19<<3),
(1<<24) + (20<<19) + (21<<11) + (22<<3),
(0<<24) + (23<<19) + (24<<11) + (25<<3)};
vector unsigned short vusc = {0,256,1,257,2,258,3,259};
vector unsigned short vusd = {4,260,5,261,6,262,7,263};
vector signed short vssc = {-1,-128,0,127,-2,-129,1,128};
vector signed short vssd = {-3,-130,2,129,-4,-131,3,130};
vector unsigned int vuic = {0,65536,1,65537};
vector unsigned int vuid = {2,65538,3,65539};
vector signed int vsic = {-1,-32768,0,32767};
vector signed int vsid = {-2,-32769,1,32768};
/* Result vectors. */
vector unsigned char vucr;
vector signed char vscr;
vector bool char vbcr;
vector unsigned short vusr;
vector signed short vssr;
vector bool short vbsr;
vector pixel vpr;
vector unsigned char vucsr;
vector signed char vscsr;
vector unsigned short vussr;
vector signed short vsssr;
vector unsigned char vucsur1, vucsur2;
vector unsigned short vussur1, vussur2;
/* Expected result vectors. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned char vucer = {8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7};
vector signed char vscer = {0,1,2,3,4,5,6,7,-8,-7,-6,-5,-4,-3,-2,-1};
vector bool char vbcer = {255,0,0,255,255,255,0,255,0,255,255,0,0,0,255,0};
vector unsigned short vuser = {4,5,6,7,0,1,2,3};
vector signed short vsser = {0,1,2,3,-4,-3,-2,-1};
vector bool short vbser = {65535,0,0,0,0,65535,65535,65535};
vector pixel vper = {(1<<15) + (14<<10) + (15<<5) + 16,
(0<<15) + (17<<10) + (18<<5) + 19,
(1<<15) + (20<<10) + (21<<5) + 22,
(0<<15) + (23<<10) + (24<<5) + 25,
(0<<15) + (2<<10) + (3<<5) + 4,
(1<<15) + (5<<10) + (6<<5) + 7,
(0<<15) + (8<<10) + (9<<5) + 10,
(1<<15) + (11<<10) + (12<<5) + 13};
vector unsigned char vucser = {4,255,5,255,6,255,7,255,0,255,1,255,2,255,3,255};
vector signed char vscser = {-3,-128,2,127,-4,-128,3,127,
-1,-128,0,127,-2,-128,1,127};
vector unsigned short vusser = {2,65535,3,65535,0,65535,1,65535};
vector signed short vssser = {-2,-32768,1,32767,-1,-32768,0,32767};
vector unsigned char vucsuer1 = {4,255,5,255,6,255,7,255,0,255,1,255,2,255,3,255};
vector unsigned char vucsuer2 = {0,0,2,129,0,0,3,130,0,0,0,127,0,0,1,128};
vector unsigned short vussuer1 = {2,65535,3,65535,0,65535,1,65535};
vector unsigned short vussuer2 = {0,0,1,32768,0,0,0,32767};
#else
vector unsigned char vucer = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector signed char vscer = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
vector bool char vbcer = {0,255,255,0,0,0,255,0,255,0,0,255,255,255,0,255};
vector unsigned short vuser = {0,1,2,3,4,5,6,7};
vector signed short vsser = {-4,-3,-2,-1,0,1,2,3};
vector bool short vbser = {0,65535,65535,65535,65535,0,0,0};
vector pixel vper = {(0<<15) + (2<<10) + (3<<5) + 4,
(1<<15) + (5<<10) + (6<<5) + 7,
(0<<15) + (8<<10) + (9<<5) + 10,
(1<<15) + (11<<10) + (12<<5) + 13,
(1<<15) + (14<<10) + (15<<5) + 16,
(0<<15) + (17<<10) + (18<<5) + 19,
(1<<15) + (20<<10) + (21<<5) + 22,
(0<<15) + (23<<10) + (24<<5) + 25};
vector unsigned char vucser = {0,255,1,255,2,255,3,255,4,255,5,255,6,255,7,255};
vector signed char vscser = {-1,-128,0,127,-2,-128,1,127,
-3,-128,2,127,-4,-128,3,127};
vector unsigned short vusser = {0,65535,1,65535,2,65535,3,65535};
vector signed short vssser = {-1,-32768,0,32767,-2,-32768,1,32767};
vector unsigned char vucsuer1 = {0,255,1,255,2,255,3,255,4,255,5,255,6,255,7,255};
vector unsigned char vucsuer2 = {0,0,0,127,0,0,1,128,0,0,2,129,0,0,3,130};
vector unsigned short vussuer1 = {0,65535,1,65535,2,65535,3,65535};
vector unsigned short vussuer2 = {0,0,0,32767,0,0,1,32768};
#endif
vucr = vec_pack (vusa, vusb);
vscr = vec_pack (vssa, vssb);
vbcr = vec_pack (vbsa, vbsb);
vusr = vec_pack (vuia, vuib);
vssr = vec_pack (vsia, vsib);
vbsr = vec_pack (vbia, vbib);
vpr = vec_packpx (vipa, vipb);
vucsr = vec_packs (vusc, vusd);
vscsr = vec_packs (vssc, vssd);
vussr = vec_packs (vuic, vuid);
vsssr = vec_packs (vsic, vsid);
vucsur1 = vec_packsu (vusc, vusd);
vucsur2 = vec_packsu (vssc, vssd);
vussur1 = vec_packsu (vuic, vuid);
vussur2 = vec_packsu (vsic, vsid);
check (vec_all_eq (vucr, vucer), "vucr");
check (vec_all_eq (vscr, vscer), "vscr");
check (vec_all_eq (vbcr, vbcer), "vbcr");
check (vec_all_eq (vusr, vuser), "vusr");
check (vec_all_eq (vssr, vsser), "vssr");
check (vec_all_eq (vbsr, vbser), "vbsr");
check (vec_all_eq (vpr, vper ), "vpr" );
check (vec_all_eq (vucsr, vucser), "vucsr");
check (vec_all_eq (vscsr, vscser), "vscsr");
check (vec_all_eq (vussr, vusser), "vussr");
check (vec_all_eq (vsssr, vssser), "vsssr");
check (vec_all_eq (vucsur1, vucsuer1), "vucsur1");
check (vec_all_eq (vucsur2, vucsuer2), "vucsur2");
check (vec_all_eq (vussur1, vussuer1), "vussur1");
check (vec_all_eq (vussur2, vussuer2), "vussur2");
}

gcc/testsuite/gcc.dg/vmx/perm-be-order.c (deleted):
@@ -1,75 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static void test()
{
/* Input vectors. */
vector unsigned char vuca = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector unsigned char vucb = {16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31};
vector signed char vsca = {-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1};
vector signed char vscb = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector unsigned short vusa = {0,1,2,3,4,5,6,7};
vector unsigned short vusb = {8,9,10,11,12,13,14,15};
vector signed short vssa = {-8,-7,-6,-5,-4,-3,-2,-1};
vector signed short vssb = {0,1,2,3,4,5,6,7};
vector unsigned int vuia = {0,1,2,3};
vector unsigned int vuib = {4,5,6,7};
vector signed int vsia = {-4,-3,-2,-1};
vector signed int vsib = {0,1,2,3};
vector float vfa = {-4.0,-3.0,-2.0,-1.0};
vector float vfb = {0.0,1.0,2.0,3.0};
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned char vucp = {15,16,14,17,13,18,12,19,11,20,10,21,9,22,8,23};
vector unsigned char vscp = {15,16,14,17,13,18,12,19,11,20,10,21,9,22,8,23};
vector unsigned char vusp = {15,14,17,16,13,12,19,18,11,10,21,20,9,8,23,22};
vector unsigned char vssp = {15,14,17,16,13,12,19,18,11,10,21,20,9,8,23,22};
vector unsigned char vuip = {15,14,13,12,19,18,17,16,11,10,9,8,23,22,21,20};
vector unsigned char vsip = {15,14,13,12,19,18,17,16,11,10,9,8,23,22,21,20};
vector unsigned char vfp = {15,14,13,12,19,18,17,16,11,10,9,8,23,22,21,20};
#else
vector unsigned char vucp = {0,31,1,30,2,29,3,28,4,27,5,26,6,25,7,24};
vector unsigned char vscp = {0,31,1,30,2,29,3,28,4,27,5,26,6,25,7,24};
vector unsigned char vusp = {0,1,30,31,2,3,28,29,4,5,26,27,6,7,24,25};
vector unsigned char vssp = {0,1,30,31,2,3,28,29,4,5,26,27,6,7,24,25};
vector unsigned char vuip = {0,1,2,3,28,29,30,31,4,5,6,7,24,25,26,27};
vector unsigned char vsip = {0,1,2,3,28,29,30,31,4,5,6,7,24,25,26,27};
vector unsigned char vfp = {0,1,2,3,28,29,30,31,4,5,6,7,24,25,26,27};
#endif
/* Result vectors. */
vector unsigned char vuc;
vector signed char vsc;
vector unsigned short vus;
vector signed short vss;
vector unsigned int vui;
vector signed int vsi;
vector float vf;
/* Expected result vectors. */
vector unsigned char vucr = {0,31,1,30,2,29,3,28,4,27,5,26,6,25,7,24};
vector signed char vscr = {-16,15,-15,14,-14,13,-13,12,-12,11,-11,10,-10,9,-9,8};
vector unsigned short vusr = {0,15,1,14,2,13,3,12};
vector signed short vssr = {-8,7,-7,6,-6,5,-5,4};
vector unsigned int vuir = {0,7,1,6};
vector signed int vsir = {-4,3,-3,2};
vector float vfr = {-4.0,3.0,-3.0,2.0};
vuc = vec_perm (vuca, vucb, vucp);
vsc = vec_perm (vsca, vscb, vscp);
vus = vec_perm (vusa, vusb, vusp);
vss = vec_perm (vssa, vssb, vssp);
vui = vec_perm (vuia, vuib, vuip);
vsi = vec_perm (vsia, vsib, vsip);
vf = vec_perm (vfa, vfb, vfp );
check (vec_all_eq (vuc, vucr), "vuc");
check (vec_all_eq (vsc, vscr), "vsc");
check (vec_all_eq (vus, vusr), "vus");
check (vec_all_eq (vss, vssr), "vss");
check (vec_all_eq (vui, vuir), "vui");
check (vec_all_eq (vsi, vsir), "vsi");
check (vec_all_eq (vf, vfr), "vf" );
}

gcc/testsuite/gcc.dg/vmx/splat-be-order.c (deleted):
@@ -1,60 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static void test()
{
/* Input vectors. */
vector unsigned char vuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
vector unsigned short vus = {0,1,2,3,4,5,6,7};
vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
vector unsigned int vui = {0,1,2,3};
vector signed int vsi = {-2,-1,0,1};
vector float vf = {-2.0,-1.0,0.0,1.0};
/* Result vectors. */
vector unsigned char vucr;
vector signed char vscr;
vector unsigned short vusr;
vector signed short vssr;
vector unsigned int vuir;
vector signed int vsir;
vector float vfr;
/* Expected result vectors. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned char vucer = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
vector signed char vscer = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
vector unsigned short vuser = {0,0,0,0,0,0,0,0};
vector signed short vsser = {3,3,3,3,3,3,3,3};
vector unsigned int vuier = {1,1,1,1};
vector signed int vsier = {-2,-2,-2,-2};
vector float vfer = {0.0,0.0,0.0,0.0};
#else
vector unsigned char vucer = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
vector signed char vscer = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
vector unsigned short vuser = {7,7,7,7,7,7,7,7};
vector signed short vsser = {-4,-4,-4,-4,-4,-4,-4,-4};
vector unsigned int vuier = {2,2,2,2};
vector signed int vsier = {1,1,1,1};
vector float vfer = {-1.0,-1.0,-1.0,-1.0};
#endif
vucr = vec_splat (vuc, 1);
vscr = vec_splat (vsc, 8);
vusr = vec_splat (vus, 7);
vssr = vec_splat (vss, 0);
vuir = vec_splat (vui, 2);
vsir = vec_splat (vsi, 3);
vfr = vec_splat (vf, 1);
check (vec_all_eq (vucr, vucer), "vuc");
check (vec_all_eq (vscr, vscer), "vsc");
check (vec_all_eq (vusr, vuser), "vus");
check (vec_all_eq (vssr, vsser), "vss");
check (vec_all_eq (vuir, vuier), "vui");
check (vec_all_eq (vsir, vsier), "vsi");
check (vec_all_eq (vfr, vfer ), "vf");
}

gcc/testsuite/gcc.dg/vmx/splat-vsx-be-order.c (deleted):
@@ -1,38 +0,0 @@
/* { dg-skip-if "" { powerpc*-*-darwin* } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
#include "harness.h"
static void test()
{
/* Input vectors. */
vector unsigned int vui = {0,1,2,3};
vector signed int vsi = {-2,-1,0,1};
vector float vf = {-2.0,-1.0,0.0,1.0};
/* Result vectors. */
vector unsigned int vuir;
vector signed int vsir;
vector float vfr;
/* Expected result vectors. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned int vuier = {1,1,1,1};
vector signed int vsier = {-2,-2,-2,-2};
vector float vfer = {0.0,0.0,0.0,0.0};
#else
vector unsigned int vuier = {2,2,2,2};
vector signed int vsier = {1,1,1,1};
vector float vfer = {-1.0,-1.0,-1.0,-1.0};
#endif
vuir = vec_splat (vui, 2);
vsir = vec_splat (vsi, 3);
vfr = vec_splat (vf, 1);
check (vec_all_eq (vuir, vuier), "vui");
check (vec_all_eq (vsir, vsier), "vsi");
check (vec_all_eq (vfr, vfer ), "vf");
}

gcc/testsuite/gcc.dg/vmx/st-be-order.c (deleted):
@@ -1,84 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static unsigned char svuc[16] __attribute__ ((aligned (16)));
static signed char svsc[16] __attribute__ ((aligned (16)));
static unsigned char svbc[16] __attribute__ ((aligned (16)));
static unsigned short svus[8] __attribute__ ((aligned (16)));
static signed short svss[8] __attribute__ ((aligned (16)));
static unsigned short svbs[8] __attribute__ ((aligned (16)));
static unsigned short svp[8] __attribute__ ((aligned (16)));
static unsigned int svui[4] __attribute__ ((aligned (16)));
static signed int svsi[4] __attribute__ ((aligned (16)));
static unsigned int svbi[4] __attribute__ ((aligned (16)));
static float svf[4] __attribute__ ((aligned (16)));
static void check_arrays ()
{
unsigned int i;
for (i = 0; i < 16; ++i)
{
check (svuc[i] == i, "svuc");
check (svsc[i] == i - 8, "svsc");
check (svbc[i] == ((i % 2) ? 0xff : 0), "svbc");
}
for (i = 0; i < 8; ++i)
{
check (svus[i] == i, "svus");
check (svss[i] == i - 4, "svss");
check (svbs[i] == ((i % 2) ? 0xffff : 0), "svbs");
check (svp[i] == i, "svp");
}
for (i = 0; i < 4; ++i)
{
check (svui[i] == i, "svui");
check (svsi[i] == i - 2, "svsi");
check (svbi[i] == ((i % 2) ? 0xffffffff : 0), "svbi");
check (svf[i] == i * 1.0f, "svf");
}
}
static void test ()
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned char vuc = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
vector signed char vsc = {7,6,5,4,3,2,1,0,-1,-2,-3,-4,-5,-6,-7,-8};
vector bool char vbc = {255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0};
vector unsigned short vus = {7,6,5,4,3,2,1,0};
vector signed short vss = {3,2,1,0,-1,-2,-3,-4};
vector bool short vbs = {65535,0,65535,0,65535,0,65535,0};
vector pixel vp = {7,6,5,4,3,2,1,0};
vector unsigned int vui = {3,2,1,0};
vector signed int vsi = {1,0,-1,-2};
vector bool int vbi = {0xffffffff,0,0xffffffff,0};
vector float vf = {3.0,2.0,1.0,0.0};
#else
vector unsigned char vuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
vector bool char vbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
vector unsigned short vus = {0,1,2,3,4,5,6,7};
vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
vector bool short vbs = {0,65535,0,65535,0,65535,0,65535};
vector pixel vp = {0,1,2,3,4,5,6,7};
vector unsigned int vui = {0,1,2,3};
vector signed int vsi = {-2,-1,0,1};
vector bool int vbi = {0,0xffffffff,0,0xffffffff};
vector float vf = {0.0,1.0,2.0,3.0};
#endif
vec_st (vuc, 0, (vector unsigned char *)svuc);
vec_st (vsc, 0, (vector signed char *)svsc);
vec_st (vbc, 0, (vector bool char *)svbc);
vec_st (vus, 0, (vector unsigned short *)svus);
vec_st (vss, 0, (vector signed short *)svss);
vec_st (vbs, 0, (vector bool short *)svbs);
vec_st (vp, 0, (vector pixel *)svp);
vec_st (vui, 0, (vector unsigned int *)svui);
vec_st (vsi, 0, (vector signed int *)svsi);
vec_st (vbi, 0, (vector bool int *)svbi);
vec_st (vf, 0, (vector float *)svf);
check_arrays ();
}

gcc/testsuite/gcc.dg/vmx/st-vsx-be-order.c (deleted):
@@ -1,35 +0,0 @@
/* { dg-skip-if "" { powerpc*-*-darwin* } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
#include "harness.h"
static unsigned long long svul[2] __attribute__ ((aligned (16)));
static double svd[2] __attribute__ ((aligned (16)));
static void check_arrays ()
{
unsigned int i;
for (i = 0; i < 2; ++i)
{
check (svul[i] == i, "svul");
check (svd[i] == i * 1.0, "svd");
}
}
static void test ()
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned long long vul = {1,0};
vector double vd = {1.0,0.0};
#else
vector unsigned long long vul = {0,1};
vector double vd = {0.0,1.0};
#endif
vec_st (vul, 0, (vector unsigned long long *)svul);
vec_st (vd, 0, (vector double *)svd);
check_arrays ();
}

gcc/testsuite/gcc.dg/vmx/ste-be-order.c (deleted):
@@ -1,54 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static unsigned char svuc[16] __attribute__ ((aligned (16)));
static signed char svsc[16] __attribute__ ((aligned (16)));
static unsigned short svus[8] __attribute__ ((aligned (16)));
static signed short svss[8] __attribute__ ((aligned (16)));
static unsigned int svui[4] __attribute__ ((aligned (16)));
static signed int svsi[4] __attribute__ ((aligned (16)));
static float svf[4] __attribute__ ((aligned (16)));
static void check_arrays ()
{
check (svuc[9] == 9, "svuc");
check (svsc[14] == 6, "svsc");
check (svus[7] == 7, "svus");
check (svss[1] == -3, "svss");
check (svui[3] == 3, "svui");
check (svsi[2] == 0, "svsi");
check (svf[0] == 0.0, "svf");
}
static void test ()
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned char vuc = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
vector signed char vsc = {7,6,5,4,3,2,1,0,-1,-2,-3,-4,-5,-6,-7,-8};
vector unsigned short vus = {7,6,5,4,3,2,1,0};
vector signed short vss = {3,2,1,0,-1,-2,-3,-4};
vector unsigned int vui = {3,2,1,0};
vector signed int vsi = {1,0,-1,-2};
vector float vf = {3.0,2.0,1.0,0.0};
#else
vector unsigned char vuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
vector unsigned short vus = {0,1,2,3,4,5,6,7};
vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
vector unsigned int vui = {0,1,2,3};
vector signed int vsi = {-2,-1,0,1};
vector float vf = {0.0,1.0,2.0,3.0};
#endif
vec_ste (vuc, 9*1, (unsigned char *)svuc);
vec_ste (vsc, 14*1, (signed char *)svsc);
vec_ste (vus, 7*2, (unsigned short *)svus);
vec_ste (vss, 1*2, (signed short *)svss);
vec_ste (vui, 3*4, (unsigned int *)svui);
vec_ste (vsi, 2*4, (signed int *)svsi);
vec_ste (vf, 0*4, (float *)svf);
check_arrays ();
}

gcc/testsuite/gcc.dg/vmx/stl-be-order.c (deleted):
@@ -1,84 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static unsigned char svuc[16] __attribute__ ((aligned (16)));
static signed char svsc[16] __attribute__ ((aligned (16)));
static unsigned char svbc[16] __attribute__ ((aligned (16)));
static unsigned short svus[8] __attribute__ ((aligned (16)));
static signed short svss[8] __attribute__ ((aligned (16)));
static unsigned short svbs[8] __attribute__ ((aligned (16)));
static unsigned short svp[8] __attribute__ ((aligned (16)));
static unsigned int svui[4] __attribute__ ((aligned (16)));
static signed int svsi[4] __attribute__ ((aligned (16)));
static unsigned int svbi[4] __attribute__ ((aligned (16)));
static float svf[4] __attribute__ ((aligned (16)));
static void check_arrays ()
{
unsigned int i;
for (i = 0; i < 16; ++i)
{
check (svuc[i] == i, "svuc");
check (svsc[i] == i - 8, "svsc");
check (svbc[i] == ((i % 2) ? 0xff : 0), "svbc");
}
for (i = 0; i < 8; ++i)
{
check (svus[i] == i, "svus");
check (svss[i] == i - 4, "svss");
check (svbs[i] == ((i % 2) ? 0xffff : 0), "svbs");
check (svp[i] == i, "svp");
}
for (i = 0; i < 4; ++i)
{
check (svui[i] == i, "svui");
check (svsi[i] == i - 2, "svsi");
check (svbi[i] == ((i % 2) ? 0xffffffff : 0), "svbi");
check (svf[i] == i * 1.0f, "svf");
}
}
static void test ()
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned char vuc = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
vector signed char vsc = {7,6,5,4,3,2,1,0,-1,-2,-3,-4,-5,-6,-7,-8};
vector bool char vbc = {255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0};
vector unsigned short vus = {7,6,5,4,3,2,1,0};
vector signed short vss = {3,2,1,0,-1,-2,-3,-4};
vector bool short vbs = {65535,0,65535,0,65535,0,65535,0};
vector pixel vp = {7,6,5,4,3,2,1,0};
vector unsigned int vui = {3,2,1,0};
vector signed int vsi = {1,0,-1,-2};
vector bool int vbi = {0xffffffff,0,0xffffffff,0};
vector float vf = {3.0,2.0,1.0,0.0};
#else
vector unsigned char vuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
vector bool char vbc = {0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255};
vector unsigned short vus = {0,1,2,3,4,5,6,7};
vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
vector bool short vbs = {0,65535,0,65535,0,65535,0,65535};
vector pixel vp = {0,1,2,3,4,5,6,7};
vector unsigned int vui = {0,1,2,3};
vector signed int vsi = {-2,-1,0,1};
vector bool int vbi = {0,0xffffffff,0,0xffffffff};
vector float vf = {0.0,1.0,2.0,3.0};
#endif
vec_stl (vuc, 0, (vector unsigned char *)svuc);
vec_stl (vsc, 0, (vector signed char *)svsc);
vec_stl (vbc, 0, (vector bool char *)svbc);
vec_stl (vus, 0, (vector unsigned short *)svus);
vec_stl (vss, 0, (vector signed short *)svss);
vec_stl (vbs, 0, (vector bool short *)svbs);
vec_stl (vp, 0, (vector pixel *)svp);
vec_stl (vui, 0, (vector unsigned int *)svui);
vec_stl (vsi, 0, (vector signed int *)svsi);
vec_stl (vbi, 0, (vector bool int *)svbi);
vec_stl (vf, 0, (vector float *)svf);
check_arrays ();
}

gcc/testsuite/gcc.dg/vmx/stl-vsx-be-order.c (deleted):
@@ -1,35 +0,0 @@
/* { dg-skip-if "" { powerpc*-*-darwin* } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
#include "harness.h"
static unsigned long long svul[2] __attribute__ ((aligned (16)));
static double svd[2] __attribute__ ((aligned (16)));
static void check_arrays ()
{
unsigned int i;
for (i = 0; i < 2; ++i)
{
check (svul[i] == i, "svul");
check (svd[i] == i * 1.0, "svd");
}
}
static void test ()
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned long long vul = {1,0};
vector double vd = {1.0,0.0};
#else
vector unsigned long long vul = {0,1};
vector double vd = {0.0,1.0};
#endif
vec_stl (vul, 0, (vector unsigned long long *)svul);
vec_stl (vd, 0, (vector double *)svd);
check_arrays ();
}

gcc/testsuite/gcc.dg/vmx/sum2s-be-order.c (deleted):
@@ -1,20 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static void test()
{
vector signed int vsia = {-10,1,2,3};
vector signed int vsib = {100,101,102,-103};
vector signed int vsir;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector signed int vsier = {91,0,107,0};
#else
vector signed int vsier = {0,92,0,-98};
#endif
vsir = vec_sum2s (vsia, vsib);
check (vec_all_eq (vsir, vsier), "vsir");
}

gcc/testsuite/gcc.dg/vmx/unpack-be-order.c (deleted):
@@ -1,89 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx -Wno-shift-overflow" } */
#include "harness.h"
#define BIG 4294967295
static void test()
{
/* Input vectors. */
vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
vector bool char vbc = {0,255,255,0,0,0,255,0,255,0,0,255,255,255,0,255};
vector pixel vp = {(0<<15) + (1<<10) + (2<<5) + 3,
(1<<15) + (4<<10) + (5<<5) + 6,
(0<<15) + (7<<10) + (8<<5) + 9,
(1<<15) + (10<<10) + (11<<5) + 12,
(1<<15) + (13<<10) + (14<<5) + 15,
(0<<15) + (16<<10) + (17<<5) + 18,
(1<<15) + (19<<10) + (20<<5) + 21,
(0<<15) + (22<<10) + (23<<5) + 24};
vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
vector bool short vbs = {0,65535,65535,0,0,0,65535,0};
/* Result vectors. */
vector signed short vsch, vscl;
vector bool short vbsh, vbsl;
vector unsigned int vuih, vuil;
vector signed int vsih, vsil;
vector bool int vbih, vbil;
/* Expected result vectors. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector signed short vschr = {0,1,2,3,4,5,6,7};
vector signed short vsclr = {-8,-7,-6,-5,-4,-3,-2,-1};
vector bool short vbshr = {65535,0,0,65535,65535,65535,0,65535};
vector bool short vbslr = {0,65535,65535,0,0,0,65535,0};
vector unsigned int vuihr = {(65535<<24) + (13<<16) + (14<<8) + 15,
(0<<24) + (16<<16) + (17<<8) + 18,
(65535<<24) + (19<<16) + (20<<8) + 21,
(0<<24) + (22<<16) + (23<<8) + 24};
vector unsigned int vuilr = {(0<<24) + (1<<16) + (2<<8) + 3,
(65535<<24) + (4<<16) + (5<<8) + 6,
(0<<24) + (7<<16) + (8<<8) + 9,
(65535<<24) + (10<<16) + (11<<8) + 12};
vector signed int vsihr = {0,1,2,3};
vector signed int vsilr = {-4,-3,-2,-1};
vector bool int vbihr = {0,0,BIG,0};
vector bool int vbilr = {0,BIG,BIG,0};
#else
vector signed short vschr = {-8,-7,-6,-5,-4,-3,-2,-1};
vector signed short vsclr = {0,1,2,3,4,5,6,7};
vector bool short vbshr = {0,65535,65535,0,0,0,65535,0};
vector bool short vbslr = {65535,0,0,65535,65535,65535,0,65535};
vector unsigned int vuihr = {(0<<24) + (1<<16) + (2<<8) + 3,
(65535<<24) + (4<<16) + (5<<8) + 6,
(0<<24) + (7<<16) + (8<<8) + 9,
(65535<<24) + (10<<16) + (11<<8) + 12};
vector unsigned int vuilr = {(65535<<24) + (13<<16) + (14<<8) + 15,
(0<<24) + (16<<16) + (17<<8) + 18,
(65535<<24) + (19<<16) + (20<<8) + 21,
(0<<24) + (22<<16) + (23<<8) + 24};
vector signed int vsihr = {-4,-3,-2,-1};
vector signed int vsilr = {0,1,2,3};
vector bool int vbihr = {0,BIG,BIG,0};
vector bool int vbilr = {0,0,BIG,0};
#endif
vsch = vec_unpackh (vsc);
vscl = vec_unpackl (vsc);
vbsh = vec_unpackh (vbc);
vbsl = vec_unpackl (vbc);
vuih = vec_unpackh (vp);
vuil = vec_unpackl (vp);
vsih = vec_unpackh (vss);
vsil = vec_unpackl (vss);
vbih = vec_unpackh (vbs);
vbil = vec_unpackl (vbs);
check (vec_all_eq (vsch, vschr), "vsch");
check (vec_all_eq (vscl, vsclr), "vscl");
check (vec_all_eq (vbsh, vbshr), "vbsh");
check (vec_all_eq (vbsl, vbslr), "vbsl");
check (vec_all_eq (vuih, vuihr), "vuih");
check (vec_all_eq (vuil, vuilr), "vuil");
check (vec_all_eq (vsih, vsihr), "vsih");
check (vec_all_eq (vsil, vsilr), "vsil");
check (vec_all_eq (vbih, vbihr), "vbih");
check (vec_all_eq (vbil, vbilr), "vbil");
}

gcc/testsuite/gcc.dg/vmx/vsums-be-order.c (deleted):
@@ -1,21 +0,0 @@
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static void test()
{
vector signed int va = {-7,11,-13,17};
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector signed int vb = {128,0,0,0};
vector signed int evd = {136,0,0,0};
#else
vector signed int vb = {0,0,0,128};
vector signed int evd = {0,0,0,136};
#endif
vector signed int vd = vec_sums (va, vb);
check (vec_all_eq (vd, evd), "sums");
}

gcc/testsuite/gcc.target/powerpc/vec-setup-be-double.c (deleted):
@@ -1,12 +0,0 @@
/* { dg-do run { target { powerpc*-*-linux* } } } */
/* { dg-require-effective-target vsx_hw } */
/* { dg-options "-O2 -mvsx" } */
/* Test various ways of creating vectors with 2 double words and accessing the
elements. This test uses the double datatype.
This test explicitly tests -maltivec=be to make sure things are correct. */
#define DO_DOUBLE
#include "vec-setup.h"

gcc/testsuite/gcc.target/powerpc/vec-setup-be-long.c (deleted):
@@ -1,15 +0,0 @@
/* Per PR78303, we are deprecating usage of -maltivec=be on little endian,
so XFAIL this test until support is actually removed. */
/* { dg-do run { target { powerpc64le*-*-linux* } } } */
/* { dg-xfail-run-if "PR78303 and PR84534" { powerpc64le*-*-linux* } } */
/* { dg-require-effective-target vsx_hw } */
/* Disable warnings to squelch deprecation message about -maltivec=be. */
/* { dg-options "-w -O2 -mvsx -maltivec=be" } */
/* Test various ways of creating vectors with 2 double words and accessing the
elements. This test uses the long (on 64-bit systems) or long long datatype
(on 32-bit systems).
This test explicitly tests -maltivec=be to make sure things are correct. */
#include "vec-setup.h"

gcc/testsuite/gcc.target/powerpc/vec-setup.h:
@@ -10,8 +10,7 @@
The endian support is:
big endian
little endian with little endian element ordering
little endian with big endian element ordering. */
little endian. */
#ifdef DEBUG
#include <stdio.h>
@@ -63,21 +62,12 @@ static int errors = 0;
#define FMT "lld"
#endif
/* Macros to order the left/right values correctly. Note, -maltivec=be does
not change the order for static initializations, so we have to handle it
specially. */
/* Macros to order the left/right values correctly. */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define INIT_ORDER(A, B) (TYPE) A, (TYPE) B
#define ELEMENT_ORDER(A, B) (TYPE) A, (TYPE) B
#define ENDIAN "-mbig"
#elif __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
#define NO_ARRAY
#define INIT_ORDER(A, B) (TYPE) B, (TYPE) A
#define ELEMENT_ORDER(A, B) (TYPE) A, (TYPE) B
#define ENDIAN "-mlittle -maltivec=be"
#else
#define INIT_ORDER(A, B) (TYPE) B, (TYPE) A
#define ELEMENT_ORDER(A, B) (TYPE) B, (TYPE) A
@@ -201,7 +191,6 @@ concat_extract_nn (vector TYPE a, vector TYPE b, size_t i, size_t j)
return (vector TYPE) { vec_extract (a, i), vec_extract (b, j) };
}
#ifndef NO_ARRAY
__attribute__((__noinline__))
static vector TYPE
array_0 (vector TYPE v, TYPE a)
@@ -236,7 +225,6 @@ array_01b (TYPE a, TYPE b)
v[1] = b;
return v;
}
#endif
int
main (void)
@@ -245,9 +233,7 @@ main (void)
vector TYPE b = (vector TYPE) { THREE, FOUR };
size_t i, j;
#ifndef NO_ARRAY
vector TYPE z = (vector TYPE) { ZERO, ZERO };
#endif
DEBUG2 ("Endian: %s, type: %s\n", ENDIAN, STYPE);
DEBUG0 ("\nStatic/global initialization\n");
@@ -268,7 +254,6 @@
vector_check (a, ELEMENT_ORDER (1, 2));
vector_check (b, ELEMENT_ORDER (3, 4));
#ifndef NO_ARRAY
DEBUG0 ("\nTesting array syntax\n");
vector_check (array_0 (a, FIVE), ELEMENT_ORDER (5, 2));
vector_check (array_1 (b, SIX), ELEMENT_ORDER (3, 6));
@@ -279,9 +264,6 @@
vector_check (array_1 (b, six), ELEMENT_ORDER (3, 6));
vector_check (array_01 (z, five, six), ELEMENT_ORDER (5, 6));
vector_check (array_01b (five, six), ELEMENT_ORDER (5, 6));
#else
DEBUG0 ("\nSkipping array syntax on -maltivec=be\n");
#endif
DEBUG0 ("\nTesting concat and extract\n");
vector_check (concat_extract_00 (a, b), INIT_ORDER (1, 3));