diff --git a/ChangeLog b/ChangeLog
index f640bb74049..7d4241766c2 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2016-05-20  Bill Seurer
+
+        * config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin): Fix
+        code formatting in ALTIVEC_BUILTIN_VEC_ADDE section.
+
 2016-05-20  Bill Seurer
 
         * config/rs6000/rs6000-builtin.def (vec_addec): Change vec_addec to a
diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c
index d22f6bd2095..e09a86e8ada 100644
--- a/gcc/config/rs6000/rs6000-c.c
+++ b/gcc/config/rs6000/rs6000-c.c
@@ -4622,37 +4622,41 @@ assignment for unaligned loads and stores");
       /* All 3 arguments must be vectors of (signed or unsigned) (int or
         __int128) and the types must match.  */
       if ((arg0_type != arg1_type) || (arg1_type != arg2_type))
-      goto bad;
+       goto bad;
       if (TREE_CODE (arg0_type) != VECTOR_TYPE)
-      goto bad;
+       goto bad;
 
       switch (TYPE_MODE (TREE_TYPE (arg0_type)))
        {
-         /* For {un}signed ints,
-            vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
-                                         vec_and (carryv, 0x1)). */
+         /* For {un}signed ints,
+            vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
+                                                  vec_and (carryv, 0x1)).  */
          case SImode:
            {
-             vec<tree, va_gc> *params = make_tree_vector();
+             vec<tree, va_gc> *params = make_tree_vector ();
              vec_safe_push (params, arg0);
              vec_safe_push (params, arg1);
-             tree call = altivec_resolve_overloaded_builtin
-               (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD], params);
-             tree const1 = build_vector_from_val (arg0_type,
-               build_int_cstu(TREE_TYPE (arg0_type), 1));
-             tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR,
-               arg0_type, arg2, const1);
-             params = make_tree_vector();
+             tree add_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
+             tree call = altivec_resolve_overloaded_builtin (loc, add_builtin,
+                                                             params);
+             tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
+             tree ones_vector = build_vector_from_val (arg0_type, const1);
+             tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
+                                              arg2, ones_vector);
+             params = make_tree_vector ();
              vec_safe_push (params, call);
              vec_safe_push (params, and_expr);
-             return altivec_resolve_overloaded_builtin
-               (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD], params);
+             return altivec_resolve_overloaded_builtin (loc, add_builtin,
+                                                        params);
            }
          /* For {un}signed __int128s use the vaddeuqm instruction
            directly.  */
          case TImode:
-           return altivec_resolve_overloaded_builtin
-             (loc, rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDEUQM], arglist);
+           {
+             tree adde_bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDEUQM];
+             return altivec_resolve_overloaded_builtin (loc, adde_bii,
+                                                        arglist);
+           }
 
          /* Types other than {un}signed int and {un}signed __int128
            are errors.  */
@@ -4839,9 +4843,9 @@ assignment for unaligned loads and stores");
       arg1_type = TREE_TYPE (arg1);
 
       if (TREE_CODE (arg1_type) != VECTOR_TYPE)
-      goto bad;
+       goto bad;
       if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
-      goto bad;
+       goto bad;
 
       /* If we are targeting little-endian, but -maltivec=be has been
         specified to override the element order, adjust the element
@@ -4942,9 +4946,9 @@ assignment for unaligned loads and stores");
       arg2 = (*arglist)[2];
 
       if (TREE_CODE (arg1_type) != VECTOR_TYPE)
-      goto bad;
+       goto bad;
      if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
-      goto bad;
+       goto bad;
 
       /* If we are targeting little-endian, but -maltivec=be has been
         specified to override the element order, adjust the element
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 0391ce5e181..cdb5fea6d48 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,10 @@
+2016-05-20  Bill Seurer
+
+        * gcc.target/powerpc/vec-adde.c: Change dejagnu options, fix code
+        formatting.
+        * gcc.target/powerpc/vec-adde-int128.c: Change dejagnu options, fix code
+        formatting.
+
 2016-05-20  David Malcolm
 
         * jit.dg/all-non-failing-tests.h: Add
diff --git a/gcc/testsuite/gcc.target/powerpc/vec-adde-int128.c b/gcc/testsuite/gcc.target/powerpc/vec-adde-int128.c
index f78622f11f2..4f951a993db 100644
--- a/gcc/testsuite/gcc.target/powerpc/vec-adde-int128.c
+++ b/gcc/testsuite/gcc.target/powerpc/vec-adde-int128.c
@@ -1,7 +1,9 @@
-/* { dg-do run { target { powerpc64-*-* } } } */
+/* { dg-do run { target { powerpc64*-*-* } } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
 /* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
 /* { dg-options "-mcpu=power8 -O3" } */
+
 /* Test that the vec_adde builtin works as expected.  */
 
 #include "altivec.h"
@@ -20,38 +22,43 @@ STYPE expected_##NAMESUFFIX[N]; \
 \
 __attribute__((noinline)) void vector_tests_##NAMESUFFIX () \
 { \
-  int i; \
   vector STYPE v1, v2, v3, tmp; \
-  for (i = 0; i < N; i+=16/sizeof(STYPE)) { \
-    /* result=addend1+addend2+(carry & 0x1) */ \
-    v1 = (vector STYPE) { addend1_##NAMESUFFIX[i] }; \
-    v2 = (vector STYPE) { addend2_##NAMESUFFIX[i] }; \
-    v3 = (vector STYPE) { carry_##NAMESUFFIX[i] }; \
+  int i; \
+  for (i = 0; i < N; i+=16/sizeof (STYPE)) \
+    { \
+      /* result=addend1+addend2+(carry & 0x1).  */ \
+      v1 = (vector STYPE) { addend1_##NAMESUFFIX[i] }; \
+      v2 = (vector STYPE) { addend2_##NAMESUFFIX[i] }; \
+      v3 = (vector STYPE) { carry_##NAMESUFFIX[i] }; \
 \
-    tmp = vec_adde (v1, v2, v3); \
-    result_##NAMESUFFIX[i] = tmp[0]; \
-  } \
+      tmp = vec_adde (v1, v2, v3); \
+      result_##NAMESUFFIX[i] = tmp[0]; \
+    } \
 } \
 \
 __attribute__((noinline)) void init_##NAMESUFFIX () \
 { \
   int i; \
-  for (i = 0; i < N; ++i) { \
-    result_##NAMESUFFIX[i] = 0; \
-    addend1_##NAMESUFFIX[i] = 1; \
-    addend2_##NAMESUFFIX[i] = 2; \
-    carry_##NAMESUFFIX[i] = (i%12); \
-    expected_##NAMESUFFIX[i] = addend1_##NAMESUFFIX[i] + \
-    addend2_##NAMESUFFIX[i] + (carry_##NAMESUFFIX[i] & 0x1); \
-  } \
+  for (i = 0; i < N; ++i) \
+    { \
+      result_##NAMESUFFIX[i] = 0; \
+      addend1_##NAMESUFFIX[i] = 1; \
+      addend2_##NAMESUFFIX[i] = 2; \
+      carry_##NAMESUFFIX[i] = (i%12); \
+      expected_##NAMESUFFIX[i] = addend1_##NAMESUFFIX[i] + \
+                                 addend2_##NAMESUFFIX[i] + \
+                                 (carry_##NAMESUFFIX[i] & 0x1); \
+    } \
 } \
 \
 __attribute__((noinline)) void verify_results_##NAMESUFFIX () \
 { \
-  for (int i = 0; i < N; ++i) { \
-    if (result_##NAMESUFFIX[i] != expected_##NAMESUFFIX[i]) \
-      abort(); \
-  } \
+  int i; \
+  for (i = 0; i < N; ++i) \
+    { \
+      if (result_##NAMESUFFIX[i] != expected_##NAMESUFFIX[i]) \
+        abort (); \
+    } \
 }
@@ -63,13 +70,13 @@ __attribute__((noinline)) void verify_results_##NAMESUFFIX () \
 }
 
-define_test_functions(signed __int128, si128);
-define_test_functions(unsigned __int128, ui128);
+define_test_functions (signed __int128, si128);
+define_test_functions (unsigned __int128, ui128);
 
 int main ()
 {
-  execute_test_functions(signed __int128, si128);
-  execute_test_functions(unsigned __int128, ui128);
+  execute_test_functions (signed __int128, si128);
+  execute_test_functions (unsigned __int128, ui128);
 
   return 0;
 }
diff --git a/gcc/testsuite/gcc.target/powerpc/vec-adde.c b/gcc/testsuite/gcc.target/powerpc/vec-adde.c
index b7d5b44b7a7..a235a1c987d 100644
--- a/gcc/testsuite/gcc.target/powerpc/vec-adde.c
+++ b/gcc/testsuite/gcc.target/powerpc/vec-adde.c
@@ -1,6 +1,6 @@
-/* { dg-do run { target { powerpc64-*-* } } } */
-/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
-/* { dg-options "-mcpu=power8 -O3" } */
+/* { dg-do run { target { powerpc64*-*-* } } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-mvsx -O3" } */
 
 /* Test that the vec_adde builtin works as expected.  */
 
@@ -20,38 +20,43 @@ STYPE expected_##NAMESUFFIX[N]; \
 \
 __attribute__((noinline)) void vector_tests_##NAMESUFFIX () \
 { \
-  int i; \
   vector STYPE v1, v2, v3, tmp; \
-  for (i = 0; i < N; i+=16/sizeof(STYPE)) { \
-    /* result=addend1+addend2+(carry & 0x1) */ \
-    v1 = vec_vsx_ld (0, &addend1_##NAMESUFFIX[i]); \
-    v2 = vec_vsx_ld (0, &addend2_##NAMESUFFIX[i]); \
-    v3 = vec_vsx_ld (0, &carry_##NAMESUFFIX[i]); \
+  int i; \
+  for (i = 0; i < N; i+=16/sizeof (STYPE)) \
+    { \
+      /* result=addend1+addend2+(carry & 0x1).  */ \
+      v1 = vec_vsx_ld (0, &addend1_##NAMESUFFIX[i]); \
+      v2 = vec_vsx_ld (0, &addend2_##NAMESUFFIX[i]); \
+      v3 = vec_vsx_ld (0, &carry_##NAMESUFFIX[i]); \
 \
-    tmp = vec_adde (v1, v2, v3); \
-    vec_vsx_st (tmp, 0, &result_##NAMESUFFIX[i]); \
-  } \
+      tmp = vec_adde (v1, v2, v3); \
+      vec_vsx_st (tmp, 0, &result_##NAMESUFFIX[i]); \
+    } \
 } \
 \
 __attribute__((noinline)) void init_##NAMESUFFIX () \
 { \
   int i; \
-  for (i = 0; i < N; ++i) { \
-    result_##NAMESUFFIX[i] = 0; \
-    addend1_##NAMESUFFIX[i] = 1; \
-    addend2_##NAMESUFFIX[i] = 2; \
-    carry_##NAMESUFFIX[i] = (i%12); \
-    expected_##NAMESUFFIX[i] = addend1_##NAMESUFFIX[i] + \
-    addend2_##NAMESUFFIX[i] + (carry_##NAMESUFFIX[i] & 0x1); \
-  } \
+  for (i = 0; i < N; ++i) \
+    { \
+      result_##NAMESUFFIX[i] = 0; \
+      addend1_##NAMESUFFIX[i] = 1; \
+      addend2_##NAMESUFFIX[i] = 2; \
+      carry_##NAMESUFFIX[i] = (i%12); \
+      expected_##NAMESUFFIX[i] = addend1_##NAMESUFFIX[i] + \
+                                 addend2_##NAMESUFFIX[i] + \
+                                 (carry_##NAMESUFFIX[i] & 0x1); \
+    } \
 } \
 \
 __attribute__((noinline)) void verify_results_##NAMESUFFIX () \
 { \
-  for (int i = 0; i < N; ++i) { \
-    if (result_##NAMESUFFIX[i] != expected_##NAMESUFFIX[i]) \
-      abort(); \
-  } \
+  int i; \
+  for (i = 0; i < N; ++i) \
+    { \
+      if (result_##NAMESUFFIX[i] != expected_##NAMESUFFIX[i]) \
+        abort (); \
+    } \
 }
@@ -63,13 +68,13 @@ __attribute__((noinline)) void verify_results_##NAMESUFFIX () \
 }
 
-define_test_functions(signed int, si);
-define_test_functions(unsigned int, ui);
+define_test_functions (signed int, si);
+define_test_functions (unsigned int, ui);
 
 int main ()
 {
-  execute_test_functions(signed int, si);
-  execute_test_functions(unsigned int, ui);
+  execute_test_functions (signed int, si);
+  execute_test_functions (unsigned int, ui);
 
   return 0;
 }
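
Note (not part of the patch): the SImode case in rs6000-c.c above relies on the identity stated in its comment, vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb), vec_and (carryv, ones)), where only bit 0 of each carry element contributes. The snippet below is a minimal standalone sketch of that identity for vector unsigned int; it assumes a PowerPC target built with -maltivec or -mvsx and a GCC new enough to accept the integer form of vec_adde that this patch series provides.

/* Minimal sketch, not part of the patch: per element,
   vec_adde (va, vb, carry) should equal
   vec_add (vec_add (va, vb), vec_and (carry, ones)).  */
#include <altivec.h>
#include <stdlib.h>

int
main (void)
{
  vector unsigned int va    = { 1, 2, 3, 4 };
  vector unsigned int vb    = { 5, 6, 7, 8 };
  vector unsigned int carry = { 0, 1, 2, 3 };   /* Only bit 0 is used.  */
  vector unsigned int ones  = { 1, 1, 1, 1 };
  int i;

  vector unsigned int lhs = vec_adde (va, vb, carry);
  vector unsigned int rhs = vec_add (vec_add (va, vb), vec_and (carry, ones));

  for (i = 0; i < 4; i++)
    if (lhs[i] != rhs[i])
      abort ();

  return 0;
}

This is the same per-element check that the init/verify helpers in the two testsuite files perform with expected = addend1 + addend2 + (carry & 0x1).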