diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f322b66e2e1..5f0da7be9d6 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,24 @@
+2006-12-01  Trevor Smigiel
+
+	* config/spu/spu.c (spu_immediate): Remove trailing comma.
+	(reloc_diagnostic): Call warning when -mwarn-reloc is specified.
+	* config/spu/spu.md: (zero_extendhisi2): Expand instead of split for
+	better optimization.
+	(floatv4siv4sf2): New.
+	(fix_truncv4sfv4si2): New.
+	(floatunsv4siv4sf2): New.
+	(fixuns_truncv4sfv4si2): New.
+	(addv16qi3): New.
+	(subv16qi3): New.
+	(negv16qi2): New.
+	(mulv8hi3): New.
+	(mulsi3): Remove.
+	(mul<mode>3): New.
+	(_mulv4si3): New.
+	(cmp<mode>): Don't accept constant arguments for DI, TI and SF.
+	* config/spu/spu_internals.h: Handle overloaded intrinsics in C++ with
+	spu_resolve_overloaded_builtin instead of static inline functions.
+
 2006-12-01  Eric Botcazou
 
 	* fold-const.c (fold_binary) : Use the precision of the
diff --git a/gcc/config/spu/spu.c b/gcc/config/spu/spu.c
index e62390894ca..c3916c7ce97 100644
--- a/gcc/config/spu/spu.c
+++ b/gcc/config/spu/spu.c
@@ -141,7 +141,7 @@ enum spu_immediate {
   SPU_ORI,
   SPU_ORHI,
   SPU_ORBI,
-  SPU_IOHL,
+  SPU_IOHL
 };
 
 static enum spu_immediate which_immediate_load (HOST_WIDE_INT val);
@@ -3670,10 +3670,10 @@ reloc_diagnostic (rtx x)
   else
     msg = "creating run-time relocation";
 
-  if (TARGET_ERROR_RELOC)  /** default : error reloc **/
-    error (msg, loc_decl, decl);
-  else
+  if (TARGET_WARN_RELOC)
     warning (0, msg, loc_decl, decl);
+  else
+    error (msg, loc_decl, decl);
 }
 
 /* Hook into assemble_integer so we can generate an error for run-time
diff --git a/gcc/config/spu/spu.md b/gcc/config/spu/spu.md
index ef47f81e6f6..1cdc20af2eb 100644
--- a/gcc/config/spu/spu.md
+++ b/gcc/config/spu/spu.md
@@ -425,20 +425,19 @@
   ""
   "andi\t%0,%1,0x00ff")
 
-(define_insn_and_split "zero_extendhisi2"
+(define_expand "zero_extendhisi2"
   [(set (match_operand:SI 0 "spu_reg_operand" "=r")
        (zero_extend:SI (match_operand:HI 1 "spu_reg_operand" "r")))
    (clobber (match_scratch:SI 2 "=&r"))]
   ""
-  "#"
-  "reload_completed"
-  [(set (match_dup:SI 2)
-       (const_int 65535))
-   (set (match_dup:SI 0)
-       (and:SI (match_dup:SI 3)
-               (match_dup:SI 2)))]
-  "operands[3] = gen_rtx_REG (SImode, REGNO (operands[1]));")
-
+  {
+    rtx mask = gen_reg_rtx (SImode);
+    rtx op1 = simplify_gen_subreg (SImode, operands[1], HImode, 0);
+    emit_move_insn (mask, GEN_INT (0xffff));
+    emit_insn (gen_andsi3(operands[0], op1, mask));
+    DONE;
+  })
+
 (define_insn "zero_extendsidi2"
   [(set (match_operand:DI 0 "spu_reg_operand" "=r")
        (zero_extend:DI (match_operand:SI 1 "spu_reg_operand" "r")))]
@@ -522,6 +521,13 @@
   "csflt\t%0,%1,0"
   [(set_attr "type" "fp7")])
 
+(define_insn "floatv4siv4sf2"
+  [(set (match_operand:V4SF 0 "spu_reg_operand" "=r")
+       (float:V4SF (match_operand:V4SI 1 "spu_reg_operand" "r")))]
+  ""
+  "csflt\t%0,%1,0"
+  [(set_attr "type" "fp7")])
+
 (define_insn "fix_truncsfsi2"
   [(set (match_operand:SI 0 "spu_reg_operand" "=r")
        (fix:SI (match_operand:SF 1 "spu_reg_operand" "r")))]
@@ -529,6 +535,13 @@
   "cflts\t%0,%1,0"
   [(set_attr "type" "fp7")])
 
+(define_insn "fix_truncv4sfv4si2"
+  [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
+       (fix:V4SI (match_operand:V4SF 1 "spu_reg_operand" "r")))]
+  ""
+  "cflts\t%0,%1,0"
+  [(set_attr "type" "fp7")])
+
 (define_insn "floatunssisf2"
   [(set (match_operand:SF 0 "spu_reg_operand" "=r")
       (unsigned_float:SF (match_operand:SI 1 "spu_reg_operand" "r")))]
@@ -536,6 +549,13 @@
   "cuflt\t%0,%1,0"
   [(set_attr "type" "fp7")])
 
+(define_insn "floatunsv4siv4sf2"
+  [(set (match_operand:V4SF 0 "spu_reg_operand" "=r")
+       (unsigned_float:V4SF (match_operand:V4SI 1 "spu_reg_operand" "r")))]
+  ""
+  "cuflt\t%0,%1,0"
+  [(set_attr "type" "fp7")])
+
 (define_insn "fixuns_truncsfsi2"
   [(set (match_operand:SI 0 "spu_reg_operand" "=r")
       (unsigned_fix:SI (match_operand:SF 1 "spu_reg_operand" "r")))]
@@ -543,6 +563,13 @@
   "cfltu\t%0,%1,0"
   [(set_attr "type" "fp7")])
 
+(define_insn "fixuns_truncv4sfv4si2"
+  [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
+       (unsigned_fix:V4SI (match_operand:V4SF 1 "spu_reg_operand" "r")))]
+  ""
+  "cfltu\t%0,%1,0"
+  [(set_attr "type" "fp7")])
+
 (define_insn "extendsfdf2"
   [(set (match_operand:DF 0 "spu_reg_operand" "=r")
       (float_extend:DF (match_operand:SF 1 "spu_reg_operand" "r")))]
@@ -627,6 +654,28 @@
 
 ;; add
 
+(define_expand "addv16qi3"
+  [(set (match_operand:V16QI 0 "spu_reg_operand" "=r")
+       (plus:V16QI (match_operand:V16QI 1 "spu_reg_operand" "r")
+                   (match_operand:V16QI 2 "spu_reg_operand" "r")))]
+  ""
+  "{
+    rtx res_short = simplify_gen_subreg (V8HImode, operands[0], V16QImode, 0);
+    rtx lhs_short = simplify_gen_subreg (V8HImode, operands[1], V16QImode, 0);
+    rtx rhs_short = simplify_gen_subreg (V8HImode, operands[2], V16QImode, 0);
+    rtx rhs_and = gen_reg_rtx (V8HImode);
+    rtx hi_char = gen_reg_rtx (V8HImode);
+    rtx lo_char = gen_reg_rtx (V8HImode);
+    rtx mask = gen_reg_rtx (V8HImode);
+
+    emit_move_insn (mask, spu_const (V8HImode, 0x00ff));
+    emit_insn (gen_andv8hi3 (rhs_and, rhs_short, spu_const (V8HImode, 0xff00)));
+    emit_insn (gen_addv8hi3 (hi_char, lhs_short, rhs_and));
+    emit_insn (gen_addv8hi3 (lo_char, lhs_short, rhs_short));
+    emit_insn (gen_selb (res_short, hi_char, lo_char, mask));
+    DONE;
+  }")
+
 (define_insn "add<mode>3"
   [(set (match_operand:VHSI 0 "spu_reg_operand" "=r,r")
       (plus:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r,r")
@@ -728,6 +777,28 @@
 
 ;; sub
 
+(define_expand "subv16qi3"
+  [(set (match_operand:V16QI 0 "spu_reg_operand" "=r")
+       (minus:V16QI (match_operand:V16QI 1 "spu_reg_operand" "r")
+                    (match_operand:V16QI 2 "spu_reg_operand" "r")))]
+  ""
+  "{
+    rtx res_short = simplify_gen_subreg (V8HImode, operands[0], V16QImode, 0);
+    rtx lhs_short = simplify_gen_subreg (V8HImode, operands[1], V16QImode, 0);
+    rtx rhs_short = simplify_gen_subreg (V8HImode, operands[2], V16QImode, 0);
+    rtx rhs_and = gen_reg_rtx (V8HImode);
+    rtx hi_char = gen_reg_rtx (V8HImode);
+    rtx lo_char = gen_reg_rtx (V8HImode);
+    rtx mask = gen_reg_rtx (V8HImode);
+
+    emit_move_insn (mask, spu_const (V8HImode, 0x00ff));
+    emit_insn (gen_andv8hi3 (rhs_and, rhs_short, spu_const (V8HImode, 0xff00)));
+    emit_insn (gen_subv8hi3 (hi_char, lhs_short, rhs_and));
+    emit_insn (gen_subv8hi3 (lo_char, lhs_short, rhs_short));
+    emit_insn (gen_selb (res_short, hi_char, lo_char, mask));
+    DONE;
+  }")
+
 (define_insn "sub<mode>3"
   [(set (match_operand:VHSI 0 "spu_reg_operand" "=r,r")
       (minus:VHSI (match_operand:VHSI 1 "spu_arith_operand" "r,B")
@@ -825,6 +896,17 @@
 
 ;; neg
 
+(define_expand "negv16qi2"
+  [(set (match_operand:V16QI 0 "spu_reg_operand" "=r")
+       (neg:V16QI (match_operand:V16QI 1 "spu_reg_operand" "r")))]
+  ""
+  "{
+    rtx zero = gen_reg_rtx (V16QImode);
+    emit_move_insn (zero, CONST0_RTX (V16QImode));
+    emit_insn (gen_subv16qi3 (operands[0], zero, operands[1]));
+    DONE;
+  }")
+
 (define_insn "neg<mode>2"
   [(set (match_operand:VHSI 0 "spu_reg_operand" "=r")
       (neg:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r")))]
@@ -935,27 +1017,47 @@
   mpyi\t%0,%1,%2"
   [(set_attr "type" "fp7")])
 
-(define_expand "mulsi3"
+(define_expand "mulv8hi3"
+  [(set (match_operand:V8HI 0 "spu_reg_operand" "")
+       (mult:V8HI (match_operand:V8HI 1 "spu_reg_operand" "")
+                  (match_operand:V8HI 2 "spu_reg_operand" "")))]
+  ""
+  "{
+    rtx result = simplify_gen_subreg (V4SImode, operands[0], V8HImode, 0);
+    rtx low = gen_reg_rtx (V4SImode);
+    rtx high = gen_reg_rtx (V4SImode);
+    rtx shift = gen_reg_rtx (V4SImode);
+    rtx mask = gen_reg_rtx (V4SImode);
+
+    emit_move_insn (mask, spu_const (V4SImode, 0x0000ffff));
+    emit_insn (gen_spu_mpyhh (high, operands[1], operands[2]));
+    emit_insn (gen_spu_mpy (low, operands[1], operands[2]));
+    emit_insn (gen_ashlv4si3 (shift, high, spu_const(V4SImode, 16)));
+    emit_insn (gen_selb (result, shift, low, mask));
+    DONE;
+  }")
+
+(define_expand "mul<mode>3"
   [(parallel
-    [(set (match_operand:SI 0 "spu_reg_operand" "")
-         (mult:SI (match_operand:SI 1 "spu_reg_operand" "")
-                  (match_operand:SI 2 "spu_reg_operand" "")))
-     (clobber (match_dup:SI 3))
-     (clobber (match_dup:SI 4))
-     (clobber (match_dup:SI 5))
-     (clobber (match_dup:SI 6))])]
+    [(set (match_operand:VSI 0 "spu_reg_operand" "")
+         (mult:VSI (match_operand:VSI 1 "spu_reg_operand" "")
+                   (match_operand:VSI 2 "spu_reg_operand" "")))
+     (clobber (match_dup:VSI 3))
+     (clobber (match_dup:VSI 4))
+     (clobber (match_dup:VSI 5))
+     (clobber (match_dup:VSI 6))])]
   ""
   {
-    operands[3] = gen_reg_rtx(SImode);
-    operands[4] = gen_reg_rtx(SImode);
-    operands[5] = gen_reg_rtx(SImode);
-    operands[6] = gen_reg_rtx(SImode);
+    operands[3] = gen_reg_rtx(<MODE>mode);
+    operands[4] = gen_reg_rtx(<MODE>mode);
+    operands[5] = gen_reg_rtx(<MODE>mode);
+    operands[6] = gen_reg_rtx(<MODE>mode);
  })
 
 (define_insn_and_split "_mulsi3"
   [(set (match_operand:SI 0 "spu_reg_operand" "=r")
       (mult:SI (match_operand:SI 1 "spu_reg_operand" "r")
-               (match_operand:SI 2 "spu_nonmem_operand" "ri")))
+               (match_operand:SI 2 "spu_arith_operand" "rK")))
    (clobber (match_operand:SI 3 "spu_reg_operand" "=&r"))
    (clobber (match_operand:SI 4 "spu_reg_operand" "=&r"))
    (clobber (match_operand:SI 5 "spu_reg_operand" "=&r"))
@@ -1000,6 +1102,37 @@
     DONE;
   })
 
+(define_insn_and_split "_mulv4si3"
+  [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
+       (mult:V4SI (match_operand:V4SI 1 "spu_reg_operand" "r")
+                  (match_operand:V4SI 2 "spu_reg_operand" "r")))
+   (clobber (match_operand:V4SI 3 "spu_reg_operand" "=&r"))
+   (clobber (match_operand:V4SI 4 "spu_reg_operand" "=&r"))
+   (clobber (match_operand:V4SI 5 "spu_reg_operand" "=&r"))
+   (clobber (match_operand:V4SI 6 "spu_reg_operand" "=&r"))]
+  ""
+  "#"
+  ""
+  [(set (match_dup:V4SI 0)
+       (mult:V4SI (match_dup:V4SI 1)
+                  (match_dup:V4SI 2)))]
+  {
+    HOST_WIDE_INT val = 0;
+    rtx a = operands[3];
+    rtx b = operands[4];
+    rtx c = operands[5];
+    rtx d = operands[6];
+    rtx op1 = simplify_gen_subreg (V8HImode, operands[1], V4SImode, 0);
+    rtx op2 = simplify_gen_subreg (V8HImode, operands[2], V4SImode, 0);
+    rtx op3 = simplify_gen_subreg (V8HImode, operands[3], V4SImode, 0);
+    emit_insn(gen_spu_mpyh(a, op1, op2));
+    emit_insn(gen_spu_mpyh(b, op2, op1));
+    emit_insn(gen_spu_mpyu(c, op1, op2));
+    emit_insn(gen_addv4si3(d, a, b));
+    emit_insn(gen_addv4si3(operands[0], d, c));
+    DONE;
+  })
+
 (define_insn "mulhisi3"
   [(set (match_operand:SI 0 "spu_reg_operand" "=r")
       (mult:SI (sign_extend:SI (match_operand:HI 1 "spu_reg_operand" "r"))
@@ -2532,8 +2665,19 @@ selb\t%0,%4,%0,%3"
 
 (define_expand "cmp<mode>"
   [(set (cc0)
-       (compare (match_operand:VINT 0 "spu_reg_operand" "")
-                (match_operand:VINT 1 "spu_nonmem_operand" "")))]
+       (compare (match_operand:VQHSI 0 "spu_reg_operand" "")
+                (match_operand:VQHSI 1 "spu_nonmem_operand" "")))]
   ""
   {
    spu_compare_op0 = operands[0];
   spu_compare_op1 = operands[1];
   DONE;
  })
 
+(define_expand "cmp<mode>"
+  [(set (cc0)
+       (compare (match_operand:DTI 0 "spu_reg_operand" "")
+                (match_operand:DTI 1 "spu_reg_operand" "")))]
   ""
   {
    spu_compare_op0 = operands[0];
@@ -2544,7 +2688,7 @@ selb\t%0,%4,%0,%3"
 (define_expand "cmp<mode>"
   [(set (cc0)
        (compare (match_operand:VSF 0 "spu_reg_operand" "")
-                (match_operand:VSF 1 "spu_nonmem_operand" "")))]
+                (match_operand:VSF 1 "spu_reg_operand" "")))]
   ""
   {
    spu_compare_op0 = operands[0];
diff --git a/gcc/config/spu/spu_internals.h b/gcc/config/spu/spu_internals.h
index 752ddb6f04f..98e289d6944 100644
--- a/gcc/config/spu/spu_internals.h
+++ b/gcc/config/spu/spu_internals.h
@@ -256,8 +256,6 @@
 
 #define __align_hint(ptr,base,offset) __builtin_spu_align_hint(ptr,base,offset)
 
-#ifndef __cplusplus
-
 /* generic spu_* intrinisics */
 
 #define spu_splats(scalar) __builtin_spu_splats(scalar)
@@ -330,2444 +328,6 @@
 #define spu_insert(scalar,ra,pos) __builtin_spu_insert(scalar,ra,pos)
 #define spu_promote(scalar,pos) __builtin_spu_promote(scalar,pos)
 
-#else /* __cplusplus */
-
-/* A bit of a hack... Float conversion needs an immediate operand.
- * always_inline doesn't help because the compiler generates an error
- * before inlining happens. */
-static inline vec_float4 __hack_spu_convtf (vec_int4, vec_float4, vec_float4) __attribute__((__always_inline__));
-static inline vec_float4 __hack_spu_convtf (vec_uint4, vec_float4, vec_float4) __attribute__((__always_inline__));
-static inline vec_float4
-__hack_spu_convtf (vec_int4 ra, vec_float4 from_signed, vec_float4 from_unsigned)
-{
-  (void)ra;
-  (void)from_unsigned;
-  return from_signed;
-}
-static inline vec_float4
-__hack_spu_convtf (vec_uint4 ra, vec_float4 from_signed, vec_float4 from_unsigned)
-{
-  (void)ra;
-  (void)from_signed;
-  return from_unsigned;
-}
-#define spu_convtf(ra,imm) \
-  __hack_spu_convtf((ra), \
-                    __builtin_spu_convtf_1((vec_int4)(ra), (imm)), \
-                    __builtin_spu_convtf_0((vec_uint4)(ra), (imm)))
-
-/* The following defines and functions were created automatically from
- * spu_builtins.def.
*/ -#define spu_convts(a, b) __builtin_spu_convts (a, b) -#define spu_convtu(a, b) __builtin_spu_convtu (a, b) -#define spu_roundtf(a) __builtin_spu_roundtf (a) -#define spu_mulh(a, b) __builtin_spu_mulh (a, b) -#define spu_mulsr(a, b) __builtin_spu_mulsr (a, b) -#define spu_frest(a) __builtin_spu_frest (a) -#define spu_frsqest(a) __builtin_spu_frsqest (a) -#define spu_nmadd(a, b, c) __builtin_spu_nmadd (a, b, c) -#define spu_absd(a, b) __builtin_spu_absd (a, b) -#define spu_avg(a, b) __builtin_spu_avg (a, b) -#define spu_sumb(a, b) __builtin_spu_sumb (a, b) -#define spu_bisled(a) __builtin_spu_bisled (a, 0) -#define spu_bisled_d(a) __builtin_spu_bisled_d (a, 0) -#define spu_bisled_e(a) __builtin_spu_bisled_e (a, 0) -#define spu_cmpabseq(a, b) __builtin_spu_cmpabseq (a, b) -#define spu_cmpabsgt(a, b) __builtin_spu_cmpabsgt (a, b) - -static inline vec_short8 spu_extend (vec_char16 a) __attribute__((__always_inline__)); -static inline vec_int4 spu_extend (vec_short8 a) __attribute__((__always_inline__)); -static inline vec_llong2 spu_extend (vec_int4 a) __attribute__((__always_inline__)); -static inline vec_double2 spu_extend (vec_float4 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_add (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_add (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_add (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_add (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_float4 spu_add (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_double2 spu_add (vec_double2 a, vec_double2 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_add (vec_ushort8 a, unsigned short b) __attribute__((__always_inline__)); -static inline vec_short8 spu_add (vec_short8 a, short b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_add (vec_uint4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_add (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_addx (vec_int4 a, vec_int4 b, vec_int4 c) __attribute__((__always_inline__)); -static inline vec_uint4 spu_addx (vec_uint4 a, vec_uint4 b, vec_uint4 c) __attribute__((__always_inline__)); -static inline vec_int4 spu_genc (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_genc (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_gencx (vec_int4 a, vec_int4 b, vec_int4 c) __attribute__((__always_inline__)); -static inline vec_uint4 spu_gencx (vec_uint4 a, vec_uint4 b, vec_uint4 c) __attribute__((__always_inline__)); -static inline vec_int4 spu_madd (vec_short8 a, vec_short8 b, vec_int4 c) __attribute__((__always_inline__)); -static inline vec_float4 spu_madd (vec_float4 a, vec_float4 b, vec_float4 c) __attribute__((__always_inline__)); -static inline vec_double2 spu_madd (vec_double2 a, vec_double2 b, vec_double2 c) __attribute__((__always_inline__)); -static inline vec_float4 spu_msub (vec_float4 a, vec_float4 b, vec_float4 c) __attribute__((__always_inline__)); -static inline vec_double2 spu_msub (vec_double2 a, vec_double2 b, vec_double2 c) __attribute__((__always_inline__)); -static inline vec_uint4 spu_mhhadd (vec_ushort8 a, vec_ushort8 b, vec_uint4 c) __attribute__((__always_inline__)); -static inline vec_int4 spu_mhhadd (vec_short8 a, vec_short8 b, 
vec_int4 c) __attribute__((__always_inline__)); -static inline vec_uint4 spu_mule (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_mule (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_float4 spu_mul (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_double2 spu_mul (vec_double2 a, vec_double2 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_mulo (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_mulo (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_mulo (vec_short8 a, short b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_mulo (vec_ushort8 a, unsigned short b) __attribute__((__always_inline__)); -static inline vec_float4 spu_nmsub (vec_float4 a, vec_float4 b, vec_float4 c) __attribute__((__always_inline__)); -static inline vec_double2 spu_nmsub (vec_double2 a, vec_double2 b, vec_double2 c) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_sub (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_sub (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_sub (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_sub (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_float4 spu_sub (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_double2 spu_sub (vec_double2 a, vec_double2 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_sub (unsigned short a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_sub (short a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_sub (unsigned int a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_sub (int a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_subx (vec_uint4 a, vec_uint4 b, vec_uint4 c) __attribute__((__always_inline__)); -static inline vec_int4 spu_subx (vec_int4 a, vec_int4 b, vec_int4 c) __attribute__((__always_inline__)); -static inline vec_uint4 spu_genb (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_genb (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_genbx (vec_uint4 a, vec_uint4 b, vec_uint4 c) __attribute__((__always_inline__)); -static inline vec_int4 spu_genbx (vec_int4 a, vec_int4 b, vec_int4 c) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_cmpeq (vec_uchar16 a, vec_uchar16 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_cmpeq (vec_char16 a, vec_char16 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_cmpeq (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_cmpeq (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cmpeq (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cmpeq (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cmpeq (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_cmpeq (vec_uchar16 a, unsigned char b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_cmpeq (vec_char16 a, signed 
char b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_cmpeq (vec_ushort8 a, unsigned short b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_cmpeq (vec_short8 a, short b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cmpeq (vec_uint4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cmpeq (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_cmpgt (vec_uchar16 a, vec_uchar16 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_cmpgt (vec_char16 a, vec_char16 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_cmpgt (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_cmpgt (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cmpgt (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cmpgt (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cmpgt (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_cmpgt (vec_uchar16 a, unsigned char b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_cmpgt (vec_char16 a, signed char b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_cmpgt (vec_ushort8 a, unsigned short b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_cmpgt (vec_short8 a, short b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cmpgt (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cmpgt (vec_uint4 a, unsigned int b) __attribute__((__always_inline__)); -static inline void spu_hcmpeq (int a, int b) __attribute__((__always_inline__)); -static inline void spu_hcmpeq (unsigned int a, unsigned int b) __attribute__((__always_inline__)); -static inline void spu_hcmpgt (int a, int b) __attribute__((__always_inline__)); -static inline void spu_hcmpgt (unsigned int a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_cntb (vec_char16 a) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_cntb (vec_uchar16 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cntlz (vec_int4 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cntlz (vec_uint4 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_cntlz (vec_float4 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_gather (vec_int4 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_gather (vec_uint4 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_gather (vec_short8 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_gather (vec_ushort8 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_gather (vec_char16 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_gather (vec_uchar16 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_gather (vec_float4 a) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_maskb (unsigned short a) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_maskb (short a) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_maskb (unsigned int a) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_maskb (int a) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_maskh (unsigned char a) 
__attribute__((__always_inline__)); -static inline vec_ushort8 spu_maskh (signed char a) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_maskh (char a) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_maskh (unsigned short a) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_maskh (short a) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_maskh (unsigned int a) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_maskh (int a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_maskw (unsigned char a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_maskw (signed char a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_maskw (char a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_maskw (unsigned short a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_maskw (short a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_maskw (unsigned int a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_maskw (int a) __attribute__((__always_inline__)); -static inline vec_llong2 spu_sel (vec_llong2 a, vec_llong2 b, vec_ullong2 c) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_sel (vec_ullong2 a, vec_ullong2 b, vec_ullong2 c) __attribute__((__always_inline__)); -static inline vec_int4 spu_sel (vec_int4 a, vec_int4 b, vec_uint4 c) __attribute__((__always_inline__)); -static inline vec_uint4 spu_sel (vec_uint4 a, vec_uint4 b, vec_uint4 c) __attribute__((__always_inline__)); -static inline vec_short8 spu_sel (vec_short8 a, vec_short8 b, vec_ushort8 c) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_sel (vec_ushort8 a, vec_ushort8 b, vec_ushort8 c) __attribute__((__always_inline__)); -static inline vec_char16 spu_sel (vec_char16 a, vec_char16 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_sel (vec_uchar16 a, vec_uchar16 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_float4 spu_sel (vec_float4 a, vec_float4 b, vec_uint4 c) __attribute__((__always_inline__)); -static inline vec_double2 spu_sel (vec_double2 a, vec_double2 b, vec_ullong2 c) __attribute__((__always_inline__)); -static inline vec_llong2 spu_sel (vec_llong2 a, vec_llong2 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_sel (vec_ullong2 a, vec_ullong2 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_int4 spu_sel (vec_int4 a, vec_int4 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_uint4 spu_sel (vec_uint4 a, vec_uint4 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_short8 spu_sel (vec_short8 a, vec_short8 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_sel (vec_ushort8 a, vec_ushort8 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_float4 spu_sel (vec_float4 a, vec_float4 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_double2 spu_sel (vec_double2 a, vec_double2 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_shuffle (vec_uchar16 a, vec_uchar16 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_char16 spu_shuffle (vec_char16 a, vec_char16 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_shuffle (vec_ushort8 a, vec_ushort8 b, vec_uchar16 c) 
__attribute__((__always_inline__)); -static inline vec_short8 spu_shuffle (vec_short8 a, vec_short8 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_uint4 spu_shuffle (vec_uint4 a, vec_uint4 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_int4 spu_shuffle (vec_int4 a, vec_int4 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_shuffle (vec_ullong2 a, vec_ullong2 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_llong2 spu_shuffle (vec_llong2 a, vec_llong2 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_float4 spu_shuffle (vec_float4 a, vec_float4 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_double2 spu_shuffle (vec_double2 a, vec_double2 b, vec_uchar16 c) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_and (vec_uchar16 a, vec_uchar16 b) __attribute__((__always_inline__)); -static inline vec_char16 spu_and (vec_char16 a, vec_char16 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_and (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_and (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_and (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_and (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_and (vec_ullong2 a, vec_ullong2 b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_and (vec_llong2 a, vec_llong2 b) __attribute__((__always_inline__)); -static inline vec_float4 spu_and (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_double2 spu_and (vec_double2 a, vec_double2 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_and (vec_uchar16 a, unsigned char b) __attribute__((__always_inline__)); -static inline vec_char16 spu_and (vec_char16 a, signed char b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_and (vec_ushort8 a, unsigned short b) __attribute__((__always_inline__)); -static inline vec_short8 spu_and (vec_short8 a, short b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_and (vec_uint4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_and (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_andc (vec_llong2 a, vec_llong2 b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_andc (vec_ullong2 a, vec_ullong2 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_andc (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_andc (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_andc (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_andc (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_char16 spu_andc (vec_char16 a, vec_char16 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_andc (vec_uchar16 a, vec_uchar16 b) __attribute__((__always_inline__)); -static inline vec_float4 spu_andc (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_double2 spu_andc (vec_double2 a, vec_double2 b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_eqv (vec_llong2 a, vec_llong2 b) __attribute__((__always_inline__)); -static 
inline vec_ullong2 spu_eqv (vec_ullong2 a, vec_ullong2 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_eqv (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_eqv (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_eqv (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_eqv (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_char16 spu_eqv (vec_char16 a, vec_char16 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_eqv (vec_uchar16 a, vec_uchar16 b) __attribute__((__always_inline__)); -static inline vec_float4 spu_eqv (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_double2 spu_eqv (vec_double2 a, vec_double2 b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_nand (vec_llong2 a, vec_llong2 b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_nand (vec_ullong2 a, vec_ullong2 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_nand (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_nand (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_nand (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_nand (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_char16 spu_nand (vec_char16 a, vec_char16 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_nand (vec_uchar16 a, vec_uchar16 b) __attribute__((__always_inline__)); -static inline vec_float4 spu_nand (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_double2 spu_nand (vec_double2 a, vec_double2 b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_nor (vec_llong2 a, vec_llong2 b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_nor (vec_ullong2 a, vec_ullong2 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_nor (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_nor (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_nor (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_nor (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_char16 spu_nor (vec_char16 a, vec_char16 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_nor (vec_uchar16 a, vec_uchar16 b) __attribute__((__always_inline__)); -static inline vec_float4 spu_nor (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_double2 spu_nor (vec_double2 a, vec_double2 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_or (vec_uchar16 a, vec_uchar16 b) __attribute__((__always_inline__)); -static inline vec_char16 spu_or (vec_char16 a, vec_char16 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_or (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_or (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_or (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_or (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_or (vec_ullong2 a, vec_ullong2 b) 
__attribute__((__always_inline__)); -static inline vec_llong2 spu_or (vec_llong2 a, vec_llong2 b) __attribute__((__always_inline__)); -static inline vec_float4 spu_or (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_double2 spu_or (vec_double2 a, vec_double2 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_or (vec_uchar16 a, unsigned char b) __attribute__((__always_inline__)); -static inline vec_char16 spu_or (vec_char16 a, signed char b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_or (vec_ushort8 a, unsigned short b) __attribute__((__always_inline__)); -static inline vec_short8 spu_or (vec_short8 a, short b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_or (vec_uint4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_or (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_orc (vec_llong2 a, vec_llong2 b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_orc (vec_ullong2 a, vec_ullong2 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_orc (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_orc (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_orc (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_orc (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_char16 spu_orc (vec_char16 a, vec_char16 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_orc (vec_uchar16 a, vec_uchar16 b) __attribute__((__always_inline__)); -static inline vec_float4 spu_orc (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_double2 spu_orc (vec_double2 a, vec_double2 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_orx (vec_int4 a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_orx (vec_uint4 a) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_xor (vec_uchar16 a, vec_uchar16 b) __attribute__((__always_inline__)); -static inline vec_char16 spu_xor (vec_char16 a, vec_char16 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_xor (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_xor (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_xor (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_xor (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_xor (vec_ullong2 a, vec_ullong2 b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_xor (vec_llong2 a, vec_llong2 b) __attribute__((__always_inline__)); -static inline vec_float4 spu_xor (vec_float4 a, vec_float4 b) __attribute__((__always_inline__)); -static inline vec_double2 spu_xor (vec_double2 a, vec_double2 b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_xor (vec_uchar16 a, unsigned char b) __attribute__((__always_inline__)); -static inline vec_char16 spu_xor (vec_char16 a, signed char b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_xor (vec_ushort8 a, unsigned short b) __attribute__((__always_inline__)); -static inline vec_short8 spu_xor (vec_short8 a, short b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_xor (vec_uint4 a, unsigned int b) 
__attribute__((__always_inline__)); -static inline vec_int4 spu_xor (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rl (vec_ushort8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rl (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rl (vec_uint4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rl (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rl (vec_ushort8 a, short b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rl (vec_short8 a, short b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rl (vec_uint4 a, int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rl (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_rlqw (vec_uchar16 a, int b) __attribute__((__always_inline__)); -static inline vec_char16 spu_rlqw (vec_char16 a, int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rlqw (vec_ushort8 a, int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rlqw (vec_short8 a, int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rlqw (vec_uint4 a, int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rlqw (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_rlqw (vec_ullong2 a, int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_rlqw (vec_llong2 a, int b) __attribute__((__always_inline__)); -static inline vec_float4 spu_rlqw (vec_float4 a, int b) __attribute__((__always_inline__)); -static inline vec_double2 spu_rlqw (vec_double2 a, int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_rlqwbyte (vec_uchar16 a, int b) __attribute__((__always_inline__)); -static inline vec_char16 spu_rlqwbyte (vec_char16 a, int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rlqwbyte (vec_ushort8 a, int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rlqwbyte (vec_short8 a, int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rlqwbyte (vec_uint4 a, int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rlqwbyte (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_rlqwbyte (vec_ullong2 a, int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_rlqwbyte (vec_llong2 a, int b) __attribute__((__always_inline__)); -static inline vec_float4 spu_rlqwbyte (vec_float4 a, int b) __attribute__((__always_inline__)); -static inline vec_double2 spu_rlqwbyte (vec_double2 a, int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_rlqwbytebc (vec_uchar16 a, int b) __attribute__((__always_inline__)); -static inline vec_char16 spu_rlqwbytebc (vec_char16 a, int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rlqwbytebc (vec_ushort8 a, int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rlqwbytebc (vec_short8 a, int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rlqwbytebc (vec_uint4 a, int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rlqwbytebc (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_rlqwbytebc (vec_ullong2 a, int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_rlqwbytebc (vec_llong2 a, int b) 
__attribute__((__always_inline__)); -static inline vec_float4 spu_rlqwbytebc (vec_float4 a, int b) __attribute__((__always_inline__)); -static inline vec_double2 spu_rlqwbytebc (vec_double2 a, int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rlmask (vec_ushort8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rlmask (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rlmask (vec_uint4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rlmask (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rlmask (vec_ushort8 a, int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rlmask (vec_short8 a, int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rlmask (vec_uint4 a, int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rlmask (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rlmaska (vec_ushort8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rlmaska (vec_short8 a, vec_short8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rlmaska (vec_uint4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rlmaska (vec_int4 a, vec_int4 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rlmaska (vec_ushort8 a, int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rlmaska (vec_short8 a, int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rlmaska (vec_uint4 a, int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rlmaska (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_rlmaskqw (vec_uchar16 a, int b) __attribute__((__always_inline__)); -static inline vec_char16 spu_rlmaskqw (vec_char16 a, int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rlmaskqw (vec_ushort8 a, int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rlmaskqw (vec_short8 a, int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rlmaskqw (vec_uint4 a, int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rlmaskqw (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_rlmaskqw (vec_ullong2 a, int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_rlmaskqw (vec_llong2 a, int b) __attribute__((__always_inline__)); -static inline vec_float4 spu_rlmaskqw (vec_float4 a, int b) __attribute__((__always_inline__)); -static inline vec_double2 spu_rlmaskqw (vec_double2 a, int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_rlmaskqwbyte (vec_uchar16 a, int b) __attribute__((__always_inline__)); -static inline vec_char16 spu_rlmaskqwbyte (vec_char16 a, int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rlmaskqwbyte (vec_ushort8 a, int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rlmaskqwbyte (vec_short8 a, int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rlmaskqwbyte (vec_uint4 a, int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rlmaskqwbyte (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_rlmaskqwbyte (vec_ullong2 a, int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_rlmaskqwbyte (vec_llong2 a, int b) 
__attribute__((__always_inline__)); -static inline vec_float4 spu_rlmaskqwbyte (vec_float4 a, int b) __attribute__((__always_inline__)); -static inline vec_double2 spu_rlmaskqwbyte (vec_double2 a, int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_rlmaskqwbytebc (vec_uchar16 a, int b) __attribute__((__always_inline__)); -static inline vec_char16 spu_rlmaskqwbytebc (vec_char16 a, int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_rlmaskqwbytebc (vec_ushort8 a, int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_rlmaskqwbytebc (vec_short8 a, int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_rlmaskqwbytebc (vec_uint4 a, int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_rlmaskqwbytebc (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_rlmaskqwbytebc (vec_ullong2 a, int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_rlmaskqwbytebc (vec_llong2 a, int b) __attribute__((__always_inline__)); -static inline vec_float4 spu_rlmaskqwbytebc (vec_float4 a, int b) __attribute__((__always_inline__)); -static inline vec_double2 spu_rlmaskqwbytebc (vec_double2 a, int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_sl (vec_ushort8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_short8 spu_sl (vec_short8 a, vec_ushort8 b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_sl (vec_uint4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_int4 spu_sl (vec_int4 a, vec_uint4 b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_sl (vec_ushort8 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_sl (vec_short8 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_sl (vec_uint4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_sl (vec_int4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_slqw (vec_llong2 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_slqw (vec_ullong2 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_slqw (vec_int4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_slqw (vec_uint4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_slqw (vec_short8 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_slqw (vec_ushort8 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_char16 spu_slqw (vec_char16 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_slqw (vec_uchar16 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_float4 spu_slqw (vec_float4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_double2 spu_slqw (vec_double2 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_slqwbyte (vec_llong2 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_slqwbyte (vec_ullong2 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_slqwbyte (vec_int4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_slqwbyte (vec_uint4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_slqwbyte (vec_short8 a, 
unsigned int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_slqwbyte (vec_ushort8 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_char16 spu_slqwbyte (vec_char16 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_slqwbyte (vec_uchar16 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_float4 spu_slqwbyte (vec_float4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_double2 spu_slqwbyte (vec_double2 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_slqwbytebc (vec_llong2 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_slqwbytebc (vec_ullong2 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_slqwbytebc (vec_int4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_slqwbytebc (vec_uint4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_slqwbytebc (vec_short8 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_slqwbytebc (vec_ushort8 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_char16 spu_slqwbytebc (vec_char16 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_slqwbytebc (vec_uchar16 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_float4 spu_slqwbytebc (vec_float4 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_double2 spu_slqwbytebc (vec_double2 a, unsigned int b) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_splats (unsigned char a) __attribute__((__always_inline__)); -static inline vec_char16 spu_splats (signed char a) __attribute__((__always_inline__)); -static inline vec_char16 spu_splats (char a) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_splats (unsigned short a) __attribute__((__always_inline__)); -static inline vec_short8 spu_splats (short a) __attribute__((__always_inline__)); -static inline vec_uint4 spu_splats (unsigned int a) __attribute__((__always_inline__)); -static inline vec_int4 spu_splats (int a) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_splats (unsigned long long a) __attribute__((__always_inline__)); -static inline vec_llong2 spu_splats (long long a) __attribute__((__always_inline__)); -static inline vec_float4 spu_splats (float a) __attribute__((__always_inline__)); -static inline vec_double2 spu_splats (double a) __attribute__((__always_inline__)); -static inline unsigned char spu_extract (vec_uchar16 a, int b) __attribute__((__always_inline__)); -static inline signed char spu_extract (vec_char16 a, int b) __attribute__((__always_inline__)); -static inline unsigned short spu_extract (vec_ushort8 a, int b) __attribute__((__always_inline__)); -static inline short spu_extract (vec_short8 a, int b) __attribute__((__always_inline__)); -static inline unsigned int spu_extract (vec_uint4 a, int b) __attribute__((__always_inline__)); -static inline int spu_extract (vec_int4 a, int b) __attribute__((__always_inline__)); -static inline unsigned long long spu_extract (vec_ullong2 a, int b) __attribute__((__always_inline__)); -static inline long long spu_extract (vec_llong2 a, int b) __attribute__((__always_inline__)); -static inline float spu_extract (vec_float4 a, int b) __attribute__((__always_inline__)); -static inline double spu_extract (vec_double2 a, int b) 
__attribute__((__always_inline__)); -static inline vec_uchar16 spu_insert (unsigned char a, vec_uchar16 b, int c) __attribute__((__always_inline__)); -static inline vec_char16 spu_insert (signed char a, vec_char16 b, int c) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_insert (unsigned short a, vec_ushort8 b, int c) __attribute__((__always_inline__)); -static inline vec_short8 spu_insert (short a, vec_short8 b, int c) __attribute__((__always_inline__)); -static inline vec_uint4 spu_insert (unsigned int a, vec_uint4 b, int c) __attribute__((__always_inline__)); -static inline vec_int4 spu_insert (int a, vec_int4 b, int c) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_insert (unsigned long long a, vec_ullong2 b, int c) __attribute__((__always_inline__)); -static inline vec_llong2 spu_insert (long long a, vec_llong2 b, int c) __attribute__((__always_inline__)); -static inline vec_float4 spu_insert (float a, vec_float4 b, int c) __attribute__((__always_inline__)); -static inline vec_double2 spu_insert (double a, vec_double2 b, int c) __attribute__((__always_inline__)); -static inline vec_uchar16 spu_promote (unsigned char a, int b) __attribute__((__always_inline__)); -static inline vec_char16 spu_promote (signed char a, int b) __attribute__((__always_inline__)); -static inline vec_char16 spu_promote (char a, int b) __attribute__((__always_inline__)); -static inline vec_ushort8 spu_promote (unsigned short a, int b) __attribute__((__always_inline__)); -static inline vec_short8 spu_promote (short a, int b) __attribute__((__always_inline__)); -static inline vec_uint4 spu_promote (unsigned int a, int b) __attribute__((__always_inline__)); -static inline vec_int4 spu_promote (int a, int b) __attribute__((__always_inline__)); -static inline vec_ullong2 spu_promote (unsigned long long a, int b) __attribute__((__always_inline__)); -static inline vec_llong2 spu_promote (long long a, int b) __attribute__((__always_inline__)); -static inline vec_float4 spu_promote (float a, int b) __attribute__((__always_inline__)); -static inline vec_double2 spu_promote (double a, int b) __attribute__((__always_inline__)); - -static inline vec_short8 -spu_extend (vec_char16 a) -{ - return __builtin_spu_extend_0 (a); -} -static inline vec_int4 -spu_extend (vec_short8 a) -{ - return __builtin_spu_extend_1 (a); -} -static inline vec_llong2 -spu_extend (vec_int4 a) -{ - return __builtin_spu_extend_2 (a); -} -static inline vec_double2 -spu_extend (vec_float4 a) -{ - return __builtin_spu_extend_3 (a); -} -static inline vec_uint4 -spu_add (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_add_0 (a, b); -} -static inline vec_int4 -spu_add (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_add_1 (a, b); -} -static inline vec_ushort8 -spu_add (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_add_2 (a, b); -} -static inline vec_short8 -spu_add (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_add_3 (a, b); -} -static inline vec_float4 -spu_add (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_add_4 (a, b); -} -static inline vec_double2 -spu_add (vec_double2 a, vec_double2 b) -{ - return __builtin_spu_add_5 (a, b); -} -static inline vec_ushort8 -spu_add (vec_ushort8 a, unsigned short b) -{ - return __builtin_spu_add_6 (a, b); -} -static inline vec_short8 -spu_add (vec_short8 a, short b) -{ - return __builtin_spu_add_7 (a, b); -} -static inline vec_uint4 -spu_add (vec_uint4 a, unsigned int b) -{ - return __builtin_spu_add_8 (a, b); -} -static inline vec_int4 -spu_add 
(vec_int4 a, int b) -{ - return __builtin_spu_add_9 (a, b); -} -static inline vec_int4 -spu_addx (vec_int4 a, vec_int4 b, vec_int4 c) -{ - return __builtin_spu_addx_0 (a, b, c); -} -static inline vec_uint4 -spu_addx (vec_uint4 a, vec_uint4 b, vec_uint4 c) -{ - return __builtin_spu_addx_1 (a, b, c); -} -static inline vec_int4 -spu_genc (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_genc_0 (a, b); -} -static inline vec_uint4 -spu_genc (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_genc_1 (a, b); -} -static inline vec_int4 -spu_gencx (vec_int4 a, vec_int4 b, vec_int4 c) -{ - return __builtin_spu_gencx_0 (a, b, c); -} -static inline vec_uint4 -spu_gencx (vec_uint4 a, vec_uint4 b, vec_uint4 c) -{ - return __builtin_spu_gencx_1 (a, b, c); -} -static inline vec_int4 -spu_madd (vec_short8 a, vec_short8 b, vec_int4 c) -{ - return __builtin_spu_madd_0 (a, b, c); -} -static inline vec_float4 -spu_madd (vec_float4 a, vec_float4 b, vec_float4 c) -{ - return __builtin_spu_madd_1 (a, b, c); -} -static inline vec_double2 -spu_madd (vec_double2 a, vec_double2 b, vec_double2 c) -{ - return __builtin_spu_madd_2 (a, b, c); -} -static inline vec_float4 -spu_msub (vec_float4 a, vec_float4 b, vec_float4 c) -{ - return __builtin_spu_msub_0 (a, b, c); -} -static inline vec_double2 -spu_msub (vec_double2 a, vec_double2 b, vec_double2 c) -{ - return __builtin_spu_msub_1 (a, b, c); -} -static inline vec_uint4 -spu_mhhadd (vec_ushort8 a, vec_ushort8 b, vec_uint4 c) -{ - return __builtin_spu_mhhadd_0 (a, b, c); -} -static inline vec_int4 -spu_mhhadd (vec_short8 a, vec_short8 b, vec_int4 c) -{ - return __builtin_spu_mhhadd_1 (a, b, c); -} -static inline vec_uint4 -spu_mule (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_mule_0 (a, b); -} -static inline vec_int4 -spu_mule (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_mule_1 (a, b); -} -static inline vec_float4 -spu_mul (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_mul_0 (a, b); -} -static inline vec_double2 -spu_mul (vec_double2 a, vec_double2 b) -{ - return __builtin_spu_mul_1 (a, b); -} -static inline vec_int4 -spu_mulo (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_mulo_0 (a, b); -} -static inline vec_uint4 -spu_mulo (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_mulo_1 (a, b); -} -static inline vec_int4 -spu_mulo (vec_short8 a, short b) -{ - return __builtin_spu_mulo_2 (a, b); -} -static inline vec_uint4 -spu_mulo (vec_ushort8 a, unsigned short b) -{ - return __builtin_spu_mulo_3 (a, b); -} -static inline vec_float4 -spu_nmsub (vec_float4 a, vec_float4 b, vec_float4 c) -{ - return __builtin_spu_nmsub_0 (a, b, c); -} -static inline vec_double2 -spu_nmsub (vec_double2 a, vec_double2 b, vec_double2 c) -{ - return __builtin_spu_nmsub_1 (a, b, c); -} -static inline vec_ushort8 -spu_sub (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_sub_0 (a, b); -} -static inline vec_short8 -spu_sub (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_sub_1 (a, b); -} -static inline vec_uint4 -spu_sub (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_sub_2 (a, b); -} -static inline vec_int4 -spu_sub (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_sub_3 (a, b); -} -static inline vec_float4 -spu_sub (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_sub_4 (a, b); -} -static inline vec_double2 -spu_sub (vec_double2 a, vec_double2 b) -{ - return __builtin_spu_sub_5 (a, b); -} -static inline vec_ushort8 -spu_sub (unsigned short a, vec_ushort8 b) -{ - return __builtin_spu_sub_6 (a, b); -} -static inline vec_short8 
-spu_sub (short a, vec_short8 b) -{ - return __builtin_spu_sub_7 (a, b); -} -static inline vec_uint4 -spu_sub (unsigned int a, vec_uint4 b) -{ - return __builtin_spu_sub_8 (a, b); -} -static inline vec_int4 -spu_sub (int a, vec_int4 b) -{ - return __builtin_spu_sub_9 (a, b); -} -static inline vec_uint4 -spu_subx (vec_uint4 a, vec_uint4 b, vec_uint4 c) -{ - return __builtin_spu_subx_0 (a, b, c); -} -static inline vec_int4 -spu_subx (vec_int4 a, vec_int4 b, vec_int4 c) -{ - return __builtin_spu_subx_1 (a, b, c); -} -static inline vec_uint4 -spu_genb (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_genb_0 (a, b); -} -static inline vec_int4 -spu_genb (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_genb_1 (a, b); -} -static inline vec_uint4 -spu_genbx (vec_uint4 a, vec_uint4 b, vec_uint4 c) -{ - return __builtin_spu_genbx_0 (a, b, c); -} -static inline vec_int4 -spu_genbx (vec_int4 a, vec_int4 b, vec_int4 c) -{ - return __builtin_spu_genbx_1 (a, b, c); -} -static inline vec_uchar16 -spu_cmpeq (vec_uchar16 a, vec_uchar16 b) -{ - return __builtin_spu_cmpeq_0 (a, b); -} -static inline vec_uchar16 -spu_cmpeq (vec_char16 a, vec_char16 b) -{ - return __builtin_spu_cmpeq_1 (a, b); -} -static inline vec_ushort8 -spu_cmpeq (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_cmpeq_2 (a, b); -} -static inline vec_ushort8 -spu_cmpeq (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_cmpeq_3 (a, b); -} -static inline vec_uint4 -spu_cmpeq (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_cmpeq_4 (a, b); -} -static inline vec_uint4 -spu_cmpeq (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_cmpeq_5 (a, b); -} -static inline vec_uint4 -spu_cmpeq (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_cmpeq_6 (a, b); -} -static inline vec_uchar16 -spu_cmpeq (vec_uchar16 a, unsigned char b) -{ - return __builtin_spu_cmpeq_7 (a, b); -} -static inline vec_uchar16 -spu_cmpeq (vec_char16 a, signed char b) -{ - return __builtin_spu_cmpeq_8 (a, b); -} -static inline vec_ushort8 -spu_cmpeq (vec_ushort8 a, unsigned short b) -{ - return __builtin_spu_cmpeq_9 (a, b); -} -static inline vec_ushort8 -spu_cmpeq (vec_short8 a, short b) -{ - return __builtin_spu_cmpeq_10 (a, b); -} -static inline vec_uint4 -spu_cmpeq (vec_uint4 a, unsigned int b) -{ - return __builtin_spu_cmpeq_11 (a, b); -} -static inline vec_uint4 -spu_cmpeq (vec_int4 a, int b) -{ - return __builtin_spu_cmpeq_12 (a, b); -} -static inline vec_uchar16 -spu_cmpgt (vec_uchar16 a, vec_uchar16 b) -{ - return __builtin_spu_cmpgt_0 (a, b); -} -static inline vec_uchar16 -spu_cmpgt (vec_char16 a, vec_char16 b) -{ - return __builtin_spu_cmpgt_1 (a, b); -} -static inline vec_ushort8 -spu_cmpgt (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_cmpgt_2 (a, b); -} -static inline vec_ushort8 -spu_cmpgt (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_cmpgt_3 (a, b); -} -static inline vec_uint4 -spu_cmpgt (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_cmpgt_4 (a, b); -} -static inline vec_uint4 -spu_cmpgt (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_cmpgt_5 (a, b); -} -static inline vec_uint4 -spu_cmpgt (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_cmpgt_6 (a, b); -} -static inline vec_uchar16 -spu_cmpgt (vec_uchar16 a, unsigned char b) -{ - return __builtin_spu_cmpgt_7 (a, b); -} -static inline vec_uchar16 -spu_cmpgt (vec_char16 a, signed char b) -{ - return __builtin_spu_cmpgt_8 (a, b); -} -static inline vec_ushort8 -spu_cmpgt (vec_ushort8 a, unsigned short b) -{ - return __builtin_spu_cmpgt_9 (a, b); -} -static inline 
vec_ushort8 -spu_cmpgt (vec_short8 a, short b) -{ - return __builtin_spu_cmpgt_10 (a, b); -} -static inline vec_uint4 -spu_cmpgt (vec_int4 a, int b) -{ - return __builtin_spu_cmpgt_11 (a, b); -} -static inline vec_uint4 -spu_cmpgt (vec_uint4 a, unsigned int b) -{ - return __builtin_spu_cmpgt_12 (a, b); -} -static inline void -spu_hcmpeq (int a, int b) -{ - return __builtin_spu_hcmpeq_0 (a, b); -} -static inline void -spu_hcmpeq (unsigned int a, unsigned int b) -{ - return __builtin_spu_hcmpeq_1 (a, b); -} -static inline void -spu_hcmpgt (int a, int b) -{ - return __builtin_spu_hcmpgt_0 (a, b); -} -static inline void -spu_hcmpgt (unsigned int a, unsigned int b) -{ - return __builtin_spu_hcmpgt_1 (a, b); -} -static inline vec_uchar16 -spu_cntb (vec_char16 a) -{ - return __builtin_spu_cntb_0 (a); -} -static inline vec_uchar16 -spu_cntb (vec_uchar16 a) -{ - return __builtin_spu_cntb_1 (a); -} -static inline vec_uint4 -spu_cntlz (vec_int4 a) -{ - return __builtin_spu_cntlz_0 (a); -} -static inline vec_uint4 -spu_cntlz (vec_uint4 a) -{ - return __builtin_spu_cntlz_1 (a); -} -static inline vec_uint4 -spu_cntlz (vec_float4 a) -{ - return __builtin_spu_cntlz_2 (a); -} -static inline vec_uint4 -spu_gather (vec_int4 a) -{ - return __builtin_spu_gather_0 (a); -} -static inline vec_uint4 -spu_gather (vec_uint4 a) -{ - return __builtin_spu_gather_1 (a); -} -static inline vec_uint4 -spu_gather (vec_short8 a) -{ - return __builtin_spu_gather_2 (a); -} -static inline vec_uint4 -spu_gather (vec_ushort8 a) -{ - return __builtin_spu_gather_3 (a); -} -static inline vec_uint4 -spu_gather (vec_char16 a) -{ - return __builtin_spu_gather_4 (a); -} -static inline vec_uint4 -spu_gather (vec_uchar16 a) -{ - return __builtin_spu_gather_5 (a); -} -static inline vec_uint4 -spu_gather (vec_float4 a) -{ - return __builtin_spu_gather_6 (a); -} -static inline vec_uchar16 -spu_maskb (unsigned short a) -{ - return __builtin_spu_maskb_0 (a); -} -static inline vec_uchar16 -spu_maskb (short a) -{ - return __builtin_spu_maskb_1 (a); -} -static inline vec_uchar16 -spu_maskb (unsigned int a) -{ - return __builtin_spu_maskb_2 (a); -} -static inline vec_uchar16 -spu_maskb (int a) -{ - return __builtin_spu_maskb_3 (a); -} -static inline vec_ushort8 -spu_maskh (unsigned char a) -{ - return __builtin_spu_maskh_0 (a); -} -static inline vec_ushort8 -spu_maskh (signed char a) -{ - return __builtin_spu_maskh_1 (a); -} -static inline vec_ushort8 -spu_maskh (char a) -{ - return __builtin_spu_maskh_1 (a); -} -static inline vec_ushort8 -spu_maskh (unsigned short a) -{ - return __builtin_spu_maskh_2 (a); -} -static inline vec_ushort8 -spu_maskh (short a) -{ - return __builtin_spu_maskh_3 (a); -} -static inline vec_ushort8 -spu_maskh (unsigned int a) -{ - return __builtin_spu_maskh_4 (a); -} -static inline vec_ushort8 -spu_maskh (int a) -{ - return __builtin_spu_maskh_5 (a); -} -static inline vec_uint4 -spu_maskw (unsigned char a) -{ - return __builtin_spu_maskw_0 (a); -} -static inline vec_uint4 -spu_maskw (signed char a) -{ - return __builtin_spu_maskw_1 (a); -} -static inline vec_uint4 -spu_maskw (char a) -{ - return __builtin_spu_maskw_1 (a); -} -static inline vec_uint4 -spu_maskw (unsigned short a) -{ - return __builtin_spu_maskw_2 (a); -} -static inline vec_uint4 -spu_maskw (short a) -{ - return __builtin_spu_maskw_3 (a); -} -static inline vec_uint4 -spu_maskw (unsigned int a) -{ - return __builtin_spu_maskw_4 (a); -} -static inline vec_uint4 -spu_maskw (int a) -{ - return __builtin_spu_maskw_5 (a); -} -static inline vec_llong2 -spu_sel 
(vec_llong2 a, vec_llong2 b, vec_ullong2 c) -{ - return __builtin_spu_sel_0 (a, b, c); -} -static inline vec_ullong2 -spu_sel (vec_ullong2 a, vec_ullong2 b, vec_ullong2 c) -{ - return __builtin_spu_sel_1 (a, b, c); -} -static inline vec_int4 -spu_sel (vec_int4 a, vec_int4 b, vec_uint4 c) -{ - return __builtin_spu_sel_2 (a, b, c); -} -static inline vec_uint4 -spu_sel (vec_uint4 a, vec_uint4 b, vec_uint4 c) -{ - return __builtin_spu_sel_3 (a, b, c); -} -static inline vec_short8 -spu_sel (vec_short8 a, vec_short8 b, vec_ushort8 c) -{ - return __builtin_spu_sel_4 (a, b, c); -} -static inline vec_ushort8 -spu_sel (vec_ushort8 a, vec_ushort8 b, vec_ushort8 c) -{ - return __builtin_spu_sel_5 (a, b, c); -} -static inline vec_char16 -spu_sel (vec_char16 a, vec_char16 b, vec_uchar16 c) -{ - return __builtin_spu_sel_6 (a, b, c); -} -static inline vec_uchar16 -spu_sel (vec_uchar16 a, vec_uchar16 b, vec_uchar16 c) -{ - return __builtin_spu_sel_7 (a, b, c); -} -static inline vec_float4 -spu_sel (vec_float4 a, vec_float4 b, vec_uint4 c) -{ - return __builtin_spu_sel_8 (a, b, c); -} -static inline vec_double2 -spu_sel (vec_double2 a, vec_double2 b, vec_ullong2 c) -{ - return __builtin_spu_sel_9 (a, b, c); -} -static inline vec_uchar16 -spu_shuffle (vec_uchar16 a, vec_uchar16 b, vec_uchar16 c) -{ - return __builtin_spu_shuffle_0 (a, b, c); -} -static inline vec_char16 -spu_shuffle (vec_char16 a, vec_char16 b, vec_uchar16 c) -{ - return __builtin_spu_shuffle_1 (a, b, c); -} -static inline vec_ushort8 -spu_shuffle (vec_ushort8 a, vec_ushort8 b, vec_uchar16 c) -{ - return __builtin_spu_shuffle_2 (a, b, c); -} -static inline vec_short8 -spu_shuffle (vec_short8 a, vec_short8 b, vec_uchar16 c) -{ - return __builtin_spu_shuffle_3 (a, b, c); -} -static inline vec_uint4 -spu_shuffle (vec_uint4 a, vec_uint4 b, vec_uchar16 c) -{ - return __builtin_spu_shuffle_4 (a, b, c); -} -static inline vec_int4 -spu_shuffle (vec_int4 a, vec_int4 b, vec_uchar16 c) -{ - return __builtin_spu_shuffle_5 (a, b, c); -} -static inline vec_ullong2 -spu_shuffle (vec_ullong2 a, vec_ullong2 b, vec_uchar16 c) -{ - return __builtin_spu_shuffle_6 (a, b, c); -} -static inline vec_llong2 -spu_shuffle (vec_llong2 a, vec_llong2 b, vec_uchar16 c) -{ - return __builtin_spu_shuffle_7 (a, b, c); -} -static inline vec_float4 -spu_shuffle (vec_float4 a, vec_float4 b, vec_uchar16 c) -{ - return __builtin_spu_shuffle_8 (a, b, c); -} -static inline vec_double2 -spu_shuffle (vec_double2 a, vec_double2 b, vec_uchar16 c) -{ - return __builtin_spu_shuffle_9 (a, b, c); -} -static inline vec_uchar16 -spu_and (vec_uchar16 a, vec_uchar16 b) -{ - return __builtin_spu_and_0 (a, b); -} -static inline vec_char16 -spu_and (vec_char16 a, vec_char16 b) -{ - return __builtin_spu_and_1 (a, b); -} -static inline vec_ushort8 -spu_and (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_and_2 (a, b); -} -static inline vec_short8 -spu_and (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_and_3 (a, b); -} -static inline vec_uint4 -spu_and (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_and_4 (a, b); -} -static inline vec_int4 -spu_and (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_and_5 (a, b); -} -static inline vec_ullong2 -spu_and (vec_ullong2 a, vec_ullong2 b) -{ - return __builtin_spu_and_6 (a, b); -} -static inline vec_llong2 -spu_and (vec_llong2 a, vec_llong2 b) -{ - return __builtin_spu_and_7 (a, b); -} -static inline vec_float4 -spu_and (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_and_8 (a, b); -} -static inline vec_double2 -spu_and 
(vec_double2 a, vec_double2 b) -{ - return __builtin_spu_and_9 (a, b); -} -static inline vec_uchar16 -spu_and (vec_uchar16 a, unsigned char b) -{ - return __builtin_spu_and_10 (a, b); -} -static inline vec_char16 -spu_and (vec_char16 a, signed char b) -{ - return __builtin_spu_and_11 (a, b); -} -static inline vec_ushort8 -spu_and (vec_ushort8 a, unsigned short b) -{ - return __builtin_spu_and_12 (a, b); -} -static inline vec_short8 -spu_and (vec_short8 a, short b) -{ - return __builtin_spu_and_13 (a, b); -} -static inline vec_uint4 -spu_and (vec_uint4 a, unsigned int b) -{ - return __builtin_spu_and_14 (a, b); -} -static inline vec_int4 -spu_and (vec_int4 a, int b) -{ - return __builtin_spu_and_15 (a, b); -} -static inline vec_llong2 -spu_andc (vec_llong2 a, vec_llong2 b) -{ - return __builtin_spu_andc_0 (a, b); -} -static inline vec_ullong2 -spu_andc (vec_ullong2 a, vec_ullong2 b) -{ - return __builtin_spu_andc_1 (a, b); -} -static inline vec_int4 -spu_andc (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_andc_2 (a, b); -} -static inline vec_uint4 -spu_andc (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_andc_3 (a, b); -} -static inline vec_short8 -spu_andc (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_andc_4 (a, b); -} -static inline vec_ushort8 -spu_andc (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_andc_5 (a, b); -} -static inline vec_char16 -spu_andc (vec_char16 a, vec_char16 b) -{ - return __builtin_spu_andc_6 (a, b); -} -static inline vec_uchar16 -spu_andc (vec_uchar16 a, vec_uchar16 b) -{ - return __builtin_spu_andc_7 (a, b); -} -static inline vec_float4 -spu_andc (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_andc_8 (a, b); -} -static inline vec_double2 -spu_andc (vec_double2 a, vec_double2 b) -{ - return __builtin_spu_andc_9 (a, b); -} -static inline vec_llong2 -spu_eqv (vec_llong2 a, vec_llong2 b) -{ - return __builtin_spu_eqv_0 (a, b); -} -static inline vec_ullong2 -spu_eqv (vec_ullong2 a, vec_ullong2 b) -{ - return __builtin_spu_eqv_1 (a, b); -} -static inline vec_int4 -spu_eqv (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_eqv_2 (a, b); -} -static inline vec_uint4 -spu_eqv (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_eqv_3 (a, b); -} -static inline vec_short8 -spu_eqv (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_eqv_4 (a, b); -} -static inline vec_ushort8 -spu_eqv (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_eqv_5 (a, b); -} -static inline vec_char16 -spu_eqv (vec_char16 a, vec_char16 b) -{ - return __builtin_spu_eqv_6 (a, b); -} -static inline vec_uchar16 -spu_eqv (vec_uchar16 a, vec_uchar16 b) -{ - return __builtin_spu_eqv_7 (a, b); -} -static inline vec_float4 -spu_eqv (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_eqv_8 (a, b); -} -static inline vec_double2 -spu_eqv (vec_double2 a, vec_double2 b) -{ - return __builtin_spu_eqv_9 (a, b); -} -static inline vec_llong2 -spu_nand (vec_llong2 a, vec_llong2 b) -{ - return __builtin_spu_nand_0 (a, b); -} -static inline vec_ullong2 -spu_nand (vec_ullong2 a, vec_ullong2 b) -{ - return __builtin_spu_nand_1 (a, b); -} -static inline vec_int4 -spu_nand (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_nand_2 (a, b); -} -static inline vec_uint4 -spu_nand (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_nand_3 (a, b); -} -static inline vec_short8 -spu_nand (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_nand_4 (a, b); -} -static inline vec_ushort8 -spu_nand (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_nand_5 (a, b); -} -static inline 
vec_char16 -spu_nand (vec_char16 a, vec_char16 b) -{ - return __builtin_spu_nand_6 (a, b); -} -static inline vec_uchar16 -spu_nand (vec_uchar16 a, vec_uchar16 b) -{ - return __builtin_spu_nand_7 (a, b); -} -static inline vec_float4 -spu_nand (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_nand_8 (a, b); -} -static inline vec_double2 -spu_nand (vec_double2 a, vec_double2 b) -{ - return __builtin_spu_nand_9 (a, b); -} -static inline vec_llong2 -spu_nor (vec_llong2 a, vec_llong2 b) -{ - return __builtin_spu_nor_0 (a, b); -} -static inline vec_ullong2 -spu_nor (vec_ullong2 a, vec_ullong2 b) -{ - return __builtin_spu_nor_1 (a, b); -} -static inline vec_int4 -spu_nor (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_nor_2 (a, b); -} -static inline vec_uint4 -spu_nor (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_nor_3 (a, b); -} -static inline vec_short8 -spu_nor (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_nor_4 (a, b); -} -static inline vec_ushort8 -spu_nor (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_nor_5 (a, b); -} -static inline vec_char16 -spu_nor (vec_char16 a, vec_char16 b) -{ - return __builtin_spu_nor_6 (a, b); -} -static inline vec_uchar16 -spu_nor (vec_uchar16 a, vec_uchar16 b) -{ - return __builtin_spu_nor_7 (a, b); -} -static inline vec_float4 -spu_nor (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_nor_8 (a, b); -} -static inline vec_double2 -spu_nor (vec_double2 a, vec_double2 b) -{ - return __builtin_spu_nor_9 (a, b); -} -static inline vec_uchar16 -spu_or (vec_uchar16 a, vec_uchar16 b) -{ - return __builtin_spu_or_0 (a, b); -} -static inline vec_char16 -spu_or (vec_char16 a, vec_char16 b) -{ - return __builtin_spu_or_1 (a, b); -} -static inline vec_ushort8 -spu_or (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_or_2 (a, b); -} -static inline vec_short8 -spu_or (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_or_3 (a, b); -} -static inline vec_uint4 -spu_or (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_or_4 (a, b); -} -static inline vec_int4 -spu_or (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_or_5 (a, b); -} -static inline vec_ullong2 -spu_or (vec_ullong2 a, vec_ullong2 b) -{ - return __builtin_spu_or_6 (a, b); -} -static inline vec_llong2 -spu_or (vec_llong2 a, vec_llong2 b) -{ - return __builtin_spu_or_7 (a, b); -} -static inline vec_float4 -spu_or (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_or_8 (a, b); -} -static inline vec_double2 -spu_or (vec_double2 a, vec_double2 b) -{ - return __builtin_spu_or_9 (a, b); -} -static inline vec_uchar16 -spu_or (vec_uchar16 a, unsigned char b) -{ - return __builtin_spu_or_10 (a, b); -} -static inline vec_char16 -spu_or (vec_char16 a, signed char b) -{ - return __builtin_spu_or_11 (a, b); -} -static inline vec_ushort8 -spu_or (vec_ushort8 a, unsigned short b) -{ - return __builtin_spu_or_12 (a, b); -} -static inline vec_short8 -spu_or (vec_short8 a, short b) -{ - return __builtin_spu_or_13 (a, b); -} -static inline vec_uint4 -spu_or (vec_uint4 a, unsigned int b) -{ - return __builtin_spu_or_14 (a, b); -} -static inline vec_int4 -spu_or (vec_int4 a, int b) -{ - return __builtin_spu_or_15 (a, b); -} -static inline vec_llong2 -spu_orc (vec_llong2 a, vec_llong2 b) -{ - return __builtin_spu_orc_0 (a, b); -} -static inline vec_ullong2 -spu_orc (vec_ullong2 a, vec_ullong2 b) -{ - return __builtin_spu_orc_1 (a, b); -} -static inline vec_int4 -spu_orc (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_orc_2 (a, b); -} -static inline vec_uint4 -spu_orc (vec_uint4 a, 
vec_uint4 b) -{ - return __builtin_spu_orc_3 (a, b); -} -static inline vec_short8 -spu_orc (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_orc_4 (a, b); -} -static inline vec_ushort8 -spu_orc (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_orc_5 (a, b); -} -static inline vec_char16 -spu_orc (vec_char16 a, vec_char16 b) -{ - return __builtin_spu_orc_6 (a, b); -} -static inline vec_uchar16 -spu_orc (vec_uchar16 a, vec_uchar16 b) -{ - return __builtin_spu_orc_7 (a, b); -} -static inline vec_float4 -spu_orc (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_orc_8 (a, b); -} -static inline vec_double2 -spu_orc (vec_double2 a, vec_double2 b) -{ - return __builtin_spu_orc_9 (a, b); -} -static inline vec_int4 -spu_orx (vec_int4 a) -{ - return __builtin_spu_orx_0 (a); -} -static inline vec_uint4 -spu_orx (vec_uint4 a) -{ - return __builtin_spu_orx_1 (a); -} -static inline vec_uchar16 -spu_xor (vec_uchar16 a, vec_uchar16 b) -{ - return __builtin_spu_xor_0 (a, b); -} -static inline vec_char16 -spu_xor (vec_char16 a, vec_char16 b) -{ - return __builtin_spu_xor_1 (a, b); -} -static inline vec_ushort8 -spu_xor (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_xor_2 (a, b); -} -static inline vec_short8 -spu_xor (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_xor_3 (a, b); -} -static inline vec_uint4 -spu_xor (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_xor_4 (a, b); -} -static inline vec_int4 -spu_xor (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_xor_5 (a, b); -} -static inline vec_ullong2 -spu_xor (vec_ullong2 a, vec_ullong2 b) -{ - return __builtin_spu_xor_6 (a, b); -} -static inline vec_llong2 -spu_xor (vec_llong2 a, vec_llong2 b) -{ - return __builtin_spu_xor_7 (a, b); -} -static inline vec_float4 -spu_xor (vec_float4 a, vec_float4 b) -{ - return __builtin_spu_xor_8 (a, b); -} -static inline vec_double2 -spu_xor (vec_double2 a, vec_double2 b) -{ - return __builtin_spu_xor_9 (a, b); -} -static inline vec_uchar16 -spu_xor (vec_uchar16 a, unsigned char b) -{ - return __builtin_spu_xor_10 (a, b); -} -static inline vec_char16 -spu_xor (vec_char16 a, signed char b) -{ - return __builtin_spu_xor_11 (a, b); -} -static inline vec_ushort8 -spu_xor (vec_ushort8 a, unsigned short b) -{ - return __builtin_spu_xor_12 (a, b); -} -static inline vec_short8 -spu_xor (vec_short8 a, short b) -{ - return __builtin_spu_xor_13 (a, b); -} -static inline vec_uint4 -spu_xor (vec_uint4 a, unsigned int b) -{ - return __builtin_spu_xor_14 (a, b); -} -static inline vec_int4 -spu_xor (vec_int4 a, int b) -{ - return __builtin_spu_xor_15 (a, b); -} -static inline vec_ushort8 -spu_rl (vec_ushort8 a, vec_short8 b) -{ - return __builtin_spu_rl_0 (a, b); -} -static inline vec_short8 -spu_rl (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_rl_1 (a, b); -} -static inline vec_uint4 -spu_rl (vec_uint4 a, vec_int4 b) -{ - return __builtin_spu_rl_2 (a, b); -} -static inline vec_int4 -spu_rl (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_rl_3 (a, b); -} -static inline vec_ushort8 -spu_rl (vec_ushort8 a, short b) -{ - return __builtin_spu_rl_4 (a, b); -} -static inline vec_short8 -spu_rl (vec_short8 a, short b) -{ - return __builtin_spu_rl_5 (a, b); -} -static inline vec_uint4 -spu_rl (vec_uint4 a, int b) -{ - return __builtin_spu_rl_6 (a, b); -} -static inline vec_int4 -spu_rl (vec_int4 a, int b) -{ - return __builtin_spu_rl_7 (a, b); -} -static inline vec_uchar16 -spu_rlqw (vec_uchar16 a, int b) -{ - return __builtin_spu_rlqw_0 (a, b); -} -static inline vec_char16 -spu_rlqw (vec_char16 
a, int b) -{ - return __builtin_spu_rlqw_1 (a, b); -} -static inline vec_ushort8 -spu_rlqw (vec_ushort8 a, int b) -{ - return __builtin_spu_rlqw_2 (a, b); -} -static inline vec_short8 -spu_rlqw (vec_short8 a, int b) -{ - return __builtin_spu_rlqw_3 (a, b); -} -static inline vec_uint4 -spu_rlqw (vec_uint4 a, int b) -{ - return __builtin_spu_rlqw_4 (a, b); -} -static inline vec_int4 -spu_rlqw (vec_int4 a, int b) -{ - return __builtin_spu_rlqw_5 (a, b); -} -static inline vec_ullong2 -spu_rlqw (vec_ullong2 a, int b) -{ - return __builtin_spu_rlqw_6 (a, b); -} -static inline vec_llong2 -spu_rlqw (vec_llong2 a, int b) -{ - return __builtin_spu_rlqw_7 (a, b); -} -static inline vec_float4 -spu_rlqw (vec_float4 a, int b) -{ - return __builtin_spu_rlqw_8 (a, b); -} -static inline vec_double2 -spu_rlqw (vec_double2 a, int b) -{ - return __builtin_spu_rlqw_9 (a, b); -} -static inline vec_uchar16 -spu_rlqwbyte (vec_uchar16 a, int b) -{ - return __builtin_spu_rlqwbyte_0 (a, b); -} -static inline vec_char16 -spu_rlqwbyte (vec_char16 a, int b) -{ - return __builtin_spu_rlqwbyte_1 (a, b); -} -static inline vec_ushort8 -spu_rlqwbyte (vec_ushort8 a, int b) -{ - return __builtin_spu_rlqwbyte_2 (a, b); -} -static inline vec_short8 -spu_rlqwbyte (vec_short8 a, int b) -{ - return __builtin_spu_rlqwbyte_3 (a, b); -} -static inline vec_uint4 -spu_rlqwbyte (vec_uint4 a, int b) -{ - return __builtin_spu_rlqwbyte_4 (a, b); -} -static inline vec_int4 -spu_rlqwbyte (vec_int4 a, int b) -{ - return __builtin_spu_rlqwbyte_5 (a, b); -} -static inline vec_ullong2 -spu_rlqwbyte (vec_ullong2 a, int b) -{ - return __builtin_spu_rlqwbyte_6 (a, b); -} -static inline vec_llong2 -spu_rlqwbyte (vec_llong2 a, int b) -{ - return __builtin_spu_rlqwbyte_7 (a, b); -} -static inline vec_float4 -spu_rlqwbyte (vec_float4 a, int b) -{ - return __builtin_spu_rlqwbyte_8 (a, b); -} -static inline vec_double2 -spu_rlqwbyte (vec_double2 a, int b) -{ - return __builtin_spu_rlqwbyte_9 (a, b); -} -static inline vec_uchar16 -spu_rlqwbytebc (vec_uchar16 a, int b) -{ - return __builtin_spu_rlqwbytebc_0 (a, b); -} -static inline vec_char16 -spu_rlqwbytebc (vec_char16 a, int b) -{ - return __builtin_spu_rlqwbytebc_1 (a, b); -} -static inline vec_ushort8 -spu_rlqwbytebc (vec_ushort8 a, int b) -{ - return __builtin_spu_rlqwbytebc_2 (a, b); -} -static inline vec_short8 -spu_rlqwbytebc (vec_short8 a, int b) -{ - return __builtin_spu_rlqwbytebc_3 (a, b); -} -static inline vec_uint4 -spu_rlqwbytebc (vec_uint4 a, int b) -{ - return __builtin_spu_rlqwbytebc_4 (a, b); -} -static inline vec_int4 -spu_rlqwbytebc (vec_int4 a, int b) -{ - return __builtin_spu_rlqwbytebc_5 (a, b); -} -static inline vec_ullong2 -spu_rlqwbytebc (vec_ullong2 a, int b) -{ - return __builtin_spu_rlqwbytebc_6 (a, b); -} -static inline vec_llong2 -spu_rlqwbytebc (vec_llong2 a, int b) -{ - return __builtin_spu_rlqwbytebc_7 (a, b); -} -static inline vec_float4 -spu_rlqwbytebc (vec_float4 a, int b) -{ - return __builtin_spu_rlqwbytebc_8 (a, b); -} -static inline vec_double2 -spu_rlqwbytebc (vec_double2 a, int b) -{ - return __builtin_spu_rlqwbytebc_9 (a, b); -} -static inline vec_ushort8 -spu_rlmask (vec_ushort8 a, vec_short8 b) -{ - return __builtin_spu_rlmask_0 (a, b); -} -static inline vec_short8 -spu_rlmask (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_rlmask_1 (a, b); -} -static inline vec_uint4 -spu_rlmask (vec_uint4 a, vec_int4 b) -{ - return __builtin_spu_rlmask_2 (a, b); -} -static inline vec_int4 -spu_rlmask (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_rlmask_3 (a, b); 
-} -static inline vec_ushort8 -spu_rlmask (vec_ushort8 a, int b) -{ - return __builtin_spu_rlmask_4 (a, b); -} -static inline vec_short8 -spu_rlmask (vec_short8 a, int b) -{ - return __builtin_spu_rlmask_5 (a, b); -} -static inline vec_uint4 -spu_rlmask (vec_uint4 a, int b) -{ - return __builtin_spu_rlmask_6 (a, b); -} -static inline vec_int4 -spu_rlmask (vec_int4 a, int b) -{ - return __builtin_spu_rlmask_7 (a, b); -} -static inline vec_ushort8 -spu_rlmaska (vec_ushort8 a, vec_short8 b) -{ - return __builtin_spu_rlmaska_0 (a, b); -} -static inline vec_short8 -spu_rlmaska (vec_short8 a, vec_short8 b) -{ - return __builtin_spu_rlmaska_1 (a, b); -} -static inline vec_uint4 -spu_rlmaska (vec_uint4 a, vec_int4 b) -{ - return __builtin_spu_rlmaska_2 (a, b); -} -static inline vec_int4 -spu_rlmaska (vec_int4 a, vec_int4 b) -{ - return __builtin_spu_rlmaska_3 (a, b); -} -static inline vec_ushort8 -spu_rlmaska (vec_ushort8 a, int b) -{ - return __builtin_spu_rlmaska_4 (a, b); -} -static inline vec_short8 -spu_rlmaska (vec_short8 a, int b) -{ - return __builtin_spu_rlmaska_5 (a, b); -} -static inline vec_uint4 -spu_rlmaska (vec_uint4 a, int b) -{ - return __builtin_spu_rlmaska_6 (a, b); -} -static inline vec_int4 -spu_rlmaska (vec_int4 a, int b) -{ - return __builtin_spu_rlmaska_7 (a, b); -} -static inline vec_uchar16 -spu_rlmaskqw (vec_uchar16 a, int b) -{ - return __builtin_spu_rlmaskqw_0 (a, b); -} -static inline vec_char16 -spu_rlmaskqw (vec_char16 a, int b) -{ - return __builtin_spu_rlmaskqw_1 (a, b); -} -static inline vec_ushort8 -spu_rlmaskqw (vec_ushort8 a, int b) -{ - return __builtin_spu_rlmaskqw_2 (a, b); -} -static inline vec_short8 -spu_rlmaskqw (vec_short8 a, int b) -{ - return __builtin_spu_rlmaskqw_3 (a, b); -} -static inline vec_uint4 -spu_rlmaskqw (vec_uint4 a, int b) -{ - return __builtin_spu_rlmaskqw_4 (a, b); -} -static inline vec_int4 -spu_rlmaskqw (vec_int4 a, int b) -{ - return __builtin_spu_rlmaskqw_5 (a, b); -} -static inline vec_ullong2 -spu_rlmaskqw (vec_ullong2 a, int b) -{ - return __builtin_spu_rlmaskqw_6 (a, b); -} -static inline vec_llong2 -spu_rlmaskqw (vec_llong2 a, int b) -{ - return __builtin_spu_rlmaskqw_7 (a, b); -} -static inline vec_float4 -spu_rlmaskqw (vec_float4 a, int b) -{ - return __builtin_spu_rlmaskqw_8 (a, b); -} -static inline vec_double2 -spu_rlmaskqw (vec_double2 a, int b) -{ - return __builtin_spu_rlmaskqw_9 (a, b); -} -static inline vec_uchar16 -spu_rlmaskqwbyte (vec_uchar16 a, int b) -{ - return __builtin_spu_rlmaskqwbyte_0 (a, b); -} -static inline vec_char16 -spu_rlmaskqwbyte (vec_char16 a, int b) -{ - return __builtin_spu_rlmaskqwbyte_1 (a, b); -} -static inline vec_ushort8 -spu_rlmaskqwbyte (vec_ushort8 a, int b) -{ - return __builtin_spu_rlmaskqwbyte_2 (a, b); -} -static inline vec_short8 -spu_rlmaskqwbyte (vec_short8 a, int b) -{ - return __builtin_spu_rlmaskqwbyte_3 (a, b); -} -static inline vec_uint4 -spu_rlmaskqwbyte (vec_uint4 a, int b) -{ - return __builtin_spu_rlmaskqwbyte_4 (a, b); -} -static inline vec_int4 -spu_rlmaskqwbyte (vec_int4 a, int b) -{ - return __builtin_spu_rlmaskqwbyte_5 (a, b); -} -static inline vec_ullong2 -spu_rlmaskqwbyte (vec_ullong2 a, int b) -{ - return __builtin_spu_rlmaskqwbyte_6 (a, b); -} -static inline vec_llong2 -spu_rlmaskqwbyte (vec_llong2 a, int b) -{ - return __builtin_spu_rlmaskqwbyte_7 (a, b); -} -static inline vec_float4 -spu_rlmaskqwbyte (vec_float4 a, int b) -{ - return __builtin_spu_rlmaskqwbyte_8 (a, b); -} -static inline vec_double2 -spu_rlmaskqwbyte (vec_double2 a, int b) -{ - return 
__builtin_spu_rlmaskqwbyte_9 (a, b); -} -static inline vec_uchar16 -spu_rlmaskqwbytebc (vec_uchar16 a, int b) -{ - return __builtin_spu_rlmaskqwbytebc_0 (a, b); -} -static inline vec_char16 -spu_rlmaskqwbytebc (vec_char16 a, int b) -{ - return __builtin_spu_rlmaskqwbytebc_1 (a, b); -} -static inline vec_ushort8 -spu_rlmaskqwbytebc (vec_ushort8 a, int b) -{ - return __builtin_spu_rlmaskqwbytebc_2 (a, b); -} -static inline vec_short8 -spu_rlmaskqwbytebc (vec_short8 a, int b) -{ - return __builtin_spu_rlmaskqwbytebc_3 (a, b); -} -static inline vec_uint4 -spu_rlmaskqwbytebc (vec_uint4 a, int b) -{ - return __builtin_spu_rlmaskqwbytebc_4 (a, b); -} -static inline vec_int4 -spu_rlmaskqwbytebc (vec_int4 a, int b) -{ - return __builtin_spu_rlmaskqwbytebc_5 (a, b); -} -static inline vec_ullong2 -spu_rlmaskqwbytebc (vec_ullong2 a, int b) -{ - return __builtin_spu_rlmaskqwbytebc_6 (a, b); -} -static inline vec_llong2 -spu_rlmaskqwbytebc (vec_llong2 a, int b) -{ - return __builtin_spu_rlmaskqwbytebc_7 (a, b); -} -static inline vec_float4 -spu_rlmaskqwbytebc (vec_float4 a, int b) -{ - return __builtin_spu_rlmaskqwbytebc_8 (a, b); -} -static inline vec_double2 -spu_rlmaskqwbytebc (vec_double2 a, int b) -{ - return __builtin_spu_rlmaskqwbytebc_9 (a, b); -} -static inline vec_ushort8 -spu_sl (vec_ushort8 a, vec_ushort8 b) -{ - return __builtin_spu_sl_0 (a, b); -} -static inline vec_short8 -spu_sl (vec_short8 a, vec_ushort8 b) -{ - return __builtin_spu_sl_1 (a, b); -} -static inline vec_uint4 -spu_sl (vec_uint4 a, vec_uint4 b) -{ - return __builtin_spu_sl_2 (a, b); -} -static inline vec_int4 -spu_sl (vec_int4 a, vec_uint4 b) -{ - return __builtin_spu_sl_3 (a, b); -} -static inline vec_ushort8 -spu_sl (vec_ushort8 a, unsigned int b) -{ - return __builtin_spu_sl_4 (a, b); -} -static inline vec_short8 -spu_sl (vec_short8 a, unsigned int b) -{ - return __builtin_spu_sl_5 (a, b); -} -static inline vec_uint4 -spu_sl (vec_uint4 a, unsigned int b) -{ - return __builtin_spu_sl_6 (a, b); -} -static inline vec_int4 -spu_sl (vec_int4 a, unsigned int b) -{ - return __builtin_spu_sl_7 (a, b); -} -static inline vec_llong2 -spu_slqw (vec_llong2 a, unsigned int b) -{ - return __builtin_spu_slqw_0 (a, b); -} -static inline vec_ullong2 -spu_slqw (vec_ullong2 a, unsigned int b) -{ - return __builtin_spu_slqw_1 (a, b); -} -static inline vec_int4 -spu_slqw (vec_int4 a, unsigned int b) -{ - return __builtin_spu_slqw_2 (a, b); -} -static inline vec_uint4 -spu_slqw (vec_uint4 a, unsigned int b) -{ - return __builtin_spu_slqw_3 (a, b); -} -static inline vec_short8 -spu_slqw (vec_short8 a, unsigned int b) -{ - return __builtin_spu_slqw_4 (a, b); -} -static inline vec_ushort8 -spu_slqw (vec_ushort8 a, unsigned int b) -{ - return __builtin_spu_slqw_5 (a, b); -} -static inline vec_char16 -spu_slqw (vec_char16 a, unsigned int b) -{ - return __builtin_spu_slqw_6 (a, b); -} -static inline vec_uchar16 -spu_slqw (vec_uchar16 a, unsigned int b) -{ - return __builtin_spu_slqw_7 (a, b); -} -static inline vec_float4 -spu_slqw (vec_float4 a, unsigned int b) -{ - return __builtin_spu_slqw_8 (a, b); -} -static inline vec_double2 -spu_slqw (vec_double2 a, unsigned int b) -{ - return __builtin_spu_slqw_9 (a, b); -} -static inline vec_llong2 -spu_slqwbyte (vec_llong2 a, unsigned int b) -{ - return __builtin_spu_slqwbyte_0 (a, b); -} -static inline vec_ullong2 -spu_slqwbyte (vec_ullong2 a, unsigned int b) -{ - return __builtin_spu_slqwbyte_1 (a, b); -} -static inline vec_int4 -spu_slqwbyte (vec_int4 a, unsigned int b) -{ - return 
__builtin_spu_slqwbyte_2 (a, b); -} -static inline vec_uint4 -spu_slqwbyte (vec_uint4 a, unsigned int b) -{ - return __builtin_spu_slqwbyte_3 (a, b); -} -static inline vec_short8 -spu_slqwbyte (vec_short8 a, unsigned int b) -{ - return __builtin_spu_slqwbyte_4 (a, b); -} -static inline vec_ushort8 -spu_slqwbyte (vec_ushort8 a, unsigned int b) -{ - return __builtin_spu_slqwbyte_5 (a, b); -} -static inline vec_char16 -spu_slqwbyte (vec_char16 a, unsigned int b) -{ - return __builtin_spu_slqwbyte_6 (a, b); -} -static inline vec_uchar16 -spu_slqwbyte (vec_uchar16 a, unsigned int b) -{ - return __builtin_spu_slqwbyte_7 (a, b); -} -static inline vec_float4 -spu_slqwbyte (vec_float4 a, unsigned int b) -{ - return __builtin_spu_slqwbyte_8 (a, b); -} -static inline vec_double2 -spu_slqwbyte (vec_double2 a, unsigned int b) -{ - return __builtin_spu_slqwbyte_9 (a, b); -} -static inline vec_llong2 -spu_slqwbytebc (vec_llong2 a, unsigned int b) -{ - return __builtin_spu_slqwbytebc_0 (a, b); -} -static inline vec_ullong2 -spu_slqwbytebc (vec_ullong2 a, unsigned int b) -{ - return __builtin_spu_slqwbytebc_1 (a, b); -} -static inline vec_int4 -spu_slqwbytebc (vec_int4 a, unsigned int b) -{ - return __builtin_spu_slqwbytebc_2 (a, b); -} -static inline vec_uint4 -spu_slqwbytebc (vec_uint4 a, unsigned int b) -{ - return __builtin_spu_slqwbytebc_3 (a, b); -} -static inline vec_short8 -spu_slqwbytebc (vec_short8 a, unsigned int b) -{ - return __builtin_spu_slqwbytebc_4 (a, b); -} -static inline vec_ushort8 -spu_slqwbytebc (vec_ushort8 a, unsigned int b) -{ - return __builtin_spu_slqwbytebc_5 (a, b); -} -static inline vec_char16 -spu_slqwbytebc (vec_char16 a, unsigned int b) -{ - return __builtin_spu_slqwbytebc_6 (a, b); -} -static inline vec_uchar16 -spu_slqwbytebc (vec_uchar16 a, unsigned int b) -{ - return __builtin_spu_slqwbytebc_7 (a, b); -} -static inline vec_float4 -spu_slqwbytebc (vec_float4 a, unsigned int b) -{ - return __builtin_spu_slqwbytebc_8 (a, b); -} -static inline vec_double2 -spu_slqwbytebc (vec_double2 a, unsigned int b) -{ - return __builtin_spu_slqwbytebc_9 (a, b); -} -static inline vec_uchar16 -spu_splats (unsigned char a) -{ - return __builtin_spu_splats_0 (a); -} -static inline vec_char16 -spu_splats (signed char a) -{ - return __builtin_spu_splats_1 (a); -} -static inline vec_char16 -spu_splats (char a) -{ - return __builtin_spu_splats_1 (a); -} -static inline vec_ushort8 -spu_splats (unsigned short a) -{ - return __builtin_spu_splats_2 (a); -} -static inline vec_short8 -spu_splats (short a) -{ - return __builtin_spu_splats_3 (a); -} -static inline vec_uint4 -spu_splats (unsigned int a) -{ - return __builtin_spu_splats_4 (a); -} -static inline vec_int4 -spu_splats (int a) -{ - return __builtin_spu_splats_5 (a); -} -static inline vec_ullong2 -spu_splats (unsigned long long a) -{ - return __builtin_spu_splats_6 (a); -} -static inline vec_llong2 -spu_splats (long long a) -{ - return __builtin_spu_splats_7 (a); -} -static inline vec_float4 -spu_splats (float a) -{ - return __builtin_spu_splats_8 (a); -} -static inline vec_double2 -spu_splats (double a) -{ - return __builtin_spu_splats_9 (a); -} -static inline unsigned char -spu_extract (vec_uchar16 a, int b) -{ - return __builtin_spu_extract_0 (a, b); -} -static inline signed char -spu_extract (vec_char16 a, int b) -{ - return __builtin_spu_extract_1 (a, b); -} -static inline unsigned short -spu_extract (vec_ushort8 a, int b) -{ - return __builtin_spu_extract_2 (a, b); -} -static inline short -spu_extract (vec_short8 a, int b) -{ - return 
__builtin_spu_extract_3 (a, b);
-}
-static inline unsigned int
-spu_extract (vec_uint4 a, int b)
-{
-  return __builtin_spu_extract_4 (a, b);
-}
-static inline int
-spu_extract (vec_int4 a, int b)
-{
-  return __builtin_spu_extract_5 (a, b);
-}
-static inline unsigned long long
-spu_extract (vec_ullong2 a, int b)
-{
-  return __builtin_spu_extract_6 (a, b);
-}
-static inline long long
-spu_extract (vec_llong2 a, int b)
-{
-  return __builtin_spu_extract_7 (a, b);
-}
-static inline float
-spu_extract (vec_float4 a, int b)
-{
-  return __builtin_spu_extract_8 (a, b);
-}
-static inline double
-spu_extract (vec_double2 a, int b)
-{
-  return __builtin_spu_extract_9 (a, b);
-}
-static inline vec_uchar16
-spu_insert (unsigned char a, vec_uchar16 b, int c)
-{
-  return __builtin_spu_insert_0 (a, b, c);
-}
-static inline vec_char16
-spu_insert (signed char a, vec_char16 b, int c)
-{
-  return __builtin_spu_insert_1 (a, b, c);
-}
-static inline vec_ushort8
-spu_insert (unsigned short a, vec_ushort8 b, int c)
-{
-  return __builtin_spu_insert_2 (a, b, c);
-}
-static inline vec_short8
-spu_insert (short a, vec_short8 b, int c)
-{
-  return __builtin_spu_insert_3 (a, b, c);
-}
-static inline vec_uint4
-spu_insert (unsigned int a, vec_uint4 b, int c)
-{
-  return __builtin_spu_insert_4 (a, b, c);
-}
-static inline vec_int4
-spu_insert (int a, vec_int4 b, int c)
-{
-  return __builtin_spu_insert_5 (a, b, c);
-}
-static inline vec_ullong2
-spu_insert (unsigned long long a, vec_ullong2 b, int c)
-{
-  return __builtin_spu_insert_6 (a, b, c);
-}
-static inline vec_llong2
-spu_insert (long long a, vec_llong2 b, int c)
-{
-  return __builtin_spu_insert_7 (a, b, c);
-}
-static inline vec_float4
-spu_insert (float a, vec_float4 b, int c)
-{
-  return __builtin_spu_insert_8 (a, b, c);
-}
-static inline vec_double2
-spu_insert (double a, vec_double2 b, int c)
-{
-  return __builtin_spu_insert_9 (a, b, c);
-}
-static inline vec_uchar16
-spu_promote (unsigned char a, int b)
-{
-  return __builtin_spu_promote_0 (a, b);
-}
-static inline vec_char16
-spu_promote (signed char a, int b)
-{
-  return __builtin_spu_promote_1 (a, b);
-}
-static inline vec_char16
-spu_promote (char a, int b)
-{
-  return __builtin_spu_promote_1 (a, b);
-}
-static inline vec_ushort8
-spu_promote (unsigned short a, int b)
-{
-  return __builtin_spu_promote_2 (a, b);
-}
-static inline vec_short8
-spu_promote (short a, int b)
-{
-  return __builtin_spu_promote_3 (a, b);
-}
-static inline vec_uint4
-spu_promote (unsigned int a, int b)
-{
-  return __builtin_spu_promote_4 (a, b);
-}
-static inline vec_int4
-spu_promote (int a, int b)
-{
-  return __builtin_spu_promote_5 (a, b);
-}
-static inline vec_ullong2
-spu_promote (unsigned long long a, int b)
-{
-  return __builtin_spu_promote_6 (a, b);
-}
-static inline vec_llong2
-spu_promote (long long a, int b)
-{
-  return __builtin_spu_promote_7 (a, b);
-}
-static inline vec_float4
-spu_promote (float a, int b)
-{
-  return __builtin_spu_promote_8 (a, b);
-}
-static inline vec_double2
-spu_promote (double a, int b)
-{
-  return __builtin_spu_promote_9 (a, b);
-}
-#endif /* __cplusplus */
-
 #ifdef __cplusplus
 extern "C" {
 #endif