[AArch64] Restructure arm_neon.h vector types.

2014-11-05  Tejas Belagod  <tejas.belagod@arm.com>

	* config/aarch64/aarch64-builtins.c
	(aarch64_build_scalar_type): Remove.
	(aarch64_scalar_builtin_types, aarch64_simd_type,
	aarch64_simd_type_info, aarch64_mangle_builtin_scalar_type,
	aarch64_mangle_builtin_vector_type,
	aarch64_mangle_builtin_type, aarch64_simd_builtin_std_type,
	aarch64_lookup_simd_builtin_type, aarch64_simd_builtin_type,
	aarch64_init_simd_builtin_types,
	aarch64_init_simd_builtin_scalar_types): New.
	(aarch64_init_simd_builtins): Refactor.
	(aarch64_init_crc32_builtins): Fixup with qualifier.
	* config/aarch64/aarch64-protos.h
	(aarch64_mangle_builtin_type): Export.
	* config/aarch64/aarch64-simd-builtin-types.def: New.
	* config/aarch64/aarch64.c (aarch64_simd_mangle_map): Remove.
	(aarch64_mangle_type): Refactor.
	* config/aarch64/arm_neon.h: Declare vector types based on
	internal types.
	* config/aarch64/t-aarch64: Update dependency.

From-SVN: r217114
This commit is contained in:
Tejas Belagod 2014-11-05 08:26:54 +00:00 committed by Tejas Belagod
parent 3cbdd8312a
commit f9d53c273b
7 changed files with 419 additions and 351 deletions

View file

@ -1,3 +1,25 @@
2014-11-05 Tejas Belagod <tejas.belagod@arm.com>
* config/aarch64/aarch64-builtins.c
(aarch64_build_scalar_type): Remove.
(aarch64_scalar_builtin_types, aarch64_simd_type,
aarch64_simd_type_info, aarch64_mangle_builtin_scalar_type,
aarch64_mangle_builtin_vector_type,
aarch64_mangle_builtin_type, aarch64_simd_builtin_std_type,
aarch64_lookup_simd_builtin_type, aarch64_simd_builtin_type,
aarch64_init_simd_builtin_types,
aarch64_init_simd_builtin_scalar_types): New.
(aarch64_init_simd_builtins): Refactor.
(aarch64_init_crc32_builtins): Fixup with qualifier.
* config/aarch64/aarch64-protos.h
(aarch64_mangle_builtin_type): Export.
* config/aarch64/aarch64-simd-builtin-types.def: New.
* config/aarch64/aarch64.c (aarch64_simd_mangle_map): Remove.
(aarch64_mangle_type): Refactor.
* config/aarch64/arm_neon.h: Declare vector types based on
internal types.
* config/aarch64/t-aarch64: Update dependency.
2014-11-04 Pat Haugen <pthaugen@us.ibm.com>
* config/rs6000/rs6000.c (atomic_hold_decl, atomic_clear_decl,

View file

@ -363,179 +363,320 @@ static GTY(()) tree aarch64_builtin_decls[AARCH64_BUILTIN_MAX];
#define NUM_DREG_TYPES 6
#define NUM_QREG_TYPES 6
/* Return a tree for a signed or unsigned argument of either
the mode specified by MODE, or the inner mode of MODE. */
tree
aarch64_build_scalar_type (machine_mode mode,
bool unsigned_p,
bool poly_p)
/* Internal scalar builtin types. These types are used to support
neon intrinsic builtins. They are _not_ user-visible types. Therefore
the mangling for these types are implementation defined. */
const char *aarch64_scalar_builtin_types[] = {
"__builtin_aarch64_simd_qi",
"__builtin_aarch64_simd_hi",
"__builtin_aarch64_simd_si",
"__builtin_aarch64_simd_sf",
"__builtin_aarch64_simd_di",
"__builtin_aarch64_simd_df",
"__builtin_aarch64_simd_poly8",
"__builtin_aarch64_simd_poly16",
"__builtin_aarch64_simd_poly64",
"__builtin_aarch64_simd_poly128",
"__builtin_aarch64_simd_ti",
"__builtin_aarch64_simd_uqi",
"__builtin_aarch64_simd_uhi",
"__builtin_aarch64_simd_usi",
"__builtin_aarch64_simd_udi",
"__builtin_aarch64_simd_ei",
"__builtin_aarch64_simd_oi",
"__builtin_aarch64_simd_ci",
"__builtin_aarch64_simd_xi",
NULL
};
#define ENTRY(E, M, Q, G) E,
enum aarch64_simd_type
{
#undef INT_TYPES
#define INT_TYPES \
AARCH64_TYPE_BUILDER (QI) \
AARCH64_TYPE_BUILDER (HI) \
AARCH64_TYPE_BUILDER (SI) \
AARCH64_TYPE_BUILDER (DI) \
AARCH64_TYPE_BUILDER (EI) \
AARCH64_TYPE_BUILDER (OI) \
AARCH64_TYPE_BUILDER (CI) \
AARCH64_TYPE_BUILDER (XI) \
AARCH64_TYPE_BUILDER (TI) \
#include "aarch64-simd-builtin-types.def"
ARM_NEON_H_TYPES_LAST
};
#undef ENTRY
/* Statically declare all the possible types we might need. */
#undef AARCH64_TYPE_BUILDER
#define AARCH64_TYPE_BUILDER(X) \
static tree X##_aarch64_type_node_p = NULL; \
static tree X##_aarch64_type_node_s = NULL; \
static tree X##_aarch64_type_node_u = NULL;
INT_TYPES
static tree float_aarch64_type_node = NULL;
static tree double_aarch64_type_node = NULL;
gcc_assert (!VECTOR_MODE_P (mode));
/* If we've already initialised this type, don't initialise it again,
otherwise ask for a new type of the correct size. */
#undef AARCH64_TYPE_BUILDER
#define AARCH64_TYPE_BUILDER(X) \
case X##mode: \
if (unsigned_p) \
return (X##_aarch64_type_node_u \
? X##_aarch64_type_node_u \
: X##_aarch64_type_node_u \
= make_unsigned_type (GET_MODE_PRECISION (mode))); \
else if (poly_p) \
return (X##_aarch64_type_node_p \
? X##_aarch64_type_node_p \
: X##_aarch64_type_node_p \
= make_unsigned_type (GET_MODE_PRECISION (mode))); \
else \
return (X##_aarch64_type_node_s \
? X##_aarch64_type_node_s \
: X##_aarch64_type_node_s \
= make_signed_type (GET_MODE_PRECISION (mode))); \
break;
switch (mode)
{
INT_TYPES
case SFmode:
if (!float_aarch64_type_node)
{
float_aarch64_type_node = make_node (REAL_TYPE);
TYPE_PRECISION (float_aarch64_type_node) = FLOAT_TYPE_SIZE;
layout_type (float_aarch64_type_node);
}
return float_aarch64_type_node;
break;
case DFmode:
if (!double_aarch64_type_node)
{
double_aarch64_type_node = make_node (REAL_TYPE);
TYPE_PRECISION (double_aarch64_type_node) = DOUBLE_TYPE_SIZE;
layout_type (double_aarch64_type_node);
}
return double_aarch64_type_node;
break;
default:
gcc_unreachable ();
}
}
tree
aarch64_build_vector_type (machine_mode mode,
bool unsigned_p,
bool poly_p)
struct aarch64_simd_type_info
{
enum aarch64_simd_type type;
/* Internal type name. */
const char *name;
/* Internal type name(mangled). The mangled names conform to the
AAPCS64 (see "Procedure Call Standard for the ARM 64-bit Architecture",
Appendix A). To qualify for emission with the mangled names defined in
that document, a vector type must not only be of the correct mode but also
be of the correct internal AdvSIMD vector type (e.g. __Int8x8_t); these
types are registered by aarch64_init_simd_builtin_types (). In other
words, vector types defined in other ways e.g. via vector_size attribute
will get default mangled names. */
const char *mangle;
/* Internal type. */
tree itype;
/* Element type. */
tree eltype;
#define VECTOR_TYPES \
AARCH64_TYPE_BUILDER (V16QI) \
AARCH64_TYPE_BUILDER (V8HI) \
AARCH64_TYPE_BUILDER (V4SI) \
AARCH64_TYPE_BUILDER (V2DI) \
AARCH64_TYPE_BUILDER (V8QI) \
AARCH64_TYPE_BUILDER (V4HI) \
AARCH64_TYPE_BUILDER (V2SI) \
\
AARCH64_TYPE_BUILDER (V4SF) \
AARCH64_TYPE_BUILDER (V2DF) \
AARCH64_TYPE_BUILDER (V2SF) \
/* Declare our "cache" of values. */
#undef AARCH64_TYPE_BUILDER
#define AARCH64_TYPE_BUILDER(X) \
static tree X##_aarch64_type_node_s = NULL; \
static tree X##_aarch64_type_node_u = NULL; \
static tree X##_aarch64_type_node_p = NULL;
/* Machine mode the internal type maps to. */
enum machine_mode mode;
VECTOR_TYPES
/* Qualifiers. */
enum aarch64_type_qualifiers q;
};
gcc_assert (VECTOR_MODE_P (mode));
#define ENTRY(E, M, Q, G) \
{E, "__" #E, #G "__" #E, NULL_TREE, NULL_TREE, M##mode, qualifier_##Q},
static struct aarch64_simd_type_info aarch64_simd_types [] = {
#include "aarch64-simd-builtin-types.def"
};
#undef ENTRY
#undef AARCH64_TYPE_BUILDER
#define AARCH64_TYPE_BUILDER(X) \
case X##mode: \
if (unsigned_p) \
return X##_aarch64_type_node_u \
? X##_aarch64_type_node_u \
: X##_aarch64_type_node_u \
= build_vector_type_for_mode (aarch64_build_scalar_type \
(GET_MODE_INNER (mode), \
unsigned_p, poly_p), mode); \
else if (poly_p) \
return X##_aarch64_type_node_p \
? X##_aarch64_type_node_p \
: X##_aarch64_type_node_p \
= build_vector_type_for_mode (aarch64_build_scalar_type \
(GET_MODE_INNER (mode), \
unsigned_p, poly_p), mode); \
else \
return X##_aarch64_type_node_s \
? X##_aarch64_type_node_s \
: X##_aarch64_type_node_s \
= build_vector_type_for_mode (aarch64_build_scalar_type \
(GET_MODE_INNER (mode), \
unsigned_p, poly_p), mode); \
break;
static tree aarch64_simd_intOI_type_node = NULL_TREE;
static tree aarch64_simd_intEI_type_node = NULL_TREE;
static tree aarch64_simd_intCI_type_node = NULL_TREE;
static tree aarch64_simd_intXI_type_node = NULL_TREE;
/* If TYPE is one of the internal "__builtin_aarch64_simd_*" scalar
   builtin types, return its name (which doubles as its implementation-
   defined mangling); otherwise return NULL.

   The name checks on TYPE are loop-invariant, so extract the type's
   identifier once before scanning the NULL-terminated
   aarch64_scalar_builtin_types table instead of re-testing it on
   every iteration.  */
static const char *
aarch64_mangle_builtin_scalar_type (const_tree type)
{
  int i = 0;
  const char *type_name;

  /* Only a type carrying a named TYPE_DECL can match a builtin.  */
  if (TREE_CODE (TYPE_NAME (type)) != TYPE_DECL
      || !DECL_NAME (TYPE_NAME (type)))
    return NULL;

  type_name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));

  while (aarch64_scalar_builtin_types[i] != NULL)
    {
      if (!strcmp (type_name, aarch64_scalar_builtin_types[i]))
	return aarch64_scalar_builtin_types[i];
      i++;
    }
  return NULL;
}
/* If TYPE is one of the internal AdvSIMD vector types registered by
   aarch64_init_simd_builtin_types (matched by both machine mode and
   internal type name, e.g. "__Int8x8_t"), return its AAPCS64 mangled
   name; otherwise return NULL so the caller falls back to default
   mangling.

   The TYPE_NAME/TYPE_DECL tests are loop-invariant, so perform them
   once up front rather than once per table entry.  */
static const char *
aarch64_mangle_builtin_vector_type (const_tree type)
{
  int i;
  int nelts = sizeof (aarch64_simd_types) / sizeof (aarch64_simd_types[0]);
  const char *type_name;

  /* Anonymous vector types (e.g. created via the vector_size
     attribute) have no named TYPE_DECL and can never match.  */
  if (!TYPE_NAME (type)
      || TREE_CODE (TYPE_NAME (type)) != TYPE_DECL
      || !DECL_NAME (TYPE_NAME (type)))
    return NULL;

  type_name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)));

  for (i = 0; i < nelts; i++)
    if (aarch64_simd_types[i].mode == TYPE_MODE (type)
	&& !strcmp (type_name, aarch64_simd_types[i].name))
      return aarch64_simd_types[i].mangle;

  return NULL;
}
/* Entry point for AArch64 builtin-type mangling (called from
   aarch64_mangle_type).  Try the vector-type table first, then the
   scalar builtin types; return the mangled name on a match, or NULL
   to request the language's default mangling.  */
const char *
aarch64_mangle_builtin_type (const_tree type)
{
  const char *mangle = aarch64_mangle_builtin_vector_type (type);

  if (!mangle)
    mangle = aarch64_mangle_builtin_scalar_type (type);

  return mangle;
}
static tree
aarch64_simd_builtin_std_type (enum machine_mode mode,
enum aarch64_type_qualifiers q)
{
#define QUAL_TYPE(M) \
((q == qualifier_none) ? int##M##_type_node : unsigned_int##M##_type_node);
switch (mode)
{
default:
eltype = aarch64_build_scalar_type (GET_MODE_INNER (mode),
unsigned_p, poly_p);
return build_vector_type_for_mode (eltype, mode);
break;
VECTOR_TYPES
}
case QImode:
return QUAL_TYPE (QI);
case HImode:
return QUAL_TYPE (HI);
case SImode:
return QUAL_TYPE (SI);
case DImode:
return QUAL_TYPE (DI);
case TImode:
return QUAL_TYPE (TI);
case OImode:
return aarch64_simd_intOI_type_node;
case EImode:
return aarch64_simd_intEI_type_node;
case CImode:
return aarch64_simd_intCI_type_node;
case XImode:
return aarch64_simd_intXI_type_node;
case SFmode:
return float_type_node;
case DFmode:
return double_type_node;
default:
gcc_unreachable ();
}
#undef QUAL_TYPE
}
tree
aarch64_build_type (machine_mode mode, bool unsigned_p, bool poly_p)
static tree
aarch64_lookup_simd_builtin_type (enum machine_mode mode,
enum aarch64_type_qualifiers q)
{
if (VECTOR_MODE_P (mode))
return aarch64_build_vector_type (mode, unsigned_p, poly_p);
int i;
int nelts = sizeof (aarch64_simd_types) / sizeof (aarch64_simd_types[0]);
/* Non-poly scalar modes map to standard types not in the table. */
if (q != qualifier_poly && !VECTOR_MODE_P (mode))
return aarch64_simd_builtin_std_type (mode, q);
for (i = 0; i < nelts; i++)
if (aarch64_simd_types[i].mode == mode
&& aarch64_simd_types[i].q == q)
return aarch64_simd_types[i].itype;
return NULL_TREE;
}
static tree
aarch64_simd_builtin_type (enum machine_mode mode,
bool unsigned_p, bool poly_p)
{
if (poly_p)
return aarch64_lookup_simd_builtin_type (mode, qualifier_poly);
else if (unsigned_p)
return aarch64_lookup_simd_builtin_type (mode, qualifier_unsigned);
else
return aarch64_build_scalar_type (mode, unsigned_p, poly_p);
return aarch64_lookup_simd_builtin_type (mode, qualifier_none);
}
/* Build and register every internal AdvSIMD type listed in
   aarch64-simd-builtin-types.def: fill in the eltype/itype fields of
   aarch64_simd_types[], register each type with the front-end under
   its "__<Name>" internal name (so aarch64_mangle_builtin_vector_type
   can later match on the recorded TYPE_DECL), and build the large
   OI/EI/CI/XI integer container types.  */
static void
aarch64_init_simd_builtin_types (void)
{
int i;
int nelts = sizeof (aarch64_simd_types) / sizeof (aarch64_simd_types[0]);
tree tdecl;
/* Init all the element types built by the front-end. */
aarch64_simd_types[Int8x8_t].eltype = intQI_type_node;
aarch64_simd_types[Int8x16_t].eltype = intQI_type_node;
aarch64_simd_types[Int16x4_t].eltype = intHI_type_node;
aarch64_simd_types[Int16x8_t].eltype = intHI_type_node;
aarch64_simd_types[Int32x2_t].eltype = intSI_type_node;
aarch64_simd_types[Int32x4_t].eltype = intSI_type_node;
aarch64_simd_types[Int64x1_t].eltype = intDI_type_node;
aarch64_simd_types[Int64x2_t].eltype = intDI_type_node;
aarch64_simd_types[Uint8x8_t].eltype = unsigned_intQI_type_node;
aarch64_simd_types[Uint8x16_t].eltype = unsigned_intQI_type_node;
aarch64_simd_types[Uint16x4_t].eltype = unsigned_intHI_type_node;
aarch64_simd_types[Uint16x8_t].eltype = unsigned_intHI_type_node;
aarch64_simd_types[Uint32x2_t].eltype = unsigned_intSI_type_node;
aarch64_simd_types[Uint32x4_t].eltype = unsigned_intSI_type_node;
aarch64_simd_types[Uint64x1_t].eltype = unsigned_intDI_type_node;
aarch64_simd_types[Uint64x2_t].eltype = unsigned_intDI_type_node;
/* Poly types are a world of their own. */
/* The poly scalars are distinct copies of the corresponding unsigned
   integer types so they are distinguishable (e.g. for mangling); each
   serves as both its own internal type and the element type of the
   poly vectors below.  Setting itype here also makes the generic loop
   below skip rebuilding them.  */
aarch64_simd_types[Poly8_t].eltype = aarch64_simd_types[Poly8_t].itype =
build_distinct_type_copy (unsigned_intQI_type_node);
aarch64_simd_types[Poly16_t].eltype = aarch64_simd_types[Poly16_t].itype =
build_distinct_type_copy (unsigned_intHI_type_node);
aarch64_simd_types[Poly64_t].eltype = aarch64_simd_types[Poly64_t].itype =
build_distinct_type_copy (unsigned_intDI_type_node);
aarch64_simd_types[Poly128_t].eltype = aarch64_simd_types[Poly128_t].itype =
build_distinct_type_copy (unsigned_intTI_type_node);
/* Init poly vector element types with scalar poly types. */
aarch64_simd_types[Poly8x8_t].eltype = aarch64_simd_types[Poly8_t].itype;
aarch64_simd_types[Poly8x16_t].eltype = aarch64_simd_types[Poly8_t].itype;
aarch64_simd_types[Poly16x4_t].eltype = aarch64_simd_types[Poly16_t].itype;
aarch64_simd_types[Poly16x8_t].eltype = aarch64_simd_types[Poly16_t].itype;
aarch64_simd_types[Poly64x1_t].eltype = aarch64_simd_types[Poly64_t].itype;
aarch64_simd_types[Poly64x2_t].eltype = aarch64_simd_types[Poly64_t].itype;
/* Continue with standard types. */
aarch64_simd_types[Float32x2_t].eltype = float_type_node;
aarch64_simd_types[Float32x4_t].eltype = float_type_node;
aarch64_simd_types[Float64x1_t].eltype = double_type_node;
aarch64_simd_types[Float64x2_t].eltype = double_type_node;
/* For every table entry: build the vector type from its element type
   (unless itype was pre-set above for the scalar poly types), register
   it under its internal name, and record the resulting TYPE_DECL back
   into the type so it can be recognized later by name.  */
for (i = 0; i < nelts; i++)
{
tree eltype = aarch64_simd_types[i].eltype;
enum machine_mode mode = aarch64_simd_types[i].mode;
if (aarch64_simd_types[i].itype == NULL)
aarch64_simd_types[i].itype =
build_distinct_type_copy
(build_vector_type (eltype, GET_MODE_NUNITS (mode)));
tdecl = add_builtin_type (aarch64_simd_types[i].name,
aarch64_simd_types[i].itype);
TYPE_NAME (aarch64_simd_types[i].itype) = tdecl;
SET_TYPE_STRUCTURAL_EQUALITY (aarch64_simd_types[i].itype);
}
/* NOTE(review): the OI/EI/CI/XI modes are presumably the opaque
   container types for the multi-vector (structure) load/store
   intrinsics — confirm against aarch64-simd-builtins.def.  */
#define AARCH64_BUILD_SIGNED_TYPE(mode) \
make_signed_type (GET_MODE_PRECISION (mode));
aarch64_simd_intOI_type_node = AARCH64_BUILD_SIGNED_TYPE (OImode);
aarch64_simd_intEI_type_node = AARCH64_BUILD_SIGNED_TYPE (EImode);
aarch64_simd_intCI_type_node = AARCH64_BUILD_SIGNED_TYPE (CImode);
aarch64_simd_intXI_type_node = AARCH64_BUILD_SIGNED_TYPE (XImode);
#undef AARCH64_BUILD_SIGNED_TYPE
/* Register the container types under their builtin names so they are
   usable from arm_neon.h, and record each TYPE_DECL.  */
tdecl = add_builtin_type
("__builtin_aarch64_simd_ei" , aarch64_simd_intEI_type_node);
TYPE_NAME (aarch64_simd_intEI_type_node) = tdecl;
tdecl = add_builtin_type
("__builtin_aarch64_simd_oi" , aarch64_simd_intOI_type_node);
TYPE_NAME (aarch64_simd_intOI_type_node) = tdecl;
tdecl = add_builtin_type
("__builtin_aarch64_simd_ci" , aarch64_simd_intCI_type_node);
TYPE_NAME (aarch64_simd_intCI_type_node) = tdecl;
tdecl = add_builtin_type
("__builtin_aarch64_simd_xi" , aarch64_simd_intXI_type_node);
TYPE_NAME (aarch64_simd_intXI_type_node) = tdecl;
}
tree
aarch64_build_signed_type (machine_mode mode)
static void
aarch64_init_simd_builtin_scalar_types (void)
{
return aarch64_build_type (mode, false, false);
}
tree
aarch64_build_unsigned_type (machine_mode mode)
{
return aarch64_build_type (mode, true, false);
}
tree
aarch64_build_poly_type (machine_mode mode)
{
return aarch64_build_type (mode, false, true);
/* Define typedefs for all the standard scalar types. */
(*lang_hooks.types.register_builtin_type) (intQI_type_node,
"__builtin_aarch64_simd_qi");
(*lang_hooks.types.register_builtin_type) (intHI_type_node,
"__builtin_aarch64_simd_hi");
(*lang_hooks.types.register_builtin_type) (intSI_type_node,
"__builtin_aarch64_simd_si");
(*lang_hooks.types.register_builtin_type) (float_type_node,
"__builtin_aarch64_simd_sf");
(*lang_hooks.types.register_builtin_type) (intDI_type_node,
"__builtin_aarch64_simd_di");
(*lang_hooks.types.register_builtin_type) (double_type_node,
"__builtin_aarch64_simd_df");
(*lang_hooks.types.register_builtin_type) (unsigned_intQI_type_node,
"__builtin_aarch64_simd_poly8");
(*lang_hooks.types.register_builtin_type) (unsigned_intHI_type_node,
"__builtin_aarch64_simd_poly16");
(*lang_hooks.types.register_builtin_type) (unsigned_intDI_type_node,
"__builtin_aarch64_simd_poly64");
(*lang_hooks.types.register_builtin_type) (unsigned_intTI_type_node,
"__builtin_aarch64_simd_poly128");
(*lang_hooks.types.register_builtin_type) (intTI_type_node,
"__builtin_aarch64_simd_ti");
/* Unsigned integer types for various mode sizes. */
(*lang_hooks.types.register_builtin_type) (unsigned_intQI_type_node,
"__builtin_aarch64_simd_uqi");
(*lang_hooks.types.register_builtin_type) (unsigned_intHI_type_node,
"__builtin_aarch64_simd_uhi");
(*lang_hooks.types.register_builtin_type) (unsigned_intSI_type_node,
"__builtin_aarch64_simd_usi");
(*lang_hooks.types.register_builtin_type) (unsigned_intDI_type_node,
"__builtin_aarch64_simd_udi");
}
static void
@ -543,77 +684,14 @@ aarch64_init_simd_builtins (void)
{
unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE + 1;
/* Signed scalar type nodes. */
tree aarch64_simd_intQI_type_node = aarch64_build_signed_type (QImode);
tree aarch64_simd_intHI_type_node = aarch64_build_signed_type (HImode);
tree aarch64_simd_intSI_type_node = aarch64_build_signed_type (SImode);
tree aarch64_simd_intDI_type_node = aarch64_build_signed_type (DImode);
tree aarch64_simd_intTI_type_node = aarch64_build_signed_type (TImode);
tree aarch64_simd_intEI_type_node = aarch64_build_signed_type (EImode);
tree aarch64_simd_intOI_type_node = aarch64_build_signed_type (OImode);
tree aarch64_simd_intCI_type_node = aarch64_build_signed_type (CImode);
tree aarch64_simd_intXI_type_node = aarch64_build_signed_type (XImode);
/* Unsigned scalar type nodes. */
tree aarch64_simd_intUQI_type_node = aarch64_build_unsigned_type (QImode);
tree aarch64_simd_intUHI_type_node = aarch64_build_unsigned_type (HImode);
tree aarch64_simd_intUSI_type_node = aarch64_build_unsigned_type (SImode);
tree aarch64_simd_intUDI_type_node = aarch64_build_unsigned_type (DImode);
/* Poly scalar type nodes. */
tree aarch64_simd_polyQI_type_node = aarch64_build_poly_type (QImode);
tree aarch64_simd_polyHI_type_node = aarch64_build_poly_type (HImode);
tree aarch64_simd_polyDI_type_node = aarch64_build_poly_type (DImode);
tree aarch64_simd_polyTI_type_node = aarch64_build_poly_type (TImode);
/* Float type nodes. */
tree aarch64_simd_float_type_node = aarch64_build_signed_type (SFmode);
tree aarch64_simd_double_type_node = aarch64_build_signed_type (DFmode);
/* Define typedefs which exactly correspond to the modes we are basing vector
types on. If you change these names you'll need to change
the table used by aarch64_mangle_type too. */
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intQI_type_node,
"__builtin_aarch64_simd_qi");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intHI_type_node,
"__builtin_aarch64_simd_hi");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intSI_type_node,
"__builtin_aarch64_simd_si");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_float_type_node,
"__builtin_aarch64_simd_sf");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intDI_type_node,
"__builtin_aarch64_simd_di");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_double_type_node,
"__builtin_aarch64_simd_df");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_polyQI_type_node,
"__builtin_aarch64_simd_poly8");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_polyHI_type_node,
"__builtin_aarch64_simd_poly16");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_polyDI_type_node,
"__builtin_aarch64_simd_poly64");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_polyTI_type_node,
"__builtin_aarch64_simd_poly128");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intTI_type_node,
"__builtin_aarch64_simd_ti");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intEI_type_node,
"__builtin_aarch64_simd_ei");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intOI_type_node,
"__builtin_aarch64_simd_oi");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intCI_type_node,
"__builtin_aarch64_simd_ci");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intXI_type_node,
"__builtin_aarch64_simd_xi");
/* Unsigned integer types for various mode sizes. */
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intUQI_type_node,
"__builtin_aarch64_simd_uqi");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intUHI_type_node,
"__builtin_aarch64_simd_uhi");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intUSI_type_node,
"__builtin_aarch64_simd_usi");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_intUDI_type_node,
"__builtin_aarch64_simd_udi");
aarch64_init_simd_builtin_types ();
/* Strong-typing hasn't been implemented for all AdvSIMD builtin intrinsics.
Therefore we need to preserve the old __builtin scalar types. It can be
removed once all the intrinsics become strongly typed using the qualifier
system. */
aarch64_init_simd_builtin_scalar_types ();
for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++)
{
bool print_type_signature_p = false;
@ -677,9 +755,11 @@ aarch64_init_simd_builtins (void)
if (qualifiers & qualifier_pointer && VECTOR_MODE_P (op_mode))
op_mode = GET_MODE_INNER (op_mode);
eltype = aarch64_build_type (op_mode,
qualifiers & qualifier_unsigned,
qualifiers & qualifier_poly);
eltype = aarch64_simd_builtin_type
(op_mode,
(qualifiers & qualifier_unsigned) != 0,
(qualifiers & qualifier_poly) != 0);
gcc_assert (eltype != NULL);
/* Add qualifiers. */
if (qualifiers & qualifier_const)
@ -717,13 +797,14 @@ aarch64_init_simd_builtins (void)
static void
aarch64_init_crc32_builtins ()
{
tree usi_type = aarch64_build_unsigned_type (SImode);
tree usi_type = aarch64_simd_builtin_std_type (SImode, qualifier_unsigned);
unsigned int i = 0;
for (i = 0; i < ARRAY_SIZE (aarch64_crc_builtin_data); ++i)
{
aarch64_crc_builtin_datum* d = &aarch64_crc_builtin_data[i];
tree argtype = aarch64_build_unsigned_type (d->mode);
tree argtype = aarch64_simd_builtin_std_type (d->mode,
qualifier_unsigned);
tree ftype = build_function_type_list (usi_type, usi_type, argtype, NULL_TREE);
tree fndecl = add_builtin_function (d->name, ftype, d->fcode,
BUILT_IN_MD, NULL, NULL_TREE);

View file

@ -214,6 +214,7 @@ bool aarch64_simd_valid_immediate (rtx, machine_mode, bool,
bool aarch64_symbolic_address_p (rtx);
bool aarch64_uimm12_shift (HOST_WIDE_INT);
bool aarch64_use_return_insn_p (void);
const char *aarch64_mangle_builtin_type (const_tree);
const char *aarch64_output_casesi (rtx *);
const char *aarch64_rewrite_selected_cpu (const char *name);

View file

@ -0,0 +1,50 @@
/* Builtin AdvSIMD types.
Copyright (C) 2014 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Each ENTRY (E, M, Q, G) gives: E, the internal AdvSIMD type name
   (registered by aarch64_init_simd_builtin_types as "__<E>"); M, the
   machine mode it maps to; Q, its type qualifier (none, unsigned or
   poly); and G, the decimal length prefix of the mangled name, i.e.
   strlen ("__" #E), as expanded by the ENTRY macro in
   aarch64-builtins.c ("#G "__" #E").  */
ENTRY (Int8x8_t, V8QI, none, 10)
ENTRY (Int8x16_t, V16QI, none, 11)
ENTRY (Int16x4_t, V4HI, none, 11)
ENTRY (Int16x8_t, V8HI, none, 11)
ENTRY (Int32x2_t, V2SI, none, 11)
ENTRY (Int32x4_t, V4SI, none, 11)
ENTRY (Int64x1_t, DI, none, 11)
ENTRY (Int64x2_t, V2DI, none, 11)
ENTRY (Uint8x8_t, V8QI, unsigned, 11)
ENTRY (Uint8x16_t, V16QI, unsigned, 12)
ENTRY (Uint16x4_t, V4HI, unsigned, 12)
ENTRY (Uint16x8_t, V8HI, unsigned, 12)
ENTRY (Uint32x2_t, V2SI, unsigned, 12)
ENTRY (Uint32x4_t, V4SI, unsigned, 12)
ENTRY (Uint64x1_t, DI, unsigned, 12)
ENTRY (Uint64x2_t, V2DI, unsigned, 12)
ENTRY (Poly8_t, QI, poly, 9)
ENTRY (Poly16_t, HI, poly, 10)
ENTRY (Poly64_t, DI, poly, 10)
ENTRY (Poly128_t, TI, poly, 11)
ENTRY (Poly8x8_t, V8QI, poly, 11)
ENTRY (Poly8x16_t, V16QI, poly, 12)
ENTRY (Poly16x4_t, V4HI, poly, 12)
ENTRY (Poly16x8_t, V8HI, poly, 12)
ENTRY (Poly64x1_t, DI, poly, 12)
ENTRY (Poly64x2_t, V2DI, poly, 12)
ENTRY (Float32x2_t, V2SF, none, 13)
ENTRY (Float32x4_t, V4SF, none, 13)
ENTRY (Float64x1_t, V1DF, none, 13)
ENTRY (Float64x2_t, V2DF, none, 13)

View file

@ -7597,54 +7597,6 @@ aarch64_autovectorize_vector_sizes (void)
return (16 | 8);
}
/* A table to help perform AArch64-specific name mangling for AdvSIMD
vector types in order to conform to the AAPCS64 (see "Procedure
Call Standard for the ARM 64-bit Architecture", Appendix A). To
qualify for emission with the mangled names defined in that document,
a vector type must not only be of the correct mode but also be
composed of AdvSIMD vector element types (e.g.
_builtin_aarch64_simd_qi); these types are registered by
aarch64_init_simd_builtins (). In other words, vector types defined
in other ways e.g. via vector_size attribute will get default
mangled names. */
typedef struct
{
machine_mode mode;
const char *element_type_name;
const char *mangled_name;
} aarch64_simd_mangle_map_entry;
static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = {
/* 64-bit containerized types. */
{ V8QImode, "__builtin_aarch64_simd_qi", "10__Int8x8_t" },
{ V8QImode, "__builtin_aarch64_simd_uqi", "11__Uint8x8_t" },
{ V4HImode, "__builtin_aarch64_simd_hi", "11__Int16x4_t" },
{ V4HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x4_t" },
{ V2SImode, "__builtin_aarch64_simd_si", "11__Int32x2_t" },
{ V2SImode, "__builtin_aarch64_simd_usi", "12__Uint32x2_t" },
{ V2SFmode, "__builtin_aarch64_simd_sf", "13__Float32x2_t" },
{ DImode, "__builtin_aarch64_simd_di", "11__Int64x1_t" },
{ DImode, "__builtin_aarch64_simd_udi", "12__Uint64x1_t" },
{ V1DFmode, "__builtin_aarch64_simd_df", "13__Float64x1_t" },
{ V8QImode, "__builtin_aarch64_simd_poly8", "11__Poly8x8_t" },
{ V4HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x4_t" },
/* 128-bit containerized types. */
{ V16QImode, "__builtin_aarch64_simd_qi", "11__Int8x16_t" },
{ V16QImode, "__builtin_aarch64_simd_uqi", "12__Uint8x16_t" },
{ V8HImode, "__builtin_aarch64_simd_hi", "11__Int16x8_t" },
{ V8HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x8_t" },
{ V4SImode, "__builtin_aarch64_simd_si", "11__Int32x4_t" },
{ V4SImode, "__builtin_aarch64_simd_usi", "12__Uint32x4_t" },
{ V2DImode, "__builtin_aarch64_simd_di", "11__Int64x2_t" },
{ V2DImode, "__builtin_aarch64_simd_udi", "12__Uint64x2_t" },
{ V4SFmode, "__builtin_aarch64_simd_sf", "13__Float32x4_t" },
{ V2DFmode, "__builtin_aarch64_simd_df", "13__Float64x2_t" },
{ V16QImode, "__builtin_aarch64_simd_poly8", "12__Poly8x16_t" },
{ V8HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" },
{ V2DImode, "__builtin_aarch64_simd_poly64", "12__Poly64x2_t" },
{ VOIDmode, NULL, NULL }
};
/* Implement TARGET_MANGLE_TYPE. */
static const char *
@ -7655,25 +7607,10 @@ aarch64_mangle_type (const_tree type)
if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
return "St9__va_list";
/* Check the mode of the vector type, and the name of the vector
element type, against the table. */
if (TREE_CODE (type) == VECTOR_TYPE)
{
aarch64_simd_mangle_map_entry *pos = aarch64_simd_mangle_map;
while (pos->mode != VOIDmode)
{
tree elt_type = TREE_TYPE (type);
if (pos->mode == TYPE_MODE (type)
&& TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL
&& !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))),
pos->element_type_name))
return pos->mangled_name;
pos++;
}
}
/* Mangle AArch64-specific internal types. TYPE_NAME is non-NULL_TREE for
builtin types. */
if (TYPE_NAME (type) != NULL)
return aarch64_mangle_builtin_type (type);
/* Use the default mangling. */
return NULL;

View file

@ -32,63 +32,39 @@
#define __AARCH64_UINT64_C(__C) ((uint64_t) __C)
#define __AARCH64_INT64_C(__C) ((int64_t) __C)
typedef __builtin_aarch64_simd_qi int8x8_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_hi int16x4_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_si int32x2_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_di int64x1_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_df float64x1_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_sf float32x2_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_poly8 poly8x8_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_poly16 poly16x4_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_uqi uint8x8_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_uhi uint16x4_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_usi uint32x2_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_udi uint64x1_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_qi int8x16_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_hi int16x8_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_si int32x4_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_di int64x2_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_sf float32x4_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_df float64x2_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_poly8 poly8x16_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_poly16 poly16x8_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_poly64 poly64x2_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_uqi uint8x16_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_uhi uint16x8_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_usi uint32x4_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_udi uint64x2_t
__attribute__ ((__vector_size__ (16)));
typedef __Int8x8_t int8x8_t;
typedef __Int16x4_t int16x4_t;
typedef __Int32x2_t int32x2_t;
typedef __Int64x1_t int64x1_t;
typedef __Float32x2_t float32x2_t;
typedef __Poly8x8_t poly8x8_t;
typedef __Poly16x4_t poly16x4_t;
typedef __Uint8x8_t uint8x8_t;
typedef __Uint16x4_t uint16x4_t;
typedef __Uint32x2_t uint32x2_t;
typedef __Float64x1_t float64x1_t;
typedef __Uint64x1_t uint64x1_t;
typedef __Int8x16_t int8x16_t;
typedef __Int16x8_t int16x8_t;
typedef __Int32x4_t int32x4_t;
typedef __Int64x2_t int64x2_t;
typedef __Float32x4_t float32x4_t;
typedef __Float64x2_t float64x2_t;
typedef __Poly8x16_t poly8x16_t;
typedef __Poly16x8_t poly16x8_t;
typedef __Poly64x2_t poly64x2_t;
typedef __Uint8x16_t uint8x16_t;
typedef __Uint16x8_t uint16x8_t;
typedef __Uint32x4_t uint32x4_t;
typedef __Uint64x2_t uint64x2_t;
typedef __Poly8_t poly8_t;
typedef __Poly16_t poly16_t;
typedef __Poly64_t poly64_t;
typedef __Poly128_t poly128_t;
typedef float float32_t;
typedef double float64_t;
typedef __builtin_aarch64_simd_poly8 poly8_t;
typedef __builtin_aarch64_simd_poly16 poly16_t;
typedef __builtin_aarch64_simd_poly64 poly64_t;
typedef __builtin_aarch64_simd_poly128 poly128_t;
typedef struct int8x8x2_t
{

View file

@ -32,6 +32,7 @@ aarch64-builtins.o: $(srcdir)/config/aarch64/aarch64-builtins.c $(CONFIG_H) \
$(RTL_H) $(TREE_H) expr.h $(TM_P_H) $(RECOG_H) langhooks.h \
$(DIAGNOSTIC_CORE_H) $(OPTABS_H) \
$(srcdir)/config/aarch64/aarch64-simd-builtins.def \
$(srcdir)/config/aarch64/aarch64-simd-builtin-types.def \
aarch64-builtin-iterators.h
$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
$(srcdir)/config/aarch64/aarch64-builtins.c