#define NUM_DREG_TYPES 6
#define NUM_QREG_TYPES 6
-/* Return a tree for a signed or unsigned argument of either
- the mode specified by MODE, or the inner mode of MODE. */
-tree
-aarch64_build_scalar_type (machine_mode mode,
- bool unsigned_p,
- bool poly_p)
-{
-#undef INT_TYPES
-#define INT_TYPES \
- AARCH64_TYPE_BUILDER (QI) \
- AARCH64_TYPE_BUILDER (HI) \
- AARCH64_TYPE_BUILDER (SI) \
- AARCH64_TYPE_BUILDER (DI) \
- AARCH64_TYPE_BUILDER (EI) \
- AARCH64_TYPE_BUILDER (OI) \
- AARCH64_TYPE_BUILDER (CI) \
- AARCH64_TYPE_BUILDER (XI) \
- AARCH64_TYPE_BUILDER (TI) \
-
-/* Statically declare all the possible types we might need. */
-#undef AARCH64_TYPE_BUILDER
-#define AARCH64_TYPE_BUILDER(X) \
- static tree X##_aarch64_type_node_p = NULL; \
- static tree X##_aarch64_type_node_s = NULL; \
- static tree X##_aarch64_type_node_u = NULL;
-
- INT_TYPES
-
- static tree float_aarch64_type_node = NULL;
- static tree double_aarch64_type_node = NULL;
-
- gcc_assert (!VECTOR_MODE_P (mode));
-
-/* If we've already initialised this type, don't initialise it again,
- otherwise ask for a new type of the correct size. */
-#undef AARCH64_TYPE_BUILDER
-#define AARCH64_TYPE_BUILDER(X) \
- case X##mode: \
- if (unsigned_p) \
- return (X##_aarch64_type_node_u \
- ? X##_aarch64_type_node_u \
- : X##_aarch64_type_node_u \
- = make_unsigned_type (GET_MODE_PRECISION (mode))); \
- else if (poly_p) \
- return (X##_aarch64_type_node_p \
- ? X##_aarch64_type_node_p \
- : X##_aarch64_type_node_p \
- = make_unsigned_type (GET_MODE_PRECISION (mode))); \
- else \
- return (X##_aarch64_type_node_s \
- ? X##_aarch64_type_node_s \
- : X##_aarch64_type_node_s \
- = make_signed_type (GET_MODE_PRECISION (mode))); \
- break;
+/* Internal scalar builtin types.  These types are used to support
+   Neon intrinsic builtins.  They are _not_ user-visible types.  The
+   mangling for these types is therefore implementation-defined.  */
+const char *aarch64_scalar_builtin_types[] = {
+ "__builtin_aarch64_simd_qi",
+ "__builtin_aarch64_simd_hi",
+ "__builtin_aarch64_simd_si",
+ "__builtin_aarch64_simd_sf",
+ "__builtin_aarch64_simd_di",
+ "__builtin_aarch64_simd_df",
+ "__builtin_aarch64_simd_poly8",
+ "__builtin_aarch64_simd_poly16",
+ "__builtin_aarch64_simd_poly64",
+ "__builtin_aarch64_simd_poly128",
+ "__builtin_aarch64_simd_ti",
+ "__builtin_aarch64_simd_uqi",
+ "__builtin_aarch64_simd_uhi",
+ "__builtin_aarch64_simd_usi",
+ "__builtin_aarch64_simd_udi",
+ "__builtin_aarch64_simd_ei",
+ "__builtin_aarch64_simd_oi",
+ "__builtin_aarch64_simd_ci",
+ "__builtin_aarch64_simd_xi",
+ NULL
+};
- switch (mode)
- {
- INT_TYPES
- case SFmode:
- if (!float_aarch64_type_node)
- {
- float_aarch64_type_node = make_node (REAL_TYPE);
- TYPE_PRECISION (float_aarch64_type_node) = FLOAT_TYPE_SIZE;
- layout_type (float_aarch64_type_node);
- }
- return float_aarch64_type_node;
- break;
- case DFmode:
- if (!double_aarch64_type_node)
- {
- double_aarch64_type_node = make_node (REAL_TYPE);
- TYPE_PRECISION (double_aarch64_type_node) = DOUBLE_TYPE_SIZE;
- layout_type (double_aarch64_type_node);
- }
- return double_aarch64_type_node;
- break;
- default:
- gcc_unreachable ();
- }
-}
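+/* Generate an enum of the internal AdvSIMD types, one enumerator per
+   ENTRY (type, mode, qualifier, mangling-prefix) line in
+   aarch64-simd-builtin-types.def.  */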
+#define ENTRY(E, M, Q, G) E,
+enum aarch64_simd_type
+{
+#include "aarch64-simd-builtin-types.def"
+ ARM_NEON_H_TYPES_LAST
+};
+#undef ENTRY
-tree
-aarch64_build_vector_type (machine_mode mode,
- bool unsigned_p,
- bool poly_p)
+struct aarch64_simd_type_info
{
+ enum aarch64_simd_type type;
+
+ /* Internal type name. */
+ const char *name;
+
+  /* Internal type name (mangled).  The mangled names conform to the
+     AAPCS64 (see "Procedure Call Standard for the ARM 64-bit Architecture",
+     Appendix A).  To qualify for emission with the mangled names defined in
+     that document, a vector type must not only be of the correct mode but
+     also be of the correct internal AdvSIMD vector type (e.g. __Int8x8_t);
+     these types are registered by aarch64_init_simd_builtin_types ().  In
+     other words, vector types defined in other ways, e.g. via the
+     vector_size attribute, will get default mangled names.  */
+ const char *mangle;
+
+ /* Internal type. */
+ tree itype;
+
+ /* Element type. */
tree eltype;
-#define VECTOR_TYPES \
- AARCH64_TYPE_BUILDER (V16QI) \
- AARCH64_TYPE_BUILDER (V8HI) \
- AARCH64_TYPE_BUILDER (V4SI) \
- AARCH64_TYPE_BUILDER (V2DI) \
- AARCH64_TYPE_BUILDER (V8QI) \
- AARCH64_TYPE_BUILDER (V4HI) \
- AARCH64_TYPE_BUILDER (V2SI) \
- \
- AARCH64_TYPE_BUILDER (V4SF) \
- AARCH64_TYPE_BUILDER (V2DF) \
- AARCH64_TYPE_BUILDER (V2SF) \
-/* Declare our "cache" of values. */
-#undef AARCH64_TYPE_BUILDER
-#define AARCH64_TYPE_BUILDER(X) \
- static tree X##_aarch64_type_node_s = NULL; \
- static tree X##_aarch64_type_node_u = NULL; \
- static tree X##_aarch64_type_node_p = NULL;
-
- VECTOR_TYPES
-
- gcc_assert (VECTOR_MODE_P (mode));
-
-#undef AARCH64_TYPE_BUILDER
-#define AARCH64_TYPE_BUILDER(X) \
- case X##mode: \
- if (unsigned_p) \
- return X##_aarch64_type_node_u \
- ? X##_aarch64_type_node_u \
- : X##_aarch64_type_node_u \
- = build_vector_type_for_mode (aarch64_build_scalar_type \
- (GET_MODE_INNER (mode), \
- unsigned_p, poly_p), mode); \
- else if (poly_p) \
- return X##_aarch64_type_node_p \
- ? X##_aarch64_type_node_p \
- : X##_aarch64_type_node_p \
- = build_vector_type_for_mode (aarch64_build_scalar_type \
- (GET_MODE_INNER (mode), \
- unsigned_p, poly_p), mode); \
- else \
- return X##_aarch64_type_node_s \
- ? X##_aarch64_type_node_s \
- : X##_aarch64_type_node_s \
- = build_vector_type_for_mode (aarch64_build_scalar_type \
- (GET_MODE_INNER (mode), \
- unsigned_p, poly_p), mode); \
- break;
+ /* Machine mode the internal type maps to. */
+ enum machine_mode mode;
- switch (mode)
+ /* Qualifiers. */
+ enum aarch64_type_qualifiers q;
+};
+
+#define ENTRY(E, M, Q, G) \
+ {E, "__" #E, #G "__" #E, NULL_TREE, NULL_TREE, M##mode, qualifier_##Q},
+static struct aarch64_simd_type_info aarch64_simd_types [] = {
+#include "aarch64-simd-builtin-types.def"
+};
+#undef ENTRY
+
+static tree aarch64_simd_intOI_type_node = NULL_TREE;
+static tree aarch64_simd_intEI_type_node = NULL_TREE;
+static tree aarch64_simd_intCI_type_node = NULL_TREE;
+static tree aarch64_simd_intXI_type_node = NULL_TREE;
+
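+/* If TYPE is one of the registered scalar builtin types above, return its
+   name (which also serves as its mangled name); otherwise return NULL.  */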
+static const char *
+aarch64_mangle_builtin_scalar_type (const_tree type)
+{
+ int i = 0;
+
+ while (aarch64_scalar_builtin_types[i] != NULL)
{
- default:
- eltype = aarch64_build_scalar_type (GET_MODE_INNER (mode),
- unsigned_p, poly_p);
- return build_vector_type_for_mode (eltype, mode);
- break;
- VECTOR_TYPES
- }
+ const char *name = aarch64_scalar_builtin_types[i];
+
+ if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (type))
+ && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))), name))
+ return aarch64_scalar_builtin_types[i];
+ i++;
+ }
+ return NULL;
}
-tree
-aarch64_build_type (machine_mode mode, bool unsigned_p, bool poly_p)
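+/* If TYPE is one of the internal AdvSIMD vector types in aarch64_simd_types,
+   return its AAPCS64 mangled name; otherwise return NULL.  */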
+static const char *
+aarch64_mangle_builtin_vector_type (const_tree type)
{
- if (VECTOR_MODE_P (mode))
- return aarch64_build_vector_type (mode, unsigned_p, poly_p);
- else
- return aarch64_build_scalar_type (mode, unsigned_p, poly_p);
+ int i;
+  int nelts = ARRAY_SIZE (aarch64_simd_types);
+
+ for (i = 0; i < nelts; i++)
+ if (aarch64_simd_types[i].mode == TYPE_MODE (type)
+ && TYPE_NAME (type)
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_NAME (TYPE_NAME (type))
+ && !strcmp
+ (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))),
+ aarch64_simd_types[i].name))
+ return aarch64_simd_types[i].mangle;
+
+ return NULL;
}
-tree
-aarch64_build_signed_type (machine_mode mode)
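+/* Return the mangled name for TYPE if it is an AArch64 internal builtin
+   type, NULL otherwise.  Used by the TARGET_MANGLE_TYPE implementation.  */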
+const char *
+aarch64_mangle_builtin_type (const_tree type)
{
- return aarch64_build_type (mode, false, false);
+ const char *mangle;
+  /* Walk through all the AArch64 builtin type tables to look up the
+     incoming type.  */
+ if ((mangle = aarch64_mangle_builtin_vector_type (type))
+ || (mangle = aarch64_mangle_builtin_scalar_type (type)))
+ return mangle;
+
+ return NULL;
}
-tree
-aarch64_build_unsigned_type (machine_mode mode)
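+/* Return the standard type node matching MODE with qualifiers Q: the
+   front end's signed or unsigned integer and float nodes for ordinary
+   modes, or the internal wide-integer nodes for EI/OI/CI/XI.  */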
+static tree
+aarch64_simd_builtin_std_type (enum machine_mode mode,
+ enum aarch64_type_qualifiers q)
{
- return aarch64_build_type (mode, true, false);
+#define QUAL_TYPE(M)  \
+  ((q == qualifier_none) ? int##M##_type_node : unsigned_int##M##_type_node)
+ switch (mode)
+ {
+ case QImode:
+ return QUAL_TYPE (QI);
+ case HImode:
+ return QUAL_TYPE (HI);
+ case SImode:
+ return QUAL_TYPE (SI);
+ case DImode:
+ return QUAL_TYPE (DI);
+ case TImode:
+ return QUAL_TYPE (TI);
+ case OImode:
+ return aarch64_simd_intOI_type_node;
+ case EImode:
+ return aarch64_simd_intEI_type_node;
+ case CImode:
+ return aarch64_simd_intCI_type_node;
+ case XImode:
+ return aarch64_simd_intXI_type_node;
+ case SFmode:
+ return float_type_node;
+ case DFmode:
+ return double_type_node;
+ default:
+ gcc_unreachable ();
+ }
+#undef QUAL_TYPE
}
-tree
-aarch64_build_poly_type (machine_mode mode)
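+/* Return the internal AdvSIMD type node matching MODE with qualifiers Q,
+   or NULL_TREE if no such type has been registered.  */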
+static tree
+aarch64_lookup_simd_builtin_type (enum machine_mode mode,
+ enum aarch64_type_qualifiers q)
{
- return aarch64_build_type (mode, false, true);
+ int i;
+  int nelts = ARRAY_SIZE (aarch64_simd_types);
+
+ /* Non-poly scalar modes map to standard types not in the table. */
+ if (q != qualifier_poly && !VECTOR_MODE_P (mode))
+ return aarch64_simd_builtin_std_type (mode, q);
+
+ for (i = 0; i < nelts; i++)
+ if (aarch64_simd_types[i].mode == mode
+ && aarch64_simd_types[i].q == q)
+ return aarch64_simd_types[i].itype;
+
+ return NULL_TREE;
}
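+/* Map the legacy UNSIGNED_P/POLY_P flags onto the qualifier system and
+   look up the corresponding type node.  For example,
+   (V4SImode, true, false) yields the Uint32x4_t node.  */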
+static tree
+aarch64_simd_builtin_type (enum machine_mode mode,
+ bool unsigned_p, bool poly_p)
+{
+ if (poly_p)
+ return aarch64_lookup_simd_builtin_type (mode, qualifier_poly);
+ else if (unsigned_p)
+ return aarch64_lookup_simd_builtin_type (mode, qualifier_unsigned);
+ else
+ return aarch64_lookup_simd_builtin_type (mode, qualifier_none);
+}
+
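+/* Build the internal AdvSIMD types described in aarch64_simd_types and
+   register them with the front end.  */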
static void
-aarch64_init_simd_builtins (void)
+aarch64_init_simd_builtin_types (void)
{
- unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE + 1;
+ int i;
+  int nelts = ARRAY_SIZE (aarch64_simd_types);
+ tree tdecl;
+
+ /* Init all the element types built by the front-end. */
+ aarch64_simd_types[Int8x8_t].eltype = intQI_type_node;
+ aarch64_simd_types[Int8x16_t].eltype = intQI_type_node;
+ aarch64_simd_types[Int16x4_t].eltype = intHI_type_node;
+ aarch64_simd_types[Int16x8_t].eltype = intHI_type_node;
+ aarch64_simd_types[Int32x2_t].eltype = intSI_type_node;
+ aarch64_simd_types[Int32x4_t].eltype = intSI_type_node;
+ aarch64_simd_types[Int64x1_t].eltype = intDI_type_node;
+ aarch64_simd_types[Int64x2_t].eltype = intDI_type_node;
+ aarch64_simd_types[Uint8x8_t].eltype = unsigned_intQI_type_node;
+ aarch64_simd_types[Uint8x16_t].eltype = unsigned_intQI_type_node;
+ aarch64_simd_types[Uint16x4_t].eltype = unsigned_intHI_type_node;
+ aarch64_simd_types[Uint16x8_t].eltype = unsigned_intHI_type_node;
+ aarch64_simd_types[Uint32x2_t].eltype = unsigned_intSI_type_node;
+ aarch64_simd_types[Uint32x4_t].eltype = unsigned_intSI_type_node;
+ aarch64_simd_types[Uint64x1_t].eltype = unsigned_intDI_type_node;
+ aarch64_simd_types[Uint64x2_t].eltype = unsigned_intDI_type_node;
+
+ /* Poly types are a world of their own. */
+ aarch64_simd_types[Poly8_t].eltype = aarch64_simd_types[Poly8_t].itype =
+ build_distinct_type_copy (unsigned_intQI_type_node);
+ aarch64_simd_types[Poly16_t].eltype = aarch64_simd_types[Poly16_t].itype =
+ build_distinct_type_copy (unsigned_intHI_type_node);
+ aarch64_simd_types[Poly64_t].eltype = aarch64_simd_types[Poly64_t].itype =
+ build_distinct_type_copy (unsigned_intDI_type_node);
+ aarch64_simd_types[Poly128_t].eltype = aarch64_simd_types[Poly128_t].itype =
+ build_distinct_type_copy (unsigned_intTI_type_node);
+ /* Init poly vector element types with scalar poly types. */
+ aarch64_simd_types[Poly8x8_t].eltype = aarch64_simd_types[Poly8_t].itype;
+ aarch64_simd_types[Poly8x16_t].eltype = aarch64_simd_types[Poly8_t].itype;
+ aarch64_simd_types[Poly16x4_t].eltype = aarch64_simd_types[Poly16_t].itype;
+ aarch64_simd_types[Poly16x8_t].eltype = aarch64_simd_types[Poly16_t].itype;
+ aarch64_simd_types[Poly64x1_t].eltype = aarch64_simd_types[Poly64_t].itype;
+ aarch64_simd_types[Poly64x2_t].eltype = aarch64_simd_types[Poly64_t].itype;
+
+ /* Continue with standard types. */
+ aarch64_simd_types[Float32x2_t].eltype = float_type_node;
+ aarch64_simd_types[Float32x4_t].eltype = float_type_node;
+ aarch64_simd_types[Float64x1_t].eltype = double_type_node;
+ aarch64_simd_types[Float64x2_t].eltype = double_type_node;
+
+ for (i = 0; i < nelts; i++)
+ {
+ tree eltype = aarch64_simd_types[i].eltype;
+ enum machine_mode mode = aarch64_simd_types[i].mode;
+
+ if (aarch64_simd_types[i].itype == NULL)
+ aarch64_simd_types[i].itype =
+ build_distinct_type_copy
+ (build_vector_type (eltype, GET_MODE_NUNITS (mode)));
+
+ tdecl = add_builtin_type (aarch64_simd_types[i].name,
+ aarch64_simd_types[i].itype);
+ TYPE_NAME (aarch64_simd_types[i].itype) = tdecl;
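+      /* Mark the type so that type comparison uses structural equality
+	 rather than canonical-type pointers.  */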
+ SET_TYPE_STRUCTURAL_EQUALITY (aarch64_simd_types[i].itype);
+ }
- /* Signed scalar type nodes. */
- tree aarch64_simd_intQI_type_node = aarch64_build_signed_type (QImode);
- tree aarch64_simd_intHI_type_node = aarch64_build_signed_type (HImode);
- tree aarch64_simd_intSI_type_node = aarch64_build_signed_type (SImode);
- tree aarch64_simd_intDI_type_node = aarch64_build_signed_type (DImode);
- tree aarch64_simd_intTI_type_node = aarch64_build_signed_type (TImode);
- tree aarch64_simd_intEI_type_node = aarch64_build_signed_type (EImode);
- tree aarch64_simd_intOI_type_node = aarch64_build_signed_type (OImode);
- tree aarch64_simd_intCI_type_node = aarch64_build_signed_type (CImode);
- tree aarch64_simd_intXI_type_node = aarch64_build_signed_type (XImode);
-
- /* Unsigned scalar type nodes. */
- tree aarch64_simd_intUQI_type_node = aarch64_build_unsigned_type (QImode);
- tree aarch64_simd_intUHI_type_node = aarch64_build_unsigned_type (HImode);
- tree aarch64_simd_intUSI_type_node = aarch64_build_unsigned_type (SImode);
- tree aarch64_simd_intUDI_type_node = aarch64_build_unsigned_type (DImode);
-
- /* Poly scalar type nodes. */
- tree aarch64_simd_polyQI_type_node = aarch64_build_poly_type (QImode);
- tree aarch64_simd_polyHI_type_node = aarch64_build_poly_type (HImode);
- tree aarch64_simd_polyDI_type_node = aarch64_build_poly_type (DImode);
- tree aarch64_simd_polyTI_type_node = aarch64_build_poly_type (TImode);
-
- /* Float type nodes. */
- tree aarch64_simd_float_type_node = aarch64_build_signed_type (SFmode);
- tree aarch64_simd_double_type_node = aarch64_build_signed_type (DFmode);
-
- /* Define typedefs which exactly correspond to the modes we are basing vector
- types on. If you change these names you'll need to change
- the table used by aarch64_mangle_type too. */
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intQI_type_node,
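+  /* The EI, OI, CI and XI modes have no corresponding front-end type, so
+     build signed integer nodes of the right precision for them here.  */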
+#define AARCH64_BUILD_SIGNED_TYPE(mode)  \
+  make_signed_type (GET_MODE_PRECISION (mode))
+ aarch64_simd_intOI_type_node = AARCH64_BUILD_SIGNED_TYPE (OImode);
+ aarch64_simd_intEI_type_node = AARCH64_BUILD_SIGNED_TYPE (EImode);
+ aarch64_simd_intCI_type_node = AARCH64_BUILD_SIGNED_TYPE (CImode);
+ aarch64_simd_intXI_type_node = AARCH64_BUILD_SIGNED_TYPE (XImode);
+#undef AARCH64_BUILD_SIGNED_TYPE
+
+  tdecl = add_builtin_type
+    ("__builtin_aarch64_simd_ei", aarch64_simd_intEI_type_node);
+  TYPE_NAME (aarch64_simd_intEI_type_node) = tdecl;
+  tdecl = add_builtin_type
+    ("__builtin_aarch64_simd_oi", aarch64_simd_intOI_type_node);
+  TYPE_NAME (aarch64_simd_intOI_type_node) = tdecl;
+  tdecl = add_builtin_type
+    ("__builtin_aarch64_simd_ci", aarch64_simd_intCI_type_node);
+  TYPE_NAME (aarch64_simd_intCI_type_node) = tdecl;
+  tdecl = add_builtin_type
+    ("__builtin_aarch64_simd_xi", aarch64_simd_intXI_type_node);
+  TYPE_NAME (aarch64_simd_intXI_type_node) = tdecl;
+}
+
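+/* Register the old "__builtin_aarch64_simd_*" scalar typedefs; see the
+   comment in aarch64_init_simd_builtins below.  */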
+static void
+aarch64_init_simd_builtin_scalar_types (void)
+{
+ /* Define typedefs for all the standard scalar types. */
+ (*lang_hooks.types.register_builtin_type) (intQI_type_node,
"__builtin_aarch64_simd_qi");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intHI_type_node,
+ (*lang_hooks.types.register_builtin_type) (intHI_type_node,
"__builtin_aarch64_simd_hi");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intSI_type_node,
+ (*lang_hooks.types.register_builtin_type) (intSI_type_node,
"__builtin_aarch64_simd_si");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_float_type_node,
+ (*lang_hooks.types.register_builtin_type) (float_type_node,
"__builtin_aarch64_simd_sf");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intDI_type_node,
+ (*lang_hooks.types.register_builtin_type) (intDI_type_node,
"__builtin_aarch64_simd_di");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_double_type_node,
+ (*lang_hooks.types.register_builtin_type) (double_type_node,
"__builtin_aarch64_simd_df");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyQI_type_node,
+ (*lang_hooks.types.register_builtin_type) (unsigned_intQI_type_node,
"__builtin_aarch64_simd_poly8");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyHI_type_node,
+ (*lang_hooks.types.register_builtin_type) (unsigned_intHI_type_node,
"__builtin_aarch64_simd_poly16");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyDI_type_node,
+ (*lang_hooks.types.register_builtin_type) (unsigned_intDI_type_node,
"__builtin_aarch64_simd_poly64");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyTI_type_node,
+ (*lang_hooks.types.register_builtin_type) (unsigned_intTI_type_node,
"__builtin_aarch64_simd_poly128");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intTI_type_node,
+ (*lang_hooks.types.register_builtin_type) (intTI_type_node,
"__builtin_aarch64_simd_ti");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intEI_type_node,
- "__builtin_aarch64_simd_ei");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intOI_type_node,
- "__builtin_aarch64_simd_oi");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intCI_type_node,
- "__builtin_aarch64_simd_ci");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intXI_type_node,
- "__builtin_aarch64_simd_xi");
-
/* Unsigned integer types for various mode sizes. */
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUQI_type_node,
+ (*lang_hooks.types.register_builtin_type) (unsigned_intQI_type_node,
"__builtin_aarch64_simd_uqi");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUHI_type_node,
+ (*lang_hooks.types.register_builtin_type) (unsigned_intHI_type_node,
"__builtin_aarch64_simd_uhi");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUSI_type_node,
+ (*lang_hooks.types.register_builtin_type) (unsigned_intSI_type_node,
"__builtin_aarch64_simd_usi");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUDI_type_node,
+ (*lang_hooks.types.register_builtin_type) (unsigned_intDI_type_node,
"__builtin_aarch64_simd_udi");
+}
+
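+/* Register all the AdvSIMD builtin functions, initialising the builtin
+   types they rely on first.  */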
+static void
+aarch64_init_simd_builtins (void)
+{
+ unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE + 1;
+
+ aarch64_init_simd_builtin_types ();
+  /* Strong typing hasn't been implemented for all AdvSIMD builtin
+     intrinsics, so we need to preserve the old __builtin scalar types.
+     These can be removed once all the intrinsics become strongly typed
+     using the qualifier system.  */
+ aarch64_init_simd_builtin_scalar_types ();
+
for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++)
{
bool print_type_signature_p = false;
if (qualifiers & qualifier_pointer && VECTOR_MODE_P (op_mode))
op_mode = GET_MODE_INNER (op_mode);
- eltype = aarch64_build_type (op_mode,
- qualifiers & qualifier_unsigned,
- qualifiers & qualifier_poly);
+ eltype = aarch64_simd_builtin_type
+ (op_mode,
+ (qualifiers & qualifier_unsigned) != 0,
+ (qualifiers & qualifier_poly) != 0);
+ gcc_assert (eltype != NULL);
/* Add qualifiers. */
if (qualifiers & qualifier_const)
static void
aarch64_init_crc32_builtins ()
{
- tree usi_type = aarch64_build_unsigned_type (SImode);
+ tree usi_type = aarch64_simd_builtin_std_type (SImode, qualifier_unsigned);
unsigned int i = 0;
for (i = 0; i < ARRAY_SIZE (aarch64_crc_builtin_data); ++i)
{
aarch64_crc_builtin_datum* d = &aarch64_crc_builtin_data[i];
- tree argtype = aarch64_build_unsigned_type (d->mode);
+ tree argtype = aarch64_simd_builtin_std_type (d->mode,
+ qualifier_unsigned);
tree ftype = build_function_type_list (usi_type, usi_type, argtype, NULL_TREE);
tree fndecl = add_builtin_function (d->name, ftype, d->fcode,
BUILT_IN_MD, NULL, NULL_TREE);
return (16 | 8);
}
-/* A table to help perform AArch64-specific name mangling for AdvSIMD
- vector types in order to conform to the AAPCS64 (see "Procedure
- Call Standard for the ARM 64-bit Architecture", Appendix A). To
- qualify for emission with the mangled names defined in that document,
- a vector type must not only be of the correct mode but also be
- composed of AdvSIMD vector element types (e.g.
- _builtin_aarch64_simd_qi); these types are registered by
- aarch64_init_simd_builtins (). In other words, vector types defined
- in other ways e.g. via vector_size attribute will get default
- mangled names. */
-typedef struct
-{
- machine_mode mode;
- const char *element_type_name;
- const char *mangled_name;
-} aarch64_simd_mangle_map_entry;
-
-static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = {
- /* 64-bit containerized types. */
- { V8QImode, "__builtin_aarch64_simd_qi", "10__Int8x8_t" },
- { V8QImode, "__builtin_aarch64_simd_uqi", "11__Uint8x8_t" },
- { V4HImode, "__builtin_aarch64_simd_hi", "11__Int16x4_t" },
- { V4HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x4_t" },
- { V2SImode, "__builtin_aarch64_simd_si", "11__Int32x2_t" },
- { V2SImode, "__builtin_aarch64_simd_usi", "12__Uint32x2_t" },
- { V2SFmode, "__builtin_aarch64_simd_sf", "13__Float32x2_t" },
- { DImode, "__builtin_aarch64_simd_di", "11__Int64x1_t" },
- { DImode, "__builtin_aarch64_simd_udi", "12__Uint64x1_t" },
- { V1DFmode, "__builtin_aarch64_simd_df", "13__Float64x1_t" },
- { V8QImode, "__builtin_aarch64_simd_poly8", "11__Poly8x8_t" },
- { V4HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x4_t" },
- /* 128-bit containerized types. */
- { V16QImode, "__builtin_aarch64_simd_qi", "11__Int8x16_t" },
- { V16QImode, "__builtin_aarch64_simd_uqi", "12__Uint8x16_t" },
- { V8HImode, "__builtin_aarch64_simd_hi", "11__Int16x8_t" },
- { V8HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x8_t" },
- { V4SImode, "__builtin_aarch64_simd_si", "11__Int32x4_t" },
- { V4SImode, "__builtin_aarch64_simd_usi", "12__Uint32x4_t" },
- { V2DImode, "__builtin_aarch64_simd_di", "11__Int64x2_t" },
- { V2DImode, "__builtin_aarch64_simd_udi", "12__Uint64x2_t" },
- { V4SFmode, "__builtin_aarch64_simd_sf", "13__Float32x4_t" },
- { V2DFmode, "__builtin_aarch64_simd_df", "13__Float64x2_t" },
- { V16QImode, "__builtin_aarch64_simd_poly8", "12__Poly8x16_t" },
- { V8HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" },
- { V2DImode, "__builtin_aarch64_simd_poly64", "12__Poly64x2_t" },
- { VOIDmode, NULL, NULL }
-};
-
/* Implement TARGET_MANGLE_TYPE. */
static const char *
if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
return "St9__va_list";
- /* Check the mode of the vector type, and the name of the vector
- element type, against the table. */
- if (TREE_CODE (type) == VECTOR_TYPE)
- {
- aarch64_simd_mangle_map_entry *pos = aarch64_simd_mangle_map;
-
- while (pos->mode != VOIDmode)
- {
- tree elt_type = TREE_TYPE (type);
-
- if (pos->mode == TYPE_MODE (type)
- && TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL
- && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))),
- pos->element_type_name))
- return pos->mangled_name;
-
- pos++;
- }
- }
+ /* Mangle AArch64-specific internal types. TYPE_NAME is non-NULL_TREE for
+ builtin types. */
+ if (TYPE_NAME (type) != NULL)
+ return aarch64_mangle_builtin_type (type);
/* Use the default mangling. */
return NULL;