+2016-12-14 Andrew Pinski <apinski@cavium.com>
+
+ * config/aarch64/aarch64-cores.def: Add -1 as the variant to all
+ of the cores.
+ (thunderx): Update to include AARCH64_FL_LSE by default.  Change the
+ part number to 0x0a0.
+ (thunderxt88p1): New core.
+ (thunderxt88): New core.
+ (thunderxt81): New core.
+ (thunderxt83): New core.
+ * config/aarch64/driver-aarch64.c (struct aarch64_core_data):
+ Add variant field.
+ (ALL_VARIANTS): New define.
+ (AARCH64_CORE): Support VARIANT operand.
+ (cpu_data): Likewise.
+ (host_detect_local_cpu): Parse variant field of /proc/cpuinfo.
+ Combine the arch and single-core cases and support searching by variant.
+ * common/config/aarch64/aarch64-common.c (AARCH64_CORE):
+ Add VARIANT operand.
+ * config/aarch64/aarch64-opts.h (AARCH64_CORE): Likewise.
+ * config/aarch64/aarch64.c (AARCH64_CORE): Likewise.
+ * config/aarch64/aarch64.h (AARCH64_CORE): Likewise.
+ * config/aarch64/aarch64-tune.md: Regenerate.
+ * doc/invoke.texi (AARCH64/mtune): Document thunderxt88,
+ thunderxt88p1, thunderxt81, thunderxt83 as available options.
+
2016-12-14 Martin Jambor <mjambor@suse.cz>
* omp-offload.c: Fix coding style.
the default set of architectural feature flags they support. */
static const struct processor_name_to_arch all_cores[] =
{
-#define AARCH64_CORE(NAME, X, IDENT, ARCH_IDENT, FLAGS, COSTS, IMP, PART) \
+#define AARCH64_CORE(NAME, X, IDENT, ARCH_IDENT, FLAGS, COSTS, IMP, PART, VARIANT) \
{NAME, AARCH64_ARCH_##ARCH_IDENT, FLAGS},
#include "config/aarch64/aarch64-cores.def"
{"generic", AARCH64_ARCH_8A, AARCH64_FL_FOR_ARCH8},
Before using #include to read this file, define a macro:
- AARCH64_CORE(CORE_NAME, CORE_IDENT, SCHEDULER_IDENT, ARCH_IDENT, FLAGS, COSTS, IMP, PART)
+ AARCH64_CORE(CORE_NAME, CORE_IDENT, SCHEDULER_IDENT, ARCH_IDENT, FLAGS, COSTS, IMP, PART, VARIANT)
The CORE_NAME is the name of the core, represented as a string constant.
The CORE_IDENT is the name of the core, represented as an identifier.
PART is the part number of the CPU. On a GNU/Linux system it can be
found in /proc/cpuinfo. For big.LITTLE systems this should use the
macro AARCH64_BIG_LITTLE where the big part number comes as the first
- argument to the macro and little is the second. */
+ argument to the macro and little is the second.
+ VARIANT is the variant of the CPU. On a GNU/Linux system it can be
+ found in /proc/cpuinfo. If this is -1, it matches any variant. */
/* V8 Architecture Processors. */
/* ARM ('A') cores. */
-AARCH64_CORE("cortex-a35", cortexa35, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa35, 0x41, 0xd04)
-AARCH64_CORE("cortex-a53", cortexa53, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa53, 0x41, 0xd03)
-AARCH64_CORE("cortex-a57", cortexa57, cortexa57, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa57, 0x41, 0xd07)
-AARCH64_CORE("cortex-a72", cortexa72, cortexa57, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa72, 0x41, 0xd08)
-AARCH64_CORE("cortex-a73", cortexa73, cortexa57, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa73, 0x41, 0xd09)
+AARCH64_CORE("cortex-a35", cortexa35, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa35, 0x41, 0xd04, -1)
+AARCH64_CORE("cortex-a53", cortexa53, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa53, 0x41, 0xd03, -1)
+AARCH64_CORE("cortex-a57", cortexa57, cortexa57, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa57, 0x41, 0xd07, -1)
+AARCH64_CORE("cortex-a72", cortexa72, cortexa57, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa72, 0x41, 0xd08, -1)
+AARCH64_CORE("cortex-a73", cortexa73, cortexa57, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa73, 0x41, 0xd09, -1)
/* Samsung ('S') cores. */
-AARCH64_CORE("exynos-m1", exynosm1, exynosm1, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, exynosm1, 0x53, 0x001)
+AARCH64_CORE("exynos-m1", exynosm1, exynosm1, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, exynosm1, 0x53, 0x001, -1)
/* Qualcomm ('Q') cores. */
-AARCH64_CORE("falkor", falkor, cortexa57, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, qdf24xx, 0x51, 0xC00)
-AARCH64_CORE("qdf24xx", qdf24xx, cortexa57, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, qdf24xx, 0x51, 0xC00)
+AARCH64_CORE("falkor", falkor, cortexa57, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, qdf24xx, 0x51, 0xC00, -1)
+AARCH64_CORE("qdf24xx", qdf24xx, cortexa57, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, qdf24xx, 0x51, 0xC00, -1)
/* Cavium ('C') cores. */
-AARCH64_CORE("thunderx", thunderx, thunderx, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, thunderx, 0x43, 0x0a1)
+AARCH64_CORE("thunderx", thunderx, thunderx, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO | AARCH64_FL_LSE, thunderx, 0x43, 0x0a0, -1)
+/* Do not swap around "thunderxt88p1" and "thunderxt88";
+ this order is required to handle the variant correctly. */
+AARCH64_CORE("thunderxt88p1", thunderxt88p1, thunderx, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, thunderx, 0x43, 0x0a1, 0)
+AARCH64_CORE("thunderxt88", thunderxt88, thunderx, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO | AARCH64_FL_LSE, thunderx, 0x43, 0x0a1, -1)
+AARCH64_CORE("thunderxt81", thunderxt81, thunderx, 8_1A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO | AARCH64_FL_LSE, thunderx, 0x43, 0x0a2, -1)
+AARCH64_CORE("thunderxt83", thunderxt83, thunderx, 8_1A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC | AARCH64_FL_CRYPTO | AARCH64_FL_LSE, thunderx, 0x43, 0x0a3, -1)
/* APM ('P') cores. */
-AARCH64_CORE("xgene1", xgene1, xgene1, 8A, AARCH64_FL_FOR_ARCH8, xgene1, 0x50, 0x000)
+AARCH64_CORE("xgene1", xgene1, xgene1, 8A, AARCH64_FL_FOR_ARCH8, xgene1, 0x50, 0x000, -1)
/* V8.1 Architecture Processors. */
/* Broadcom ('B') cores. */
-AARCH64_CORE("vulcan", vulcan, cortexa57, 8_1A, AARCH64_FL_FOR_ARCH8_1 | AARCH64_FL_CRYPTO, vulcan, 0x42, 0x516)
+AARCH64_CORE("vulcan", vulcan, cortexa57, 8_1A, AARCH64_FL_FOR_ARCH8_1 | AARCH64_FL_CRYPTO, vulcan, 0x42, 0x516, -1)
/* V8 big.LITTLE implementations. */
-AARCH64_CORE("cortex-a57.cortex-a53", cortexa57cortexa53, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa57, 0x41, AARCH64_BIG_LITTLE (0xd07, 0xd03))
-AARCH64_CORE("cortex-a72.cortex-a53", cortexa72cortexa53, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa72, 0x41, AARCH64_BIG_LITTLE (0xd08, 0xd03))
-AARCH64_CORE("cortex-a73.cortex-a35", cortexa73cortexa35, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa73, 0x41, AARCH64_BIG_LITTLE (0xd09, 0xd04))
-AARCH64_CORE("cortex-a73.cortex-a53", cortexa73cortexa53, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa73, 0x41, AARCH64_BIG_LITTLE (0xd09, 0xd03))
+AARCH64_CORE("cortex-a57.cortex-a53", cortexa57cortexa53, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa57, 0x41, AARCH64_BIG_LITTLE (0xd07, 0xd03), -1)
+AARCH64_CORE("cortex-a72.cortex-a53", cortexa72cortexa53, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa72, 0x41, AARCH64_BIG_LITTLE (0xd08, 0xd03), -1)
+AARCH64_CORE("cortex-a73.cortex-a35", cortexa73cortexa35, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa73, 0x41, AARCH64_BIG_LITTLE (0xd09, 0xd04), -1)
+AARCH64_CORE("cortex-a73.cortex-a53", cortexa73cortexa53, cortexa53, 8A, AARCH64_FL_FOR_ARCH8 | AARCH64_FL_CRC, cortexa73, 0x41, AARCH64_BIG_LITTLE (0xd09, 0xd03), -1)
#undef AARCH64_CORE
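As a minimal, self-contained C sketch (not the GCC sources themselves; the struct, table values and lookup function below are simplified stand-ins for aarch64_cpu_data and the search loop in host_detect_local_cpu), this shows why the pass-1 "thunderxt88p1" entry, with variant 0, must precede the wildcard "thunderxt88" entry, whose variant is ALL_VARIANTS:

#include <stdio.h>

#define ALL_VARIANTS ((unsigned) -1)

struct core_entry
{
  const char *name;
  unsigned implementer_id;
  unsigned part_no;
  unsigned variant;
};

/* Order matters: the variant-0 (pass 1) entry precedes the wildcard.  */
static const struct core_entry table[] =
{
  { "thunderxt88p1", 0x43, 0x0a1, 0 },
  { "thunderxt88",   0x43, 0x0a1, ALL_VARIANTS },
  { NULL, 0, 0, 0 }
};

/* First match wins, modelled on the loop in host_detect_local_cpu.  */
static const char *
lookup (unsigned imp, unsigned part, unsigned variant)
{
  for (int i = 0; table[i].name != NULL; i++)
    if (table[i].implementer_id == imp
        && table[i].part_no == part
        && (table[i].variant == ALL_VARIANTS
            || table[i].variant == variant))
      return table[i].name;
  return "unknown";
}

int
main (void)
{
  printf ("variant 0 -> %s\n", lookup (0x43, 0x0a1, 0)); /* thunderxt88p1 */
  printf ("variant 1 -> %s\n", lookup (0x43, 0x0a1, 1)); /* thunderxt88  */
  return 0;
}

With the two entries swapped, the wildcard would match first and pass-1 silicon (variant 0) could never select the thunderxt88p1 tuning.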
/* The various cores that implement AArch64. */
enum aarch64_processor
{
-#define AARCH64_CORE(NAME, INTERNAL_IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART) \
+#define AARCH64_CORE(NAME, INTERNAL_IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART, VARIANT) \
INTERNAL_IDENT,
#include "aarch64-cores.def"
/* Used to indicate that no processor has been specified. */
;; -*- buffer-read-only: t -*-
;; Generated automatically by gentune.sh from aarch64-cores.def
(define_attr "tune"
- "cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,exynosm1,falkor,qdf24xx,thunderx,xgene1,vulcan,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53"
+ "cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,exynosm1,falkor,qdf24xx,thunderx,thunderxt88p1,thunderxt88,thunderxt81,thunderxt83,xgene1,vulcan,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53"
(const (symbol_ref "((enum attr_tune) aarch64_tune)")))
/* Processor cores implementing AArch64. */
static const struct processor all_cores[] =
{
-#define AARCH64_CORE(NAME, IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART) \
+#define AARCH64_CORE(NAME, IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART, VARIANT) \
{NAME, IDENT, SCHED, AARCH64_ARCH_##ARCH, \
all_architectures[AARCH64_ARCH_##ARCH].architecture_version, \
FLAGS, &COSTS##_tunings},
enum target_cpus
{
-#define AARCH64_CORE(NAME, INTERNAL_IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART) \
+#define AARCH64_CORE(NAME, INTERNAL_IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART, VARIANT) \
TARGET_CPU_##INTERNAL_IDENT,
#include "aarch64-cores.def"
TARGET_CPU_generic
const char* arch;
unsigned char implementer_id; /* Exactly 8 bits */
unsigned int part_no; /* 12 bits + 12 bits */
+ unsigned variant; /* CPU variant, or ALL_VARIANTS to match any. */
const unsigned long flags;
};
(((BIG)&0xFFFu) << 12 | ((LITTLE) & 0xFFFu))
#define INVALID_IMP ((unsigned char) -1)
#define INVALID_CORE ((unsigned)-1)
+#define ALL_VARIANTS ((unsigned)-1)
-#define AARCH64_CORE(CORE_NAME, CORE_IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART) \
- { CORE_NAME, #ARCH, IMP, PART, FLAGS },
+#define AARCH64_CORE(CORE_NAME, CORE_IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART, VARIANT) \
+ { CORE_NAME, #ARCH, IMP, PART, VARIANT, FLAGS },
static struct aarch64_core_data aarch64_cpu_data[] =
{
#include "aarch64-cores.def"
- { NULL, NULL, INVALID_IMP, INVALID_CORE, 0 }
+ { NULL, NULL, INVALID_IMP, INVALID_CORE, ALL_VARIANTS, 0 }
};
const char *
host_detect_local_cpu (int argc, const char **argv)
{
- const char *arch_id = NULL;
const char *res = NULL;
static const int num_exts = ARRAY_SIZE (aarch64_extensions);
char buf[128];
unsigned char imp = INVALID_IMP;
unsigned int cores[2] = { INVALID_CORE, INVALID_CORE };
unsigned int n_cores = 0;
+ unsigned int variants[2] = { ALL_VARIANTS, ALL_VARIANTS };
+ unsigned int n_variants = 0;
bool processed_exts = false;
const char *ext_string = "";
unsigned long extension_flags = 0;
goto not_found;
}
+ if (strstr (buf, "variant") != NULL)
+ {
+ unsigned cvariant = parse_field (buf);
+ if (!contains_core_p (variants, cvariant))
+ {
+ if (n_variants == 2)
+ goto not_found;
+
+ variants[n_variants++] = cvariant;
+ }
+ continue;
+ }
+
if (strstr (buf, "part") != NULL)
{
unsigned ccore = parse_field (buf);
f = NULL;
/* Weird cpuinfo format that we don't know how to handle. */
- if (n_cores == 0 || n_cores > 2 || imp == INVALID_IMP)
+ if (n_cores == 0
+ || n_cores > 2
+ || (n_cores == 1 && n_variants != 1)
+ || imp == INVALID_IMP)
goto not_found;
- if (arch)
+ /* Simple case: either one core type, or we only need the arch. */
+ if (n_cores == 1 || arch)
{
/* Search for one of the cores in the list. */
for (i = 0; aarch64_cpu_data[i].name != NULL; i++)
if (aarch64_cpu_data[i].implementer_id == imp
- && contains_core_p (cores, aarch64_cpu_data[i].part_no))
- {
- arch_id = aarch64_cpu_data[i].arch;
- break;
- }
- if (!arch_id)
- goto not_found;
-
- struct aarch64_arch_driver_info* arch_info = get_arch_from_id (arch_id);
+ && cores[0] == aarch64_cpu_data[i].part_no
+ && (aarch64_cpu_data[i].variant == ALL_VARIANTS
+ || variants[0] == aarch64_cpu_data[i].variant))
+ break;
+ if (aarch64_cpu_data[i].name == NULL)
+ goto not_found;
+
+ if (arch)
+ {
+ const char *arch_id = aarch64_cpu_data[i].arch;
+ aarch64_arch_driver_info* arch_info = get_arch_from_id (arch_id);
- /* We got some arch indentifier that's not in aarch64-arches.def? */
- if (!arch_info)
- goto not_found;
+ /* We got some arch identifier that's not in aarch64-arches.def? */
+ if (!arch_info)
+ goto not_found;
- res = concat ("-march=", arch_info->name, NULL);
- default_flags = arch_info->flags;
+ res = concat ("-march=", arch_info->name, NULL);
+ default_flags = arch_info->flags;
+ }
+ else
+ {
+ default_flags = aarch64_cpu_data[i].flags;
+ res = concat ("-m",
+ cpu ? "cpu" : "tune", "=",
+ aarch64_cpu_data[i].name,
+ NULL);
+ }
}
/* We have big.LITTLE. */
- else if (n_cores == 2)
+ else
{
for (i = 0; aarch64_cpu_data[i].name != NULL; i++)
{
if (!res)
goto not_found;
}
- /* The simple, non-big.LITTLE case. */
- else
- {
- int core_idx = -1;
- for (i = 0; aarch64_cpu_data[i].name != NULL; i++)
- if (cores[0] == aarch64_cpu_data[i].part_no
- && aarch64_cpu_data[i].implementer_id == imp)
- {
- core_idx = i;
- break;
- }
- if (core_idx == -1)
- goto not_found;
-
- res = concat ("-m", cpu ? "cpu" : "tune", "=",
- aarch64_cpu_data[core_idx].name, NULL);
- default_flags = aarch64_cpu_data[core_idx].flags;
- }
if (tune)
return res;
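As a rough illustration of the cpuinfo parsing the driver now performs (a standalone sketch, not the GCC code: parse_hex_field is a hypothetical stand-in for the driver's parse_field, and the hard-coded lines merely mimic the arm64 kernel's /proc/cpuinfo output format):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the hexadecimal value after the ':' in a cpuinfo line,
   e.g. "CPU variant\t: 0x1" -> 1.  */
static unsigned
parse_hex_field (const char *line)
{
  const char *p = strchr (line, ':');
  return p ? (unsigned) strtoul (p + 1, NULL, 16) : (unsigned) -1;
}

int
main (void)
{
  /* Illustrative lines in the format the arm64 kernel emits.  */
  const char *lines[] =
  {
    "CPU implementer\t: 0x43",
    "CPU variant\t: 0x1",
    "CPU part\t: 0x0a1",
  };
  unsigned imp = 0, variant = 0, part = 0;

  for (size_t i = 0; i < sizeof lines / sizeof lines[0]; i++)
    {
      if (strstr (lines[i], "implementer"))
        imp = parse_hex_field (lines[i]);
      else if (strstr (lines[i], "variant"))
        variant = parse_hex_field (lines[i]);
      else if (strstr (lines[i], "part"))
        part = parse_hex_field (lines[i]);
    }

  printf ("imp=%#x part=%#x variant=%#x\n", imp, part, variant);
  return 0;
}

With the aarch64-cores.def table above, implementer 0x43, part 0x0a1 and variant 0x1 would resolve to -mcpu=thunderxt88, while variant 0x0 would resolve to -mcpu=thunderxt88p1.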
performance of the code. Permissible values for this option are:
@samp{generic}, @samp{cortex-a35}, @samp{cortex-a53}, @samp{cortex-a57},
@samp{cortex-a72}, @samp{cortex-a73}, @samp{exynos-m1}, @samp{falkor},
-@samp{qdf24xx}, @samp{thunderx}, @samp{xgene1}, @samp{vulcan},
-@samp{cortex-a57.cortex-a53}, @samp{cortex-a72.cortex-a53},
+@samp{qdf24xx}, @samp{xgene1}, @samp{vulcan}, @samp{thunderx},
+@samp{thunderxt88}, @samp{thunderxt88p1}, @samp{thunderxt81},
+@samp{thunderxt83}, @samp{cortex-a57.cortex-a53}, @samp{cortex-a72.cortex-a53},
@samp{cortex-a73.cortex-a35}, @samp{cortex-a73.cortex-a53}, @samp{native}.
The values @samp{cortex-a57.cortex-a53}, @samp{cortex-a72.cortex-a53},