+2018-05-17 Olga Makhotina <olga.makhotina@intel.com>
+
+ * config.gcc: Support "goldmont-plus".
+ * config/i386/driver-i386.c (host_detect_local_cpu): Detect
+ "goldmont-plus".
+ * config/i386/i386-c.c (ix86_target_macros_internal): Handle
+ PROCESSOR_GOLDMONT_PLUS.
+ * config/i386/i386.c (m_GOLDMONT_PLUS): Define.
+ (processor_target_table): Add "goldmont-plus".
+ (PTA_GOLDMONT_PLUS): Define.
+ (ix86_lea_outperforms): Add TARGET_GOLDMONT_PLUS.
+ (get_builtin_code_for_version): Handle PROCESSOR_GOLDMONT_PLUS.
+ (fold_builtin_cpu): Add M_INTEL_GOLDMONT_PLUS.
+ (fold_builtin_cpu): Add "goldmont-plus".
+ (ix86_add_stmt_cost): Add TARGET_GOLDMONT_PLUS.
+ (ix86_option_override_internal): Add "goldmont-plus".
+ * config/i386/i386.h (processor_costs): Define TARGET_GOLDMONT_PLUS.
+ (processor_type): Add PROCESSOR_GOLDMONT_PLUS.
+ * config/i386/x86-tune.def: Add m_GOLDMONT_PLUS.
+ * doc/invoke.texi: Add goldmont-plus as x86 -march=/-mtune= CPU type.
+
2018-05-17 Richard Biener <rguenther@suse.de>
PR tree-optimization/85757
core2 corei7 corei7-avx core-avx-i core-avx2 atom slm nehalem westmere \
sandybridge ivybridge haswell broadwell bonnell silvermont knl knm \
skylake-avx512 cannonlake icelake-client icelake-server skylake goldmont \
-x86-64 native"
+goldmont-plus x86-64 native"
# Additional x86 processors supported by --with-cpu=. Each processor
# MUST be separated by exactly one space.
/* Goldmont. */
cpu = "goldmont";
break;
+ case 0x7a:
+ /* Goldmont Plus. */
+ cpu = "goldmont-plus";
+ break;
case 0x0f:
/* Merom. */
case 0x17:
cpu = "sandybridge";
else if (has_sse4_2)
{
- if (has_xsave)
+ if (has_sgx)
+ /* Assume Goldmont Plus. */
+ cpu = "goldmont-plus";
+ else if (has_xsave)
/* Assume Goldmont. */
cpu = "goldmont";
else if (has_movbe)
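
When the CPUID model number is not recognized, the driver falls back to ISA feature bits: on an SSE4.2-capable Atom-class core, the presence of SGX now selects "goldmont-plus" before the existing XSAVE check falls back to "goldmont".  A minimal user-space sketch of the same feature test, not part of the patch, assuming GCC's <cpuid.h> and the architectural SGX bit CPUID.(EAX=07H,ECX=0):EBX[2]:

#include <cpuid.h>
#include <stdio.h>

int
main (void)
{
  unsigned int eax, ebx, ecx, edx;

  /* Leaf 7, subleaf 0: structured extended feature flags.  */
  if (__get_cpuid_count (7, 0, &eax, &ebx, &ecx, &edx)
      && (ebx & (1U << 2)))	/* EBX bit 2 == SGX.  */
    printf ("SGX reported by CPUID leaf 7\n");
  return 0;
}
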
def_or_undef (parse_in, "__goldmont");
def_or_undef (parse_in, "__goldmont__");
break;
+ case PROCESSOR_GOLDMONT_PLUS:
+ def_or_undef (parse_in, "__goldmont_plus");
+ def_or_undef (parse_in, "__goldmont_plus__");
+ break;
case PROCESSOR_KNL:
def_or_undef (parse_in, "__knl");
def_or_undef (parse_in, "__knl__");
case PROCESSOR_GOLDMONT:
def_or_undef (parse_in, "__tune_goldmont__");
break;
+ case PROCESSOR_GOLDMONT_PLUS:
+ def_or_undef (parse_in, "__tune_goldmont_plus__");
+ break;
case PROCESSOR_KNL:
def_or_undef (parse_in, "__tune_knl__");
break;
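
These hunks define __goldmont_plus/__goldmont_plus__ for -march=goldmont-plus and __tune_goldmont_plus__ for -mtune=goldmont-plus.  User code can key off the new macros in the usual way; an illustrative sketch (only the macro names come from the hunk above, the rest is hypothetical):

/* Pick a build-arch string at compile time based on the new macros.  */
#ifdef __goldmont_plus__
# define BUILD_ARCH "goldmont-plus"
#elif defined __goldmont__
# define BUILD_ARCH "goldmont"
#else
# define BUILD_ARCH "generic"
#endif
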
#define m_ICELAKE_CLIENT (HOST_WIDE_INT_1U<<PROCESSOR_ICELAKE_CLIENT)
#define m_ICELAKE_SERVER (HOST_WIDE_INT_1U<<PROCESSOR_ICELAKE_SERVER)
#define m_GOLDMONT (HOST_WIDE_INT_1U<<PROCESSOR_GOLDMONT)
+#define m_GOLDMONT_PLUS (HOST_WIDE_INT_1U<<PROCESSOR_GOLDMONT_PLUS)
#define m_INTEL (HOST_WIDE_INT_1U<<PROCESSOR_INTEL)
#define m_GEODE (HOST_WIDE_INT_1U<<PROCESSOR_GEODE)
{"bonnell", &atom_cost, 16, 15, 16, 7, 16},
{"silvermont", &slm_cost, 16, 15, 16, 7, 16},
{"goldmont", &slm_cost, 16, 15, 16, 7, 16},
+ {"goldmont-plus", &slm_cost, 16, 15, 16, 7, 16},
{"knl", &slm_cost, 16, 15, 16, 7, 16},
{"knm", &slm_cost, 16, 15, 16, 7, 16},
{"skylake", &skylake_cost, 16, 10, 16, 10, 16},
const wide_int_bitmask PTA_GOLDMONT = PTA_SILVERMONT | PTA_SHA | PTA_XSAVE
| PTA_RDSEED | PTA_XSAVEC | PTA_XSAVES | PTA_CLFLUSHOPT | PTA_XSAVEOPT
| PTA_FSGSBASE;
+ const wide_int_bitmask PTA_GOLDMONT_PLUS = PTA_GOLDMONT | PTA_RDPID
+ | PTA_SGX;
const wide_int_bitmask PTA_KNM = PTA_KNL | PTA_AVX5124VNNIW
| PTA_AVX5124FMAPS | PTA_AVX512VPOPCNTDQ;
{"silvermont", PROCESSOR_SILVERMONT, CPU_SLM, PTA_SILVERMONT},
{"slm", PROCESSOR_SILVERMONT, CPU_SLM, PTA_SILVERMONT},
{"goldmont", PROCESSOR_GOLDMONT, CPU_GLM, PTA_GOLDMONT},
+ {"goldmont-plus", PROCESSOR_GOLDMONT_PLUS, CPU_GLM, PTA_GOLDMONT_PLUS},
{"knl", PROCESSOR_KNL, CPU_SLM, PTA_KNL},
{"knm", PROCESSOR_KNM, CPU_SLM, PTA_KNM},
{"intel", PROCESSOR_INTEL, CPU_SLM, PTA_NEHALEM},
/* For Silvermont if using a 2-source or 3-source LEA for
non-destructive destination purposes, or due to wanting
ability to use SCALE, the use of LEA is justified. */
- if (TARGET_SILVERMONT || TARGET_GOLDMONT || TARGET_INTEL)
+ if (TARGET_SILVERMONT || TARGET_GOLDMONT || TARGET_GOLDMONT_PLUS
+ || TARGET_INTEL)
{
if (has_scale)
return true;
arg_str = "silvermont";
priority = P_PROC_SSE4_2;
break;
- case PROCESSOR_GOLDMONT:
+ case PROCESSOR_GOLDMONT:
arg_str = "goldmont";
priority = P_PROC_SSE4_2;
break;
+ case PROCESSOR_GOLDMONT_PLUS:
+ arg_str = "goldmont-plus";
+ priority = P_PROC_SSE4_2;
+ break;
case PROCESSOR_AMDFAM10:
arg_str = "amdfam10h";
priority = P_PROC_SSE4_A;
M_INTEL_COREI7_CANNONLAKE,
M_INTEL_COREI7_ICELAKE_CLIENT,
M_INTEL_COREI7_ICELAKE_SERVER,
- M_INTEL_GOLDMONT
+ M_INTEL_GOLDMONT,
+ M_INTEL_GOLDMONT_PLUS
};
static struct _arch_names_table
{"bonnell", M_INTEL_BONNELL},
{"silvermont", M_INTEL_SILVERMONT},
{"goldmont", M_INTEL_GOLDMONT},
+ {"goldmont-plus", M_INTEL_GOLDMONT_PLUS},
{"knl", M_INTEL_KNL},
{"knm", M_INTEL_KNM},
{"amdfam10h", M_AMDFAM10H},
/* We need to multiply all vector stmt cost by 1.7 (estimated cost)
   for Silvermont as it has an out-of-order integer pipeline and can execute
   2 scalar instructions per tick, but has an in-order SIMD pipeline. */
- if ((TARGET_SILVERMONT || TARGET_GOLDMONT || TARGET_INTEL)
- && stmt_info && stmt_info->stmt)
+ if ((TARGET_SILVERMONT || TARGET_GOLDMONT || TARGET_GOLDMONT_PLUS
+ || TARGET_INTEL) && stmt_info && stmt_info->stmt)
{
tree lhs_op = gimple_get_lhs (stmt_info->stmt);
if (lhs_op && TREE_CODE (TREE_TYPE (lhs_op)) == INTEGER_TYPE)
#define TARGET_BONNELL (ix86_tune == PROCESSOR_BONNELL)
#define TARGET_SILVERMONT (ix86_tune == PROCESSOR_SILVERMONT)
#define TARGET_GOLDMONT (ix86_tune == PROCESSOR_GOLDMONT)
+#define TARGET_GOLDMONT_PLUS (ix86_tune == PROCESSOR_GOLDMONT_PLUS)
#define TARGET_KNL (ix86_tune == PROCESSOR_KNL)
#define TARGET_KNM (ix86_tune == PROCESSOR_KNM)
#define TARGET_SKYLAKE (ix86_tune == PROCESSOR_SKYLAKE)
PROCESSOR_BONNELL,
PROCESSOR_SILVERMONT,
PROCESSOR_GOLDMONT,
+ PROCESSOR_GOLDMONT_PLUS,
PROCESSOR_KNL,
PROCESSOR_KNM,
PROCESSOR_SKYLAKE,
DEF_TUNE (X86_TUNE_SCHEDULE, "schedule",
m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL | m_SILVERMONT
| m_INTEL | m_KNL | m_KNM | m_K6_GEODE | m_AMD_MULTIPLE | m_GOLDMONT
- | m_GENERIC)
+ | m_GOLDMONT_PLUS | m_GENERIC)
/* X86_TUNE_PARTIAL_REG_DEPENDENCY: Enable more register renaming
   on modern chips. Prefer stores affecting whole integer register
value over movb. */
DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency",
m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE
- | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_INTEL
+ | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL
| m_KNL | m_KNM | m_AMD_MULTIPLE | m_SKYLAKE_AVX512 | m_GENERIC)
/* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: This knob promotes all store
DEF_TUNE (X86_TUNE_MOVX, "movx",
m_PPRO | m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE
| m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_KNL | m_KNM | m_INTEL
- | m_GEODE | m_AMD_MULTIPLE | m_SKYLAKE_AVX512 | m_GENERIC)
+ | m_GOLDMONT_PLUS | m_GEODE | m_AMD_MULTIPLE | m_SKYLAKE_AVX512
+ | m_GENERIC)
/* X86_TUNE_MEMORY_MISMATCH_STALL: Avoid partial stores that are followed by
full sized loads. */
DEF_TUNE (X86_TUNE_MEMORY_MISMATCH_STALL, "memory_mismatch_stall",
m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_INTEL
- | m_KNL | m_KNM | m_GOLDMONT | m_AMD_MULTIPLE | m_GENERIC)
+ | m_KNL | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS | m_AMD_MULTIPLE
+ | m_GENERIC)
/* X86_TUNE_FUSE_CMP_AND_BRANCH_32: Fuse compare with a subsequent
conditional jump instruction for 32 bit TARGET. */
DEF_TUNE (X86_TUNE_ACCUMULATE_OUTGOING_ARGS, "accumulate_outgoing_args",
m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_INTEL
- | m_GOLDMONT | m_ATHLON_K8)
+ | m_GOLDMONT | m_GOLDMONT_PLUS | m_ATHLON_K8)
/* X86_TUNE_PROLOGUE_USING_MOVE: Do not use push/pop in prologues that are
considered on critical path. */
than 4 branch instructions in the 16 byte window. */
DEF_TUNE (X86_TUNE_FOUR_JUMP_LIMIT, "four_jump_limit",
m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM
- | m_GOLDMONT | m_INTEL | m_ATHLON_K8 | m_AMDFAM10)
+ | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL | m_ATHLON_K8 | m_AMDFAM10)
/*****************************************************************************/
/* Integer instruction selection tuning */
DEF_TUNE (X86_TUNE_USE_INCDEC, "use_incdec",
~(m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE
| m_BONNELL | m_SILVERMONT | m_INTEL | m_KNL | m_KNM | m_GOLDMONT
- | m_GENERIC))
+ | m_GOLDMONT_PLUS | m_GENERIC))
/* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
for DFmode copies */
DEF_TUNE (X86_TUNE_INTEGER_DFMODE_MOVES, "integer_dfmode_moves",
~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
| m_KNL | m_KNM | m_INTEL | m_GEODE | m_AMD_MULTIPLE | m_GOLDMONT
- | m_GENERIC))
+ | m_GOLDMONT_PLUS | m_GENERIC))
/* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
will impact LEA instruction selection. */
DEF_TUNE (X86_TUNE_OPT_AGU, "opt_agu", m_BONNELL | m_SILVERMONT | m_KNL
- | m_KNM | m_GOLDMONT | m_INTEL)
+ | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL)
/* X86_TUNE_AVOID_LEA_FOR_ADDR: Avoid lea for address computation. */
DEF_TUNE (X86_TUNE_AVOID_LEA_FOR_ADDR, "avoid_lea_for_addr",
- m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_KNL | m_KNM)
+ m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_KNL
+ | m_KNM)
/* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
vector path on AMD machines.
/* X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE: Try to avoid memory operands for
a conditional move. */
DEF_TUNE (X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE, "avoid_mem_opnd_for_cmove",
- m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_KNL | m_KNM | m_INTEL)
+ m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_KNL
+ | m_KNM | m_INTEL)
/* X86_TUNE_SINGLE_STRINGOP: Enable use of single string operations, such
as MOVS and STOS (without a REP prefix) to move/set sequences of bytes. */
DEF_TUNE (X86_TUNE_USE_SAHF, "use_sahf",
m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
| m_KNL | m_KNM | m_INTEL | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER
- | m_BTVER | m_ZNVER1 | m_GOLDMONT | m_GENERIC)
+ | m_BTVER | m_ZNVER1 | m_GOLDMONT | m_GOLDMONT_PLUS | m_GENERIC)
/* X86_TUNE_USE_CLTD: Controls use of CLTD and CTQO instructions. */
DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd",
~(m_PENT | m_LAKEMONT | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_INTEL
- | m_K6 | m_GOLDMONT))
+ | m_K6 | m_GOLDMONT | m_GOLDMONT_PLUS))
/* X86_TUNE_USE_BT: Enable use of BT (bit test) instructions. */
DEF_TUNE (X86_TUNE_USE_BT, "use_bt",
m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_INTEL
- | m_LAKEMONT | m_AMD_MULTIPLE | m_GOLDMONT | m_GENERIC)
+ | m_LAKEMONT | m_AMD_MULTIPLE | m_GOLDMONT | m_GOLDMONT_PLUS
+ | m_GENERIC)
/* X86_TUNE_AVOID_FALSE_DEP_FOR_BMI: Avoid false dependency
for bit-manipulation instructions. */
if-converted sequence to one. */
DEF_TUNE (X86_TUNE_ONE_IF_CONV_INSN, "one_if_conv_insn",
m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_CORE_ALL | m_GOLDMONT
- | m_GENERIC)
+ | m_GOLDMONT_PLUS | m_GENERIC)
/*****************************************************************************/
/* 387 instruction selection tuning */
DEF_TUNE (X86_TUNE_USE_SIMODE_FIOP, "use_simode_fiop",
~(m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL
| m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_AMD_MULTIPLE
- | m_GOLDMONT | m_GENERIC))
+ | m_GOLDMONT | m_GOLDMONT_PLUS | m_GENERIC))
/* X86_TUNE_USE_FFREEP: Use freep instruction instead of fstp. */
DEF_TUNE (X86_TUNE_USE_FFREEP, "use_ffreep", m_AMD_MULTIPLE)
DEF_TUNE (X86_TUNE_EXT_80387_CONSTANTS, "ext_80387_constants",
m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
| m_KNL | m_KNM | m_INTEL | m_K6_GEODE | m_ATHLON_K8 | m_GOLDMONT
- | m_GENERIC)
+ | m_GOLDMONT_PLUS | m_GENERIC)
/*****************************************************************************/
/* SSE instruction selection tuning */
of a sequence loading registers by parts. */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal",
m_NEHALEM | m_SANDYBRIDGE | m_HASWELL | m_SILVERMONT | m_KNL | m_KNM
- | m_INTEL | m_SKYLAKE_AVX512 | m_GOLDMONT | m_AMDFAM10 | m_BDVER
- | m_BTVER | m_ZNVER1 | m_GENERIC)
+ | m_INTEL | m_SKYLAKE_AVX512 | m_GOLDMONT | m_GOLDMONT_PLUS
+ | m_AMDFAM10 | m_BDVER | m_BTVER | m_ZNVER1 | m_GENERIC)
/* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores instead
of a sequence loading registers by parts. */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal",
m_NEHALEM | m_SANDYBRIDGE | m_HASWELL | m_SILVERMONT | m_KNL | m_KNM
- | m_INTEL | m_SKYLAKE_AVX512 | m_GOLDMONT | m_BDVER | m_ZNVER1
- | m_GENERIC)
+ | m_INTEL | m_SKYLAKE_AVX512 | m_GOLDMONT | m_GOLDMONT_PLUS
+ | m_BDVER | m_ZNVER1 | m_GENERIC)
/* Use packed single precision instructions where possible. I.e. movups instead
of movupd. */
/* X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS: Try to split memory operand for
fp converts to destination register. */
DEF_TUNE (X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS, "split_mem_opnd_for_fp_converts",
- m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT | m_INTEL)
+ m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS
+ | m_INTEL)
/* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
from FP to FP. This form of instructions avoids partial write to the
/* X86_TUNE_SLOW_PSHUFB: Indicates tunings with slow pshufb instruction. */
DEF_TUNE (X86_TUNE_SLOW_PSHUFB, "slow_pshufb",
- m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT | m_INTEL)
+ m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT
+ | m_GOLDMONT_PLUS | m_INTEL)
/* X86_TUNE_AVOID_4BYTE_PREFIXES: Avoid instructions requiring 4+ bytes of prefixes. */
DEF_TUNE (X86_TUNE_AVOID_4BYTE_PREFIXES, "avoid_4byte_prefixes",
- m_SILVERMONT | m_GOLDMONT | m_INTEL)
+ m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL)
/* X86_TUNE_USE_GATHER: Use gather instructions. */
DEF_TUNE (X86_TUNE_USE_GATHER, "use_gather",
SSE4.1, SSE4.2, POPCNT, AES, PCLMUL, RDRND, XSAVE, XSAVEOPT and FSGSBASE
instruction set support.
+@item goldmont-plus
+Intel Goldmont Plus CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
+SSSE3, SSE4.1, SSE4.2, POPCNT, AES, PCLMUL, RDRND, XSAVE, XSAVEOPT, FSGSBASE,
+PTWRITE, RDPID, SGX and UMIP instruction set support.
+
@item knl
Intel Knight's Landing CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
SSSE3, SSE4.1, SSE4.2, POPCNT, AVX, AVX2, AES, PCLMUL, FSGSBASE, RDRND, FMA,
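
Besides the command-line -march=/-mtune= spellings, the new name is also accepted by the per-function target attribute, which is what gcc.target/i386/funcspec-56.inc exercises below.  An illustrative sketch (function name and body are not from the patch):

/* Compile only this function as if -march=goldmont-plus were given,
   leaving the rest of the translation unit at the default arch.  */
__attribute__ ((target ("arch=goldmont-plus")))
void
scale_buffer (float *dst, const float *src, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = 2.0f * src[i];
}
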
+2018-05-17 Olga Makhotina <olga.makhotina@intel.com>
+
+ * gcc.target/i386/builtin_target.c: Test goldmont-plus.
+ * gcc.target/i386/funcspec-56.inc: Tests for arch=goldmont-plus.
+
2018-05-17 Richard Biener <rguenther@suse.de>
PR tree-optimization/85757
/* Goldmont. */
assert (__builtin_cpu_is ("goldmont"));
break;
+ case 0x7a:
+ /* Goldmont Plus. */
+ assert (__builtin_cpu_is ("goldmont-plus"));
+ break;
case 0x57:
/* Knights Landing. */
assert (__builtin_cpu_is ("knl"));
extern void test_arch_core_avx2 (void) __attribute__((__target__("arch=core-avx2")));
extern void test_arch_silvermont (void) __attribute__((__target__("arch=silvermont")));
extern void test_arch_goldmont (void) __attribute__((__target__("arch=goldmont")));
+extern void test_arch_goldmont_plus (void) __attribute__((__target__("arch=goldmont-plus")));
extern void test_arch_knl (void) __attribute__((__target__("arch=knl")));
extern void test_arch_knm (void) __attribute__((__target__("arch=knm")));
extern void test_arch_skylake (void) __attribute__((__target__("arch=skylake")));
+2018-05-17 Olga Makhotina <olga.makhotina@intel.com>
+
+ * config/i386/cpuinfo.h (processor_types): Add INTEL_GOLDMONT_PLUS.
+ * config/i386/cpuinfo.c (get_intel_cpu): Detect Goldmont Plus.
+
2018-05-08 Olga Makhotina <olga.makhotina@intel.com>
* config/i386/cpuinfo.h (processor_types): Add INTEL_GOLDMONT.
/* Goldmont. */
__cpu_model.__cpu_type = INTEL_GOLDMONT;
break;
+ case 0x7a:
+ /* Goldmont Plus. */
+ __cpu_model.__cpu_type = INTEL_GOLDMONT_PLUS;
+ break;
case 0x57:
/* Knights Landing. */
__cpu_model.__cpu_type = INTEL_KNL;
AMDFAM17H,
INTEL_KNM,
INTEL_GOLDMONT,
+ INTEL_GOLDMONT_PLUS,
CPU_TYPE_MAX
};
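
With INTEL_GOLDMONT_PLUS added to libgcc's processor_types and matched by fold_builtin_cpu, runtime dispatch works the same way builtin_target.c tests it above.  A minimal sketch, not part of the patch:

#include <stdio.h>

int
main (void)
{
  __builtin_cpu_init ();
  if (__builtin_cpu_is ("goldmont-plus"))
    puts ("Goldmont Plus");
  else if (__builtin_cpu_is ("goldmont"))
    puts ("Goldmont");
  else
    puts ("other x86 CPU");
  return 0;
}
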