if (*line == ',')
{
i386_operand_type combined;
+ expressionS *disp_exp = NULL;
bool changed;
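+ /* memshift is unsigned, so initializing it to -1 produces a sentinel
+ value >= 32, meaning "operand size not yet determined".  */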
+ i.memshift = -1;
+
ptr = parse_operands (line + 1, &i386_mnemonics[MN__insn]);
this_operand = -1;
if (!ptr)
/* Establish operand size encoding. */
operand_type_set (&combined, 0);
+
for (j = i.imm_operands; j < i.operands; ++j)
{
i.types[j].bitfield.instance = InstanceNone;
if (operand_type_check (i.types[j], disp))
- i.types[j].bitfield.baseindex = 1;
+ {
+ i.types[j].bitfield.baseindex = 1;
+ disp_exp = i.op[j].disps;
+ }
+
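+ /* For EVEX encodings, derive the Disp8 shift (log2 of the operand
+ size in bytes) from the memory operand's size bitfields.  */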
+ if (evex && i.types[j].bitfield.baseindex)
+ {
+ unsigned int n = i.memshift;
+
+ if (i.types[j].bitfield.byte)
+ n = 0;
+ else if (i.types[j].bitfield.word)
+ n = 1;
+ else if (i.types[j].bitfield.dword)
+ n = 2;
+ else if (i.types[j].bitfield.qword)
+ n = 3;
+ else if (i.types[j].bitfield.xmmword)
+ n = 4;
+ else if (i.types[j].bitfield.ymmword)
+ n = 5;
+ else if (i.types[j].bitfield.zmmword)
+ n = 6;
+
+ if (i.memshift < 32 && n != i.memshift)
+ as_warn ("conflicting memory operand size specifiers");
+ i.memshift = n;
+ }
if ((i.broadcast.type || i.broadcast.bytes)
&& j == i.broadcast.operand)
combined.bitfield.class = ClassNone;
}
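+ /* Fold the broadcast factor (if any) and the element size into the
+ total memory operand size, recording the matching vector width.  */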
+ switch ((i.broadcast.type ? i.broadcast.type : 1)
+ << (i.memshift < 32 ? i.memshift : 0))
+ {
+ case 64: combined.bitfield.zmmword = 1; break;
+ case 32: combined.bitfield.ymmword = 1; break;
+ case 16: combined.bitfield.xmmword = 1; break;
+ case 8: combined.bitfield.qword = 1; break;
+ case 4: combined.bitfield.dword = 1; break;
+ }
+
if (i.vec_encoding == vex_encoding_default)
{
if (flag_code == CODE_64BIT && combined.bitfield.qword)
else if (combined.bitfield.xmmword)
i.tm.opcode_modifier.evex = EVEX128;
}
+
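+ /* If no operand established a size, fall back to the vector length
+ (EVEX.L'L) just determined, divided by any broadcast factor.  */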
+ if (i.memshift >= 32)
+ {
+ unsigned int n = 0;
+
+ switch (i.tm.opcode_modifier.evex)
+ {
+ case EVEX512: n = 64; break;
+ case EVEX256: n = 32; break;
+ case EVEX128: n = 16; break;
+ }
+
+ if (i.broadcast.type)
+ n /= i.broadcast.type;
+
+ if (n > 0)
+ for (i.memshift = 0; !(n & 1); n >>= 1)
+ ++i.memshift;
+ else if (disp_exp != NULL && disp_exp->X_op == O_constant
+ && disp_exp->X_add_number != 0
+ && i.disp_encoding != disp_encoding_32bit)
+ {
+ if (!quiet_warnings)
+ as_warn ("cannot determine memory operand size");
+ i.disp_encoding = disp_encoding_32bit;
+ }
+ }
}
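+ /* With no size established, displacements remain unscaled.  An explicit
+ size specifier, in turn, is meaningful only with EVEX encodings.  */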
+ if (i.memshift >= 32)
+ i.memshift = 0;
+ else if (!evex)
+ i.vec_encoding = vex_encoding_error;
+
if (i.disp_operands && !optimize_disp (&i.tm))
goto done;
i.broadcast.type = bcst_type;
i.broadcast.operand = this_operand;
+
+ /* For .insn a data size specifier may be appended. */
+ if (dot_insn () && *op_string == ':')
+ goto dot_insn_modifier;
+ }
+ /* Check .insn special cases. */
+ else if (dot_insn () && *op_string == ':')
+ {
+ dot_insn_modifier:
+ if (op_string[1] == 'd')
+ {
+ unsigned long n;
+
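+ /* Only a single size specifier is permitted; treat a second one
+ like any other duplicated {} operator.  */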
+ if (i.memshift < 32)
+ goto duplicated_vec_op;
+
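+ /* Accept only powers of two as size: move all factors of two into
+ i.memshift, which has to leave n at exactly 1.  */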
+ n = strtoul (op_string + 2, &end_op, 0);
+ if (n)
+ for (i.memshift = 0; !(n & 1); n >>= 1)
+ ++i.memshift;
+ if (i.memshift < 32 && n == 1)
+ op_string = end_op;
+ }
}
/* Check masking operation. */
else if ((mask = parse_register (op_string, &end_op)) != NULL)
[ ]*[a-f0-9]+: c5 f1 58 d0[ ]+vaddpd %xmm0,%xmm1,%xmm2
[ ]*[a-f0-9]+: c5 f5 58 d0[ ]+vaddpd %ymm0,%ymm1,%ymm2
[ ]*[a-f0-9]+: c5 f2 58 d0[ ]+vaddss %xmm0,%xmm1,%xmm2
+[ ]*[a-f0-9]+: 62 f1 76 08 58 50 01[ ]+\{evex\} vaddss (0x)?4\(%eax\),%xmm1,%xmm2
[ ]*[a-f0-9]+: c4 e3 69 68 19 00[ ]+vfmaddps %xmm0,\(%ecx\),%xmm2,%xmm3
[ ]*[a-f0-9]+: c4 e3 e9 68 19 00[ ]+vfmaddps \(%ecx\),%xmm0,%xmm2,%xmm3
[ ]*[a-f0-9]+: c4 e3 e9 68 18 10[ ]+vfmaddps \(%eax\),%xmm1,%xmm2,%xmm3
[ ]*[a-f0-9]+: 62 f1 74 18 58 d0[ ]+vaddps \{rn-sae\},%zmm0,%zmm1,%zmm2
[ ]*[a-f0-9]+: c4 e2 79 92 1c 48[ ]+vgatherdps %xmm0,\(%eax,%xmm1,2\),%xmm3
[ ]*[a-f0-9]+: 62 f2 fd 0c 93 1c 48[ ]+vgatherqpd \(%eax,%xmm1,2\),%xmm3\{%k4\}
+[ ]*[a-f0-9]+: 62 f2 7d 28 88 48 01[ ]+vexpandps (0x)?4\(%eax\),%ymm1
+[ ]*[a-f0-9]+: 62 f5 fd 48 5a 40 01[ ]+vcvtpd2phz 0x40\(%eax\),%xmm0
+[ ]*[a-f0-9]+: 62 f5 fd 48 5a 40 01[ ]+vcvtpd2phz 0x40\(%eax\),%xmm0
+[ ]*[a-f0-9]+: 62 f5 fd 48 5a 40 01[ ]+vcvtpd2phz 0x40\(%eax\),%xmm0
+[ ]*[a-f0-9]+: 62 f5 fd 58 5a 40 01[ ]+vcvtpd2ph (0x)?8\(%eax\)\{1to8\},%xmm0
+[ ]*[a-f0-9]+: 62 f5 fd 58 5a 40 01[ ]+vcvtpd2ph (0x)?8\(%eax\)\{1to8\},%xmm0
+[ ]*[a-f0-9]+: 62 f5 fd 58 5a 40 01[ ]+vcvtpd2ph (0x)?8\(%eax\)\{1to8\},%xmm0
+[ ]*[a-f0-9]+: 62 f5 7c 48 5a 40 01[ ]+vcvtph2pd 0x10\(%eax\),%zmm0
+[ ]*[a-f0-9]+: 62 f5 7c 58 5a 40 01[ ]+vcvtph2pd (0x)?2\(%eax\)\{1to8\},%zmm0
#pass
# vaddss
.insn VEX.LIG.F3.0F 0x58, %xmm0, %xmm1, %xmm2
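+ # {:d4} declares a 4-byte memory operand, allowing the displacement
+ # of 4 to be encoded as a Disp8 of 1.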
+ .insn EVEX.LIG.F3.0F.W0 0x58, 4(%eax){:d4}, %xmm1, %xmm2
# vfmaddps
.insn VEX.66.0F3A.W0 0x68, %xmm0, (%ecx), %xmm2, %xmm3
# vgather...
.insn VEX.66.0f38.W0 0x92, %xmm0, (%eax, %xmm1, 2), %xmm3
.insn EVEX.66.0f38.W1 0x93, (%eax, %xmm1, 2), %xmm3{%k4}
+
+ # vexpandps
+ .insn EVEX.66.0F38.W0 0x88, 4(%eax){:d4}, %ymm1
+
+ # vcvtpd2phz
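+ # Three equivalent ways of establishing the 64-byte memory operand:
+ # an explicit EVEX.512 length, a %zmm register operand, or {:d64}.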
+ .insn EVEX.512.66.M5.W1 0x5a, 64(%eax), %xmm0
+ .insn EVEX.66.M5.W1 0x5a, 64(%eax), %zmm0
+ .insn EVEX.66.M5.W1 0x5a, 64(%eax){:d64}, %xmm0
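+
+ # With {1to8} broadcast, Disp8 scales by the 8-byte element size,
+ # again derived from EVEX.512, %zmm0, or spelled out via {:d8}.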
+ .insn EVEX.512.66.M5.W1 0x5a, 8(%eax){1to8}, %xmm0
+ .insn EVEX.66.M5.W1 0x5a, 8(%eax){1to8}, %zmm0
+ .insn EVEX.66.M5.W1 0x5a, 8(%eax){1to8:d8}, %xmm0
+
+ # vcvtph2pd
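+ # The %zmm0 destination alone would imply 64 bytes; the fp16 source
+ # is only 16 bytes (2-byte elements under broadcast), hence {:d16}
+ # and {1to8:d2}.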
+ .insn EVEX.M5.W0 0x5a, 16(%eax){:d16}, %zmm0
+ .insn EVEX.M5.W0 0x5a, 2(%eax){1to8:d2}, %zmm0
[ ]*[a-f0-9]+: c4 c1 71 58 d0[ ]+vaddpd %xmm8,%xmm1,%xmm2
[ ]*[a-f0-9]+: c5 b5 58 d0[ ]+vaddpd %ymm0,%ymm9,%ymm2
[ ]*[a-f0-9]+: c5 72 58 d0[ ]+vaddss %xmm0,%xmm1,%xmm10
+[ ]*[a-f0-9]+: 62 f1 76 08 58 50 01[ ]+\{evex\} vaddss (0x)?4\(%rax\),%xmm1,%xmm2
[ ]*[a-f0-9]+: c4 e3 69 68 19 80[ ]+vfmaddps %xmm8,\(%rcx\),%xmm2,%xmm3
[ ]*[a-f0-9]+: 67 c4 e3 e9 68 19 00[ ]+vfmaddps \(%ecx\),%xmm0,%xmm2,%xmm3
[ ]*[a-f0-9]+: c4 c3 e9 68 18 10[ ]+vfmaddps \(%r8\),%xmm1,%xmm2,%xmm3
[ ]*[a-f0-9]+: 62 f2 fd 04 93 1c 48[ ]+vgatherqpd \(%rax,%xmm17,2\),%xmm3\{%k4\}
[ ]*[a-f0-9]+: 62 72 fd 0c 93 1c 48[ ]+vgatherqpd \(%rax,%xmm1,2\),%xmm11\{%k4\}
[ ]*[a-f0-9]+: 62 e2 fd 0c 93 1c 48[ ]+vgatherqpd \(%rax,%xmm1,2\),%xmm19\{%k4\}
+[ ]*[a-f0-9]+: 62 f2 7d 28 88 48 01[ ]+vexpandps (0x)?4\(%rax\),%ymm1
+[ ]*[a-f0-9]+: 62 f5 fd 48 5a 40 01[ ]+vcvtpd2phz 0x40\(%rax\),%xmm0
+[ ]*[a-f0-9]+: 62 f5 fd 48 5a 40 01[ ]+vcvtpd2phz 0x40\(%rax\),%xmm0
+[ ]*[a-f0-9]+: 62 f5 fd 48 5a 40 01[ ]+vcvtpd2phz 0x40\(%rax\),%xmm0
+[ ]*[a-f0-9]+: 62 f5 fd 58 5a 40 01[ ]+vcvtpd2ph (0x)?8\(%rax\)\{1to8\},%xmm0
+[ ]*[a-f0-9]+: 62 f5 fd 58 5a 40 01[ ]+vcvtpd2ph (0x)?8\(%rax\)\{1to8\},%xmm0
+[ ]*[a-f0-9]+: 62 f5 fd 58 5a 40 01[ ]+vcvtpd2ph (0x)?8\(%rax\)\{1to8\},%xmm0
+[ ]*[a-f0-9]+: 62 f5 7c 48 5a 40 01[ ]+vcvtph2pd 0x10\(%rax\),%zmm0
+[ ]*[a-f0-9]+: 62 f5 7c 58 5a 40 01[ ]+vcvtph2pd (0x)?2\(%rax\)\{1to8\},%zmm0
#pass
# vaddss
.insn VEX.LIG.F3.0F 0x58, %xmm0, %xmm1, %xmm10
+ .insn EVEX.LIG.F3.0F.W0 0x58, 4(%rax){:d4}, %xmm1, %xmm2
# vfmaddps
.insn VEX.66.0F3A.W0 0x68, %xmm8, (%rcx), %xmm2, %xmm3
.insn EVEX.66.0f38.W1 0x93, (%rax, %xmm17, 2), %xmm3{%k4}
.insn EVEX.66.0f38.W1 0x93, (%rax, %xmm1, 2), %xmm11{%k4}
.insn EVEX.66.0f38.W1 0x93, (%rax, %xmm1, 2), %xmm19{%k4}
+
+ # vexpandps
+ .insn EVEX.66.0F38.W0 0x88, 4(%rax){:d4}, %ymm1
+
+ # vcvtpd2phz
+ .insn EVEX.512.66.M5.W1 0x5a, 64(%rax), %xmm0
+ .insn EVEX.66.M5.W1 0x5a, 64(%rax), %zmm0
+ .insn EVEX.66.M5.W1 0x5a, 64(%rax){:d64}, %xmm0
+ .insn EVEX.512.66.M5.W1 0x5a, 8(%rax){1to8}, %xmm0
+ .insn EVEX.66.M5.W1 0x5a, 8(%rax){1to8}, %zmm0
+ .insn EVEX.66.M5.W1 0x5a, 8(%rax){1to8:d8}, %xmm0
+
+ # vcvtph2pd
+ .insn EVEX.M5.W0 0x5a, 16(%rax){:d16}, %zmm0
+ .insn EVEX.M5.W0 0x5a, 2(%rax){1to8:d2}, %zmm0