i386_operand_type temp;
/* Register operand. */
- if (intel_state.base || intel_state.index || intel_state.seg)
+ if (intel_state.base || intel_state.index || intel_state.seg
+ || i.imm_bits[this_operand])
{
as_bad (_("invalid operand"));
return 0;
|| intel_state.is_mem)
{
/* Memory operand. */
+ if (i.imm_bits[this_operand])
+ {
+ as_bad (_("invalid operand"));
+ return 0;
+ }
+
if (i.mem_operands)
{
/* Handle
unsigned int flags[MAX_OPERANDS];
#define Operand_PCrel 1
#define Operand_Mem 2
+#define Operand_Signed 4 /* .insn only */
/* Relocation type for operand */
enum bfd_reloc_code_real reloc[MAX_OPERANDS];
/* .insn allows for reserved opcode spaces. */
unsigned char insn_opcode_space;
+ /* .insn also allows (and sometimes requires) specifying immediate size. */
+ unsigned char imm_bits[MAX_OPERANDS];
+
/* Register is in low 3 bits of opcode. */
bool short_form;
i.reloc[xchg2] = i.reloc[xchg1];
i.reloc[xchg1] = temp_reloc;
+ temp_flags = i.imm_bits[xchg2];
+ i.imm_bits[xchg2] = i.imm_bits[xchg1];
+ i.imm_bits[xchg1] = temp_flags;
+
if (i.mask.reg)
{
if (i.mask.operand == xchg1)
if (i.types[n].bitfield.imm32s
&& (i.suffix == QWORD_MNEM_SUFFIX
- || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
+ || (!i.suffix && i.tm.opcode_modifier.no_lsuf)
+ || dot_insn ()))
sign = 1;
else
sign = 0;
if (i.disp_operands && !optimize_disp (&i.tm))
goto done;
+ /* Establish size for immediate operands. */
+ for (j = 0; j < i.imm_operands; ++j)
+ {
+ expressionS *expP = i.op[j].imms;
+
+ gas_assert (operand_type_check (i.types[j], imm));
+ operand_type_set (&i.types[j], 0);
+
+ if (i.imm_bits[j] > 32)
+ i.types[j].bitfield.imm64 = 1;
+ else if (i.imm_bits[j] > 16)
+ {
+ if (flag_code == CODE_64BIT && (i.flags[j] & Operand_Signed))
+ i.types[j].bitfield.imm32s = 1;
+ else
+ i.types[j].bitfield.imm32 = 1;
+ }
+ else if (i.imm_bits[j] > 8)
+ i.types[j].bitfield.imm16 = 1;
+ else if (i.imm_bits[j] > 0)
+ {
+ if (i.flags[j] & Operand_Signed)
+ i.types[j].bitfield.imm8s = 1;
+ else
+ i.types[j].bitfield.imm8 = 1;
+ }
+ else if (expP->X_op == O_constant)
+ {
+ i.types[j] = smallest_imm_type (expP->X_add_number);
+ i.types[j].bitfield.imm1 = 0;
+ /* Oddly enough imm_size() checks imm64 first, so the bit needs
+ zapping since smallest_imm_type() sets it unconditionally. */
+ if (flag_code != CODE_64BIT)
+ {
+ i.types[j].bitfield.imm64 = 0;
+ i.types[j].bitfield.imm32s = 0;
+ i.types[j].bitfield.imm32 = 1;
+ }
+ else if (i.types[j].bitfield.imm32 || i.types[j].bitfield.imm32s)
+ i.types[j].bitfield.imm64 = 0;
+ }
+ else
+ /* Non-constant expressions are sized heuristically. */
+ switch (flag_code)
+ {
+ case CODE_64BIT: i.types[j].bitfield.imm32s = 1; break;
+ case CODE_32BIT: i.types[j].bitfield.imm32 = 1; break;
+ case CODE_16BIT: i.types[j].bitfield.imm16 = 1; break;
+ }
+ }
+
for (j = 0; j < i.operands; ++j)
i.tm.operand_types[j] = i.types[j];
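
For illustration (this commentary is not part of the patch), here is how the sizing rules above come out for a few AT&T-syntax operands; the first two lines are hypothetical, the third mirrors the 64-bit testcase further down:

.insn 0x04, $0x12             # constant, no specifier: smallest_imm_type() -> byte immediate (04 12)
.insn 0x05, $var              # non-constant, no specifier: mode heuristic -> imm32 (imm32s in 64-bit code)
.insn rex.w 0x05, $var{:s32}  # explicit {:s32} in 64-bit code -> imm32s, relocated as R_X86_64_32S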
else if (dot_insn () && *op_string == ':')
{
dot_insn_modifier:
- if (op_string[1] == 'd')
+ switch (op_string[1])
{
unsigned long n;
+ case 'd':
if (i.memshift < 32)
goto duplicated_vec_op;

n = strtoul (op_string + 2, &end_op, 0);
if (n)
for (i.memshift = 0; !(n & 1); n >>= 1)
++i.memshift;
if (i.memshift < 32 && n == 1)
op_string = end_op;
+ break;
+
+ case 's': case 'u':
+ /* This isn't really a "vector" operation, but a sign/size
+ specifier for immediate operands of .insn. Note that the AT&T
+ syntax equivalent is handled in i386_immediate(). */
+ if (!intel_syntax)
+ break;
+
+ if (i.imm_bits[this_operand])
+ goto duplicated_vec_op;
+
+ n = strtoul (op_string + 2, &end_op, 0);
+ if (n && n <= (flag_code == CODE_64BIT ? 64 : 32))
+ {
+ i.imm_bits[this_operand] = n;
+ if (op_string[1] == 's')
+ i.flags[this_operand] |= Operand_Signed;
+ op_string = end_op;
+ }
+ break;
}
}
/* Check masking operation. */
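
In Intel syntax the {:s<n>}/{:u<n>} trailer is thus consumed by the vector-operation parser above rather than by i386_immediate(). A hypothetical example, not taken from the testcases (the 0xe2 loop opcode just happens to consume a signed byte):

.intel_syntax noprefix
.insn 0xe2, -8{:s8}    # imm_bits = 8 plus Operand_Signed -> imm8s; emits e2 f8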
exp_seg = expression (exp);
+ /* For .insn immediates there may be a size specifier. */
+ if (dot_insn () && *input_line_pointer == '{' && input_line_pointer[1] == ':'
+ && (input_line_pointer[2] == 's' || input_line_pointer[2] == 'u'))
+ {
+ char *e;
+ unsigned long n = strtoul (input_line_pointer + 3, &e, 0);
+
+ if (*e == '}' && n && n <= (flag_code == CODE_64BIT ? 64 : 32))
+ {
+ i.imm_bits[this_operand] = n;
+ if (input_line_pointer[2] == 's')
+ i.flags[this_operand] |= Operand_Signed;
+ input_line_pointer = e + 1;
+ }
+ }
+
SKIP_WHITESPACE ();
if (*input_line_pointer)
as_bad (_("junk `%s' after expression"), input_line_pointer);
+#as: --divide
#objdump: -dw
#name: .insn (32-bit code)
+#xfail: *-*-darwin*
.*: +file format .*
[ ]*[a-f0-9]+: f3 90[ ]+pause
[ ]*[a-f0-9]+: f3 90[ ]+pause
[ ]*[a-f0-9]+: d9 ee[ ]+fldz
+[ ]*[a-f0-9]+: d9 ee[ ]+fldz
[ ]*[a-f0-9]+: f3 0f 01 e8[ ]+setssbsy
[ ]*[a-f0-9]+: 8b c1[ ]+mov %ecx,%eax
[ ]*[a-f0-9]+: 66 8b c8[ ]+mov %ax,%cx
[ ]*[a-f0-9]+: 8b 0c 05 44 44 00 00[ ]+mov 0x4444\(,%eax,1\),%ecx
[ ]*[a-f0-9]+: 66 0f b6 cc[ ]+movzbw %ah,%cx
[ ]*[a-f0-9]+: 0f b7 c8[ ]+movzwl %ax,%ecx
+[ ]*[a-f0-9]+: 64 f0 80 30 01[ ]+lock xorb \$(0x)?1,%fs:\(%eax\)
[ ]*[a-f0-9]+: 0f ca[ ]+bswap %edx
+[ ]*[a-f0-9]+: c7 f8 02 00 00 00[ ]+xbegin [0-9a-f]+ <insn\+.*>
+[ ]*[a-f0-9]+: e2 f8[ ]+loop [0-9a-f]+ <insn\+.*>
[ ]*[a-f0-9]+: c5 fc 77[ ]+vzeroall
[ ]*[a-f0-9]+: c4 e1 7c 77[ ]+vzeroall
[ ]*[a-f0-9]+: c5 f1 58 d0[ ]+vaddpd %xmm0,%xmm1,%xmm2
[ ]*[a-f0-9]+: c4 e3 69 68 19 00[ ]+vfmaddps %xmm0,\(%ecx\),%xmm2,%xmm3
[ ]*[a-f0-9]+: c4 e3 e9 68 19 00[ ]+vfmaddps \(%ecx\),%xmm0,%xmm2,%xmm3
[ ]*[a-f0-9]+: c4 e3 e9 68 18 10[ ]+vfmaddps \(%eax\),%xmm1,%xmm2,%xmm3
+[ ]*[a-f0-9]+: c4 e3 69 48 19 00[ ]+vpermil2ps \$(0x)?0,%xmm0,\(%ecx\),%xmm2,%xmm3
+[ ]*[a-f0-9]+: c4 e3 e9 48 19 02[ ]+vpermil2ps \$(0x)?2,\(%ecx\),%xmm0,%xmm2,%xmm3
+[ ]*[a-f0-9]+: c4 e3 e9 48 18 13[ ]+vpermil2ps \$(0x)?3,\(%eax\),%xmm1,%xmm2,%xmm3
[ ]*[a-f0-9]+: c5 f8 92 c8[ ]+kmovw %eax,%k1
[ ]*[a-f0-9]+: c5 f8 93 c1[ ]+kmovw %k1,%eax
[ ]*[a-f0-9]+: 62 f1 74 18 58 d0[ ]+vaddps \{rn-sae\},%zmm0,%zmm1,%zmm2
[ ]*[a-f0-9]+: 62 f5 fd 58 5a 40 01[ ]+vcvtpd2ph (0x)?8\(%eax\)\{1to8\},%xmm0
[ ]*[a-f0-9]+: 62 f5 7c 48 5a 40 01[ ]+vcvtph2pd 0x10\(%eax\),%zmm0
[ ]*[a-f0-9]+: 62 f5 7c 58 5a 40 01[ ]+vcvtph2pd (0x)?2\(%eax\)\{1to8\},%zmm0
+[ ]*[a-f0-9]+: 62 f3 7d 28 66 40 01 ff[ ]+vfpclasspsy \$0xff,0x20\(%eax\),%k0
+[ ]*[a-f0-9]+: 62 f3 7d 28 66 40 01 ff[ ]+vfpclasspsy \$0xff,0x20\(%eax\),%k0
+[ ]*[a-f0-9]+: 62 f3 7d 38 66 40 01 ff[ ]+vfpclassps \$0xff,(0x)?4\(%eax\)\{1to8\},%k0
+[ ]*[a-f0-9]+: 62 f3 7d 38 66 40 01 ff[ ]+vfpclassps \$0xff,(0x)?4\(%eax\)\{1to8\},%k0
#pass
# fldz
.insn 0xd9ee
+ .insn 0xd9, $0xee
# setssbsy
.insn 0xf30f01e8
.insn 0x0fb6, %ah, %cx
.insn 0x0fb7, %eax, %ecx
+ # xorb
+ .insn lock 0x80/6, $1, %fs:(%eax)
+
# bswap
.insn 0x0fc8+r, %edx
+1:
+ # xbegin 3f
+ .insn 0xc7f8, $3f-2f{:s32}
+2:
+ # loop 1b
+ .insn 0xe2, $1b-3f{:s8}
+3:
+
# vzeroall
.insn VEX.256.0F.WIG 0x77
.insn {vex3} VEX.L1 0x0f77
.insn VEX.66.0F3A.W1 0x68, %xmm0, (%ecx), %xmm2, %xmm3
.insn VEX.66.0F3A.W1 0x68, (%eax), %xmm1, %xmm2, %xmm3
+ # vpermil2ps
+ .insn VEX.66.0F3A.W0 0x48, $0, %xmm0, (%ecx), %xmm2, %xmm3
+ .insn VEX.66.0F3A.W1 0x48, $2, %xmm0, (%ecx), %xmm2, %xmm3
+ .insn VEX.66.0F3A.W1 0x48, $3, (%eax), %xmm1, %xmm2, %xmm3
+
# kmovw
.insn VEX.L0.0F.W0 0x92, %eax, %k1
.insn VEX.L0.0F.W0 0x93, %k1, %eax
# vcvtph2pd
.insn EVEX.M5.W0 0x5a, 16(%eax){:d16}, %zmm0
.insn EVEX.M5.W0 0x5a, 2(%eax){1to8:d2}, %zmm0
+
+ .intel_syntax noprefix
+ # vfpclassps
+ .insn EVEX.256.66.0f3a.W0 0x66, k0, [eax+32], 0xff
+ .insn EVEX.66.0f3a.W0 0x66, k0, ymmword ptr [eax+32], 0xff
+ .insn EVEX.256.66.0f3a.W0 0x66, k0, [eax+4]{1to8}, 0xff
+ .insn EVEX.66.0f3a.W0 0x66, k0, dword ptr [eax+4]{1to8}, 0xff
-#objdump: -dw
+#as: --divide
+#objdump: -dwr
#name: .insn (64-bit code)
+#xfail: *-*-darwin*
.*: +file format .*
[ ]*[a-f0-9]+: 66 0f be cc[ ]+movsbw %ah,%cx
[ ]*[a-f0-9]+: 0f bf c8[ ]+movswl %ax,%ecx
[ ]*[a-f0-9]+: 48 63 c8[ ]+movslq %eax,%rcx
+[ ]*[a-f0-9]+: f0 80 35 ((00|ff) ){4}01[ ]+lock xorb \$(0x)?1,[-x01]+\(%rip\) *# .*: (R_X86_64_PC32 lock-(0x)?5|IMAGE_REL_AMD64_REL32 lock)
[ ]*[a-f0-9]+: 48 0f ca[ ]+bswap %rdx
[ ]*[a-f0-9]+: 41 0f c8[ ]+bswap %r8d
+[ ]*[a-f0-9]+: c7 f8 02 00 00 00[ ]+xbegin [0-9a-f]+ <insn\+.*>
+[ ]*[a-f0-9]+: e2 f8[ ]+loop [0-9a-f]+ <insn\+.*>
+[ ]*[a-f0-9]+: 05 00 00 00 00[ ]+add \$(0x)?0,%eax .*: (R_X86_64_32|IMAGE_REL_AMD64_ADDR32) var
+[ ]*[a-f0-9]+: 48 05 00 00 00 00[ ]+add \$(0x)?0,%rax .*: R_X86_64_32S var
+[ ]*[a-f0-9]+: 81 3d (00|fc) ((00|ff) ){3}13 12 23 21[ ]+cmpl \$0x21231213,[-x04]+\(%rip\) *# .*: (R_X86_64_PC32 var-(0x)?8|IMAGE_REL_AMD64_REL32 var)
[ ]*[a-f0-9]+: c5 fc 77[ ]+vzeroall
[ ]*[a-f0-9]+: c4 e1 7c 77[ ]+vzeroall
[ ]*[a-f0-9]+: c4 c1 71 58 d0[ ]+vaddpd %xmm8,%xmm1,%xmm2
[ ]*[a-f0-9]+: c4 e3 69 68 19 80[ ]+vfmaddps %xmm8,\(%rcx\),%xmm2,%xmm3
[ ]*[a-f0-9]+: 67 c4 e3 e9 68 19 00[ ]+vfmaddps \(%ecx\),%xmm0,%xmm2,%xmm3
[ ]*[a-f0-9]+: c4 c3 e9 68 18 10[ ]+vfmaddps \(%r8\),%xmm1,%xmm2,%xmm3
+[ ]*[a-f0-9]+: c4 e3 69 48 19 80[ ]+vpermil2ps \$(0x)?0,%xmm8,\(%rcx\),%xmm2,%xmm3
+[ ]*[a-f0-9]+: 67 c4 e3 e9 48 19 02[ ]+vpermil2ps \$(0x)?2,\(%ecx\),%xmm0,%xmm2,%xmm3
+[ ]*[a-f0-9]+: c4 c3 e9 48 18 13[ ]+vpermil2ps \$(0x)?3,\(%r8\),%xmm1,%xmm2,%xmm3
[ ]*[a-f0-9]+: c4 c1 78 92 c8[ ]+kmovw %r8d,%k1
[ ]*[a-f0-9]+: c5 78 93 c1[ ]+kmovw %k1,%r8d
[ ]*[a-f0-9]+: 62 b1 74 38 58 d0[ ]+vaddps \{rd-sae\},%zmm16,%zmm1,%zmm2
.insn 0x0fbf, %eax, %ecx
.insn 0x63, %rax, %rcx
+ # xorb
+ .insn lock 0x80/6, $1, lock(%rip)
+
# bswap
.insn 0x0fc8+r, %rdx
.insn 0x0fc8+r, %r8d
+1:
+ # xbegin 3f
+ .insn 0xc7f8, $3f-2f{:s32}
+2:
+ # loop 1b
+ .insn 0xe2, $1b-3f{:s8}
+3:
+
+ # add $var, %eax
+ .insn 0x05, $var{:u32}
+
+ # add $var, %rax
+ .insn rex.w 0x05, $var{:s32}
+
+ # cmpl (32-bit immediate split into two 16-bit halves)
+ .insn 0x81/7, $0x1213, $0x2123, var(%rip)
+
# vzeroall
.insn VEX.256.0F.WIG 0x77
.insn {vex3} VEX.L1 0x0f77
.insn VEX.66.0F3A.W1 0x68, %xmm0, (%ecx), %xmm2, %xmm3
.insn VEX.66.0F3A.W1 0x68, (%r8), %xmm1, %xmm2, %xmm3
+ # vpermil2ps
+ .insn VEX.66.0F3A.W0 0x48, $0, %xmm8, (%rcx), %xmm2, %xmm3
+ .insn VEX.66.0F3A.W1 0x48, $2, %xmm0, (%ecx), %xmm2, %xmm3
+ .insn VEX.66.0F3A.W1 0x48, $3, (%r8), %xmm1, %xmm2, %xmm3
+
# kmovw
.insn VEX.L0.0F.W0 0x92, %r8d, %k1
.insn VEX.L0.0F.W0 0x93, %k1, %r8d
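
To spell out the split-immediate cmpl case above: the two 16-bit constants are emitted back to back, each little-endian, right after the opcode, the /7 ModRM byte, and the 32-bit displacement:

81 3d <disp32> 13 12 23 21    # $0x1213 then $0x2123

Read back as one little-endian dword, 13 12 23 21 is 0x21231213, which is exactly the immediate the disassembler shows on the cmpl line of the expected dump.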