#include "subsegs.h"
#include "dwarf2dbg.h"
#include "dw2gencfi.h"
+#include "gen-sframe.h"
+#include "sframe.h"
#include "elf/x86-64.h"
#include "opcodes/i386-init.h"
#include <limits.h>
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX 'l'
#define QWORD_MNEM_SUFFIX 'q'
-/* Intel Syntax. Use a non-ascii letter since since it never appears
- in instructions. */
-#define LONG_DOUBLE_MNEM_SUFFIX '\1'
#define END_OF_INSN '\0'
/* The operand to a branch insn indicates an absolute branch. */
bool jumpabsolute;
+ /* The operand to a branch insn indicates a far branch. */
+ bool far_branch;
+
+ /* There is a memory operand of (%dx) which should be only used
+ with input/output instructions. */
+ bool input_output_operand;
+
/* Extended states. */
enum
{
{ saeonly, STRING_COMMA_LEN ("sae") },
};
+/* Segment-override prefix opcodes, to be indexed by segment register
+   number.  The entry order must match the hardware encoding of the
+   segment registers: ES, CS, SS, DS, FS, GS.  */
+static const unsigned char i386_seg_prefixes[] = {
+  ES_PREFIX_OPCODE,
+  CS_PREFIX_OPCODE,
+  SS_PREFIX_OPCODE,
+  DS_PREFIX_OPCODE,
+  FS_PREFIX_OPCODE,
+  GS_PREFIX_OPCODE
+};
+
/* List of chars besides those in app.c:symbol_chars that can start an
operand. Used to prevent the scrubber eating vital white-space. */
const char extra_symbol_chars[] = "*%-([{}"
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* 1 if generating code for a shared library. */
static int shared = 0;
+
+unsigned int x86_sframe_cfa_sp_reg;
+/* The other CFA base register for SFrame unwind info. */
+unsigned int x86_sframe_cfa_fp_reg;
+unsigned int x86_sframe_cfa_ra_reg;
+
#endif
/* 1 for intel syntax,
ARCH (znver1, ZNVER, ZNVER1, false),
ARCH (znver2, ZNVER, ZNVER2, false),
ARCH (znver3, ZNVER, ZNVER3, false),
+ ARCH (znver4, ZNVER, ZNVER4, false),
ARCH (btver1, BT, BTVER1, false),
ARCH (btver2, BT, BTVER2, false),
SUBARCH (padlock, PADLOCK, PADLOCK, false),
SUBARCH (pacifica, SVME, SVME, true),
SUBARCH (svme, SVME, SVME, false),
- SUBARCH (sse4a, SSE4A, SSE4A, false),
SUBARCH (abm, ABM, ABM, false),
SUBARCH (bmi, BMI, BMI, false),
SUBARCH (tbm, TBM, TBM, false),
SUBARCH (hreset, HRESET, ANY_HRESET, false),
SUBARCH (avx512_fp16, AVX512_FP16, ANY_AVX512_FP16, false),
SUBARCH (prefetchi, PREFETCHI, PREFETCHI, false),
+ SUBARCH (avx_ifma, AVX_IFMA, ANY_AVX_IFMA, false),
+ SUBARCH (avx_vnni_int8, AVX_VNNI_INT8, ANY_AVX_VNNI_INT8, false),
+ SUBARCH (cmpccxadd, CMPCCXADD, ANY_CMPCCXADD, false),
+ SUBARCH (wrmsrns, WRMSRNS, ANY_WRMSRNS, false),
+ SUBARCH (msrlist, MSRLIST, ANY_MSRLIST, false),
+ SUBARCH (avx_ne_convert, AVX_NE_CONVERT, ANY_AVX_NE_CONVERT, false),
+ SUBARCH (rao_int, RAO_INT, ANY_RAO_INT, false),
+ SUBARCH (rmpquery, RMPQUERY, RMPQUERY, false),
};
#undef SUBARCH
{
switch (ARRAY_SIZE(x->array))
{
+ case 5:
+ if (x->array[4])
+ return 0;
+ /* Fall through. */
case 4:
if (x->array[3])
return 0;
{
switch (ARRAY_SIZE(x->array))
{
+ case 5:
+ if (x->array[4] != y->array[4])
+ return 0;
+ /* Fall through. */
case 4:
if (x->array[3] != y->array[3])
return 0;
{
switch (ARRAY_SIZE (x.array))
{
+ case 5:
+ x.array [4] &= y.array [4];
+ /* Fall through. */
case 4:
x.array [3] &= y.array [3];
/* Fall through. */
{
switch (ARRAY_SIZE (x.array))
{
+ case 5:
+ x.array [4] |= y.array [4];
+ /* Fall through. */
case 4:
x.array [3] |= y.array [3];
/* Fall through. */
{
switch (ARRAY_SIZE (x.array))
{
+ case 5:
+ x.array [4] &= ~y.array [4];
+ /* Fall through. */
case 4:
x.array [3] &= ~y.array [3];
/* Fall through. */
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp = OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
-static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
-static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
-static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
{
if (i.types[j].bitfield.class != Reg
&& i.types[j].bitfield.class != RegSIMD
- && t->opcode_modifier.anysize)
+ && t->opcode_modifier.operandconstraint == ANY_SIZE)
continue;
if (t->operand_types[j].bitfield.class == Reg
/* If given types g0 and g1 are registers they must be of the same type
unless the expected operand type register overlap is null.
- Some Intel syntax memory operand size checking also happens here. */
+ Intel syntax sized memory operands are also checked here. */
static INLINE int
operand_type_register_match (i386_operand_type g0,
{
if (g0.bitfield.class != Reg
&& g0.bitfield.class != RegSIMD
- && (!operand_type_check (g0, anymem)
- || g0.bitfield.unspecified
- || (t0.bitfield.class != Reg
- && t0.bitfield.class != RegSIMD)))
+ && (g0.bitfield.unspecified
+ || !operand_type_check (g0, anymem)))
return 1;
if (g1.bitfield.class != Reg
&& g1.bitfield.class != RegSIMD
- && (!operand_type_check (g1, anymem)
- || g1.bitfield.unspecified
- || (t1.bitfield.class != Reg
- && t1.bitfield.class != RegSIMD)))
+ && (g1.bitfield.unspecified
+ || !operand_type_check (g1, anymem)))
return 1;
if (g0.bitfield.byte == g1.bitfield.byte
x86_dwarf2_return_column = 16;
#endif
x86_cie_data_alignment = -8;
+#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+ x86_sframe_cfa_sp_reg = 7;
+ x86_sframe_cfa_fp_reg = 6;
+#endif
}
else
{
exp = &im_expressions[i.imm_operands++];
i.op[i.operands].imms = exp;
- i.types[i.operands] = imm8;
+ i.types[i.operands].bitfield.imm8 = 1;
i.operands++;
exp->X_op = O_constant;
exp->X_add_number = i.tm.extension_opcode;
{
/* Anysize insns: lea, invlpg, clflush, prefetch*, bndmk, bndcl, bndcu,
bndcn, bndstx, bndldx, clflushopt, clwb, cldemote. */
- if (i.tm.opcode_modifier.anysize)
+ if (i.tm.opcode_modifier.operandconstraint == ANY_SIZE)
return 0;
/* pop. */
/* All Intel opcodes have reversed operands except for "bound", "enter",
"invlpg*", "monitor*", "mwait*", "tpause", "umwait", "pvalidate",
- "rmpadjust", and "rmpupdate". We also don't reverse intersegment "jmp"
- and "call" instructions with 2 immediate operands so that the immediate
- segment precedes the offset consistently in Intel and AT&T modes. */
+ "rmpadjust", "rmpupdate", and "rmpquery". We also don't reverse
+ intersegment "jmp" and "call" instructions with 2 immediate operands so
+ that the immediate segment precedes the offset consistently in Intel and
+ AT&T modes. */
if (intel_syntax
&& i.operands > 1
&& (strcmp (mnemonic, "bound") != 0)
&& !startswith (mnemonic, "rmp")
&& (strcmp (mnemonic, "tpause") != 0)
&& (strcmp (mnemonic, "umwait") != 0)
- && !(operand_type_check (i.types[0], imm)
+ && !(i.operands == 2
+ && operand_type_check (i.types[0], imm)
&& operand_type_check (i.types[1], imm)))
swap_operands ();
i.disp_operands = 0;
}
+ /* The memory operand of (%dx) should be only used with input/output
+ instructions (base opcodes: 0x6c, 0x6e, 0xec, 0xee). */
+ if (i.input_output_operand
+ && ((i.tm.base_opcode | 0x82) != 0xee
+ || i.tm.opcode_modifier.opcodespace != SPACE_BASE))
+ {
+ as_bad (_("input/output port address isn't allowed with `%s'"),
+ i.tm.name);
+ return;
+ }
+
if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
optimize_encoding ();
if (!process_operands ())
return;
}
- else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
+ else if (!quiet_warnings && i.tm.opcode_modifier.operandconstraint == UGH)
{
/* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
as_warn (_("translating to `%sp'"), i.tm.name);
}
/* Check if default mask is allowed. */
- if (t->opcode_modifier.nodefmask
+ if (t->opcode_modifier.operandconstraint == NO_DEFAULT_MASK
&& (!i.mask.reg || i.mask.reg->reg_num == 0))
{
i.error = no_default_mask;
/* For some special instructions require that destination must be distinct
from source registers. */
- if (t->opcode_modifier.distinctdest)
+ if (t->opcode_modifier.operandconstraint == DISTINCT_DEST)
{
unsigned int dest_reg = i.operands - 1;
i386_operand_type overlap0, overlap1, overlap2, overlap3;
i386_operand_type overlap4;
unsigned int found_reverse_match;
- i386_opcode_modifier suffix_check;
i386_operand_type operand_types [MAX_OPERANDS];
int addr_prefix_disp;
unsigned int j, size_match, check_register, errline = __LINE__;
found_reverse_match = 0;
addr_prefix_disp = -1;
- /* Prepare for mnemonic suffix check. */
- memset (&suffix_check, 0, sizeof (suffix_check));
- switch (mnem_suffix)
- {
- case BYTE_MNEM_SUFFIX:
- suffix_check.no_bsuf = 1;
- break;
- case WORD_MNEM_SUFFIX:
- suffix_check.no_wsuf = 1;
- break;
- case SHORT_MNEM_SUFFIX:
- suffix_check.no_ssuf = 1;
- break;
- case LONG_MNEM_SUFFIX:
- suffix_check.no_lsuf = 1;
- break;
- case QWORD_MNEM_SUFFIX:
- suffix_check.no_qsuf = 1;
- break;
- default:
- /* NB: In Intel syntax, normally we can check for memory operand
- size when there is no mnemonic suffix. But jmp and call have
- 2 different encodings with Dword memory operand size, one with
- No_ldSuf and the other without. i.suffix is set to
- LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
- if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
- suffix_check.no_ldsuf = 1;
- }
-
for (t = current_templates->start; t < current_templates->end; t++)
{
addr_prefix_disp = -1;
/* Check the suffix. */
specific_error = progress (invalid_instruction_suffix);
- if ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
- || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
- || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
- || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
- || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
- || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf))
+ if ((t->opcode_modifier.no_bsuf && mnem_suffix == BYTE_MNEM_SUFFIX)
+ || (t->opcode_modifier.no_wsuf && mnem_suffix == WORD_MNEM_SUFFIX)
+ || (t->opcode_modifier.no_lsuf && mnem_suffix == LONG_MNEM_SUFFIX)
+ || (t->opcode_modifier.no_ssuf && mnem_suffix == SHORT_MNEM_SUFFIX)
+ || (t->opcode_modifier.no_qsuf && mnem_suffix == QWORD_MNEM_SUFFIX))
continue;
specific_error = progress (operand_size_mismatch);
if (i.jumpabsolute && t->opcode_modifier.jump != JUMP_ABSOLUTE)
continue;
+ /* In Intel syntax, normally we can check for memory operand size when
+ there is no mnemonic suffix. But jmp and call have 2 different
+ encodings with Dword memory operand size. Skip the "near" one
+ (permitting a register operand) when "far" was requested. */
+ if (i.far_branch
+ && t->opcode_modifier.jump == JUMP_ABSOLUTE
+ && t->operand_types[0].bitfield.class == Reg)
+ continue;
+
for (j = 0; j < MAX_OPERANDS; j++)
operand_types[j] = t->operand_types[j];
&& t->opcode_modifier.opcodespace == SPACE_BASE
&& i.types[0].bitfield.instance == Accum
&& i.types[0].bitfield.dword
- && i.types[1].bitfield.instance == Accum
- && i.types[1].bitfield.dword)
+ && i.types[1].bitfield.instance == Accum)
continue;
/* xrelease mov %eax, <disp> is another special case. It must not
match the accumulator-only encoding of mov. */
specific_error = progress (i.error);
continue;
}
- /* found_reverse_match holds which of D or FloatR
+ /* found_reverse_match holds which variant of D
we've found. */
if (!t->opcode_modifier.d)
found_reverse_match = 0;
else if (operand_types[0].bitfield.tbyte)
- found_reverse_match = Opcode_FloatD;
+ {
+ if (t->opcode_modifier.operandconstraint != UGH)
+ found_reverse_match = Opcode_FloatD;
+ /* FSUB{,R} and FDIV{,R} may need a 2nd bit flipped. */
+ if ((t->base_opcode & 0x20)
+ && (intel_syntax || intel_mnemonic))
+ found_reverse_match |= Opcode_FloatR;
+ }
else if (t->opcode_modifier.vexsources)
{
found_reverse_match = Opcode_VexW;
|| (t->base_opcode | 7) != 0x27))
found_reverse_match = (t->base_opcode & 0xee) != 0x6e
? Opcode_ExtD : Opcode_SIMD_IntD;
- else
+ else if (!t->opcode_modifier.commutative)
found_reverse_match = Opcode_D;
- if (t->opcode_modifier.floatr)
- found_reverse_match |= Opcode_FloatR;
+ else
+ found_reverse_match = ~0;
}
else
{
i.tm.base_opcode ^= found_reverse_match;
- i.tm.operand_types[0] = operand_types[i.operands - 1];
- i.tm.operand_types[i.operands - 1] = operand_types[0];
-
/* Certain SIMD insns have their load forms specified in the opcode
table, and hence we need to _set_ RegMem instead of clearing it.
We need to avoid setting the bit though on insns like KMOVW. */
= i.tm.opcode_modifier.modrm && i.tm.opcode_modifier.d
&& i.tm.operands > 2U - i.tm.opcode_modifier.sse2avx
&& !i.tm.opcode_modifier.regmem;
+
+ /* Fall through. */
+ case ~0:
+ i.tm.operand_types[0] = operand_types[i.operands - 1];
+ i.tm.operand_types[i.operands - 1] = operand_types[0];
break;
case Opcode_VexW:
i.suffix = QWORD_MNEM_SUFFIX;
else if (i.reg_operands
&& (i.operands > 1 || i.types[0].bitfield.class == Reg)
- && !i.tm.opcode_modifier.addrprefixopreg)
+ && i.tm.opcode_modifier.operandconstraint != ADDR_PREFIX_OP_REG)
{
unsigned int numop = i.operands;
suffixes |= 1 << 1;
if (!i.tm.opcode_modifier.no_lsuf)
suffixes |= 1 << 2;
- if (!i.tm.opcode_modifier.no_ldsuf)
- suffixes |= 1 << 3;
if (!i.tm.opcode_modifier.no_ssuf)
suffixes |= 1 << 4;
if (flag_code == CODE_64BIT && !i.tm.opcode_modifier.no_qsuf)
need rex64. */
&& ! (i.operands == 2
&& i.tm.base_opcode == 0x90
- && i.tm.extension_opcode == None
+ && i.tm.opcode_modifier.opcodespace == SPACE_BASE
&& i.types[0].bitfield.instance == Accum
&& i.types[0].bitfield.qword
- && i.types[1].bitfield.instance == Accum
- && i.types[1].bitfield.qword))
+ && i.types[1].bitfield.instance == Accum))
i.rex |= REX_W;
break;
break;
}
- if (i.tm.opcode_modifier.addrprefixopreg)
+ if (i.tm.opcode_modifier.operandconstraint == ADDR_PREFIX_OP_REG)
{
gas_assert (!i.suffix);
gas_assert (i.reg_operands);
update_imm (unsigned int j)
{
i386_operand_type overlap = i.types[j];
- if ((overlap.bitfield.imm8
- || overlap.bitfield.imm8s
- || overlap.bitfield.imm16
- || overlap.bitfield.imm32
- || overlap.bitfield.imm32s
- || overlap.bitfield.imm64)
- && !operand_type_equal (&overlap, &imm8)
- && !operand_type_equal (&overlap, &imm8s)
- && !operand_type_equal (&overlap, &imm16)
- && !operand_type_equal (&overlap, &imm32)
- && !operand_type_equal (&overlap, &imm32s)
- && !operand_type_equal (&overlap, &imm64))
+ if (overlap.bitfield.imm8
+ + overlap.bitfield.imm8s
+ + overlap.bitfield.imm16
+ + overlap.bitfield.imm32
+ + overlap.bitfield.imm32s
+ + overlap.bitfield.imm64 > 1)
{
if (i.suffix)
{
else if (i.prefix[DATA_PREFIX])
overlap = operand_type_and (overlap,
flag_code != CODE_16BIT ? imm16 : imm32);
- if (!operand_type_equal (&overlap, &imm8)
- && !operand_type_equal (&overlap, &imm8s)
- && !operand_type_equal (&overlap, &imm16)
- && !operand_type_equal (&overlap, &imm32)
- && !operand_type_equal (&overlap, &imm32s)
- && !operand_type_equal (&overlap, &imm64))
+ if (overlap.bitfield.imm8
+ + overlap.bitfield.imm8s
+ + overlap.bitfield.imm16
+ + overlap.bitfield.imm32
+ + overlap.bitfield.imm32s
+ + overlap.bitfield.imm64 != 1)
{
as_bad (_("no instruction mnemonic suffix given; "
"can't determine immediate size"));
}
}
}
- else if (i.tm.opcode_modifier.implicit1stxmm0)
+ else if (i.tm.opcode_modifier.operandconstraint == IMPLICIT_1ST_XMM0)
{
gas_assert ((MAX_OPERANDS - 1) > dupl
&& (i.tm.opcode_modifier.vexsources
i.reg_operands--;
i.tm.operands--;
}
- else if (i.tm.opcode_modifier.implicitquadgroup)
+ else if (i.tm.opcode_modifier.operandconstraint == IMPLICIT_QUAD_GROUP)
{
unsigned int regnum, first_reg_in_group, last_reg_in_group;
register_prefix, i.op[1].regs->reg_name, last_reg_in_group,
i.tm.name);
}
- else if (i.tm.opcode_modifier.regkludge)
+ else if (i.tm.opcode_modifier.operandconstraint == REG_KLUDGE)
{
/* The imul $imm, %reg instruction is converted into
imul $imm, %reg, %reg, and the clr %reg instruction
{
/* The register or float register operand is in operand
0 or 1. */
- unsigned int op = i.tm.operand_types[0].bitfield.class != Reg;
+ const reg_entry *r = i.op[0].regs;
+ if (i.imm_operands
+ || (r->reg_type.bitfield.instance == Accum && i.op[1].regs))
+ r = i.op[1].regs;
/* Register goes in low 3 bits of opcode. */
- i.tm.base_opcode |= i.op[op].regs->reg_num;
- if ((i.op[op].regs->reg_flags & RegRex) != 0)
+ i.tm.base_opcode |= r->reg_num;
+ if ((r->reg_flags & RegRex) != 0)
i.rex |= REX_B;
- if (!quiet_warnings && i.tm.opcode_modifier.ugh)
+ if (!quiet_warnings && i.tm.opcode_modifier.operandconstraint == UGH)
{
- /* Warn about some common errors, but press on regardless.
- The first case can be generated by gcc (<= 2.8.1). */
- if (i.operands == 2)
- {
- /* Reversed arguments on faddp, fsubp, etc. */
- as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
- register_prefix, i.op[!intel_syntax].regs->reg_name,
- register_prefix, i.op[intel_syntax].regs->reg_name);
- }
- else
+ /* Warn about some common errors, but press on regardless. */
+ if (i.operands != 2)
{
/* Extraneous `l' suffix on fp insn. */
as_warn (_("translating to `%s %s%s'"), i.tm.name,
register_prefix, i.op[0].regs->reg_name);
}
+ else if (i.op[0].regs->reg_type.bitfield.instance != Accum)
+ {
+ /* Reversed arguments on faddp or fmulp. */
+ as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
+ register_prefix, i.op[!intel_syntax].regs->reg_name,
+ register_prefix, i.op[intel_syntax].regs->reg_name);
+ }
}
}
immediate operand to encode the first operand. */
exp = &im_expressions[i.imm_operands++];
i.op[i.operands].imms = exp;
- i.types[i.operands] = imm8;
+ i.types[i.operands].bitfield.imm8 = 1;
i.operands++;
gas_assert (i.tm.operand_types[reg_slot].bitfield.class == RegSIMD);
unsigned int vvvv;
/* Swap two source operands if needed. */
- if (i.tm.opcode_modifier.swapsources)
+ if (i.tm.opcode_modifier.operandconstraint == SWAP_SOURCES)
{
vvvv = source;
source = dest;
source operand is encoded in VEX prefix. */
gas_assert (mem != (unsigned int) ~0);
- if (op > mem)
+ if (op > mem || i.tm.cpu_flags.bitfield.cpucmpccxadd)
{
vex_reg = op++;
gas_assert (op < i.operands);
if (seg && subseg)
subseg_set (seg, subseg);
}
+
+/* Whether SFrame stack trace info is supported for the current target.  */
+
+bool
+x86_support_sframe_p (void)
+{
+  /* At this time, SFrame unwind is supported for AMD64 ABI only.
+     NOTE(review): relies on x86_elf_abi being set by target init —
+     confirm this is only queried for ELF output.  */
+  return (x86_elf_abi == X86_64_ABI);
+}
+
+/* Whether SFrame return address tracking is needed for this target.
+   Returns false: on AMD64 the return address need not be tracked per
+   Frame Row Entry (see below).  */
+
+bool
+x86_sframe_ra_tracking_p (void)
+{
+  /* In AMD64, return address is always stored on the stack at a fixed offset
+     from the CFA (provided via x86_sframe_cfa_ra_offset ()).
+     Do not track explicitly via an SFrame Frame Row Entry.  */
+  return false;
+}
+
+/* The fixed offset of the return address from the CFA.  On AMD64 the
+   caller's return address is always at CFA - 8; only valid when RA
+   tracking is disabled (see x86_sframe_ra_tracking_p above).  */
+
+offsetT
+x86_sframe_cfa_ra_offset (void)
+{
+  /* Only the AMD64 ABI guarantees this fixed slot.  */
+  gas_assert (x86_elf_abi == X86_64_ABI);
+  return (offsetT) -8;
+}
+
+/* The SFrame ABI/arch identifier for the current target, or 0 when
+   SFrame generation is not supported (see x86_support_sframe_p).  */
+
+unsigned char
+x86_sframe_get_abi_arch (void)
+{
+  unsigned char sframe_abi_arch = 0;
+
+  if (x86_support_sframe_p ())
+    {
+      /* AMD64 is little-endian only; a big-endian target here would
+	 indicate a configuration error.  */
+      gas_assert (!target_big_endian);
+      sframe_abi_arch = SFRAME_ABI_AMD64_ENDIAN_LITTLE;
+    }
+
+  return sframe_abi_arch;
+}
+
#endif
static unsigned int
const insn_template *t = current_templates->start;
if (t->opcode_modifier.isstring
- && !t->cpu_flags.bitfield.cpupadlock
&& (current_templates->end[-1].opcode_modifier.isstring
|| i.mem_operands))
{
&& !operand_type_check (i.types[this_operand], disp))
{
i.types[this_operand] = i.base_reg->reg_type;
+ i.input_output_operand = true;
return 1;
}