+2005-06-20 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR 1013
+ * config/tc-i386.c (md_assemble): Don't call optimize_disp on
+ movabs.
+ (optimize_disp): Optimize only when possible. Don't use 64bit
+ displacement on non-constants, and avoid it on constants that
+ fit in 32 bits.
+
2005-06-17 Jan Beulich <jbeulich@novell.com>
* config/tc-i386.c (reloc): Also handle BFD_RELOC_64_PCREL.
if (i.imm_operands)
optimize_imm ();
- if (i.disp_operands)
+ /* Don't optimize displacement for movabs since it only takes 64bit
+ displacement. */
+ if (i.disp_operands
+ && (flag_code != CODE_64BIT
+ || strcmp (mnemonic, "movabs") != 0))
optimize_disp ();
/* Next, we find a template that matches the given insn,
int op;
for (op = i.operands; --op >= 0;)
- if ((i.types[op] & Disp) && i.op[op].disps->X_op == O_constant)
+ if (i.types[op] & Disp)
{
- offsetT disp = i.op[op].disps->X_add_number;
-
- if (i.types[op] & Disp16)
+ if (i.op[op].disps->X_op == O_constant)
{
- /* We know this operand is at most 16 bits, so
- convert to a signed 16 bit number before trying
- to see whether it will fit in an even smaller
- size. */
+ offsetT disp = i.op[op].disps->X_add_number;
- disp = (((disp & 0xffff) ^ 0x8000) - 0x8000);
- }
- else if (i.types[op] & Disp32)
- {
- /* We know this operand is at most 32 bits, so convert to a
- signed 32 bit number before trying to see whether it will
- fit in an even smaller size. */
- disp &= (((offsetT) 2 << 31) - 1);
- disp = (disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
- }
- if (!disp && (i.types[op] & BaseIndex))
- {
- i.types[op] &= ~Disp;
- i.op[op].disps = 0;
- i.disp_operands--;
- }
- else if (flag_code == CODE_64BIT)
- {
- if (fits_in_signed_long (disp))
- i.types[op] |= Disp32S;
- if (fits_in_unsigned_long (disp))
- i.types[op] |= Disp32;
+ if ((i.types[op] & Disp16)
+ && (disp & ~(offsetT) 0xffff) == 0)
+ {
+ /* If this operand is at most 16 bits, convert
+ to a signed 16 bit number and don't use 64bit
+ displacement. */
+ disp = (((disp & 0xffff) ^ 0x8000) - 0x8000);
+ i.types[op] &= ~Disp64;
+ }
+ if ((i.types[op] & Disp32)
+ && (disp & ~(((offsetT) 2 << 31) - 1)) == 0)
+ {
+ /* If this operand is at most 32 bits, convert
+ to a signed 32 bit number and don't use 64bit
+ displacement. */
+ disp &= (((offsetT) 2 << 31) - 1);
+ disp = (disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
+ i.types[op] &= ~Disp64;
+ }
+ if (!disp && (i.types[op] & BaseIndex))
+ {
+ i.types[op] &= ~Disp;
+ i.op[op].disps = 0;
+ i.disp_operands--;
+ }
+ else if (flag_code == CODE_64BIT)
+ {
+ if (fits_in_signed_long (disp))
+ i.types[op] |= Disp32S;
+ if (fits_in_unsigned_long (disp))
+ i.types[op] |= Disp32;
+ }
+ if ((i.types[op] & (Disp32 | Disp32S | Disp16))
+ && fits_in_signed_byte (disp))
+ i.types[op] |= Disp8;
}
- if ((i.types[op] & (Disp32 | Disp32S | Disp16))
- && fits_in_signed_byte (disp))
- i.types[op] |= Disp8;
+ else
+ /* We only support 64bit displacement on constants. */
+ i.types[op] &= ~Disp64;
}
}
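For reference, the Disp16 and Disp32 branches above use a mask/xor/subtract idiom to sign-extend the low bits of a value held in the wider offsetT. Below is a minimal standalone sketch of that arithmetic, not part of the patch, using int64_t as a stand-in for BFD's offsetT and hypothetical input values:

#include <stdio.h>
#include <stdint.h>

typedef int64_t offsetT;	/* stand-in for BFD's offsetT */

/* Sign-extend the low 16 bits of DISP, as in the Disp16 branch.  */
static offsetT
sext16 (offsetT disp)
{
  return ((disp & 0xffff) ^ 0x8000) - 0x8000;
}

/* Sign-extend the low 32 bits of DISP, as in the Disp32 branch.  */
static offsetT
sext32 (offsetT disp)
{
  disp &= ((offsetT) 2 << 31) - 1;	/* keep only the low 32 bits */
  return (disp ^ ((offsetT) 1 << 31)) - ((offsetT) 1 << 31);
}

int
main (void)
{
  /* 0xfffe has bit 15 set, so it sign-extends to -2.  */
  printf ("%lld\n", (long long) sext16 (0xfffe));
  /* 0xffffff00 has bit 31 set, so it sign-extends to -256.  */
  printf ("%lld\n", (long long) sext32 (0xffffff00));
  return 0;
}

XORing with the sign bit and then subtracting it flips the top bit and shifts the range down, which gives sign extension without relying on implementation-defined right shifts; constants that pass these range checks also have Disp64 cleared, which is what steers them away from the 10-byte moffs encoding.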
+2005-06-20 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR 1013
+ * gas/i386/x86_64.s: Add absolute 64bit addressing tests for mov.
+ * gas/i386/x86_64.d: Updated.
+
2005-06-17 Jan Beulich <jbeulich@novell.com>
* gas/i386/x86-64-pcrel.s: Add insn requiring 64-bit pc-relative
1f0: 8b 04 25 00 00 00 00 mov[ ]+0x0,%eax
1f7: 8b 80 00 00 00 00[ ]+mov[ ]+0x0\(%rax\),%eax
1fd: 8b 05 00 00 00 00[ ]+mov[ ]+0\(%rip\),%eax.*
+
+0+203 <foo>:
+ 203: a0 11 22 33 44 55 66 77 88 mov[ ]+0x8877665544332211,%al
+ 20c: 66 a1 11 22 33 44 55 66 77 88 mov[ ]+0x8877665544332211,%ax
+ 216: a1 11 22 33 44 55 66 77 88 mov[ ]+0x8877665544332211,%eax
+ 21f: 48 a1 11 22 33 44 55 66 77 88 mov[ ]+0x8877665544332211,%rax
+ 229: a2 11 22 33 44 55 66 77 88 mov[ ]+%al,0x8877665544332211
+ 232: 66 a3 11 22 33 44 55 66 77 88 mov[ ]+%ax,0x8877665544332211
+ 23c: a3 11 22 33 44 55 66 77 88 mov[ ]+%eax,0x8877665544332211
+ 245: 48 a3 11 22 33 44 55 66 77 88 mov[ ]+%rax,0x8877665544332211
+ 24f: a0 11 22 33 44 55 66 77 88 mov[ ]+0x8877665544332211,%al
+ 258: 66 a1 11 22 33 44 55 66 77 88 mov[ ]+0x8877665544332211,%ax
+ 262: a1 11 22 33 44 55 66 77 88 mov[ ]+0x8877665544332211,%eax
+ 26b: 48 a1 11 22 33 44 55 66 77 88 mov[ ]+0x8877665544332211,%rax
+ 275: a2 11 22 33 44 55 66 77 88 mov[ ]+%al,0x8877665544332211
+ 27e: 66 a3 11 22 33 44 55 66 77 88 mov[ ]+%ax,0x8877665544332211
+ 288: a3 11 22 33 44 55 66 77 88 mov[ ]+%eax,0x8877665544332211
+ 291: 48 a3 11 22 33 44 55 66 77 88 mov[ ]+%rax,0x8877665544332211
#pass
#RIP relative
mov eax, [rip+symbol]
+foo:
+.att_syntax
+#absolute 64bit addressing
+mov 0x8877665544332211,%al
+mov 0x8877665544332211,%ax
+mov 0x8877665544332211,%eax
+mov 0x8877665544332211,%rax
+mov %al,0x8877665544332211
+mov %ax,0x8877665544332211
+mov %eax,0x8877665544332211
+mov %rax,0x8877665544332211
+movb 0x8877665544332211,%al
+movw 0x8877665544332211,%ax
+movl 0x8877665544332211,%eax
+movq 0x8877665544332211,%rax
+movb %al,0x8877665544332211
+movw %ax,0x8877665544332211
+movl %eax,0x8877665544332211
+movq %rax,0x8877665544332211
+
# Get a good alignment.
.p2align 4,0
+2005-06-20 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR 1013
+ * i386.h (i386_optab): Update comments for 64bit addressing on
+ mov. Allow 64bit addressing for mov and movq.
+
2005-06-11 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
* hppa.h (pa_opcodes): Use cM and cX instead of cm and cx,
/* Move instructions. */
#define MOV_AX_DISP32 0xa0
-/* In the 64bit mode the short form mov immediate is redefined to have
- 64bit displacement value. */
+/* We list the 64bit displacement form first and only mark constants
+ larger than 32 bits as Disp64. */
+{ "mov", 2, 0xa0, X, Cpu64, bwlq_Suf|D|W, { Disp64, Acc, 0 } },
{ "mov", 2, 0xa0, X, CpuNo64,bwl_Suf|D|W, { Disp16|Disp32, Acc, 0 } },
{ "mov", 2, 0x88, X, 0, bwlq_Suf|D|W|Modrm, { Reg, Reg|AnyMem, 0} },
/* In the 64bit mode the short form mov immediate is redefined to have
- 64bit displacement value. */
+ 64bit value. */
{ "mov", 2, 0xb0, X, 0, bwl_Suf|W|ShortForm, { EncImm, Reg8|Reg16|Reg32, 0 } },
{ "mov", 2, 0xc6, 0, 0, bwlq_Suf|W|Modrm, { EncImm, Reg|AnyMem, 0 } },
{ "mov", 2, 0xb0, X, Cpu64, q_Suf|W|ShortForm, { Imm64, Reg64, 0 } },
{"movq", 2, 0x0f7f, X, CpuMMX, NoSuf|IgnoreSize|Modrm, { RegMMX, RegMMX|LongMem, 0 } },
{"movq", 2, 0xf30f7e,X,CpuSSE2,NoSuf|IgnoreSize|Modrm, { RegXMM|LLongMem, RegXMM, 0 } },
{"movq", 2, 0x660fd6,X,CpuSSE2,NoSuf|IgnoreSize|Modrm, { RegXMM, RegXMM|LLongMem, 0 } },
+/* We list the 64bit displacement form first and only mark constants
+ larger than 32 bits as Disp64. */
+{"movq", 2, 0xa0, X, Cpu64, NoSuf|D|W|Size64, { Disp64, Acc, 0 } },
{"movq", 2, 0x88, X, Cpu64, NoSuf|D|W|Modrm|Size64,{ Reg64, Reg64|AnyMem, 0 } },
{"movq", 2, 0xc6, 0, Cpu64, NoSuf|W|Modrm|Size64, { Imm32S, Reg64|WordMem, 0 } },
{"movq", 2, 0xb0, X, Cpu64, NoSuf|W|ShortForm|Size64,{ Imm64, Reg64, 0 } },