*
**************************************************************************/
-#if defined(__i386__) || defined(__386__) || defined(i386)
+#include "pipe/p_config.h"
+
+#if defined(PIPE_ARCH_X86)
#include "pipe/p_compiler.h"
-#include "pipe/p_debug.h"
-#include "pipe/p_pointer.h"
+#include "util/u_debug.h"
+#include "util/u_pointer.h"
#include "rtasm_execmem.h"
#include "rtasm_x86sse.h"
/* Build a modRM byte + possible displacement. No treatment of SIB
* indexing. BZZT - no way to encode an absolute address.
+ *
+ * This is the "/r" field in the x86 manuals...
*/
static void emit_modrm( struct x86_function *p,
struct x86_reg reg,
/* Oh-oh we've stumbled into the SIB thing.
*/
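+/* Only memory forms with rm == reg_SP take a SIB byte; a plain
+ * register operand never does, hence the mod_REG check below.
+ */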
if (regmem.file == file_REG32 &&
- regmem.idx == reg_SP) {
+ regmem.idx == reg_SP &&
+ regmem.mod != mod_REG) {
emit_1ub(p, 0x24); /* simplistic! */
}
}
}
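+/* Worked example, for illustration only: moving EAX from [EBX + 8]
+ * produces opcode 0x8b followed by the modrm byte
+ * mod_DISP8 (01) | reg_AX (000) | reg_BX (011) = 0x43 and the
+ * disp8 0x08, i.e. "mov eax, [ebx+8]".
+ */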
-
+/* Emits the "/0".."/7" specialized versions of the modrm ("/r") bytes.
+ */
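+/* For example, "add r/m32, imm32" is opcode 0x81 with /0: the opcode
+ * extension 0 goes in the reg field of the modrm byte.
+ */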
static void emit_modrm_noreg( struct x86_function *p,
unsigned op,
struct x86_reg regmem )
else
reg.disp += disp;
- if (reg.disp == 0)
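+ /* mod_INDIRECT with rm == reg_BP encodes an absolute disp32
+  * instead, so a zero displacement off EBP must use mod_DISP8.
+  */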
+ if (reg.disp == 0 && reg.idx != reg_BP)
reg.mod = mod_INDIRECT;
else if (reg.disp <= 127 && reg.disp >= -128)
reg.mod = mod_DISP8;
DUMP_I(cc);
if (offset < 0) {
- int amt = p->csr - p->store;
- assert(amt > -offset);
+ /*assert(p->csr - p->store > -offset);*/
+ if (p->csr - p->store <= -offset) {
+ /* probably out of memory (using the error_overflow buffer) */
+ return;
+ }
}
if (offset <= 127 && offset >= -128) {
}
-/* michal:
- * Temporary. As I need immediate operands, and dont want to mess with the codegen,
- * I load the immediate into general purpose register and use it.
- */
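+/* mov reg32, imm32 is encoded as 0xb8 + register index, followed by
+ * the 32-bit immediate.
+ */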
void x86_mov_reg_imm( struct x86_function *p, struct x86_reg dst, int imm )
{
DUMP_RI( dst, imm );
+ assert(dst.file == file_REG32);
assert(dst.mod == mod_REG);
emit_1ub(p, 0xb8 + dst.idx);
emit_1i(p, imm);
}
+/**
+ * Immediate group 1 instructions.
+ */
+static INLINE void
+x86_group1_imm( struct x86_function *p,
+ unsigned op, struct x86_reg dst, int imm )
+{
+ assert(dst.file == file_REG32);
+ assert(dst.mod == mod_REG);
+ if (-0x80 <= imm && imm < 0x80) {
+ emit_1ub(p, 0x83);
+ emit_modrm_noreg(p, op, dst);
+ emit_1b(p, (char)imm);
+ }
+ else {
+ emit_1ub(p, 0x81);
+ emit_modrm_noreg(p, op, dst);
+ emit_1i(p, imm);
+ }
+}
+
+void x86_add_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 0, dst, imm);
+}
+
+void x86_or_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 1, dst, imm);
+}
+
+void x86_and_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 4, dst, imm);
+}
+
+void x86_sub_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 5, dst, imm);
+}
+
+void x86_xor_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 6, dst, imm);
+}
+
+void x86_cmp_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 7, dst, imm);
+}
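+/* Illustrative usage (not required by this change):
+ *
+ *    x86_sub_imm(p, x86_make_reg(file_REG32, reg_AX), 1);
+ *    x86_cmp_imm(p, x86_make_reg(file_REG32, reg_AX), 0);
+ *
+ * emits "sub eax, 1" / "cmp eax, 0", using the short 0x83 form since
+ * both immediates fit in a signed byte.
+ */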
+
+
void x86_push( struct x86_function *p,
struct x86_reg reg )
{
p->stack_offset += 4;
}
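+/* Push a 32-bit immediate (opcode 0x68).  Like x86_push(), this bumps
+ * stack_offset so that argument lookups stay correct.
+ */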
+void x86_push_imm32( struct x86_function *p,
+ int imm32 )
+{
+ DUMP_I( imm32 );
+ emit_1ub(p, 0x68);
+ emit_1i(p, imm32);
+
+ p->stack_offset += 4;
+}
+
+
void x86_pop( struct x86_function *p,
struct x86_reg reg )
{
* SSE instructions
*/
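+/* PREFETCH (0f 18 /hint): hint 0 selects prefetchnta, 1 prefetcht0,
+ * 2 prefetcht1; the operand must be a memory reference.
+ */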
+void sse_prefetchnta( struct x86_function *p, struct x86_reg ptr)
+{
+ DUMP_R( ptr );
+ assert(ptr.mod != mod_REG);
+ emit_2ub(p, 0x0f, 0x18);
+ emit_modrm_noreg(p, 0, ptr);
+}
+
+void sse_prefetch0( struct x86_function *p, struct x86_reg ptr)
+{
+ DUMP_R( ptr );
+ assert(ptr.mod != mod_REG);
+ emit_2ub(p, 0x0f, 0x18);
+ emit_modrm_noreg(p, 1, ptr);
+}
+
+void sse_prefetch1( struct x86_function *p, struct x86_reg ptr)
+{
+ DUMP_R( ptr );
+ assert(ptr.mod != mod_REG);
+ emit_2ub(p, 0x0f, 0x18);
+ emit_modrm_noreg(p, 2, ptr);
+}
+
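+/* MOVNTPS (0f 2b): non-temporal store of an xmm register to memory,
+ * bypassing the cache; dst must therefore be a memory operand.
+ */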
+void sse_movntps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src)
+{
+ DUMP_RR( dst, src );
+
+ assert(dst.mod != mod_REG);
+ assert(src.mod == mod_REG);
+ emit_2ub(p, 0x0f, 0x2b);
+ emit_modrm(p, src, dst);
+}
+
+
+
void sse_movss( struct x86_function *p,
struct x86_reg dst,
void sse_cmpps( struct x86_function *p,
struct x86_reg dst,
struct x86_reg src,
- unsigned char cc)
+ enum sse_cc cc)
{
DUMP_RRI( dst, src, cc );
emit_2ub(p, X86_TWOB, 0xC2);
emit_modrm(p, dst, src);
}
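+/* MOVMSKPS (0f 50): copy the sign bits of the four packed floats in
+ * src into the low 4 bits of a general purpose register.
+ */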
+void sse_movmskps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src)
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x50);
+ emit_modrm(p, dst, src);
+}
+
/***********************************************************************
* SSE2 instructions
*/
*/
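+/* Save/restore EAX, ECX and EDX, the caller-saved registers of the
+ * cdecl calling convention, around an emitted call.
+ */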
+void x86_cdecl_caller_push_regs( struct x86_function *p )
+{
+ x86_push(p, x86_make_reg(file_REG32, reg_AX));
+ x86_push(p, x86_make_reg(file_REG32, reg_CX));
+ x86_push(p, x86_make_reg(file_REG32, reg_DX));
+}
+
+void x86_cdecl_caller_pop_regs( struct x86_function *p )
+{
+ x86_pop(p, x86_make_reg(file_REG32, reg_DX));
+ x86_pop(p, x86_make_reg(file_REG32, reg_CX));
+ x86_pop(p, x86_make_reg(file_REG32, reg_AX));
+}
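+/* Typical use, for illustration:
+ *
+ *    x86_cdecl_caller_push_regs(p);
+ *    ... emit the call ...
+ *    x86_cdecl_caller_pop_regs(p);
+ */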
+
+
/* Retrieve a reference to one of the function arguments, taking into
* account any push/pop activity:
*/