1 #if defined(__i386__) || defined(__386__)
9 static GLubyte
*cptr( void (*label
)() )
11 return (char *)(unsigned long)label
;
15 /* Emit bytes to the instruction stream:
17 static void emit_1b( struct x86_function
*p
, GLbyte b0
)
19 *(GLbyte
*)(p
->csr
++) = b0
;
22 static void emit_1i( struct x86_function
*p
, GLint i0
)
24 *(GLint
*)(p
->csr
) = i0
;
28 static void disassem( struct x86_function
*p
, const char *fn
)
31 if (fn
&& fn
!= p
->fn
) {
32 _mesa_printf("0x%x: %s\n", p
->csr
, fn
);
38 static void emit_1ub_fn( struct x86_function
*p
, GLubyte b0
, const char *fn
)
44 static void emit_2ub_fn( struct x86_function
*p
, GLubyte b0
, GLubyte b1
, const char *fn
)
51 static void emit_3ub_fn( struct x86_function
*p
, GLubyte b0
, GLubyte b1
, GLubyte b2
, const char *fn
)
/* Convenience wrappers: forward the emitting function's name via
 * __FUNCTION__ to the *_fn variants — presumably for disassem() debug
 * labeling (the *_fn bodies are not visible here; verify).
 */
59 #define emit_1ub(p, b0) emit_1ub_fn(p, b0, __FUNCTION__)
60 #define emit_2ub(p, b0, b1) emit_2ub_fn(p, b0, b1, __FUNCTION__)
61 #define emit_3ub(p, b0, b1, b2) emit_3ub_fn(p, b0, b1, b2, __FUNCTION__)
65 /* Build a modRM byte + possible displacement. No treatment of SIB
66 * indexing. BZZT - no way to encode an absolute address.
68 static void emit_modrm( struct x86_function
*p
,
70 struct x86_reg regmem
)
74 assert(reg
.mod
== mod_REG
);
76 val
|= regmem
.mod
<< 6; /* mod field */
77 val
|= reg
.idx
<< 3; /* reg field */
78 val
|= regmem
.idx
; /* r/m field */
80 emit_1ub_fn(p
, val
, 0);
82 /* Oh-oh we've stumbled into the SIB thing.
84 if (regmem
.file
== file_REG32
&&
85 regmem
.idx
== reg_SP
) {
86 emit_1ub_fn(p
, 0x24, 0); /* simplistic! */
94 emit_1b(p
, regmem
.disp
);
97 emit_1i(p
, regmem
.disp
);
106 static void emit_modrm_noreg( struct x86_function
*p
,
108 struct x86_reg regmem
)
110 struct x86_reg dummy
= x86_make_reg(file_REG32
, op
);
111 emit_modrm(p
, dummy
, regmem
);
114 /* Many x86 instructions have two opcodes to cope with the situations
115 * where the destination is a register or memory reference
116 * respectively. This function selects the correct opcode based on
117 * the arguments presented.
119 static void emit_op_modrm( struct x86_function
*p
,
120 GLubyte op_dst_is_reg
,
121 GLubyte op_dst_is_mem
,
127 emit_1ub_fn(p
, op_dst_is_reg
, 0);
128 emit_modrm(p
, dst
, src
);
133 assert(src
.mod
== mod_REG
);
134 emit_1ub_fn(p
, op_dst_is_mem
, 0);
135 emit_modrm(p
, src
, dst
);
149 /* Create and manipulate registers and regmem values:
151 struct x86_reg
x86_make_reg( enum x86_reg_file file
,
152 enum x86_reg_name idx
)
164 struct x86_reg
x86_make_disp( struct x86_reg reg
,
167 assert(reg
.file
== file_REG32
);
169 if (reg
.mod
== mod_REG
)
175 reg
.mod
= mod_INDIRECT
;
176 else if (reg
.disp
<= 127 && reg
.disp
>= -128)
179 reg
.mod
= mod_DISP32
;
184 struct x86_reg
x86_deref( struct x86_reg reg
)
186 return x86_make_disp(reg
, 0);
189 struct x86_reg
x86_get_base_reg( struct x86_reg reg
)
191 return x86_make_reg( reg
.file
, reg
.idx
);
194 GLubyte
*x86_get_label( struct x86_function
*p
)
201 /***********************************************************************
206 void x86_jcc( struct x86_function
*p
,
210 GLint offset
= label
- (x86_get_label(p
) + 2);
212 if (offset
<= 127 && offset
>= -128) {
213 emit_1ub(p
, 0x70 + cc
);
214 emit_1b(p
, (GLbyte
) offset
);
217 offset
= label
- (x86_get_label(p
) + 6);
218 emit_2ub(p
, 0x0f, 0x80 + cc
);
223 /* Always use a 32bit offset for forward jumps:
225 GLubyte
*x86_jcc_forward( struct x86_function
*p
,
228 emit_2ub(p
, 0x0f, 0x80 + cc
);
230 return x86_get_label(p
);
233 GLubyte
*x86_jmp_forward( struct x86_function
*p
)
237 return x86_get_label(p
);
240 GLubyte
*x86_call_forward( struct x86_function
*p
)
244 return x86_get_label(p
);
247 /* Fixup offset from forward jump:
249 void x86_fixup_fwd_jump( struct x86_function
*p
,
252 *(int *)(fixup
- 4) = x86_get_label(p
) - fixup
;
255 void x86_jmp( struct x86_function
*p
, GLubyte
*label
)
258 emit_1i(p
, label
- x86_get_label(p
) - 4);
/* Emit a 32-bit relative call to the C function label.
 */
void x86_call( struct x86_function *p, void (*label)() )
{
   emit_1ub(p, 0xe8);                          /* CALL rel32 opcode (was missing) */
   emit_1i(p, cptr(label) - x86_get_label(p) - 4);
}
268 * Temporary. As I need immediate operands, and dont want to mess with the codegen,
269 * I load the immediate into general purpose register and use it.
271 void x86_mov_reg_imm( struct x86_function
*p
, struct x86_reg dst
, GLint imm
)
273 assert(dst
.mod
== mod_REG
);
274 emit_1ub(p
, 0xb8 + dst
.idx
);
278 void x86_push( struct x86_function
*p
,
281 assert(reg
.mod
== mod_REG
);
282 emit_1ub(p
, 0x50 + reg
.idx
);
283 p
->stack_offset
+= 4;
286 void x86_pop( struct x86_function
*p
,
289 assert(reg
.mod
== mod_REG
);
290 emit_1ub(p
, 0x58 + reg
.idx
);
291 p
->stack_offset
-= 4;
294 void x86_inc( struct x86_function
*p
,
297 assert(reg
.mod
== mod_REG
);
298 emit_1ub(p
, 0x40 + reg
.idx
);
301 void x86_dec( struct x86_function
*p
,
304 assert(reg
.mod
== mod_REG
);
305 emit_1ub(p
, 0x48 + reg
.idx
);
308 void x86_ret( struct x86_function
*p
)
313 void x86_sahf( struct x86_function
*p
)
318 void x86_mov( struct x86_function
*p
,
322 emit_op_modrm( p
, 0x8b, 0x89, dst
, src
);
325 void x86_xor( struct x86_function
*p
,
329 emit_op_modrm( p
, 0x33, 0x31, dst
, src
);
332 void x86_cmp( struct x86_function
*p
,
336 emit_op_modrm( p
, 0x3b, 0x39, dst
, src
);
339 void x86_lea( struct x86_function
*p
,
344 emit_modrm( p
, dst
, src
);
347 void x86_test( struct x86_function
*p
,
352 emit_modrm( p
, dst
, src
);
355 void x86_add( struct x86_function
*p
,
359 emit_op_modrm(p
, 0x03, 0x01, dst
, src
);
362 void x86_mul( struct x86_function
*p
,
365 assert (src
.file
== file_REG32
&& src
.mod
== mod_REG
);
366 emit_op_modrm(p
, 0xf7, 0, x86_make_reg (file_REG32
, reg_SP
), src
);
369 void x86_sub( struct x86_function
*p
,
373 emit_op_modrm(p
, 0x2b, 0x29, dst
, src
);
376 void x86_or( struct x86_function
*p
,
380 emit_op_modrm( p
, 0x0b, 0x09, dst
, src
);
383 void x86_and( struct x86_function
*p
,
387 emit_op_modrm( p
, 0x23, 0x21, dst
, src
);
392 /***********************************************************************
397 void sse_movss( struct x86_function
*p
,
401 emit_2ub(p
, 0xF3, X86_TWOB
);
402 emit_op_modrm( p
, 0x10, 0x11, dst
, src
);
405 void sse_movaps( struct x86_function
*p
,
409 emit_1ub(p
, X86_TWOB
);
410 emit_op_modrm( p
, 0x28, 0x29, dst
, src
);
413 void sse_movups( struct x86_function
*p
,
417 emit_1ub(p
, X86_TWOB
);
418 emit_op_modrm( p
, 0x10, 0x11, dst
, src
);
421 void sse_movhps( struct x86_function
*p
,
425 assert(dst
.mod
!= mod_REG
|| src
.mod
!= mod_REG
);
426 emit_1ub(p
, X86_TWOB
);
427 emit_op_modrm( p
, 0x16, 0x17, dst
, src
); /* cf movlhps */
430 void sse_movlps( struct x86_function
*p
,
434 assert(dst
.mod
!= mod_REG
|| src
.mod
!= mod_REG
);
435 emit_1ub(p
, X86_TWOB
);
436 emit_op_modrm( p
, 0x12, 0x13, dst
, src
); /* cf movhlps */
439 void sse_maxps( struct x86_function
*p
,
443 emit_2ub(p
, X86_TWOB
, 0x5F);
444 emit_modrm( p
, dst
, src
);
447 void sse_maxss( struct x86_function
*p
,
451 emit_3ub(p
, 0xF3, X86_TWOB
, 0x5F);
452 emit_modrm( p
, dst
, src
);
455 void sse_divss( struct x86_function
*p
,
459 emit_3ub(p
, 0xF3, X86_TWOB
, 0x5E);
460 emit_modrm( p
, dst
, src
);
463 void sse_minps( struct x86_function
*p
,
467 emit_2ub(p
, X86_TWOB
, 0x5D);
468 emit_modrm( p
, dst
, src
);
471 void sse_subps( struct x86_function
*p
,
475 emit_2ub(p
, X86_TWOB
, 0x5C);
476 emit_modrm( p
, dst
, src
);
479 void sse_mulps( struct x86_function
*p
,
483 emit_2ub(p
, X86_TWOB
, 0x59);
484 emit_modrm( p
, dst
, src
);
487 void sse_mulss( struct x86_function
*p
,
491 emit_3ub(p
, 0xF3, X86_TWOB
, 0x59);
492 emit_modrm( p
, dst
, src
);
495 void sse_addps( struct x86_function
*p
,
499 emit_2ub(p
, X86_TWOB
, 0x58);
500 emit_modrm( p
, dst
, src
);
503 void sse_addss( struct x86_function
*p
,
507 emit_3ub(p
, 0xF3, X86_TWOB
, 0x58);
508 emit_modrm( p
, dst
, src
);
511 void sse_andnps( struct x86_function
*p
,
515 emit_2ub(p
, X86_TWOB
, 0x55);
516 emit_modrm( p
, dst
, src
);
519 void sse_andps( struct x86_function
*p
,
523 emit_2ub(p
, X86_TWOB
, 0x54);
524 emit_modrm( p
, dst
, src
);
527 void sse_rsqrtps( struct x86_function
*p
,
531 emit_2ub(p
, X86_TWOB
, 0x52);
532 emit_modrm( p
, dst
, src
);
535 void sse_rsqrtss( struct x86_function
*p
,
539 emit_3ub(p
, 0xF3, X86_TWOB
, 0x52);
540 emit_modrm( p
, dst
, src
);
544 void sse_movhlps( struct x86_function
*p
,
548 assert(dst
.mod
== mod_REG
&& src
.mod
== mod_REG
);
549 emit_2ub(p
, X86_TWOB
, 0x12);
550 emit_modrm( p
, dst
, src
);
553 void sse_movlhps( struct x86_function
*p
,
557 assert(dst
.mod
== mod_REG
&& src
.mod
== mod_REG
);
558 emit_2ub(p
, X86_TWOB
, 0x16);
559 emit_modrm( p
, dst
, src
);
562 void sse_orps( struct x86_function
*p
,
566 emit_2ub(p
, X86_TWOB
, 0x56);
567 emit_modrm( p
, dst
, src
);
570 void sse_xorps( struct x86_function
*p
,
574 emit_2ub(p
, X86_TWOB
, 0x57);
575 emit_modrm( p
, dst
, src
);
578 void sse_cvtps2pi( struct x86_function
*p
,
582 assert(dst
.file
== file_MMX
&&
583 (src
.file
== file_XMM
|| src
.mod
!= mod_REG
));
587 emit_2ub(p
, X86_TWOB
, 0x2d);
588 emit_modrm( p
, dst
, src
);
592 /* Shufps can also be used to implement a reduced swizzle when dest ==
595 void sse_shufps( struct x86_function
*p
,
600 emit_2ub(p
, X86_TWOB
, 0xC6);
601 emit_modrm(p
, dest
, arg0
);
605 void sse_cmpps( struct x86_function
*p
,
610 emit_2ub(p
, X86_TWOB
, 0xC2);
611 emit_modrm(p
, dest
, arg0
);
615 void sse_pmovmskb( struct x86_function
*p
,
619 emit_3ub(p
, 0x66, X86_TWOB
, 0xD7);
620 emit_modrm(p
, dest
, src
);
623 /***********************************************************************
628 * Perform a reduced swizzle:
630 void sse2_pshufd( struct x86_function
*p
,
635 emit_3ub(p
, 0x66, X86_TWOB
, 0x70);
636 emit_modrm(p
, dest
, arg0
);
640 void sse2_cvttps2dq( struct x86_function
*p
,
644 emit_3ub( p
, 0xF3, X86_TWOB
, 0x5B );
645 emit_modrm( p
, dst
, src
);
648 void sse2_cvtps2dq( struct x86_function
*p
,
652 emit_3ub(p
, 0x66, X86_TWOB
, 0x5B);
653 emit_modrm( p
, dst
, src
);
656 void sse2_packssdw( struct x86_function
*p
,
660 emit_3ub(p
, 0x66, X86_TWOB
, 0x6B);
661 emit_modrm( p
, dst
, src
);
664 void sse2_packsswb( struct x86_function
*p
,
668 emit_3ub(p
, 0x66, X86_TWOB
, 0x63);
669 emit_modrm( p
, dst
, src
);
672 void sse2_packuswb( struct x86_function
*p
,
676 emit_3ub(p
, 0x66, X86_TWOB
, 0x67);
677 emit_modrm( p
, dst
, src
);
680 void sse2_rcpps( struct x86_function
*p
,
684 emit_2ub(p
, X86_TWOB
, 0x53);
685 emit_modrm( p
, dst
, src
);
688 void sse2_rcpss( struct x86_function
*p
,
692 emit_3ub(p
, 0xF3, X86_TWOB
, 0x53);
693 emit_modrm( p
, dst
, src
);
696 void sse2_movd( struct x86_function
*p
,
700 emit_2ub(p
, 0x66, X86_TWOB
);
701 emit_op_modrm( p
, 0x6e, 0x7e, dst
, src
);
707 /***********************************************************************
710 void x87_fist( struct x86_function
*p
, struct x86_reg dst
)
713 emit_modrm_noreg(p
, 2, dst
);
716 void x87_fistp( struct x86_function
*p
, struct x86_reg dst
)
719 emit_modrm_noreg(p
, 3, dst
);
722 void x87_fild( struct x86_function
*p
, struct x86_reg arg
)
725 emit_modrm_noreg(p
, 0, arg
);
/* FLDZ: push +0.0 onto the x87 stack.
 */
void x87_fldz( struct x86_function *p )
{
   emit_2ub(p, 0xd9, 0xee);
}
734 void x87_fldcw( struct x86_function
*p
, struct x86_reg arg
)
736 assert(arg
.file
== file_REG32
);
737 assert(arg
.mod
!= mod_REG
);
739 emit_modrm_noreg(p
, 5, arg
);
/* FLD1: push +1.0 onto the x87 stack.
 */
void x87_fld1( struct x86_function *p )
{
   emit_2ub(p, 0xd9, 0xe8);
}

/* FLDL2E: push log2(e).
 */
void x87_fldl2e( struct x86_function *p )
{
   emit_2ub(p, 0xd9, 0xea);
}

/* FLDLN2: push ln(2).
 */
void x87_fldln2( struct x86_function *p )
{
   emit_2ub(p, 0xd9, 0xed);
}
757 void x87_fwait( struct x86_function
*p
)
/* FNCLEX: clear x87 exception flags without a preceding wait.
 */
void x87_fnclex( struct x86_function *p )
{
   emit_2ub(p, 0xdb, 0xe2);
}
767 void x87_fclex( struct x86_function
*p
)
774 static void x87_arith_op( struct x86_function
*p
, struct x86_reg dst
, struct x86_reg arg
,
779 GLubyte argmem_noreg
)
781 assert(dst
.file
== file_x87
);
783 if (arg
.file
== file_x87
) {
785 emit_2ub(p
, dst0ub0
, dst0ub1
+arg
.idx
);
786 else if (arg
.idx
== 0)
787 emit_2ub(p
, arg0ub0
, arg0ub1
+arg
.idx
);
791 else if (dst
.idx
== 0) {
792 assert(arg
.file
= file_REG32
);
794 emit_modrm_noreg(p
, argmem_noreg
, arg
);
800 void x87_fmul( struct x86_function
*p
, struct x86_reg dst
, struct x86_reg arg
)
802 x87_arith_op(p
, dst
, arg
,
808 void x87_fsub( struct x86_function
*p
, struct x86_reg dst
, struct x86_reg arg
)
810 x87_arith_op(p
, dst
, arg
,
816 void x87_fsubr( struct x86_function
*p
, struct x86_reg dst
, struct x86_reg arg
)
818 x87_arith_op(p
, dst
, arg
,
824 void x87_fadd( struct x86_function
*p
, struct x86_reg dst
, struct x86_reg arg
)
826 x87_arith_op(p
, dst
, arg
,
832 void x87_fdiv( struct x86_function
*p
, struct x86_reg dst
, struct x86_reg arg
)
834 x87_arith_op(p
, dst
, arg
,
840 void x87_fdivr( struct x86_function
*p
, struct x86_reg dst
, struct x86_reg arg
)
842 x87_arith_op(p
, dst
, arg
,
848 void x87_fmulp( struct x86_function
*p
, struct x86_reg dst
)
850 assert(dst
.file
== file_x87
);
851 assert(dst
.idx
>= 1);
852 emit_2ub(p
, 0xde, 0xc8+dst
.idx
);
855 void x87_fsubp( struct x86_function
*p
, struct x86_reg dst
)
857 assert(dst
.file
== file_x87
);
858 assert(dst
.idx
>= 1);
859 emit_2ub(p
, 0xde, 0xe8+dst
.idx
);
862 void x87_fsubrp( struct x86_function
*p
, struct x86_reg dst
)
864 assert(dst
.file
== file_x87
);
865 assert(dst
.idx
>= 1);
866 emit_2ub(p
, 0xde, 0xe0+dst
.idx
);
869 void x87_faddp( struct x86_function
*p
, struct x86_reg dst
)
871 assert(dst
.file
== file_x87
);
872 assert(dst
.idx
>= 1);
873 emit_2ub(p
, 0xde, 0xc0+dst
.idx
);
876 void x87_fdivp( struct x86_function
*p
, struct x86_reg dst
)
878 assert(dst
.file
== file_x87
);
879 assert(dst
.idx
>= 1);
880 emit_2ub(p
, 0xde, 0xf8+dst
.idx
);
883 void x87_fdivrp( struct x86_function
*p
, struct x86_reg dst
)
885 assert(dst
.file
== file_x87
);
886 assert(dst
.idx
>= 1);
887 emit_2ub(p
, 0xde, 0xf0+dst
.idx
);
890 void x87_fucom( struct x86_function
*p
, struct x86_reg arg
)
892 assert(arg
.file
== file_x87
);
893 emit_2ub(p
, 0xdd, 0xe0+arg
.idx
);
896 void x87_fucomp( struct x86_function
*p
, struct x86_reg arg
)
898 assert(arg
.file
== file_x87
);
899 emit_2ub(p
, 0xdd, 0xe8+arg
.idx
);
902 void x87_fucompp( struct x86_function
*p
)
904 emit_2ub(p
, 0xda, 0xe9);
907 void x87_fxch( struct x86_function
*p
, struct x86_reg arg
)
909 assert(arg
.file
== file_x87
);
910 emit_2ub(p
, 0xd9, 0xc8+arg
.idx
);
913 void x87_fabs( struct x86_function
*p
)
915 emit_2ub(p
, 0xd9, 0xe1);
918 void x87_fchs( struct x86_function
*p
)
920 emit_2ub(p
, 0xd9, 0xe0);
923 void x87_fcos( struct x86_function
*p
)
925 emit_2ub(p
, 0xd9, 0xff);
929 void x87_fprndint( struct x86_function
*p
)
931 emit_2ub(p
, 0xd9, 0xfc);
934 void x87_fscale( struct x86_function
*p
)
936 emit_2ub(p
, 0xd9, 0xfd);
939 void x87_fsin( struct x86_function
*p
)
941 emit_2ub(p
, 0xd9, 0xfe);
944 void x87_fsincos( struct x86_function
*p
)
946 emit_2ub(p
, 0xd9, 0xfb);
949 void x87_fsqrt( struct x86_function
*p
)
951 emit_2ub(p
, 0xd9, 0xfa);
954 void x87_fxtract( struct x86_function
*p
)
956 emit_2ub(p
, 0xd9, 0xf4);
961 * Restrictions: -1.0 <= st0 <= 1.0
963 void x87_f2xm1( struct x86_function
*p
)
965 emit_2ub(p
, 0xd9, 0xf0);
968 /* st1 = st1 * log2(st0);
971 void x87_fyl2x( struct x86_function
*p
)
973 emit_2ub(p
, 0xd9, 0xf1);
976 /* st1 = st1 * log2(st0 + 1.0);
979 * A fast operation, with restrictions: -.29 < st0 < .29
981 void x87_fyl2xp1( struct x86_function
*p
)
983 emit_2ub(p
, 0xd9, 0xf9);
987 void x87_fld( struct x86_function
*p
, struct x86_reg arg
)
989 if (arg
.file
== file_x87
)
990 emit_2ub(p
, 0xd9, 0xc0 + arg
.idx
);
993 emit_modrm_noreg(p
, 0, arg
);
997 void x87_fst( struct x86_function
*p
, struct x86_reg dst
)
999 if (dst
.file
== file_x87
)
1000 emit_2ub(p
, 0xdd, 0xd0 + dst
.idx
);
1003 emit_modrm_noreg(p
, 2, dst
);
1007 void x87_fstp( struct x86_function
*p
, struct x86_reg dst
)
1009 if (dst
.file
== file_x87
)
1010 emit_2ub(p
, 0xdd, 0xd8 + dst
.idx
);
1013 emit_modrm_noreg(p
, 3, dst
);
1017 void x87_fcom( struct x86_function
*p
, struct x86_reg dst
)
1019 if (dst
.file
== file_x87
)
1020 emit_2ub(p
, 0xd8, 0xd0 + dst
.idx
);
1023 emit_modrm_noreg(p
, 2, dst
);
1027 void x87_fcomp( struct x86_function
*p
, struct x86_reg dst
)
1029 if (dst
.file
== file_x87
)
1030 emit_2ub(p
, 0xd8, 0xd8 + dst
.idx
);
1033 emit_modrm_noreg(p
, 3, dst
);
1038 void x87_fnstsw( struct x86_function
*p
, struct x86_reg dst
)
1040 assert(dst
.file
== file_REG32
);
1042 if (dst
.idx
== reg_AX
&&
1044 emit_2ub(p
, 0xdf, 0xe0);
1047 emit_modrm_noreg(p
, 7, dst
);
1054 /***********************************************************************
1058 void mmx_emms( struct x86_function
*p
)
1060 assert(p
->need_emms
);
1061 emit_2ub(p
, 0x0f, 0x77);
1065 void mmx_packssdw( struct x86_function
*p
,
1067 struct x86_reg src
)
1069 assert(dst
.file
== file_MMX
&&
1070 (src
.file
== file_MMX
|| src
.mod
!= mod_REG
));
1074 emit_2ub(p
, X86_TWOB
, 0x6b);
1075 emit_modrm( p
, dst
, src
);
1078 void mmx_packuswb( struct x86_function
*p
,
1080 struct x86_reg src
)
1082 assert(dst
.file
== file_MMX
&&
1083 (src
.file
== file_MMX
|| src
.mod
!= mod_REG
));
1087 emit_2ub(p
, X86_TWOB
, 0x67);
1088 emit_modrm( p
, dst
, src
);
1091 void mmx_movd( struct x86_function
*p
,
1093 struct x86_reg src
)
1096 emit_1ub(p
, X86_TWOB
);
1097 emit_op_modrm( p
, 0x6e, 0x7e, dst
, src
);
1100 void mmx_movq( struct x86_function
*p
,
1102 struct x86_reg src
)
1105 emit_1ub(p
, X86_TWOB
);
1106 emit_op_modrm( p
, 0x6f, 0x7f, dst
, src
);
1110 /***********************************************************************
1115 /* Retreive a reference to one of the function arguments, taking into
1116 * account any push/pop activity:
1118 struct x86_reg
x86_fn_arg( struct x86_function
*p
,
1121 return x86_make_disp(x86_make_reg(file_REG32
, reg_SP
),
1122 p
->stack_offset
+ arg
* 4); /* ??? */
/* Initialize the function with the default 1KB code buffer.
 */
void x86_init_func( struct x86_function *p )
{
   x86_init_func_size(p, 1024);
}
1131 void x86_init_func_size( struct x86_function
*p
, GLuint code_size
)
1133 p
->store
= _mesa_exec_malloc(code_size
);
1137 void x86_release_func( struct x86_function
*p
)
1139 _mesa_exec_free(p
->store
);
1143 void (*x86_get_func( struct x86_function
*p
))(void)
1146 _mesa_printf("disassemble %p %p\n", p
->store
, p
->csr
);
1147 return (void (*)(void)) (unsigned long) p
->store
;
1152 void x86sse_dummy( void )