[mesa.git] / src / gallium / auxiliary / rtasm / rtasm_x86sse.c
1 /**************************************************************************
2 *
3 * Copyright (C) 1999-2005 Brian Paul All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included
13 * in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
19 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 **************************************************************************/
23
24 #if defined(__i386__) || defined(__386__) || defined(i386)
25
26 #include "pipe/p_compiler.h"
27 #include "pipe/p_debug.h"
28 #include "pipe/p_pointer.h"
29
30 #include "rtasm_execmem.h"
31 #include "rtasm_x86sse.h"
32
33 #define DISASSEM 0
34 #define X86_TWOB 0x0f
35
36
37 #define DUMP_SSE 0
38
39
40 void x86_print_reg( struct x86_reg reg )
41 {
42 if (reg.mod != mod_REG)
43 debug_printf( "[" );
44
45 switch( reg.file ) {
46 case file_REG32:
47 switch( reg.idx ) {
48 case reg_AX: debug_printf( "EAX" ); break;
49 case reg_CX: debug_printf( "ECX" ); break;
50 case reg_DX: debug_printf( "EDX" ); break;
51 case reg_BX: debug_printf( "EBX" ); break;
52 case reg_SP: debug_printf( "ESP" ); break;
53 case reg_BP: debug_printf( "EBP" ); break;
54 case reg_SI: debug_printf( "ESI" ); break;
55 case reg_DI: debug_printf( "EDI" ); break;
56 }
57 break;
58 case file_MMX:
59 debug_printf( "MMX%u", reg.idx );
60 break;
61 case file_XMM:
62 debug_printf( "XMM%u", reg.idx );
63 break;
64 case file_x87:
65 debug_printf( "fp%u", reg.idx );
66 break;
67 }
68
69 if (reg.mod == mod_DISP8 ||
70 reg.mod == mod_DISP32)
71 debug_printf("%+d", reg.disp);
72
73 if (reg.mod != mod_REG)
74 debug_printf( "]" );
75 }
76
77 #if DUMP_SSE
78
79 #define DUMP_START() debug_printf( "\n" )
80 #define DUMP_END() debug_printf( "\n" )
81
82 #define DUMP() do { \
83 const char *foo = __FUNCTION__; \
84 while (*foo && *foo != '_') \
85 foo++; \
86 if (*foo) \
87 foo++; \
88 debug_printf( "\n% 4x% 15s ", p->csr - p->store, foo ); \
89 } while (0)
90
91 #define DUMP_I( I ) do { \
92 DUMP(); \
93 debug_printf( "%u", I ); \
94 } while( 0 )
95
96 #define DUMP_R( R0 ) do { \
97 DUMP(); \
98 x86_print_reg( R0 ); \
99 } while( 0 )
100
101 #define DUMP_RR( R0, R1 ) do { \
102 DUMP(); \
103 x86_print_reg( R0 ); \
104 debug_printf( ", " ); \
105 x86_print_reg( R1 ); \
106 } while( 0 )
107
108 #define DUMP_RI( R0, I ) do { \
109 DUMP(); \
110 x86_print_reg( R0 ); \
111 debug_printf( ", %u", I ); \
112 } while( 0 )
113
114 #define DUMP_RRI( R0, R1, I ) do { \
115 DUMP(); \
116 x86_print_reg( R0 ); \
117 debug_printf( ", " ); \
118 x86_print_reg( R1 ); \
119 debug_printf( ", %u", I ); \
120 } while( 0 )
121
122 #else
123
124 #define DUMP_START()
125 #define DUMP_END()
126 #define DUMP( )
127 #define DUMP_I( I )
128 #define DUMP_R( R0 )
129 #define DUMP_RR( R0, R1 )
130 #define DUMP_RI( R0, I )
131 #define DUMP_RRI( R0, R1, I )
132
133 #endif
134
135
136 static void do_realloc( struct x86_function *p )
137 {
138 if (p->store == p->error_overflow) {
139 p->csr = p->store;
140 }
141 else if (p->size == 0) {
142 p->size = 1024;
143 p->store = rtasm_exec_malloc(p->size);
144 p->csr = p->store;
145 }
146 else {
147 uintptr_t used = pointer_to_uintptr( p->csr ) - pointer_to_uintptr( p->store );
148 unsigned char *tmp = p->store;
149 p->size *= 2;
150 p->store = rtasm_exec_malloc(p->size);
151
152 if (p->store) {
153 memcpy(p->store, tmp, used);
154 p->csr = p->store + used;
155 }
156 else {
157 p->csr = p->store;
158 }
159
160 rtasm_exec_free(tmp);
161 }
162
163 if (p->store == NULL) {
164 p->store = p->csr = p->error_overflow;
165 p->size = sizeof(p->error_overflow);
166 }
167 }
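/* Note on the error path above: if executable memory cannot be
 * (re)allocated, emission is silently redirected into the small
 * p->error_overflow scratch buffer.  Later emits keep running without
 * crashing, and x86_get_func() detects this state and returns NULL so
 * the caller can fall back to a non-JIT path.
 */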
168
169 /* Emit bytes to the instruction stream:
170 */
171 static unsigned char *reserve( struct x86_function *p, int bytes )
172 {
173 if (p->csr + bytes - p->store > (int) p->size)
174 do_realloc(p);
175
176 {
177 unsigned char *csr = p->csr;
178 p->csr += bytes;
179 return csr;
180 }
181 }
182
183
184
185 static void emit_1b( struct x86_function *p, char b0 )
186 {
187 char *csr = (char *)reserve(p, 1);
188 *csr = b0;
189 }
190
191 static void emit_1i( struct x86_function *p, int i0 )
192 {
193 int *icsr = (int *)reserve(p, sizeof(i0));
194 *icsr = i0;
195 }
196
197 static void emit_1ub( struct x86_function *p, unsigned char b0 )
198 {
199 unsigned char *csr = reserve(p, 1);
200 *csr++ = b0;
201 }
202
203 static void emit_2ub( struct x86_function *p, unsigned char b0, unsigned char b1 )
204 {
205 unsigned char *csr = reserve(p, 2);
206 *csr++ = b0;
207 *csr++ = b1;
208 }
209
210 static void emit_3ub( struct x86_function *p, unsigned char b0, unsigned char b1, unsigned char b2 )
211 {
212 unsigned char *csr = reserve(p, 3);
213 *csr++ = b0;
214 *csr++ = b1;
215 *csr++ = b2;
216 }
217
218
219 /* Build a modRM byte + possible displacement. No treatment of SIB
220 * indexing. BZZT - no way to encode an absolute address.
221 *
222 * This is the "/r" field in the x86 manuals...
223 */
224 static void emit_modrm( struct x86_function *p,
225 struct x86_reg reg,
226 struct x86_reg regmem )
227 {
228 unsigned char val = 0;
229
230 assert(reg.mod == mod_REG);
231
232 val |= regmem.mod << 6; /* mod field */
233 val |= reg.idx << 3; /* reg field */
234 val |= regmem.idx; /* r/m field */
235
236 emit_1ub(p, val);
237
238 /* Oh-oh we've stumbled into the SIB thing.
239 */
240 if (regmem.file == file_REG32 &&
241 regmem.idx == reg_SP) {
242 emit_1ub(p, 0x24); /* simplistic! */
243 }
244
245 switch (regmem.mod) {
246 case mod_REG:
247 case mod_INDIRECT:
248 break;
249 case mod_DISP8:
250 emit_1b(p, (char) regmem.disp);
251 break;
252 case mod_DISP32:
253 emit_1i(p, regmem.disp);
254 break;
255 default:
256 assert(0);
257 break;
258 }
259 }
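/* Worked example (illustrative): encoding "mov eax, [ecx+8]".
 * x86_mov() picks opcode 0x8b (register destination) and calls
 * emit_modrm(p, EAX, [ECX+8]).  With the mod_* enum values matching
 * the hardware encoding (mod_INDIRECT=0, mod_DISP8=1, mod_DISP32=2,
 * mod_REG=3), the byte built here is
 *
 *    val = (mod_DISP8 << 6) | (reg_AX << 3) | reg_CX  =  0x41
 *
 * followed by the 8-bit displacement, giving the sequence 8b 41 08.
 * ECX is not ESP, so no SIB byte is needed.
 */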
260
261 /* Emits the "/0".."/7" specialized versions of the modrm ("/r") bytes.
262 */
263 static void emit_modrm_noreg( struct x86_function *p,
264 unsigned op,
265 struct x86_reg regmem )
266 {
267 struct x86_reg dummy = x86_make_reg(file_REG32, op);
268 emit_modrm(p, dummy, regmem);
269 }
270
271 /* Many x86 instructions have two opcodes, one for the case where the
272 * destination is a register and one for the case where it is a memory
273 * reference. This function selects the correct opcode based on the
274 * arguments presented.
275 */
276 static void emit_op_modrm( struct x86_function *p,
277 unsigned char op_dst_is_reg,
278 unsigned char op_dst_is_mem,
279 struct x86_reg dst,
280 struct x86_reg src )
281 {
282 switch (dst.mod) {
283 case mod_REG:
284 emit_1ub(p, op_dst_is_reg);
285 emit_modrm(p, dst, src);
286 break;
287 case mod_INDIRECT:
288 case mod_DISP32:
289 case mod_DISP8:
290 assert(src.mod == mod_REG);
291 emit_1ub(p, op_dst_is_mem);
292 emit_modrm(p, src, dst);
293 break;
294 default:
295 assert(0);
296 break;
297 }
298 }
299
300
301
302
303
304
305
306 /* Create and manipulate registers and regmem values:
307 */
308 struct x86_reg x86_make_reg( enum x86_reg_file file,
309 enum x86_reg_name idx )
310 {
311 struct x86_reg reg;
312
313 reg.file = file;
314 reg.idx = idx;
315 reg.mod = mod_REG;
316 reg.disp = 0;
317
318 return reg;
319 }
320
321 struct x86_reg x86_make_disp( struct x86_reg reg,
322 int disp )
323 {
324 assert(reg.file == file_REG32);
325
326 if (reg.mod == mod_REG)
327 reg.disp = disp;
328 else
329 reg.disp += disp;
330
331 if (reg.disp == 0)
332 reg.mod = mod_INDIRECT;
333 else if (reg.disp <= 127 && reg.disp >= -128)
334 reg.mod = mod_DISP8;
335 else
336 reg.mod = mod_DISP32;
337
338 return reg;
339 }
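/* Illustrative examples: x86_make_disp(EBX, 0) yields mod_INDIRECT
 * ([EBX]); a displacement in [-128,127] yields mod_DISP8; anything
 * larger yields mod_DISP32.  Note that the hardware encoding has no
 * mod_INDIRECT form for EBP (mod=00, r/m=101 means disp32 instead),
 * so a zero displacement off EBP would mis-encode; callers appear to
 * avoid that case.
 */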
340
341 struct x86_reg x86_deref( struct x86_reg reg )
342 {
343 return x86_make_disp(reg, 0);
344 }
345
346 struct x86_reg x86_get_base_reg( struct x86_reg reg )
347 {
348 return x86_make_reg( reg.file, reg.idx );
349 }
350
351 int x86_get_label( struct x86_function *p )
352 {
353 return p->csr - p->store;
354 }
355
356
357
358 /***********************************************************************
359 * x86 instructions
360 */
361
362
363 void x86_jcc( struct x86_function *p,
364 enum x86_cc cc,
365 int label )
366 {
367 int offset = label - (x86_get_label(p) + 2);
368 DUMP_I(cc);
369
370 if (offset < 0) {
371 assert(p->csr - p->store > -offset);
372 }
373
374 if (offset <= 127 && offset >= -128) {
375 emit_1ub(p, 0x70 + cc);
376 emit_1b(p, (char) offset);
377 }
378 else {
379 offset = label - (x86_get_label(p) + 6);
380 emit_2ub(p, 0x0f, 0x80 + cc);
381 emit_1i(p, offset);
382 }
383 }
384
385 /* Always use a 32bit offset for forward jumps:
386 */
387 int x86_jcc_forward( struct x86_function *p,
388 enum x86_cc cc )
389 {
390 DUMP_I(cc);
391 emit_2ub(p, 0x0f, 0x80 + cc);
392 emit_1i(p, 0);
393 return x86_get_label(p);
394 }
395
396 int x86_jmp_forward( struct x86_function *p)
397 {
398 DUMP();
399 emit_1ub(p, 0xe9);
400 emit_1i(p, 0);
401 return x86_get_label(p);
402 }
403
404 int x86_call_forward( struct x86_function *p)
405 {
406 DUMP();
407
408 emit_1ub(p, 0xe8);
409 emit_1i(p, 0);
410 return x86_get_label(p);
411 }
412
413 /* Fixup offset from forward jump:
414 */
415 void x86_fixup_fwd_jump( struct x86_function *p,
416 int fixup )
417 {
418 *(int *)(p->store + fixup - 4) = x86_get_label(p) - fixup;
419 }
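/* Typical forward-branch idiom (a minimal sketch; cc_E stands for any
 * enum x86_cc condition code from rtasm_x86sse.h):
 *
 *    int skip = x86_jcc_forward(p, cc_E);   -- rel32, target unknown yet
 *    ... emit the code run when the branch is not taken ...
 *    x86_fixup_fwd_jump(p, skip);           -- patch rel32 to land here
 *
 * x86_jcc_forward() returns the label just past the placeholder
 * offset, which is exactly what x86_fixup_fwd_jump() expects.
 */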
420
421 void x86_jmp( struct x86_function *p, int label)
422 {
423 DUMP_I( label );
424 emit_1ub(p, 0xe9);
425 emit_1i(p, label - x86_get_label(p) - 4);
426 }
427
428 void x86_call( struct x86_function *p, struct x86_reg reg)
429 {
430 DUMP_R( reg );
431 emit_1ub(p, 0xff);
432 emit_modrm_noreg(p, 2, reg);
433 }
434
435
436 /* michal:
437 * Temporary. As I need immediate operands and don't want to mess with the codegen,
438 * I load the immediate into a general-purpose register and use it.
439 */
440 void x86_mov_reg_imm( struct x86_function *p, struct x86_reg dst, int imm )
441 {
442 DUMP_RI( dst, imm );
443 assert(dst.mod == mod_REG);
444 emit_1ub(p, 0xb8 + dst.idx);
445 emit_1i(p, imm);
446 }
447
448 void x86_add_reg_imm8( struct x86_function *p, struct x86_reg dst, ubyte imm )
449 {
450 DUMP_RI( dst, imm );
451 assert(dst.mod == mod_REG);
452 emit_1ub(p, 0x83); /* 0x83 /0 ib: add r/m32, sign-extended imm8 */
453 emit_modrm_noreg(p, 0, dst);
454 emit_1ub(p, imm);
455 }
456
457
458 void x86_push( struct x86_function *p,
459 struct x86_reg reg )
460 {
461 DUMP_R( reg );
462 if (reg.mod == mod_REG)
463 emit_1ub(p, 0x50 + reg.idx);
464 else
465 {
466 emit_1ub(p, 0xff);
467 emit_modrm_noreg(p, 6, reg);
468 }
469
470
471 p->stack_offset += 4;
472 }
473
474 void x86_push_imm32( struct x86_function *p,
475 int imm32 )
476 {
477 DUMP_I( imm32 );
478 emit_1ub(p, 0x68);
479 emit_1i(p, imm32);
480
481 p->stack_offset += 4;
482 }
483
484
485 void x86_pop( struct x86_function *p,
486 struct x86_reg reg )
487 {
488 DUMP_R( reg );
489 assert(reg.mod == mod_REG);
490 emit_1ub(p, 0x58 + reg.idx);
491 p->stack_offset -= 4;
492 }
493
494 void x86_inc( struct x86_function *p,
495 struct x86_reg reg )
496 {
497 DUMP_R( reg );
498 assert(reg.mod == mod_REG);
499 emit_1ub(p, 0x40 + reg.idx);
500 }
501
502 void x86_dec( struct x86_function *p,
503 struct x86_reg reg )
504 {
505 DUMP_R( reg );
506 assert(reg.mod == mod_REG);
507 emit_1ub(p, 0x48 + reg.idx);
508 }
509
510 void x86_ret( struct x86_function *p )
511 {
512 DUMP();
513 assert(p->stack_offset == 0);
514 emit_1ub(p, 0xc3);
515 }
516
517 void x86_retw( struct x86_function *p, unsigned short imm )
518 {
519 DUMP();
520 emit_3ub(p, 0xc2, imm & 0xff, (imm >> 8) & 0xff);
521 }
522
523 void x86_sahf( struct x86_function *p )
524 {
525 DUMP();
526 emit_1ub(p, 0x9e);
527 }
528
529 void x86_mov( struct x86_function *p,
530 struct x86_reg dst,
531 struct x86_reg src )
532 {
533 DUMP_RR( dst, src );
534 emit_op_modrm( p, 0x8b, 0x89, dst, src );
535 }
536
537 void x86_xor( struct x86_function *p,
538 struct x86_reg dst,
539 struct x86_reg src )
540 {
541 DUMP_RR( dst, src );
542 emit_op_modrm( p, 0x33, 0x31, dst, src );
543 }
544
545 void x86_cmp( struct x86_function *p,
546 struct x86_reg dst,
547 struct x86_reg src )
548 {
549 DUMP_RR( dst, src );
550 emit_op_modrm( p, 0x3b, 0x39, dst, src );
551 }
552
553 void x86_lea( struct x86_function *p,
554 struct x86_reg dst,
555 struct x86_reg src )
556 {
557 DUMP_RR( dst, src );
558 emit_1ub(p, 0x8d);
559 emit_modrm( p, dst, src );
560 }
561
562 void x86_test( struct x86_function *p,
563 struct x86_reg dst,
564 struct x86_reg src )
565 {
566 DUMP_RR( dst, src );
567 emit_1ub(p, 0x85);
568 emit_modrm( p, dst, src );
569 }
570
571 void x86_add( struct x86_function *p,
572 struct x86_reg dst,
573 struct x86_reg src )
574 {
575 DUMP_RR( dst, src );
576 emit_op_modrm(p, 0x03, 0x01, dst, src );
577 }
578
579 /* Calculate EAX * src, results in EDX:EAX.
580 */
581 void x86_mul( struct x86_function *p,
582 struct x86_reg src )
583 {
584 DUMP_R( src );
585 emit_1ub(p, 0xf7);
586 emit_modrm_noreg(p, 4, src );
587 }
588
589
590 void x86_imul( struct x86_function *p,
591 struct x86_reg dst,
592 struct x86_reg src )
593 {
594 DUMP_RR( dst, src );
595 emit_2ub(p, X86_TWOB, 0xAF);
596 emit_modrm(p, dst, src);
597 }
598
599
600 void x86_sub( struct x86_function *p,
601 struct x86_reg dst,
602 struct x86_reg src )
603 {
604 DUMP_RR( dst, src );
605 emit_op_modrm(p, 0x2b, 0x29, dst, src );
606 }
607
608 void x86_or( struct x86_function *p,
609 struct x86_reg dst,
610 struct x86_reg src )
611 {
612 DUMP_RR( dst, src );
613 emit_op_modrm( p, 0x0b, 0x09, dst, src );
614 }
615
616 void x86_and( struct x86_function *p,
617 struct x86_reg dst,
618 struct x86_reg src )
619 {
620 DUMP_RR( dst, src );
621 emit_op_modrm( p, 0x23, 0x21, dst, src );
622 }
623
624
625
626 /***********************************************************************
627 * SSE instructions
628 */
629
630
631 void sse_movss( struct x86_function *p,
632 struct x86_reg dst,
633 struct x86_reg src )
634 {
635 DUMP_RR( dst, src );
636 emit_2ub(p, 0xF3, X86_TWOB);
637 emit_op_modrm( p, 0x10, 0x11, dst, src );
638 }
639
640 void sse_movaps( struct x86_function *p,
641 struct x86_reg dst,
642 struct x86_reg src )
643 {
644 DUMP_RR( dst, src );
645 emit_1ub(p, X86_TWOB);
646 emit_op_modrm( p, 0x28, 0x29, dst, src );
647 }
648
649 void sse_movups( struct x86_function *p,
650 struct x86_reg dst,
651 struct x86_reg src )
652 {
653 DUMP_RR( dst, src );
654 emit_1ub(p, X86_TWOB);
655 emit_op_modrm( p, 0x10, 0x11, dst, src );
656 }
657
658 void sse_movhps( struct x86_function *p,
659 struct x86_reg dst,
660 struct x86_reg src )
661 {
662 DUMP_RR( dst, src );
663 assert(dst.mod != mod_REG || src.mod != mod_REG);
664 emit_1ub(p, X86_TWOB);
665 emit_op_modrm( p, 0x16, 0x17, dst, src ); /* cf movlhps */
666 }
667
668 void sse_movlps( struct x86_function *p,
669 struct x86_reg dst,
670 struct x86_reg src )
671 {
672 DUMP_RR( dst, src );
673 assert(dst.mod != mod_REG || src.mod != mod_REG);
674 emit_1ub(p, X86_TWOB);
675 emit_op_modrm( p, 0x12, 0x13, dst, src ); /* cf movhlps */
676 }
677
678 void sse_maxps( struct x86_function *p,
679 struct x86_reg dst,
680 struct x86_reg src )
681 {
682 DUMP_RR( dst, src );
683 emit_2ub(p, X86_TWOB, 0x5F);
684 emit_modrm( p, dst, src );
685 }
686
687 void sse_maxss( struct x86_function *p,
688 struct x86_reg dst,
689 struct x86_reg src )
690 {
691 DUMP_RR( dst, src );
692 emit_3ub(p, 0xF3, X86_TWOB, 0x5F);
693 emit_modrm( p, dst, src );
694 }
695
696 void sse_divss( struct x86_function *p,
697 struct x86_reg dst,
698 struct x86_reg src )
699 {
700 DUMP_RR( dst, src );
701 emit_3ub(p, 0xF3, X86_TWOB, 0x5E);
702 emit_modrm( p, dst, src );
703 }
704
705 void sse_minps( struct x86_function *p,
706 struct x86_reg dst,
707 struct x86_reg src )
708 {
709 DUMP_RR( dst, src );
710 emit_2ub(p, X86_TWOB, 0x5D);
711 emit_modrm( p, dst, src );
712 }
713
714 void sse_subps( struct x86_function *p,
715 struct x86_reg dst,
716 struct x86_reg src )
717 {
718 DUMP_RR( dst, src );
719 emit_2ub(p, X86_TWOB, 0x5C);
720 emit_modrm( p, dst, src );
721 }
722
723 void sse_mulps( struct x86_function *p,
724 struct x86_reg dst,
725 struct x86_reg src )
726 {
727 DUMP_RR( dst, src );
728 emit_2ub(p, X86_TWOB, 0x59);
729 emit_modrm( p, dst, src );
730 }
731
732 void sse_mulss( struct x86_function *p,
733 struct x86_reg dst,
734 struct x86_reg src )
735 {
736 DUMP_RR( dst, src );
737 emit_3ub(p, 0xF3, X86_TWOB, 0x59);
738 emit_modrm( p, dst, src );
739 }
740
741 void sse_addps( struct x86_function *p,
742 struct x86_reg dst,
743 struct x86_reg src )
744 {
745 DUMP_RR( dst, src );
746 emit_2ub(p, X86_TWOB, 0x58);
747 emit_modrm( p, dst, src );
748 }
749
750 void sse_addss( struct x86_function *p,
751 struct x86_reg dst,
752 struct x86_reg src )
753 {
754 DUMP_RR( dst, src );
755 emit_3ub(p, 0xF3, X86_TWOB, 0x58);
756 emit_modrm( p, dst, src );
757 }
758
759 void sse_andnps( struct x86_function *p,
760 struct x86_reg dst,
761 struct x86_reg src )
762 {
763 DUMP_RR( dst, src );
764 emit_2ub(p, X86_TWOB, 0x55);
765 emit_modrm( p, dst, src );
766 }
767
768 void sse_andps( struct x86_function *p,
769 struct x86_reg dst,
770 struct x86_reg src )
771 {
772 DUMP_RR( dst, src );
773 emit_2ub(p, X86_TWOB, 0x54);
774 emit_modrm( p, dst, src );
775 }
776
777 void sse_rsqrtps( struct x86_function *p,
778 struct x86_reg dst,
779 struct x86_reg src )
780 {
781 DUMP_RR( dst, src );
782 emit_2ub(p, X86_TWOB, 0x52);
783 emit_modrm( p, dst, src );
784 }
785
786 void sse_rsqrtss( struct x86_function *p,
787 struct x86_reg dst,
788 struct x86_reg src )
789 {
790 DUMP_RR( dst, src );
791 emit_3ub(p, 0xF3, X86_TWOB, 0x52);
792 emit_modrm( p, dst, src );
793
794 }
795
796 void sse_movhlps( struct x86_function *p,
797 struct x86_reg dst,
798 struct x86_reg src )
799 {
800 DUMP_RR( dst, src );
801 assert(dst.mod == mod_REG && src.mod == mod_REG);
802 emit_2ub(p, X86_TWOB, 0x12);
803 emit_modrm( p, dst, src );
804 }
805
806 void sse_movlhps( struct x86_function *p,
807 struct x86_reg dst,
808 struct x86_reg src )
809 {
810 DUMP_RR( dst, src );
811 assert(dst.mod == mod_REG && src.mod == mod_REG);
812 emit_2ub(p, X86_TWOB, 0x16);
813 emit_modrm( p, dst, src );
814 }
815
816 void sse_orps( struct x86_function *p,
817 struct x86_reg dst,
818 struct x86_reg src )
819 {
820 DUMP_RR( dst, src );
821 emit_2ub(p, X86_TWOB, 0x56);
822 emit_modrm( p, dst, src );
823 }
824
825 void sse_xorps( struct x86_function *p,
826 struct x86_reg dst,
827 struct x86_reg src )
828 {
829 DUMP_RR( dst, src );
830 emit_2ub(p, X86_TWOB, 0x57);
831 emit_modrm( p, dst, src );
832 }
833
834 void sse_cvtps2pi( struct x86_function *p,
835 struct x86_reg dst,
836 struct x86_reg src )
837 {
838 DUMP_RR( dst, src );
839 assert(dst.file == file_MMX &&
840 (src.file == file_XMM || src.mod != mod_REG));
841
842 p->need_emms = 1;
843
844 emit_2ub(p, X86_TWOB, 0x2d);
845 emit_modrm( p, dst, src );
846 }
847
848 void sse2_cvtdq2ps( struct x86_function *p,
849 struct x86_reg dst,
850 struct x86_reg src )
851 {
852 DUMP_RR( dst, src );
853 emit_2ub(p, X86_TWOB, 0x5b);
854 emit_modrm( p, dst, src );
855 }
856
857
858 /* Shufps can also be used to implement a reduced swizzle when dest ==
859 * arg0.
860 */
861 void sse_shufps( struct x86_function *p,
862 struct x86_reg dst,
863 struct x86_reg src,
864 unsigned char shuf)
865 {
866 DUMP_RRI( dst, src, shuf );
867 emit_2ub(p, X86_TWOB, 0xC6);
868 emit_modrm(p, dst, src);
869 emit_1ub(p, shuf);
870 }
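/* The 8-bit immediate selects one source element per destination lane,
 * two bits per lane: lanes 0 and 1 of the result pick from dst, lanes
 * 2 and 3 pick from src (hence the "reduced swizzle" note above when
 * dst == src).  For example, with dst == src, shuf = 0x00 broadcasts
 * element 0 across all four lanes and shuf = 0x1b reverses the order.
 */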
871
872 void sse_unpckhps( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
873 {
874 DUMP_RR( dst, src );
875 emit_2ub( p, X86_TWOB, 0x15 );
876 emit_modrm( p, dst, src );
877 }
878
879 void sse_unpcklps( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
880 {
881 DUMP_RR( dst, src );
882 emit_2ub( p, X86_TWOB, 0x14 );
883 emit_modrm( p, dst, src );
884 }
885
886 void sse_cmpps( struct x86_function *p,
887 struct x86_reg dst,
888 struct x86_reg src,
889 unsigned char cc)
890 {
891 DUMP_RRI( dst, src, cc );
892 emit_2ub(p, X86_TWOB, 0xC2);
893 emit_modrm(p, dst, src);
894 emit_1ub(p, cc);
895 }
896
897 void sse_pmovmskb( struct x86_function *p,
898 struct x86_reg dst,
899 struct x86_reg src)
900 {
901 DUMP_RR( dst, src );
902 emit_3ub(p, 0x66, X86_TWOB, 0xD7);
903 emit_modrm(p, dst, src);
904 }
905
906 /***********************************************************************
907 * SSE2 instructions
908 */
909
910 /**
911 * Perform a reduced swizzle:
912 */
913 void sse2_pshufd( struct x86_function *p,
914 struct x86_reg dst,
915 struct x86_reg src,
916 unsigned char shuf)
917 {
918 DUMP_RRI( dst, src, shuf );
919 emit_3ub(p, 0x66, X86_TWOB, 0x70);
920 emit_modrm(p, dst, src);
921 emit_1ub(p, shuf);
922 }
923
924 void sse2_cvttps2dq( struct x86_function *p,
925 struct x86_reg dst,
926 struct x86_reg src )
927 {
928 DUMP_RR( dst, src );
929 emit_3ub( p, 0xF3, X86_TWOB, 0x5B );
930 emit_modrm( p, dst, src );
931 }
932
933 void sse2_cvtps2dq( struct x86_function *p,
934 struct x86_reg dst,
935 struct x86_reg src )
936 {
937 DUMP_RR( dst, src );
938 emit_3ub(p, 0x66, X86_TWOB, 0x5B);
939 emit_modrm( p, dst, src );
940 }
941
942 void sse2_packssdw( struct x86_function *p,
943 struct x86_reg dst,
944 struct x86_reg src )
945 {
946 DUMP_RR( dst, src );
947 emit_3ub(p, 0x66, X86_TWOB, 0x6B);
948 emit_modrm( p, dst, src );
949 }
950
951 void sse2_packsswb( struct x86_function *p,
952 struct x86_reg dst,
953 struct x86_reg src )
954 {
955 DUMP_RR( dst, src );
956 emit_3ub(p, 0x66, X86_TWOB, 0x63);
957 emit_modrm( p, dst, src );
958 }
959
960 void sse2_packuswb( struct x86_function *p,
961 struct x86_reg dst,
962 struct x86_reg src )
963 {
964 DUMP_RR( dst, src );
965 emit_3ub(p, 0x66, X86_TWOB, 0x67);
966 emit_modrm( p, dst, src );
967 }
968
969 void sse2_punpcklbw( struct x86_function *p,
970 struct x86_reg dst,
971 struct x86_reg src )
972 {
973 DUMP_RR( dst, src );
974 emit_3ub(p, 0x66, X86_TWOB, 0x60);
975 emit_modrm( p, dst, src );
976 }
977
978
979 void sse2_rcpps( struct x86_function *p,
980 struct x86_reg dst,
981 struct x86_reg src )
982 {
983 DUMP_RR( dst, src );
984 emit_2ub(p, X86_TWOB, 0x53);
985 emit_modrm( p, dst, src );
986 }
987
988 void sse2_rcpss( struct x86_function *p,
989 struct x86_reg dst,
990 struct x86_reg src )
991 {
992 DUMP_RR( dst, src );
993 emit_3ub(p, 0xF3, X86_TWOB, 0x53);
994 emit_modrm( p, dst, src );
995 }
996
997 void sse2_movd( struct x86_function *p,
998 struct x86_reg dst,
999 struct x86_reg src )
1000 {
1001 DUMP_RR( dst, src );
1002 emit_2ub(p, 0x66, X86_TWOB);
1003 emit_op_modrm( p, 0x6e, 0x7e, dst, src );
1004 }
1005
1006
1007
1008
1009 /***********************************************************************
1010 * x87 instructions
1011 */
1012 static void note_x87_pop( struct x86_function *p )
1013 {
1014 p->x87_stack--;
1015 assert(p->x87_stack >= 0);
1016 }
1017
1018 static void note_x87_push( struct x86_function *p )
1019 {
1020 p->x87_stack++;
1021 assert(p->x87_stack <= 7);
1022 }
1023
1024 void x87_assert_stack_empty( struct x86_function *p )
1025 {
1026 assert (p->x87_stack == 0);
1027 }
1028
1029
1030 void x87_fist( struct x86_function *p, struct x86_reg dst )
1031 {
1032 DUMP_R( dst );
1033 emit_1ub(p, 0xdb);
1034 emit_modrm_noreg(p, 2, dst);
1035 }
1036
1037 void x87_fistp( struct x86_function *p, struct x86_reg dst )
1038 {
1039 DUMP_R( dst );
1040 emit_1ub(p, 0xdb);
1041 emit_modrm_noreg(p, 3, dst);
1042 note_x87_pop(p);
1043 }
1044
1045 void x87_fild( struct x86_function *p, struct x86_reg arg )
1046 {
1047 DUMP_R( arg );
1048 emit_1ub(p, 0xdf);
1049 emit_modrm_noreg(p, 0, arg);
1050 note_x87_push(p);
1051 }
1052
1053 void x87_fldz( struct x86_function *p )
1054 {
1055 DUMP();
1056 emit_2ub(p, 0xd9, 0xee);
1057 note_x87_push(p);
1058 }
1059
1060
1061 void x87_fldcw( struct x86_function *p, struct x86_reg arg )
1062 {
1063 DUMP_R( arg );
1064 assert(arg.file == file_REG32);
1065 assert(arg.mod != mod_REG);
1066 emit_1ub(p, 0xd9);
1067 emit_modrm_noreg(p, 5, arg);
1068 }
1069
1070 void x87_fld1( struct x86_function *p )
1071 {
1072 DUMP();
1073 emit_2ub(p, 0xd9, 0xe8);
1074 note_x87_push(p);
1075 }
1076
1077 void x87_fldl2e( struct x86_function *p )
1078 {
1079 DUMP();
1080 emit_2ub(p, 0xd9, 0xea);
1081 note_x87_push(p);
1082 }
1083
1084 void x87_fldln2( struct x86_function *p )
1085 {
1086 DUMP();
1087 emit_2ub(p, 0xd9, 0xed);
1088 note_x87_push(p);
1089 }
1090
1091 void x87_fwait( struct x86_function *p )
1092 {
1093 DUMP();
1094 emit_1ub(p, 0x9b);
1095 }
1096
1097 void x87_fnclex( struct x86_function *p )
1098 {
1099 DUMP();
1100 emit_2ub(p, 0xdb, 0xe2);
1101 }
1102
1103 void x87_fclex( struct x86_function *p )
1104 {
1105 x87_fwait(p);
1106 x87_fnclex(p);
1107 }
1108
1109 void x87_fcmovb( struct x86_function *p, struct x86_reg arg )
1110 {
1111 DUMP_R( arg );
1112 assert(arg.file == file_x87);
1113 emit_2ub(p, 0xda, 0xc0+arg.idx);
1114 }
1115
1116 void x87_fcmove( struct x86_function *p, struct x86_reg arg )
1117 {
1118 DUMP_R( arg );
1119 assert(arg.file == file_x87);
1120 emit_2ub(p, 0xda, 0xc8+arg.idx);
1121 }
1122
1123 void x87_fcmovbe( struct x86_function *p, struct x86_reg arg )
1124 {
1125 DUMP_R( arg );
1126 assert(arg.file == file_x87);
1127 emit_2ub(p, 0xda, 0xd0+arg.idx);
1128 }
1129
1130 void x87_fcmovnb( struct x86_function *p, struct x86_reg arg )
1131 {
1132 DUMP_R( arg );
1133 assert(arg.file == file_x87);
1134 emit_2ub(p, 0xdb, 0xc0+arg.idx);
1135 }
1136
1137 void x87_fcmovne( struct x86_function *p, struct x86_reg arg )
1138 {
1139 DUMP_R( arg );
1140 assert(arg.file == file_x87);
1141 emit_2ub(p, 0xdb, 0xc8+arg.idx);
1142 }
1143
1144 void x87_fcmovnbe( struct x86_function *p, struct x86_reg arg )
1145 {
1146 DUMP_R( arg );
1147 assert(arg.file == file_x87);
1148 emit_2ub(p, 0xdb, 0xd0+arg.idx);
1149 }
1150
1151
1152
1153 static void x87_arith_op( struct x86_function *p, struct x86_reg dst, struct x86_reg arg,
1154 unsigned char dst0ub0,
1155 unsigned char dst0ub1,
1156 unsigned char arg0ub0,
1157 unsigned char arg0ub1,
1158 unsigned char argmem_noreg)
1159 {
1160 assert(dst.file == file_x87);
1161
1162 if (arg.file == file_x87) {
1163 if (dst.idx == 0)
1164 emit_2ub(p, dst0ub0, dst0ub1+arg.idx);
1165 else if (arg.idx == 0)
1166 emit_2ub(p, arg0ub0, arg0ub1+dst.idx); /* dst is st(i), arg is st(0) */
1167 else
1168 assert(0);
1169 }
1170 else if (dst.idx == 0) {
1171 assert(arg.file == file_REG32);
1172 emit_1ub(p, 0xd8);
1173 emit_modrm_noreg(p, argmem_noreg, arg);
1174 }
1175 else
1176 assert(0);
1177 }
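/* Illustrative encodings of the register-register forms, using
 * x87_fmul() with st(i) built as x86_make_reg(file_x87, i):
 *
 *    x87_fmul(p, st(0), st(2))  ->  d8 ca   (fmul st(0), st(2))
 *    x87_fmul(p, st(2), st(0))  ->  dc ca   (fmul st(2), st(0))
 *
 * One of the two operands must be st(0); any other pairing asserts.
 */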
1178
1179 void x87_fmul( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1180 {
1181 DUMP_RR( dst, src );
1182 x87_arith_op(p, dst, src,
1183 0xd8, 0xc8,
1184 0xdc, 0xc8,
1185 1); /* d8 /1 = fmul m32fp */
1186 }
1187
1188 void x87_fsub( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1189 {
1190 DUMP_RR( dst, src );
1191 x87_arith_op(p, dst, src,
1192 0xd8, 0xe0,
1193 0xdc, 0xe8,
1194 4);
1195 }
1196
1197 void x87_fsubr( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1198 {
1199 DUMP_RR( dst, src );
1200 x87_arith_op(p, dst, src,
1201 0xd8, 0xe8,
1202 0xdc, 0xe0,
1203 5);
1204 }
1205
1206 void x87_fadd( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1207 {
1208 DUMP_RR( dst, src );
1209 x87_arith_op(p, dst, src,
1210 0xd8, 0xc0,
1211 0xdc, 0xc0,
1212 0);
1213 }
1214
1215 void x87_fdiv( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1216 {
1217 DUMP_RR( dst, src );
1218 x87_arith_op(p, dst, src,
1219 0xd8, 0xf0,
1220 0xdc, 0xf8,
1221 6);
1222 }
1223
1224 void x87_fdivr( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1225 {
1226 DUMP_RR( dst, src );
1227 x87_arith_op(p, dst, src,
1228 0xd8, 0xf8,
1229 0xdc, 0xf0,
1230 7);
1231 }
1232
1233 void x87_fmulp( struct x86_function *p, struct x86_reg dst )
1234 {
1235 DUMP_R( dst );
1236 assert(dst.file == file_x87);
1237 assert(dst.idx >= 1);
1238 emit_2ub(p, 0xde, 0xc8+dst.idx);
1239 note_x87_pop(p);
1240 }
1241
1242 void x87_fsubp( struct x86_function *p, struct x86_reg dst )
1243 {
1244 DUMP_R( dst );
1245 assert(dst.file == file_x87);
1246 assert(dst.idx >= 1);
1247 emit_2ub(p, 0xde, 0xe8+dst.idx);
1248 note_x87_pop(p);
1249 }
1250
1251 void x87_fsubrp( struct x86_function *p, struct x86_reg dst )
1252 {
1253 DUMP_R( dst );
1254 assert(dst.file == file_x87);
1255 assert(dst.idx >= 1);
1256 emit_2ub(p, 0xde, 0xe0+dst.idx);
1257 note_x87_pop(p);
1258 }
1259
1260 void x87_faddp( struct x86_function *p, struct x86_reg dst )
1261 {
1262 DUMP_R( dst );
1263 assert(dst.file == file_x87);
1264 assert(dst.idx >= 1);
1265 emit_2ub(p, 0xde, 0xc0+dst.idx);
1266 note_x87_pop(p);
1267 }
1268
1269 void x87_fdivp( struct x86_function *p, struct x86_reg dst )
1270 {
1271 DUMP_R( dst );
1272 assert(dst.file == file_x87);
1273 assert(dst.idx >= 1);
1274 emit_2ub(p, 0xde, 0xf8+dst.idx);
1275 note_x87_pop(p);
1276 }
1277
1278 void x87_fdivrp( struct x86_function *p, struct x86_reg dst )
1279 {
1280 DUMP_R( dst );
1281 assert(dst.file == file_x87);
1282 assert(dst.idx >= 1);
1283 emit_2ub(p, 0xde, 0xf0+dst.idx);
1284 note_x87_pop(p);
1285 }
1286
1287 void x87_ftst( struct x86_function *p )
1288 {
1289 DUMP();
1290 emit_2ub(p, 0xd9, 0xe4);
1291 }
1292
1293 void x87_fucom( struct x86_function *p, struct x86_reg arg )
1294 {
1295 DUMP_R( arg );
1296 assert(arg.file == file_x87);
1297 emit_2ub(p, 0xdd, 0xe0+arg.idx);
1298 }
1299
1300 void x87_fucomp( struct x86_function *p, struct x86_reg arg )
1301 {
1302 DUMP_R( arg );
1303 assert(arg.file == file_x87);
1304 emit_2ub(p, 0xdd, 0xe8+arg.idx);
1305 note_x87_pop(p);
1306 }
1307
1308 void x87_fucompp( struct x86_function *p )
1309 {
1310 DUMP();
1311 emit_2ub(p, 0xda, 0xe9);
1312 note_x87_pop(p); /* pop twice */
1313 note_x87_pop(p); /* pop twice */
1314 }
1315
1316 void x87_fxch( struct x86_function *p, struct x86_reg arg )
1317 {
1318 DUMP_R( arg );
1319 assert(arg.file == file_x87);
1320 emit_2ub(p, 0xd9, 0xc8+arg.idx);
1321 }
1322
1323 void x87_fabs( struct x86_function *p )
1324 {
1325 DUMP();
1326 emit_2ub(p, 0xd9, 0xe1);
1327 }
1328
1329 void x87_fchs( struct x86_function *p )
1330 {
1331 DUMP();
1332 emit_2ub(p, 0xd9, 0xe0);
1333 }
1334
1335 void x87_fcos( struct x86_function *p )
1336 {
1337 DUMP();
1338 emit_2ub(p, 0xd9, 0xff);
1339 }
1340
1341
1342 void x87_fprndint( struct x86_function *p )
1343 {
1344 DUMP();
1345 emit_2ub(p, 0xd9, 0xfc);
1346 }
1347
1348 void x87_fscale( struct x86_function *p )
1349 {
1350 DUMP();
1351 emit_2ub(p, 0xd9, 0xfd);
1352 }
1353
1354 void x87_fsin( struct x86_function *p )
1355 {
1356 DUMP();
1357 emit_2ub(p, 0xd9, 0xfe);
1358 }
1359
1360 void x87_fsincos( struct x86_function *p )
1361 {
1362 DUMP();
1363 emit_2ub(p, 0xd9, 0xfb);
1364 }
1365
1366 void x87_fsqrt( struct x86_function *p )
1367 {
1368 DUMP();
1369 emit_2ub(p, 0xd9, 0xfa);
1370 }
1371
1372 void x87_fxtract( struct x86_function *p )
1373 {
1374 DUMP();
1375 emit_2ub(p, 0xd9, 0xf4);
1376 }
1377
1378 /* st0 = (2^st0)-1
1379 *
1380 * Restrictions: -1.0 <= st0 <= 1.0
1381 */
1382 void x87_f2xm1( struct x86_function *p )
1383 {
1384 DUMP();
1385 emit_2ub(p, 0xd9, 0xf0);
1386 }
1387
1388 /* st1 = st1 * log2(st0);
1389 * pop_stack;
1390 */
1391 void x87_fyl2x( struct x86_function *p )
1392 {
1393 DUMP();
1394 emit_2ub(p, 0xd9, 0xf1);
1395 note_x87_pop(p);
1396 }
1397
1398 /* st1 = st1 * log2(st0 + 1.0);
1399 * pop_stack;
1400 *
1401 * A fast operation, with restrictions: -.29 < st0 < .29
1402 */
1403 void x87_fyl2xp1( struct x86_function *p )
1404 {
1405 DUMP();
1406 emit_2ub(p, 0xd9, 0xf9);
1407 note_x87_pop(p);
1408 }
1409
1410
1411 void x87_fld( struct x86_function *p, struct x86_reg arg )
1412 {
1413 DUMP_R( arg );
1414 if (arg.file == file_x87)
1415 emit_2ub(p, 0xd9, 0xc0 + arg.idx);
1416 else {
1417 emit_1ub(p, 0xd9);
1418 emit_modrm_noreg(p, 0, arg);
1419 }
1420 note_x87_push(p);
1421 }
1422
1423 void x87_fst( struct x86_function *p, struct x86_reg dst )
1424 {
1425 DUMP_R( dst );
1426 if (dst.file == file_x87)
1427 emit_2ub(p, 0xdd, 0xd0 + dst.idx);
1428 else {
1429 emit_1ub(p, 0xd9);
1430 emit_modrm_noreg(p, 2, dst);
1431 }
1432 }
1433
1434 void x87_fstp( struct x86_function *p, struct x86_reg dst )
1435 {
1436 DUMP_R( dst );
1437 if (dst.file == file_x87)
1438 emit_2ub(p, 0xdd, 0xd8 + dst.idx);
1439 else {
1440 emit_1ub(p, 0xd9);
1441 emit_modrm_noreg(p, 3, dst);
1442 }
1443 note_x87_pop(p);
1444 }
1445
1446 void x87_fpop( struct x86_function *p )
1447 {
1448 x87_fstp( p, x86_make_reg( file_x87, 0 ));
1449 }
1450
1451
1452 void x87_fcom( struct x86_function *p, struct x86_reg dst )
1453 {
1454 DUMP_R( dst );
1455 if (dst.file == file_x87)
1456 emit_2ub(p, 0xd8, 0xd0 + dst.idx);
1457 else {
1458 emit_1ub(p, 0xd8);
1459 emit_modrm_noreg(p, 2, dst);
1460 }
1461 }
1462
1463
1464 void x87_fcomp( struct x86_function *p, struct x86_reg dst )
1465 {
1466 DUMP_R( dst );
1467 if (dst.file == file_x87)
1468 emit_2ub(p, 0xd8, 0xd8 + dst.idx);
1469 else {
1470 emit_1ub(p, 0xd8);
1471 emit_modrm_noreg(p, 3, dst);
1472 }
1473 note_x87_pop(p);
1474 }
1475
1476 void x87_fcomi( struct x86_function *p, struct x86_reg arg )
1477 {
1478 DUMP_R( arg );
1479 emit_2ub(p, 0xdb, 0xf0+arg.idx);
1480 }
1481
1482 void x87_fcomip( struct x86_function *p, struct x86_reg arg )
1483 {
1484 DUMP_R( arg );
1485 emit_2ub(p, 0xdf, 0xf0+arg.idx); /* fcomip: df f0+i (fcomi is db f0+i) */
1486 note_x87_pop(p);
1487 }
1488
1489
1490 void x87_fnstsw( struct x86_function *p, struct x86_reg dst )
1491 {
1492 DUMP_R( dst );
1493 assert(dst.file == file_REG32);
1494
1495 if (dst.idx == reg_AX &&
1496 dst.mod == mod_REG)
1497 emit_2ub(p, 0xdf, 0xe0);
1498 else {
1499 emit_1ub(p, 0xdd);
1500 emit_modrm_noreg(p, 7, dst);
1501 }
1502 }
1503
1504
1505 void x87_fnstcw( struct x86_function *p, struct x86_reg dst )
1506 {
1507 DUMP_R( dst );
1508 assert(dst.file == file_REG32);
1509
1510 emit_1ub(p, 0x9b); /* WAIT -- needed? */
1511 emit_1ub(p, 0xd9);
1512 emit_modrm_noreg(p, 7, dst);
1513 }
1514
1515
1516
1517
1518 /***********************************************************************
1519 * MMX instructions
1520 */
1521
1522 void mmx_emms( struct x86_function *p )
1523 {
1524 DUMP();
1525 assert(p->need_emms);
1526 emit_2ub(p, 0x0f, 0x77);
1527 p->need_emms = 0;
1528 }
1529
1530 void mmx_packssdw( struct x86_function *p,
1531 struct x86_reg dst,
1532 struct x86_reg src )
1533 {
1534 DUMP_RR( dst, src );
1535 assert(dst.file == file_MMX &&
1536 (src.file == file_MMX || src.mod != mod_REG));
1537
1538 p->need_emms = 1;
1539
1540 emit_2ub(p, X86_TWOB, 0x6b);
1541 emit_modrm( p, dst, src );
1542 }
1543
1544 void mmx_packuswb( struct x86_function *p,
1545 struct x86_reg dst,
1546 struct x86_reg src )
1547 {
1548 DUMP_RR( dst, src );
1549 assert(dst.file == file_MMX &&
1550 (src.file == file_MMX || src.mod != mod_REG));
1551
1552 p->need_emms = 1;
1553
1554 emit_2ub(p, X86_TWOB, 0x67);
1555 emit_modrm( p, dst, src );
1556 }
1557
1558 void mmx_movd( struct x86_function *p,
1559 struct x86_reg dst,
1560 struct x86_reg src )
1561 {
1562 DUMP_RR( dst, src );
1563 p->need_emms = 1;
1564 emit_1ub(p, X86_TWOB);
1565 emit_op_modrm( p, 0x6e, 0x7e, dst, src );
1566 }
1567
1568 void mmx_movq( struct x86_function *p,
1569 struct x86_reg dst,
1570 struct x86_reg src )
1571 {
1572 DUMP_RR( dst, src );
1573 p->need_emms = 1;
1574 emit_1ub(p, X86_TWOB);
1575 emit_op_modrm( p, 0x6f, 0x7f, dst, src );
1576 }
1577
1578
1579 /***********************************************************************
1580 * Helper functions
1581 */
1582
1583
1584 void x86_cdecl_caller_push_regs( struct x86_function *p )
1585 {
1586 x86_push(p, x86_make_reg(file_REG32, reg_AX));
1587 x86_push(p, x86_make_reg(file_REG32, reg_CX));
1588 x86_push(p, x86_make_reg(file_REG32, reg_DX));
1589 }
1590
1591 void x86_cdecl_caller_pop_regs( struct x86_function *p )
1592 {
1593 x86_pop(p, x86_make_reg(file_REG32, reg_DX));
1594 x86_pop(p, x86_make_reg(file_REG32, reg_CX));
1595 x86_pop(p, x86_make_reg(file_REG32, reg_AX));
1596 }
1597
1598
1599 /* Retrieve a reference to one of the function arguments, taking into
1600 * account any push/pop activity:
1601 */
1602 struct x86_reg x86_fn_arg( struct x86_function *p,
1603 unsigned arg )
1604 {
1605 return x86_make_disp(x86_make_reg(file_REG32, reg_SP),
1606 p->stack_offset + arg * 4); /* ??? */
1607 }
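/* Illustrative: under cdecl, on entry [ESP] holds the return address
 * and [ESP+4] the first argument, so x86_fn_arg(p, 1) resolves to
 * [ESP + stack_offset + 4].  The stack_offset term is what keeps this
 * correct after intervening x86_push()/x86_pop() calls have moved ESP.
 */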
1608
1609
1610 void x86_init_func( struct x86_function *p )
1611 {
1612 p->size = 0;
1613 p->store = NULL;
1614 p->csr = p->store;
1615 DUMP_START();
1616 }
1617
1618 void x86_init_func_size( struct x86_function *p, unsigned code_size )
1619 {
1620 p->size = code_size;
1621 p->store = rtasm_exec_malloc(code_size);
1622 if (p->store == NULL) {
1623 p->store = p->error_overflow;
1624 }
1625 p->csr = p->store;
1626 DUMP_START();
1627 }
1628
1629 void x86_release_func( struct x86_function *p )
1630 {
1631 if (p->store && p->store != p->error_overflow)
1632 rtasm_exec_free(p->store);
1633
1634 p->store = NULL;
1635 p->csr = NULL;
1636 p->size = 0;
1637 }
1638
1639
1640 void (*x86_get_func( struct x86_function *p ))(void)
1641 {
1642 DUMP_END();
1643 if (DISASSEM && p->store)
1644 debug_printf("disassemble %p %p\n", p->store, p->csr);
1645
1646 if (p->store == p->error_overflow)
1647 return (void (*)(void)) NULL;
1648 else
1649 return (void (*)(void)) p->store;
1650 }
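/* Typical lifecycle (a minimal sketch; the generated body and the
 * function type are illustrative only):
 *
 *    struct x86_function f;
 *    int (*func)(int);
 *
 *    x86_init_func(&f);
 *    x86_mov(&f, x86_make_reg(file_REG32, reg_AX), x86_fn_arg(&f, 1));
 *    x86_ret(&f);
 *
 *    func = (int (*)(int)) x86_get_func(&f);
 *    if (func)
 *       func(42);
 *    x86_release_func(&f);
 *
 * x86_get_func() returns NULL when emission overflowed into the error
 * buffer, so the result should be checked before calling it.
 */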
1651
1652 #else
1653
1654 void x86sse_dummy( void )
1655 {
1656 }
1657
1658 #endif