1 /**************************************************************************
2 *
3 * Copyright (C) 1999-2005 Brian Paul All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included
13 * in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
19 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 **************************************************************************/
23
24 #if defined(__i386__) || defined(__386__) || defined(i386)
25
26 #include "pipe/p_compiler.h"
27 #include "pipe/p_debug.h"
28 #include "pipe/p_pointer.h"
29
30 #include "rtasm_execmem.h"
31 #include "rtasm_x86sse.h"
32
33 #define DISASSEM 0
34 #define X86_TWOB 0x0f
35
36
37 #define DUMP_SSE 0
38
39 #if DUMP_SSE
40
41 static void
42 _print_reg(
43 struct x86_reg reg )
44 {
45 if (reg.mod != mod_REG)
46 debug_printf( "[" );
47
48 switch( reg.file ) {
49 case file_REG32:
50 switch( reg.idx ) {
51 case reg_AX: debug_printf( "EAX" ); break;
52 case reg_CX: debug_printf( "ECX" ); break;
53 case reg_DX: debug_printf( "EDX" ); break;
54 case reg_BX: debug_printf( "EBX" ); break;
55 case reg_SP: debug_printf( "ESP" ); break;
56 case reg_BP: debug_printf( "EBP" ); break;
57 case reg_SI: debug_printf( "ESI" ); break;
58 case reg_DI: debug_printf( "EDI" ); break;
59 }
60 break;
61 case file_MMX:
62 debug_printf( "MMX%u", reg.idx );
63 break;
64 case file_XMM:
65 debug_printf( "XMM%u", reg.idx );
66 break;
67 case file_x87:
68 debug_printf( "fp%u", reg.idx );
69 break;
70 }
71
72 if (reg.mod == mod_DISP8 ||
73 reg.mod == mod_DISP32)
74 debug_printf("+%d", reg.disp);
75
76 if (reg.mod != mod_REG)
77 debug_printf( "]" );
78 }
79
80
81 #define DUMP_START() debug_printf( "\n" )
82 #define DUMP_END() debug_printf( "\n" )
83
84 #define DUMP() do { \
85 const char *foo = __FUNCTION__; \
86 while (*foo && *foo != '_') \
87 foo++; \
88 if (*foo) \
89 foo++; \
90 debug_printf( "\n% 15s ", foo ); \
91 } while (0)
92
93 #define DUMP_I( I ) do { \
94 DUMP(); \
95 debug_printf( "%u", I ); \
96 } while( 0 )
97
98 #define DUMP_R( R0 ) do { \
99 DUMP(); \
100 _print_reg( R0 ); \
101 } while( 0 )
102
103 #define DUMP_RR( R0, R1 ) do { \
104 DUMP(); \
105 _print_reg( R0 ); \
106 debug_printf( ", " ); \
107 _print_reg( R1 ); \
108 } while( 0 )
109
110 #define DUMP_RI( R0, I ) do { \
111 DUMP(); \
112 _print_reg( R0 ); \
113 debug_printf( ", %u", I ); \
114 } while( 0 )
115
116 #define DUMP_RRI( R0, R1, I ) do { \
117 DUMP(); \
118 _print_reg( R0 ); \
119 debug_printf( ", " ); \
120 _print_reg( R1 ); \
121 debug_printf( ", %u", I ); \
122 } while( 0 )
123
124 #else
125
126 #define DUMP_START()
127 #define DUMP_END()
128 #define DUMP( )
129 #define DUMP_I( I )
130 #define DUMP_R( R0 )
131 #define DUMP_RR( R0, R1 )
132 #define DUMP_RI( R0, I )
133 #define DUMP_RRI( R0, R1, I )
134
135 #endif
136
137
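/* Grow (or initially allocate) the executable store.  The buffer starts at
 * 1 KB and doubles on each overflow, copying the bytes emitted so far.  If
 * allocation ever fails, emission is redirected into the small on-struct
 * error_overflow buffer (and simply rewinds within it on later overflows),
 * so code generation can continue harmlessly; x86_get_func() reports the
 * failure by returning NULL.
 */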
138 static void do_realloc( struct x86_function *p )
139 {
140 if (p->store == p->error_overflow) {
141 p->csr = p->store;
142 }
143 else if (p->size == 0) {
144 p->size = 1024;
145 p->store = rtasm_exec_malloc(p->size);
146 p->csr = p->store;
147 }
148 else {
149 uintptr_t used = pointer_to_uintptr( p->csr ) - pointer_to_uintptr( p->store );
150 unsigned char *tmp = p->store;
151 p->size *= 2;
152 p->store = rtasm_exec_malloc(p->size);
153
154 if (p->store) {
155 memcpy(p->store, tmp, used);
156 p->csr = p->store + used;
157 }
158 else {
159 p->csr = p->store;
160 }
161
162 rtasm_exec_free(tmp);
163 }
164
165 if (p->store == NULL) {
166 p->store = p->csr = p->error_overflow;
167 p->size = sizeof(p->error_overflow);
168 }
169 }
170
171 /* Emit bytes to the instruction stream:
172 */
173 static unsigned char *reserve( struct x86_function *p, int bytes )
174 {
175 if (p->csr + bytes - p->store > (int) p->size)
176 do_realloc(p);
177
178 {
179 unsigned char *csr = p->csr;
180 p->csr += bytes;
181 return csr;
182 }
183 }
184
185
186
187 static void emit_1b( struct x86_function *p, char b0 )
188 {
189 char *csr = (char *)reserve(p, 1);
190 *csr = b0;
191 }
192
193 static void emit_1i( struct x86_function *p, int i0 )
194 {
195 int *icsr = (int *)reserve(p, sizeof(i0));
196 *icsr = i0;
197 }
198
199 static void emit_1ub( struct x86_function *p, unsigned char b0 )
200 {
201 unsigned char *csr = reserve(p, 1);
202 *csr++ = b0;
203 }
204
205 static void emit_2ub( struct x86_function *p, unsigned char b0, unsigned char b1 )
206 {
207 unsigned char *csr = reserve(p, 2);
208 *csr++ = b0;
209 *csr++ = b1;
210 }
211
212 static void emit_3ub( struct x86_function *p, unsigned char b0, unsigned char b1, unsigned char b2 )
213 {
214 unsigned char *csr = reserve(p, 3);
215 *csr++ = b0;
216 *csr++ = b1;
217 *csr++ = b2;
218 }
219
220
221 /* Build a modRM byte + possible displacement. No treatment of SIB
222 * indexing. BZZT - no way to encode an absolute address.
223 */
224 static void emit_modrm( struct x86_function *p,
225 struct x86_reg reg,
226 struct x86_reg regmem )
227 {
228 unsigned char val = 0;
229
230 assert(reg.mod == mod_REG);
231
232 val |= regmem.mod << 6; /* mod field */
233 val |= reg.idx << 3; /* reg field */
234 val |= regmem.idx; /* r/m field */
235
236 emit_1ub(p, val);
237
238 /* Oh-oh we've stumbled into the SIB thing.
239 */
240 if (regmem.file == file_REG32 &&
241 regmem.idx == reg_SP) {
242 emit_1ub(p, 0x24); /* simplistic! */
243 }
244
245 switch (regmem.mod) {
246 case mod_REG:
247 case mod_INDIRECT:
248 break;
249 case mod_DISP8:
250 emit_1b(p, (char) regmem.disp);
251 break;
252 case mod_DISP32:
253 emit_1i(p, regmem.disp);
254 break;
255 default:
256 assert(0);
257 break;
258 }
259 }
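/* For example, with reg = EAX and regmem an 8-bit displacement off EBX
 * ([EBX+8]), the byte built above is mod=01, reg=000, r/m=011 = 0x43,
 * followed by the disp8 byte 0x08.
 */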
260
261
262 static void emit_modrm_noreg( struct x86_function *p,
263 unsigned op,
264 struct x86_reg regmem )
265 {
266 struct x86_reg dummy = x86_make_reg(file_REG32, op);
267 emit_modrm(p, dummy, regmem);
268 }
269
270 /* Many x86 instructions have two opcodes: one for the case where the
271 * destination is a register and one for the case where it is a memory
272 * reference.  This function selects the correct opcode based on the
273 * arguments presented.
274 */
275 static void emit_op_modrm( struct x86_function *p,
276 unsigned char op_dst_is_reg,
277 unsigned char op_dst_is_mem,
278 struct x86_reg dst,
279 struct x86_reg src )
280 {
281 switch (dst.mod) {
282 case mod_REG:
283 emit_1ub(p, op_dst_is_reg);
284 emit_modrm(p, dst, src);
285 break;
286 case mod_INDIRECT:
287 case mod_DISP32:
288 case mod_DISP8:
289 assert(src.mod == mod_REG);
290 emit_1ub(p, op_dst_is_mem);
291 emit_modrm(p, src, dst);
292 break;
293 default:
294 assert(0);
295 break;
296 }
297 }
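/* For example, x86_mov() passes 0x8b/0x89 here, giving:
 *
 *    mov eax, [ebx+8]   ->  8B 43 08   (destination is a register)
 *    mov [ebx+8], eax   ->  89 43 08   (destination is memory)
 */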
298
299
300
301
302
303
304
305 /* Create and manipulate registers and regmem values:
306 */
307 struct x86_reg x86_make_reg( enum x86_reg_file file,
308 enum x86_reg_name idx )
309 {
310 struct x86_reg reg;
311
312 reg.file = file;
313 reg.idx = idx;
314 reg.mod = mod_REG;
315 reg.disp = 0;
316
317 return reg;
318 }
319
320 struct x86_reg x86_make_disp( struct x86_reg reg,
321 int disp )
322 {
323 assert(reg.file == file_REG32);
324
325 if (reg.mod == mod_REG)
326 reg.disp = disp;
327 else
328 reg.disp += disp;
329
330 if (reg.disp == 0)
331 reg.mod = mod_INDIRECT;
332 else if (reg.disp <= 127 && reg.disp >= -128)
333 reg.mod = mod_DISP8;
334 else
335 reg.mod = mod_DISP32;
336
337 return reg;
338 }
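/* For example, starting from EBX: a displacement of 0 yields the indirect
 * form [EBX], 8 yields the disp8 form [EBX+8], and anything outside
 * -128..127 (say 512) yields the disp32 form [EBX+512].  Beware that x86
 * has no plain indirect encoding for EBP (mod=00, r/m=101 means an
 * absolute disp32), so a zero displacement off EBP would be mis-encoded.
 */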
339
340 struct x86_reg x86_deref( struct x86_reg reg )
341 {
342 return x86_make_disp(reg, 0);
343 }
344
345 struct x86_reg x86_get_base_reg( struct x86_reg reg )
346 {
347 return x86_make_reg( reg.file, reg.idx );
348 }
349
350 int x86_get_label( struct x86_function *p )
351 {
352 return p->csr - p->store;
353 }
354
355
356
357 /***********************************************************************
358 * x86 instructions
359 */
360
361
362 void x86_jcc( struct x86_function *p,
363 enum x86_cc cc,
364 int label )
365 {
366 int offset = label - (x86_get_label(p) + 2);
367 DUMP_I(cc);
368
369 if (offset < 0) {
370 int amt = p->csr - p->store;
371 assert(amt > -offset);
372 }
373
374 if (offset <= 127 && offset >= -128) {
375 emit_1ub(p, 0x70 + cc);
376 emit_1b(p, (char) offset);
377 }
378 else {
379 offset = label - (x86_get_label(p) + 6);
380 emit_2ub(p, 0x0f, 0x80 + cc);
381 emit_1i(p, offset);
382 }
383 }
384
385 /* Always use a 32-bit offset for forward jumps:
386 */
387 int x86_jcc_forward( struct x86_function *p,
388 enum x86_cc cc )
389 {
390 DUMP_I(cc);
391 emit_2ub(p, 0x0f, 0x80 + cc);
392 emit_1i(p, 0);
393 return x86_get_label(p);
394 }
395
396 int x86_jmp_forward( struct x86_function *p)
397 {
398 DUMP();
399 emit_1ub(p, 0xe9);
400 emit_1i(p, 0);
401 return x86_get_label(p);
402 }
403
404 int x86_call_forward( struct x86_function *p)
405 {
406 DUMP();
407
408 emit_1ub(p, 0xe8);
409 emit_1i(p, 0);
410 return x86_get_label(p);
411 }
412
413 /* Fixup offset from forward jump:
414 */
415 void x86_fixup_fwd_jump( struct x86_function *p,
416 int fixup )
417 {
418 *(int *)(p->store + fixup - 4) = x86_get_label(p) - fixup;
419 }
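/* The two helpers above are used together for forward branches, e.g.:
 *
 *    int fixup = x86_jcc_forward(p, cc);    // cc = desired condition code
 *    ...emit the code to be skipped...
 *    x86_fixup_fwd_jump(p, fixup);          // branch lands here
 */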
420
421 void x86_jmp( struct x86_function *p, int label)
422 {
423 DUMP_I( label );
424 emit_1ub(p, 0xe9);
425 emit_1i(p, label - x86_get_label(p) - 4);
426 }
427
428 void x86_call( struct x86_function *p, struct x86_reg reg)
429 {
430 DUMP_R( reg );
431 emit_1ub(p, 0xff);
432 emit_modrm_noreg(p, 2, reg);
433 }
434
435
436 /* michal:
437 * Temporary.  As I need immediate operands and don't want to mess with the
438 * codegen, I load the immediate into a general-purpose register and use that.
439 */
440 void x86_mov_reg_imm( struct x86_function *p, struct x86_reg dst, int imm )
441 {
442 DUMP_RI( dst, imm );
443 assert(dst.mod == mod_REG);
444 emit_1ub(p, 0xb8 + dst.idx);
445 emit_1i(p, imm);
446 }
447
448 void x86_push( struct x86_function *p,
449 struct x86_reg reg )
450 {
451 DUMP_R( reg );
452 if (reg.mod == mod_REG)
453 emit_1ub(p, 0x50 + reg.idx);
454 else
455 {
456 emit_1ub(p, 0xff);
457 emit_modrm_noreg(p, 6, reg);
458 }
459
460
461 p->stack_offset += 4;
462 }
463
464 void x86_pop( struct x86_function *p,
465 struct x86_reg reg )
466 {
467 DUMP_R( reg );
468 assert(reg.mod == mod_REG);
469 emit_1ub(p, 0x58 + reg.idx);
470 p->stack_offset -= 4;
471 }
472
473 void x86_inc( struct x86_function *p,
474 struct x86_reg reg )
475 {
476 DUMP_R( reg );
477 assert(reg.mod == mod_REG);
478 emit_1ub(p, 0x40 + reg.idx);
479 }
480
481 void x86_dec( struct x86_function *p,
482 struct x86_reg reg )
483 {
484 DUMP_R( reg );
485 assert(reg.mod == mod_REG);
486 emit_1ub(p, 0x48 + reg.idx);
487 }
488
489 void x86_ret( struct x86_function *p )
490 {
491 DUMP();
492 assert(p->stack_offset == 0);
493 emit_1ub(p, 0xc3);
494 }
495
496 void x86_retw( struct x86_function *p, unsigned short imm )
497 {
498 DUMP();
499 emit_3ub(p, 0xc2, imm & 0xff, (imm >> 8) & 0xff);
500 }
501
502 void x86_sahf( struct x86_function *p )
503 {
504 DUMP();
505 emit_1ub(p, 0x9e);
506 }
507
508 void x86_mov( struct x86_function *p,
509 struct x86_reg dst,
510 struct x86_reg src )
511 {
512 DUMP_RR( dst, src );
513 emit_op_modrm( p, 0x8b, 0x89, dst, src );
514 }
515
516 void x86_xor( struct x86_function *p,
517 struct x86_reg dst,
518 struct x86_reg src )
519 {
520 DUMP_RR( dst, src );
521 emit_op_modrm( p, 0x33, 0x31, dst, src );
522 }
523
524 void x86_cmp( struct x86_function *p,
525 struct x86_reg dst,
526 struct x86_reg src )
527 {
528 DUMP_RR( dst, src );
529 emit_op_modrm( p, 0x3b, 0x39, dst, src );
530 }
531
532 void x86_lea( struct x86_function *p,
533 struct x86_reg dst,
534 struct x86_reg src )
535 {
536 DUMP_RR( dst, src );
537 emit_1ub(p, 0x8d);
538 emit_modrm( p, dst, src );
539 }
540
541 void x86_test( struct x86_function *p,
542 struct x86_reg dst,
543 struct x86_reg src )
544 {
545 DUMP_RR( dst, src );
546 emit_1ub(p, 0x85);
547 emit_modrm( p, dst, src );
548 }
549
550 void x86_add( struct x86_function *p,
551 struct x86_reg dst,
552 struct x86_reg src )
553 {
554 DUMP_RR( dst, src );
555 emit_op_modrm(p, 0x03, 0x01, dst, src );
556 }
557
558 /* Unsigned multiply: compute EAX * src, leaving the 64-bit result in EDX:EAX.
559 */
560 void x86_mul( struct x86_function *p,
561 struct x86_reg src )
562 {
563 DUMP_R( src );
564 emit_1ub(p, 0xf7);
565 emit_modrm_noreg(p, 4, src );
566 }
567
568
569 void x86_imul( struct x86_function *p,
570 struct x86_reg dst,
571 struct x86_reg src )
572 {
573 DUMP_RR( dst, src );
574 emit_2ub(p, X86_TWOB, 0xAF);
575 emit_modrm(p, dst, src);
576 }
577
578
579 void x86_sub( struct x86_function *p,
580 struct x86_reg dst,
581 struct x86_reg src )
582 {
583 DUMP_RR( dst, src );
584 emit_op_modrm(p, 0x2b, 0x29, dst, src );
585 }
586
587 void x86_or( struct x86_function *p,
588 struct x86_reg dst,
589 struct x86_reg src )
590 {
591 DUMP_RR( dst, src );
592 emit_op_modrm( p, 0x0b, 0x09, dst, src );
593 }
594
595 void x86_and( struct x86_function *p,
596 struct x86_reg dst,
597 struct x86_reg src )
598 {
599 DUMP_RR( dst, src );
600 emit_op_modrm( p, 0x23, 0x21, dst, src );
601 }
602
603
604
605 /***********************************************************************
606 * SSE instructions
607 */
608
609
610 void sse_movss( struct x86_function *p,
611 struct x86_reg dst,
612 struct x86_reg src )
613 {
614 DUMP_RR( dst, src );
615 emit_2ub(p, 0xF3, X86_TWOB);
616 emit_op_modrm( p, 0x10, 0x11, dst, src );
617 }
618
619 void sse_movaps( struct x86_function *p,
620 struct x86_reg dst,
621 struct x86_reg src )
622 {
623 DUMP_RR( dst, src );
624 emit_1ub(p, X86_TWOB);
625 emit_op_modrm( p, 0x28, 0x29, dst, src );
626 }
627
628 void sse_movups( struct x86_function *p,
629 struct x86_reg dst,
630 struct x86_reg src )
631 {
632 DUMP_RR( dst, src );
633 emit_1ub(p, X86_TWOB);
634 emit_op_modrm( p, 0x10, 0x11, dst, src );
635 }
636
637 void sse_movhps( struct x86_function *p,
638 struct x86_reg dst,
639 struct x86_reg src )
640 {
641 DUMP_RR( dst, src );
642 assert(dst.mod != mod_REG || src.mod != mod_REG);
643 emit_1ub(p, X86_TWOB);
644 emit_op_modrm( p, 0x16, 0x17, dst, src ); /* cf movlhps */
645 }
646
647 void sse_movlps( struct x86_function *p,
648 struct x86_reg dst,
649 struct x86_reg src )
650 {
651 DUMP_RR( dst, src );
652 assert(dst.mod != mod_REG || src.mod != mod_REG);
653 emit_1ub(p, X86_TWOB);
654 emit_op_modrm( p, 0x12, 0x13, dst, src ); /* cf movhlps */
655 }
656
657 void sse_maxps( struct x86_function *p,
658 struct x86_reg dst,
659 struct x86_reg src )
660 {
661 DUMP_RR( dst, src );
662 emit_2ub(p, X86_TWOB, 0x5F);
663 emit_modrm( p, dst, src );
664 }
665
666 void sse_maxss( struct x86_function *p,
667 struct x86_reg dst,
668 struct x86_reg src )
669 {
670 DUMP_RR( dst, src );
671 emit_3ub(p, 0xF3, X86_TWOB, 0x5F);
672 emit_modrm( p, dst, src );
673 }
674
675 void sse_divss( struct x86_function *p,
676 struct x86_reg dst,
677 struct x86_reg src )
678 {
679 DUMP_RR( dst, src );
680 emit_3ub(p, 0xF3, X86_TWOB, 0x5E);
681 emit_modrm( p, dst, src );
682 }
683
684 void sse_minps( struct x86_function *p,
685 struct x86_reg dst,
686 struct x86_reg src )
687 {
688 DUMP_RR( dst, src );
689 emit_2ub(p, X86_TWOB, 0x5D);
690 emit_modrm( p, dst, src );
691 }
692
693 void sse_subps( struct x86_function *p,
694 struct x86_reg dst,
695 struct x86_reg src )
696 {
697 DUMP_RR( dst, src );
698 emit_2ub(p, X86_TWOB, 0x5C);
699 emit_modrm( p, dst, src );
700 }
701
702 void sse_mulps( struct x86_function *p,
703 struct x86_reg dst,
704 struct x86_reg src )
705 {
706 DUMP_RR( dst, src );
707 emit_2ub(p, X86_TWOB, 0x59);
708 emit_modrm( p, dst, src );
709 }
710
711 void sse_mulss( struct x86_function *p,
712 struct x86_reg dst,
713 struct x86_reg src )
714 {
715 DUMP_RR( dst, src );
716 emit_3ub(p, 0xF3, X86_TWOB, 0x59);
717 emit_modrm( p, dst, src );
718 }
719
720 void sse_addps( struct x86_function *p,
721 struct x86_reg dst,
722 struct x86_reg src )
723 {
724 DUMP_RR( dst, src );
725 emit_2ub(p, X86_TWOB, 0x58);
726 emit_modrm( p, dst, src );
727 }
728
729 void sse_addss( struct x86_function *p,
730 struct x86_reg dst,
731 struct x86_reg src )
732 {
733 DUMP_RR( dst, src );
734 emit_3ub(p, 0xF3, X86_TWOB, 0x58);
735 emit_modrm( p, dst, src );
736 }
737
738 void sse_andnps( struct x86_function *p,
739 struct x86_reg dst,
740 struct x86_reg src )
741 {
742 DUMP_RR( dst, src );
743 emit_2ub(p, X86_TWOB, 0x55);
744 emit_modrm( p, dst, src );
745 }
746
747 void sse_andps( struct x86_function *p,
748 struct x86_reg dst,
749 struct x86_reg src )
750 {
751 DUMP_RR( dst, src );
752 emit_2ub(p, X86_TWOB, 0x54);
753 emit_modrm( p, dst, src );
754 }
755
756 void sse_rsqrtps( struct x86_function *p,
757 struct x86_reg dst,
758 struct x86_reg src )
759 {
760 DUMP_RR( dst, src );
761 emit_2ub(p, X86_TWOB, 0x52);
762 emit_modrm( p, dst, src );
763 }
764
765 void sse_rsqrtss( struct x86_function *p,
766 struct x86_reg dst,
767 struct x86_reg src )
768 {
769 DUMP_RR( dst, src );
770 emit_3ub(p, 0xF3, X86_TWOB, 0x52);
771 emit_modrm( p, dst, src );
772
773 }
774
775 void sse_movhlps( struct x86_function *p,
776 struct x86_reg dst,
777 struct x86_reg src )
778 {
779 DUMP_RR( dst, src );
780 assert(dst.mod == mod_REG && src.mod == mod_REG);
781 emit_2ub(p, X86_TWOB, 0x12);
782 emit_modrm( p, dst, src );
783 }
784
785 void sse_movlhps( struct x86_function *p,
786 struct x86_reg dst,
787 struct x86_reg src )
788 {
789 DUMP_RR( dst, src );
790 assert(dst.mod == mod_REG && src.mod == mod_REG);
791 emit_2ub(p, X86_TWOB, 0x16);
792 emit_modrm( p, dst, src );
793 }
794
795 void sse_orps( struct x86_function *p,
796 struct x86_reg dst,
797 struct x86_reg src )
798 {
799 DUMP_RR( dst, src );
800 emit_2ub(p, X86_TWOB, 0x56);
801 emit_modrm( p, dst, src );
802 }
803
804 void sse_xorps( struct x86_function *p,
805 struct x86_reg dst,
806 struct x86_reg src )
807 {
808 DUMP_RR( dst, src );
809 emit_2ub(p, X86_TWOB, 0x57);
810 emit_modrm( p, dst, src );
811 }
812
813 void sse_cvtps2pi( struct x86_function *p,
814 struct x86_reg dst,
815 struct x86_reg src )
816 {
817 DUMP_RR( dst, src );
818 assert(dst.file == file_MMX &&
819 (src.file == file_XMM || src.mod != mod_REG));
820
821 p->need_emms = 1;
822
823 emit_2ub(p, X86_TWOB, 0x2d);
824 emit_modrm( p, dst, src );
825 }
826
827 void sse2_cvtdq2ps( struct x86_function *p,
828 struct x86_reg dst,
829 struct x86_reg src )
830 {
831 DUMP_RR( dst, src );
832 emit_2ub(p, X86_TWOB, 0x5b);
833 emit_modrm( p, dst, src );
834 }
835
836
837 /* Shufps can also be used to implement a reduced swizzle when dest ==
838 * arg0.
839 */
840 void sse_shufps( struct x86_function *p,
841 struct x86_reg dst,
842 struct x86_reg src,
843 unsigned char shuf)
844 {
845 DUMP_RRI( dst, src, shuf );
846 emit_2ub(p, X86_TWOB, 0xC6);
847 emit_modrm(p, dst, src);
848 emit_1ub(p, shuf);
849 }
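/* The immediate is four 2-bit selectors: the low two pick dst elements and
 * the high two pick src elements,
 *
 *    dst[0] = dst[(shuf >> 0) & 3]
 *    dst[1] = dst[(shuf >> 2) & 3]
 *    dst[2] = src[(shuf >> 4) & 3]
 *    dst[3] = src[(shuf >> 6) & 3]
 *
 * which is why a full swizzle is only available when dst == src.
 */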
850
851 void sse_unpckhps( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
852 {
853 DUMP_RR( dst, src );
854 emit_2ub( p, X86_TWOB, 0x15 );
855 emit_modrm( p, dst, src );
856 }
857
858 void sse_unpcklps( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
859 {
860 DUMP_RR( dst, src );
861 emit_2ub( p, X86_TWOB, 0x14 );
862 emit_modrm( p, dst, src );
863 }
864
865 void sse_cmpps( struct x86_function *p,
866 struct x86_reg dst,
867 struct x86_reg src,
868 unsigned char cc)
869 {
870 DUMP_RRI( dst, src, cc );
871 emit_2ub(p, X86_TWOB, 0xC2);
872 emit_modrm(p, dst, src);
873 emit_1ub(p, cc);
874 }
875
876 void sse_pmovmskb( struct x86_function *p,
877 struct x86_reg dst,
878 struct x86_reg src)
879 {
880 DUMP_RR( dst, src );
881 emit_3ub(p, 0x66, X86_TWOB, 0xD7);
882 emit_modrm(p, dst, src);
883 }
884
885 /***********************************************************************
886 * SSE2 instructions
887 */
888
889 /**
890 * Perform a swizzle: all four dwords of dst are selected from src by two-bit fields of 'shuf'.
891 */
892 void sse2_pshufd( struct x86_function *p,
893 struct x86_reg dst,
894 struct x86_reg src,
895 unsigned char shuf)
896 {
897 DUMP_RRI( dst, src, shuf );
898 emit_3ub(p, 0x66, X86_TWOB, 0x70);
899 emit_modrm(p, dst, src);
900 emit_1ub(p, shuf);
901 }
902
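/* Convert four packed floats to dwords, truncating toward zero (the
 * non-"tt" variant below rounds according to MXCSR instead).
 */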
903 void sse2_cvttps2dq( struct x86_function *p,
904 struct x86_reg dst,
905 struct x86_reg src )
906 {
907 DUMP_RR( dst, src );
908 emit_3ub( p, 0xF3, X86_TWOB, 0x5B );
909 emit_modrm( p, dst, src );
910 }
911
912 void sse2_cvtps2dq( struct x86_function *p,
913 struct x86_reg dst,
914 struct x86_reg src )
915 {
916 DUMP_RR( dst, src );
917 emit_3ub(p, 0x66, X86_TWOB, 0x5B);
918 emit_modrm( p, dst, src );
919 }
920
921 void sse2_packssdw( struct x86_function *p,
922 struct x86_reg dst,
923 struct x86_reg src )
924 {
925 DUMP_RR( dst, src );
926 emit_3ub(p, 0x66, X86_TWOB, 0x6B);
927 emit_modrm( p, dst, src );
928 }
929
930 void sse2_packsswb( struct x86_function *p,
931 struct x86_reg dst,
932 struct x86_reg src )
933 {
934 DUMP_RR( dst, src );
935 emit_3ub(p, 0x66, X86_TWOB, 0x63);
936 emit_modrm( p, dst, src );
937 }
938
939 void sse2_packuswb( struct x86_function *p,
940 struct x86_reg dst,
941 struct x86_reg src )
942 {
943 DUMP_RR( dst, src );
944 emit_3ub(p, 0x66, X86_TWOB, 0x67);
945 emit_modrm( p, dst, src );
946 }
947
948 void sse2_punpcklbw( struct x86_function *p,
949 struct x86_reg dst,
950 struct x86_reg src )
951 {
952 DUMP_RR( dst, src );
953 emit_3ub(p, 0x66, X86_TWOB, 0x60);
954 emit_modrm( p, dst, src );
955 }
956
957
958 void sse2_rcpps( struct x86_function *p,
959 struct x86_reg dst,
960 struct x86_reg src )
961 {
962 DUMP_RR( dst, src );
963 emit_2ub(p, X86_TWOB, 0x53);
964 emit_modrm( p, dst, src );
965 }
966
967 void sse2_rcpss( struct x86_function *p,
968 struct x86_reg dst,
969 struct x86_reg src )
970 {
971 DUMP_RR( dst, src );
972 emit_3ub(p, 0xF3, X86_TWOB, 0x53);
973 emit_modrm( p, dst, src );
974 }
975
976 void sse2_movd( struct x86_function *p,
977 struct x86_reg dst,
978 struct x86_reg src )
979 {
980 DUMP_RR( dst, src );
981 emit_2ub(p, 0x66, X86_TWOB);
982 emit_op_modrm( p, 0x6e, 0x7e, dst, src );
983 }
984
985
986
987
988 /***********************************************************************
989 * x87 instructions
990 */
991 void x87_fist( struct x86_function *p, struct x86_reg dst )
992 {
993 DUMP_R( dst );
994 emit_1ub(p, 0xdb);
995 emit_modrm_noreg(p, 2, dst);
996 }
997
998 void x87_fistp( struct x86_function *p, struct x86_reg dst )
999 {
1000 DUMP_R( dst );
1001 emit_1ub(p, 0xdb);
1002 emit_modrm_noreg(p, 3, dst);
1003 }
1004
1005 void x87_fild( struct x86_function *p, struct x86_reg arg )
1006 {
1007 DUMP_R( arg );
1008 emit_1ub(p, 0xdf);
1009 emit_modrm_noreg(p, 0, arg);
1010 }
1011
1012 void x87_fldz( struct x86_function *p )
1013 {
1014 DUMP();
1015 emit_2ub(p, 0xd9, 0xee);
1016 }
1017
1018
1019 void x87_fldcw( struct x86_function *p, struct x86_reg arg )
1020 {
1021 DUMP_R( arg );
1022 assert(arg.file == file_REG32);
1023 assert(arg.mod != mod_REG);
1024 emit_1ub(p, 0xd9);
1025 emit_modrm_noreg(p, 5, arg);
1026 }
1027
1028 void x87_fld1( struct x86_function *p )
1029 {
1030 DUMP();
1031 emit_2ub(p, 0xd9, 0xe8);
1032 }
1033
1034 void x87_fldl2e( struct x86_function *p )
1035 {
1036 DUMP();
1037 emit_2ub(p, 0xd9, 0xea);
1038 }
1039
1040 void x87_fldln2( struct x86_function *p )
1041 {
1042 DUMP();
1043 emit_2ub(p, 0xd9, 0xed);
1044 }
1045
1046 void x87_fwait( struct x86_function *p )
1047 {
1048 DUMP();
1049 emit_1ub(p, 0x9b);
1050 }
1051
1052 void x87_fnclex( struct x86_function *p )
1053 {
1054 DUMP();
1055 emit_2ub(p, 0xdb, 0xe2);
1056 }
1057
1058 void x87_fclex( struct x86_function *p )
1059 {
1060 x87_fwait(p);
1061 x87_fnclex(p);
1062 }
1063
1064
1065 static void x87_arith_op( struct x86_function *p, struct x86_reg dst, struct x86_reg arg,
1066 unsigned char dst0ub0,
1067 unsigned char dst0ub1,
1068 unsigned char arg0ub0,
1069 unsigned char arg0ub1,
1070 unsigned char argmem_noreg)
1071 {
1072 assert(dst.file == file_x87);
1073
1074 if (arg.file == file_x87) {
1075 if (dst.idx == 0)
1076 emit_2ub(p, dst0ub0, dst0ub1+arg.idx);   /* op st(0), st(i) */
1077 else if (arg.idx == 0)
1078 emit_2ub(p, arg0ub0, arg0ub1+dst.idx);   /* op st(i), st(0) */
1079 else
1080 assert(0);
1081 }
1082 else if (dst.idx == 0) {
1083 assert(arg.file == file_REG32);
1084 emit_1ub(p, 0xd8);
1085 emit_modrm_noreg(p, argmem_noreg, arg);
1086 }
1087 else
1088 assert(0);
1089 }
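/* Worked example for the register forms above, writing st(i) for
 * x86_make_reg(file_x87, i):
 *
 *    x87_fadd(p, st(0), st(2))  ->  D8 C2   (fadd st(0), st(2))
 *    x87_fadd(p, st(1), st(0))  ->  DC C1   (fadd st(1), st(0))
 */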
1090
1091 void x87_fmul( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1092 {
1093 DUMP_RR( dst, src );
1094 x87_arith_op(p, dst, src,
1095 0xd8, 0xc8,
1096 0xdc, 0xc8,
1097 4);
1098 }
1099
1100 void x87_fsub( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1101 {
1102 DUMP_RR( dst, src );
1103 x87_arith_op(p, dst, src,
1104 0xd8, 0xe0,
1105 0xdc, 0xe8,
1106 4);
1107 }
1108
1109 void x87_fsubr( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1110 {
1111 DUMP_RR( dst, src );
1112 x87_arith_op(p, dst, src,
1113 0xd8, 0xe8,
1114 0xdc, 0xe0,
1115 5);
1116 }
1117
1118 void x87_fadd( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1119 {
1120 DUMP_RR( dst, src );
1121 x87_arith_op(p, dst, src,
1122 0xd8, 0xc0,
1123 0xdc, 0xc0,
1124 0);
1125 }
1126
1127 void x87_fdiv( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1128 {
1129 DUMP_RR( dst, src );
1130 x87_arith_op(p, dst, src,
1131 0xd8, 0xf0,
1132 0xdc, 0xf8,
1133 6);
1134 }
1135
1136 void x87_fdivr( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
1137 {
1138 DUMP_RR( dst, src );
1139 x87_arith_op(p, dst, src,
1140 0xd8, 0xf8,
1141 0xdc, 0xf0,
1142 7);
1143 }
1144
1145 void x87_fmulp( struct x86_function *p, struct x86_reg dst )
1146 {
1147 DUMP_R( dst );
1148 assert(dst.file == file_x87);
1149 assert(dst.idx >= 1);
1150 emit_2ub(p, 0xde, 0xc8+dst.idx);
1151 }
1152
1153 void x87_fsubp( struct x86_function *p, struct x86_reg dst )
1154 {
1155 DUMP_R( dst );
1156 assert(dst.file == file_x87);
1157 assert(dst.idx >= 1);
1158 emit_2ub(p, 0xde, 0xe8+dst.idx);
1159 }
1160
1161 void x87_fsubrp( struct x86_function *p, struct x86_reg dst )
1162 {
1163 DUMP_R( dst );
1164 assert(dst.file == file_x87);
1165 assert(dst.idx >= 1);
1166 emit_2ub(p, 0xde, 0xe0+dst.idx);
1167 }
1168
1169 void x87_faddp( struct x86_function *p, struct x86_reg dst )
1170 {
1171 DUMP_R( dst );
1172 assert(dst.file == file_x87);
1173 assert(dst.idx >= 1);
1174 emit_2ub(p, 0xde, 0xc0+dst.idx);
1175 }
1176
1177 void x87_fdivp( struct x86_function *p, struct x86_reg dst )
1178 {
1179 DUMP_R( dst );
1180 assert(dst.file == file_x87);
1181 assert(dst.idx >= 1);
1182 emit_2ub(p, 0xde, 0xf8+dst.idx);
1183 }
1184
1185 void x87_fdivrp( struct x86_function *p, struct x86_reg dst )
1186 {
1187 DUMP_R( dst );
1188 assert(dst.file == file_x87);
1189 assert(dst.idx >= 1);
1190 emit_2ub(p, 0xde, 0xf0+dst.idx);
1191 }
1192
1193 void x87_fucom( struct x86_function *p, struct x86_reg arg )
1194 {
1195 DUMP_R( arg );
1196 assert(arg.file == file_x87);
1197 emit_2ub(p, 0xdd, 0xe0+arg.idx);
1198 }
1199
1200 void x87_fucomp( struct x86_function *p, struct x86_reg arg )
1201 {
1202 DUMP_R( arg );
1203 assert(arg.file == file_x87);
1204 emit_2ub(p, 0xdd, 0xe8+arg.idx);
1205 }
1206
1207 void x87_fucompp( struct x86_function *p )
1208 {
1209 DUMP();
1210 emit_2ub(p, 0xda, 0xe9);
1211 }
1212
1213 void x87_fxch( struct x86_function *p, struct x86_reg arg )
1214 {
1215 DUMP_R( arg );
1216 assert(arg.file == file_x87);
1217 emit_2ub(p, 0xd9, 0xc8+arg.idx);
1218 }
1219
1220 void x87_fabs( struct x86_function *p )
1221 {
1222 DUMP();
1223 emit_2ub(p, 0xd9, 0xe1);
1224 }
1225
1226 void x87_fchs( struct x86_function *p )
1227 {
1228 DUMP();
1229 emit_2ub(p, 0xd9, 0xe0);
1230 }
1231
1232 void x87_fcos( struct x86_function *p )
1233 {
1234 DUMP();
1235 emit_2ub(p, 0xd9, 0xff);
1236 }
1237
1238
1239 void x87_fprndint( struct x86_function *p )
1240 {
1241 DUMP();
1242 emit_2ub(p, 0xd9, 0xfc);
1243 }
1244
1245 void x87_fscale( struct x86_function *p )
1246 {
1247 DUMP();
1248 emit_2ub(p, 0xd9, 0xfd);
1249 }
1250
1251 void x87_fsin( struct x86_function *p )
1252 {
1253 DUMP();
1254 emit_2ub(p, 0xd9, 0xfe);
1255 }
1256
1257 void x87_fsincos( struct x86_function *p )
1258 {
1259 DUMP();
1260 emit_2ub(p, 0xd9, 0xfb);
1261 }
1262
1263 void x87_fsqrt( struct x86_function *p )
1264 {
1265 DUMP();
1266 emit_2ub(p, 0xd9, 0xfa);
1267 }
1268
1269 void x87_fxtract( struct x86_function *p )
1270 {
1271 DUMP();
1272 emit_2ub(p, 0xd9, 0xf4);
1273 }
1274
1275 /* st0 = (2^st0)-1
1276 *
1277 * Restrictions: -1.0 <= st0 <= 1.0
1278 */
1279 void x87_f2xm1( struct x86_function *p )
1280 {
1281 DUMP();
1282 emit_2ub(p, 0xd9, 0xf0);
1283 }
1284
1285 /* st1 = st1 * log2(st0);
1286 * pop_stack;
1287 */
1288 void x87_fyl2x( struct x86_function *p )
1289 {
1290 DUMP();
1291 emit_2ub(p, 0xd9, 0xf1);
1292 }
1293
1294 /* st1 = st1 * log2(st0 + 1.0);
1295 * pop_stack;
1296 *
1297 * A fast operation, with restrictions: -.29 < st0 < .29
1298 */
1299 void x87_fyl2xp1( struct x86_function *p )
1300 {
1301 DUMP();
1302 emit_2ub(p, 0xd9, 0xf9);
1303 }
1304
1305
1306 void x87_fld( struct x86_function *p, struct x86_reg arg )
1307 {
1308 DUMP_R( arg );
1309 if (arg.file == file_x87)
1310 emit_2ub(p, 0xd9, 0xc0 + arg.idx);
1311 else {
1312 emit_1ub(p, 0xd9);
1313 emit_modrm_noreg(p, 0, arg);
1314 }
1315 }
1316
1317 void x87_fst( struct x86_function *p, struct x86_reg dst )
1318 {
1319 DUMP_R( dst );
1320 if (dst.file == file_x87)
1321 emit_2ub(p, 0xdd, 0xd0 + dst.idx);
1322 else {
1323 emit_1ub(p, 0xd9);
1324 emit_modrm_noreg(p, 2, dst);
1325 }
1326 }
1327
1328 void x87_fstp( struct x86_function *p, struct x86_reg dst )
1329 {
1330 DUMP_R( dst );
1331 if (dst.file == file_x87)
1332 emit_2ub(p, 0xdd, 0xd8 + dst.idx);
1333 else {
1334 emit_1ub(p, 0xd9);
1335 emit_modrm_noreg(p, 3, dst);
1336 }
1337 }
1338
1339 void x87_fcom( struct x86_function *p, struct x86_reg dst )
1340 {
1341 DUMP_R( dst );
1342 if (dst.file == file_x87)
1343 emit_2ub(p, 0xd8, 0xd0 + dst.idx);
1344 else {
1345 emit_1ub(p, 0xd8);
1346 emit_modrm_noreg(p, 2, dst);
1347 }
1348 }
1349
1350 void x87_fcomp( struct x86_function *p, struct x86_reg dst )
1351 {
1352 DUMP_R( dst );
1353 if (dst.file == file_x87)
1354 emit_2ub(p, 0xd8, 0xd8 + dst.idx);
1355 else {
1356 emit_1ub(p, 0xd8);
1357 emit_modrm_noreg(p, 3, dst);
1358 }
1359 }
1360
1361
1362 void x87_fnstsw( struct x86_function *p, struct x86_reg dst )
1363 {
1364 DUMP_R( dst );
1365 assert(dst.file == file_REG32);
1366
1367 if (dst.idx == reg_AX &&
1368 dst.mod == mod_REG)
1369 emit_2ub(p, 0xdf, 0xe0);
1370 else {
1371 emit_1ub(p, 0xdd);
1372 emit_modrm_noreg(p, 7, dst);
1373 }
1374 }
1375
1376
1377
1378
1379 /***********************************************************************
1380 * MMX instructions
1381 */
1382
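/* The MMX and x87 register files alias each other, so any code that has
 * emitted MMX instructions (these set p->need_emms) must emit an EMMS
 * before x87 instructions run or before returning to FPU-using callers.
 */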
1383 void mmx_emms( struct x86_function *p )
1384 {
1385 DUMP();
1386 assert(p->need_emms);
1387 emit_2ub(p, 0x0f, 0x77);
1388 p->need_emms = 0;
1389 }
1390
1391 void mmx_packssdw( struct x86_function *p,
1392 struct x86_reg dst,
1393 struct x86_reg src )
1394 {
1395 DUMP_RR( dst, src );
1396 assert(dst.file == file_MMX &&
1397 (src.file == file_MMX || src.mod != mod_REG));
1398
1399 p->need_emms = 1;
1400
1401 emit_2ub(p, X86_TWOB, 0x6b);
1402 emit_modrm( p, dst, src );
1403 }
1404
1405 void mmx_packuswb( struct x86_function *p,
1406 struct x86_reg dst,
1407 struct x86_reg src )
1408 {
1409 DUMP_RR( dst, src );
1410 assert(dst.file == file_MMX &&
1411 (src.file == file_MMX || src.mod != mod_REG));
1412
1413 p->need_emms = 1;
1414
1415 emit_2ub(p, X86_TWOB, 0x67);
1416 emit_modrm( p, dst, src );
1417 }
1418
1419 void mmx_movd( struct x86_function *p,
1420 struct x86_reg dst,
1421 struct x86_reg src )
1422 {
1423 DUMP_RR( dst, src );
1424 p->need_emms = 1;
1425 emit_1ub(p, X86_TWOB);
1426 emit_op_modrm( p, 0x6e, 0x7e, dst, src );
1427 }
1428
1429 void mmx_movq( struct x86_function *p,
1430 struct x86_reg dst,
1431 struct x86_reg src )
1432 {
1433 DUMP_RR( dst, src );
1434 p->need_emms = 1;
1435 emit_1ub(p, X86_TWOB);
1436 emit_op_modrm( p, 0x6f, 0x7f, dst, src );
1437 }
1438
1439
1440 /***********************************************************************
1441 * Helper functions
1442 */
1443
1444
1445 /* Retrieve a reference to one of the function arguments, taking into
1446 * account any push/pop activity:
1447 */
1448 struct x86_reg x86_fn_arg( struct x86_function *p,
1449 unsigned arg )
1450 {
1451 return x86_make_disp(x86_make_reg(file_REG32, reg_SP),
1452 p->stack_offset + arg * 4); /* ??? */
1453 }
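/* Example, assuming the usual cdecl layout where the return address sits
 * at [ESP]: with nothing pushed yet, x86_fn_arg(p, 1) refers to [ESP+4],
 * i.e. the first argument; each x86_push() adds 4 to stack_offset so the
 * same call keeps referring to the same argument afterwards.
 */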
1454
1455
1456 void x86_init_func( struct x86_function *p )
1457 {
1458 p->size = 0;
1459 p->store = NULL;
1460 p->csr = p->store;
1461 DUMP_START();
1462 }
1463
1464 void x86_init_func_size( struct x86_function *p, unsigned code_size )
1465 {
1466 p->size = code_size;
1467 p->store = rtasm_exec_malloc(code_size);
1468 if (p->store == NULL) {
1469 p->store = p->error_overflow;
1470 }
1471 p->csr = p->store;
1472 DUMP_START();
1473 }
1474
1475 void x86_release_func( struct x86_function *p )
1476 {
1477 if (p->store && p->store != p->error_overflow)
1478 rtasm_exec_free(p->store);
1479
1480 p->store = NULL;
1481 p->csr = NULL;
1482 p->size = 0;
1483 }
1484
1485
1486 void (*x86_get_func( struct x86_function *p ))(void)
1487 {
1488 DUMP_END();
1489 if (DISASSEM && p->store)
1490 debug_printf("disassemble %p %p\n", p->store, p->csr);
1491
1492 if (p->store == p->error_overflow)
1493 return (void (*)(void)) NULL;
1494 else
1495 return (void (*)(void)) p->store;
1496 }
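/* Illustrative sketch of the overall life-cycle (the variable names and
 * the int(int) signature are made up for the example):
 *
 *    struct x86_function f;
 *    int (*func)(int);
 *
 *    x86_init_func(&f);
 *    x86_mov(&f, x86_make_reg(file_REG32, reg_AX), x86_fn_arg(&f, 1));
 *    x86_ret(&f);
 *
 *    func = (int (*)(int)) x86_get_func(&f);   // NULL if allocation failed
 *    if (func)
 *       assert(func(42) == 42);                // returns its first argument
 *    x86_release_func(&f);
 */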
1497
1498 #else
1499
1500 void x86sse_dummy( void )
1501 {
1502 }
1503
1504 #endif