Remove old t_vertex.c codegen infrastructure, tie in new code.
[mesa.git] src/mesa/tnl/t_vertex_sse.c
/*
 * Copyright 2003 Tungsten Graphics, inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keithw@tungstengraphics.com>
 */

#include "glheader.h"
#include "context.h"
#include "colormac.h"
#include "t_context.h"
#include "t_vertex.h"
#include "simple_list.h"

#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#define X 0
#define Y 1
#define Z 2
#define W 3

#define DISASSEM 1

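/* Minimal description of an x86 operand: either a plain register
 * (mod_REG) or a register-indirect memory reference with an optional
 * displacement.  This is what the modrm emitters below consume.
 */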
struct x86_reg {
   GLuint file:3;
   GLuint idx:3;
   GLuint mod:2;		/* mod_REG if this is just a register */
   GLint  disp:24;		/* only +/- 23bits of offset - should be enough... */
};

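/* State for one code-generation run.  "csr" is the write cursor into
 * the "store" buffer.  inputs_safe/outputs_safe record whether it is
 * acceptable to read or write one extra dword past a 3-component
 * value (see emit_load3f_3 and emit_store3f).
 */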
struct x86_program {
   GLcontext *ctx;

   GLubyte *store;
   GLubyte *csr;

   GLuint stack_offset;

   GLboolean inputs_safe;
   GLboolean outputs_safe;

   struct x86_reg identity;
   struct x86_reg vp0;
   struct x86_reg vp1;
};


#define X86_TWOB 0x0f

/* There are more but these are all we'll use:
 */
enum x86_reg_file {
   file_REG32,
   file_XMM
};

/* Values for mod field of modr/m byte
 */
enum x86_reg_mod {
   mod_INDIRECT,
   mod_DISP8,
   mod_DISP32,
   mod_REG
};
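/* Note that the enumerator values above (0..3) are exactly the 2-bit
 * encodings that emit_modrm() places in bits 7:6 of the modrm byte.
 */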

enum x86_reg_name {
   reg_AX,
   reg_CX,
   reg_DX,
   reg_BX,
   reg_SP,
   reg_BP,
   reg_SI,
   reg_DI
};


enum x86_cc {
   cc_O,			/* overflow */
   cc_NO,			/* not overflow */
   cc_NAE,			/* not above or equal / carry */
   cc_AE,			/* above or equal / not carry */
   cc_E,			/* equal / zero */
   cc_NE			/* not equal / not zero */
};

#define cc_Z  cc_E
#define cc_NZ cc_NE


/* Create and manipulate registers and regmem values:
 */
static struct x86_reg make_reg( GLuint file,
                                GLuint idx )
{
   struct x86_reg reg;

   reg.file = file;
   reg.idx = idx;
   reg.mod = mod_REG;
   reg.disp = 0;

   return reg;
}

static struct x86_reg make_disp( struct x86_reg reg,
                                 GLint disp )
{
   assert(reg.file == file_REG32);

   if (reg.mod == mod_REG)
      reg.disp = disp;
   else
      reg.disp += disp;

   if (reg.disp == 0)
      reg.mod = mod_INDIRECT;
   else if (reg.disp <= 127 && reg.disp >= -128)
      reg.mod = mod_DISP8;
   else
      reg.mod = mod_DISP32;

   return reg;
}
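/* Example: make_disp(make_reg(file_REG32, reg_SI), 8) describes the
 * memory operand [esi+8], which emit_modrm() encodes with mod_DISP8.
 */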

static struct x86_reg deref( struct x86_reg reg )
{
   return make_disp(reg, 0);
}

static struct x86_reg get_base_reg( struct x86_reg reg )
{
   return make_reg( reg.file, reg.idx );
}


/* Retrieve a reference to one of the function arguments, taking into
 * account any push/pop activity:
 */
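/* A sketch of the assumption here: arguments are fetched straight off
 * the stack (cdecl-style), so at function entry arg N (1-based) lives
 * at [esp + 4*N], just above the return address; stack_offset then
 * compensates for any pushes the generated code has done since.
 */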
static struct x86_reg make_fn_arg( struct x86_program *p,
                                   GLuint arg )
{
   return make_disp(make_reg(file_REG32, reg_SP),
                    p->stack_offset + arg * 4);	/* ??? */
}


static struct x86_reg get_identity( struct x86_program *p )
{
   return p->identity;
}

static struct x86_reg get_sse_temp( struct x86_program *p )
{
   return make_reg(file_XMM, 7); /* hardwired */
}

static void release_temp( struct x86_program *p,
                          struct x86_reg reg )
{
   assert(reg.file == file_XMM &&
          reg.idx == 7);
}

/* Emit bytes to the instruction stream:
 */
static void emit_1b( struct x86_program *p, GLbyte b0 )
{
   *(GLbyte *)(p->csr++) = b0;
}

static void emit_1i( struct x86_program *p, GLint i0 )
{
   *(GLint *)(p->csr) = i0;
   p->csr += 4;
}

static void disassem( struct x86_program *p, const char *fn )
{
#if DISASSEM
   static const char *last_fn;
   if (fn && fn != last_fn) {
      _mesa_printf("0x%x: %s\n", p->csr, fn);
      last_fn = fn;
   }
#endif
}

static void emit_1ub_fn( struct x86_program *p, GLubyte b0, const char *fn )
{
   disassem(p, fn);
   *(p->csr++) = b0;
}

static void emit_2ub_fn( struct x86_program *p, GLubyte b0, GLubyte b1, const char *fn )
{
   disassem(p, fn);
   *(p->csr++) = b0;
   *(p->csr++) = b1;
}

static void emit_3ub_fn( struct x86_program *p, GLubyte b0, GLubyte b1, GLubyte b2, const char *fn )
{
   disassem(p, fn);
   *(p->csr++) = b0;
   *(p->csr++) = b1;
   *(p->csr++) = b2;
}

#define emit_1ub(p, b0)         emit_1ub_fn(p, b0, __FUNCTION__)
#define emit_2ub(p, b0, b1)     emit_2ub_fn(p, b0, b1, __FUNCTION__)
#define emit_3ub(p, b0, b1, b2) emit_3ub_fn(p, b0, b1, b2, __FUNCTION__)


/* Labels, jumps and fixup:
 */
static GLubyte *get_label( struct x86_program *p )
{
   return p->csr;
}

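/* Conditional jump to a known label.  Jcc offsets are relative to the
 * end of the instruction, hence the +2 for the short form (0x70+cc,
 * rel8) and +6 for the long form (0x0f 0x80+cc, rel32) below.
 */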
static void emit_jcc( struct x86_program *p,
                      GLuint cc,
                      GLubyte *label )
{
   GLint offset = label - (get_label(p) + 2);

   if (offset <= 127 && offset >= -128) {
      emit_1ub(p, 0x70 + cc);
      emit_1b(p, (GLbyte) offset);
   }
   else {
      offset = label - (get_label(p) + 6);
      emit_2ub(p, 0x0f, 0x80 + cc);
      emit_1i(p, offset);
   }
}

/* Always use a 32bit offset for forward jumps:
 */
static GLubyte *emit_jcc_forward( struct x86_program *p,
                                  GLuint cc )
{
   emit_2ub(p, 0x0f, 0x80 + cc);
   emit_1i(p, 0);
   return get_label(p);
}

/* Fixup offset from forward jump:
 */
static void do_fixup( struct x86_program *p,
                      GLubyte *fixup )
{
   *(int *)(fixup - 4) = get_label(p) - fixup;
}

static void emit_push( struct x86_program *p,
                       struct x86_reg reg )
{
   assert(reg.mod == mod_REG);
   emit_1ub(p, 0x50 + reg.idx);
   p->stack_offset += 4;
}

static void emit_pop( struct x86_program *p,
                      struct x86_reg reg )
{
   assert(reg.mod == mod_REG);
   emit_1ub(p, 0x58 + reg.idx);
   p->stack_offset -= 4;
}

static void emit_inc( struct x86_program *p,
                      struct x86_reg reg )
{
   assert(reg.mod == mod_REG);
   emit_1ub(p, 0x40 + reg.idx);
}

static void emit_dec( struct x86_program *p,
                      struct x86_reg reg )
{
   assert(reg.mod == mod_REG);
   emit_1ub(p, 0x48 + reg.idx);
}

static void emit_ret( struct x86_program *p )
{
   emit_1ub(p, 0xc3);
}




/* Build a modRM byte + possible displacement.  No treatment of SIB
 * indexing.  BZZT - no way to encode an absolute address.
 */
static void emit_modrm( struct x86_program *p,
                        struct x86_reg reg,
                        struct x86_reg regmem )
{
   GLubyte val = 0;

   assert(reg.mod == mod_REG);

   val |= regmem.mod << 6;	/* mod field */
   val |= reg.idx << 3;		/* reg field */
   val |= regmem.idx;		/* r/m field */

   emit_1ub_fn(p, val, 0);

   /* Oh-oh we've stumbled into the SIB thing.
    */
   if (regmem.idx == reg_SP) {
      emit_1ub_fn(p, 0x24, 0);		/* simplistic! */
   }

   switch (regmem.mod) {
   case mod_REG:
   case mod_INDIRECT:
      break;
   case mod_DISP8:
      emit_1b(p, regmem.disp);
      break;
   case mod_DISP32:
      emit_1i(p, regmem.disp);
      break;
   }
}
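/* For example, with a register operand EAX and a memory operand
 * [esi+8], emit_modrm() produces the byte 0x46 (mod=01, reg=000,
 * r/m=110) followed by the 8-bit displacement 0x08.
 */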

/* Many x86 instructions have two opcodes to cope with the situations
 * where the destination is a register or memory reference
 * respectively.  This function selects the correct opcode based on
 * the arguments presented.
 */
static void emit_op_modrm( struct x86_program *p,
                           GLubyte op_dst_is_reg,
                           GLubyte op_dst_is_mem,
                           struct x86_reg dst,
                           struct x86_reg src )
{
   switch (dst.mod) {
   case mod_REG:
      emit_1ub_fn(p, op_dst_is_reg, 0);
      emit_modrm(p, dst, src);
      break;
   case mod_INDIRECT:
   case mod_DISP32:
   case mod_DISP8:
      assert(src.mod == mod_REG);
      emit_1ub_fn(p, op_dst_is_mem, 0);
      emit_modrm(p, src, dst);
      break;
   }
}
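/* For instance, emit_mov() below passes 0x8b (mov reg <- r/m) and
 * 0x89 (mov r/m <- reg); which one is emitted depends on whether the
 * destination is a register or a memory reference.
 */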

static void emit_mov( struct x86_program *p,
                      struct x86_reg dst,
                      struct x86_reg src )
{
   emit_op_modrm( p, 0x8b, 0x89, dst, src );
}

static void emit_xor( struct x86_program *p,
                      struct x86_reg dst,
                      struct x86_reg src )
{
   emit_op_modrm( p, 0x33, 0x31, dst, src );
}

static void emit_cmp( struct x86_program *p,
                      struct x86_reg dst,
                      struct x86_reg src )
{
   emit_op_modrm( p, 0x3b, 0x39, dst, src );
}

static void emit_movlps( struct x86_program *p,
                         struct x86_reg dst,
                         struct x86_reg src )
{
   emit_1ub(p, X86_TWOB);
   emit_op_modrm( p, 0x12, 0x13, dst, src );
}

static void emit_movhps( struct x86_program *p,
                         struct x86_reg dst,
                         struct x86_reg src )
{
   emit_1ub(p, X86_TWOB);
   emit_op_modrm( p, 0x16, 0x17, dst, src );
}

static void emit_movd( struct x86_program *p,
                       struct x86_reg dst,
                       struct x86_reg src )
{
   emit_2ub(p, 0x66, X86_TWOB);
   emit_op_modrm( p, 0x6e, 0x7e, dst, src );
}

static void emit_movss( struct x86_program *p,
                        struct x86_reg dst,
                        struct x86_reg src )
{
   emit_2ub(p, 0xF3, X86_TWOB);
   emit_op_modrm( p, 0x10, 0x11, dst, src );
}

static void emit_movaps( struct x86_program *p,
                         struct x86_reg dst,
                         struct x86_reg src )
{
   emit_1ub(p, X86_TWOB);
   emit_op_modrm( p, 0x28, 0x29, dst, src );
}

static void emit_movups( struct x86_program *p,
                         struct x86_reg dst,
                         struct x86_reg src )
{
   emit_1ub(p, X86_TWOB);
   emit_op_modrm( p, 0x10, 0x11, dst, src );
}

/* SSE operations often only have one format, with dest constrained to
 * be a register:
 */
static void emit_mulps( struct x86_program *p,
                        struct x86_reg dst,
                        struct x86_reg src )
{
   emit_2ub(p, X86_TWOB, 0x59);
   emit_modrm( p, dst, src );
}

static void emit_addps( struct x86_program *p,
                        struct x86_reg dst,
                        struct x86_reg src )
{
   emit_2ub(p, X86_TWOB, 0x58);
   emit_modrm( p, dst, src );
}

static void emit_cvtps2dq( struct x86_program *p,
                           struct x86_reg dst,
                           struct x86_reg src )
{
   emit_3ub(p, 0x66, X86_TWOB, 0x5B);
   emit_modrm( p, dst, src );
}

static void emit_packssdw( struct x86_program *p,
                           struct x86_reg dst,
                           struct x86_reg src )
{
   emit_3ub(p, 0x66, X86_TWOB, 0x6B);
   emit_modrm( p, dst, src );
}

static void emit_packsswb( struct x86_program *p,
                           struct x86_reg dst,
                           struct x86_reg src )
{
   emit_3ub(p, 0x66, X86_TWOB, 0x63);
   emit_modrm( p, dst, src );
}

static void emit_packuswb( struct x86_program *p,
                           struct x86_reg dst,
                           struct x86_reg src )
{
   emit_3ub(p, 0x66, X86_TWOB, 0x67);
   emit_modrm( p, dst, src );
}

/* Load effective address:
 */
static void emit_lea( struct x86_program *p,
                      struct x86_reg dst,
                      struct x86_reg src )
{
   emit_1ub(p, 0x8d);
   emit_modrm( p, dst, src );
}

static void emit_add_imm( struct x86_program *p,
                          struct x86_reg dst,
                          struct x86_reg src,
                          GLint value )
{
   emit_lea(p, dst, make_disp(src, value));
}

static void emit_test( struct x86_program *p,
                       struct x86_reg dst,
                       struct x86_reg src )
{
   emit_1ub(p, 0x85);
   emit_modrm( p, dst, src );
}




/**
 * Perform a reduced swizzle:
 */
static void emit_pshufd( struct x86_program *p,
                         struct x86_reg dest,
                         struct x86_reg arg0,
                         GLubyte x,
                         GLubyte y,
                         GLubyte z,
                         GLubyte w)
{
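   /* The PSHUFD immediate selects, for each destination lane i, the
    * source lane given by bits [2i+1:2i]; packing x|(y<<2)|(z<<4)|(w<<6)
    * therefore routes src[x] to dest.x, src[y] to dest.y, and so on.
    */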
   emit_3ub(p, 0x66, X86_TWOB, 0x70);
   emit_modrm(p, dest, arg0);
   emit_1ub(p, (x | (y << 2) | (z << 4) | (w << 6)));
}

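/* Pack four floats into four unsigned bytes: convert to dwords with
 * cvtps2dq, then narrow twice with saturating packs.  Callers multiply
 * by chan0 first, which presumably holds the 0..255 channel scale.
 */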
static void emit_pk4ub( struct x86_program *p,
                        struct x86_reg dest,
                        struct x86_reg arg0 )
{
   emit_cvtps2dq(p, dest, arg0);
   emit_packssdw(p, dest, dest);
   emit_packuswb(p, dest, dest);
}

static void emit_load4f_4( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_movups(p, dest, arg0);
}

static void emit_load4f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Have to jump through some hoops:
    *
    * 0 0 0 1 -- skip if reg[3] preserved over loop iterations
    * c 0 0 1
    * 0 0 c 1
    * a b c 1
    */
   emit_movups(p, dest, get_identity(p));
   emit_movss(p, dest, make_disp(arg0, 8));
   emit_pshufd(p, dest, dest, Y,Z,X,W );
   emit_movlps(p, dest, arg0);
}

static void emit_load4f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Pull in 2 dwords, then copy the top 2 dwords with 0,1 from id.
    */
   emit_movlps(p, dest, arg0);
   emit_movhps(p, dest, get_identity(p));
}

static void emit_load4f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Initialized with [0,0,0,1] from id, then pull in the single low
    * word.
    */
   emit_movups(p, dest, get_identity(p));
   emit_movss(p, dest, arg0);
}



static void emit_load3f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Over-reads by 1 dword - potential SEGV if input is a vertex
    * array.
    */
   if (p->inputs_safe) {
      emit_movups(p, dest, arg0);
   }
   else {
      /* c . . .
       * c c c c
       * a b c c
       */
      emit_movss(p, dest, make_disp(arg0, 8));
      emit_pshufd(p, dest, dest, X,X,X,X);
      emit_movlps(p, dest, arg0);
   }
}

static void emit_load3f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_2(p, dest, arg0);
}

static void emit_load3f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_1(p, dest, arg0);
}

static void emit_load2f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_movlps(p, dest, arg0);
}

static void emit_load2f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_1(p, dest, arg0);
}

static void emit_load1f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_movss(p, dest, arg0);
}

static void (*load[4][4])( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 ) = {
   { emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1 },

   { emit_load2f_1,
     emit_load2f_2,
     emit_load2f_2,
     emit_load2f_2 },

   { emit_load3f_1,
     emit_load3f_2,
     emit_load3f_3,
     emit_load3f_3 },

   { emit_load4f_1,
     emit_load4f_2,
     emit_load4f_3,
     emit_load4f_4 }
};
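/* Dispatch table indexed by [dest components - 1][source components - 1].
 * When the source has fewer components than requested, the missing ones
 * are filled in from the identity vector (0,0,0,1).
 */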

static void emit_load( struct x86_program *p,
                       struct x86_reg dest,
                       GLuint sz,
                       struct x86_reg src,
                       GLuint src_sz)
{
   _mesa_printf("load %d/%d\n", sz, src_sz);
   load[sz-1][src_sz-1](p, dest, src);
}


static void emit_store4f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   emit_movups(p, dest, arg0);
}

static void emit_store3f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   if (p->outputs_safe) {
      /* Emit the extra dword anyway.  This may hurt writecombining,
       * may cause other problems.
       */
      emit_movups(p, dest, arg0);
   }
   else {
      /* Alternate strategy - emit two, shuffle, emit one.
       */
      struct x86_reg tmp = get_sse_temp(p);
      emit_movlps(p, dest, arg0);

      emit_pshufd(p, tmp, arg0, Z, Z, Z, Z );
      emit_movss(p, make_disp(dest,8), tmp);
      release_temp(p, tmp);
   }
}

static void emit_store2f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   emit_movlps(p, dest, arg0);
}

static void emit_store1f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   emit_movss(p, dest, arg0);
}


static void (*store[4])( struct x86_program *p,
                         struct x86_reg dest,
                         struct x86_reg arg0 ) =
{
   emit_store1f,
   emit_store2f,
   emit_store3f,
   emit_store4f
};

static void emit_store( struct x86_program *p,
                        struct x86_reg dest,
                        GLuint sz,
                        struct x86_reg temp )
{
   store[sz-1](p, dest, temp);
}


static GLint get_offset( const void *a, const void *b )
{
   return (const char *)b - (const char *)a;
}
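/* Byte offset of member "b" within the object starting at "a", so the
 * generated code can address that member through a base register plus
 * displacement.
 */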



/* Lots of hardcoding
 *
 * EAX -- pointer to current output vertex
 * ECX -- pointer to current attribute
 *
 */
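/* A sketch of the remaining conventions as actually used below: the
 * variable named srcEDI really refers to ECX (matching the comment
 * above), EBP holds the remaining vertex count, and ESI is the base
 * used to address the tnl clipspace state.  The three stack arguments
 * read via make_fn_arg() are, in order, the GLcontext, the vertex
 * count and the destination buffer, i.e. the generated code is invoked
 * through vtx->emit as a tnl_emit_func.
 */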
static GLboolean build_vertex_emit( struct x86_program *p )
{
   GLcontext *ctx = p->ctx;
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct tnl_clipspace_attr *a = vtx->attr;
   GLuint j;

   struct x86_reg vertexEAX = make_reg(file_REG32, reg_AX);
   struct x86_reg srcEDI = make_reg(file_REG32, reg_CX);
   struct x86_reg countEBP = make_reg(file_REG32, reg_BP);
   struct x86_reg vtxESI = make_reg(file_REG32, reg_SI);
   struct x86_reg tmp = make_reg(file_XMM, 0);
   struct x86_reg vp0 = make_reg(file_XMM, 1);
   struct x86_reg vp1 = make_reg(file_XMM, 2);
   struct x86_reg chan0 = make_reg(file_XMM, 3);
   GLubyte *fixup, *label;

   p->csr = p->store;

   /* Push a few regs?
    */
   emit_push(p, srcEDI);
   emit_push(p, countEBP);
   emit_push(p, vtxESI);


   /* Get vertex count, compare to zero
    */
   emit_xor(p, srcEDI, srcEDI);
   emit_mov(p, countEBP, make_fn_arg(p, 2));
   emit_cmp(p, countEBP, srcEDI);
   fixup = emit_jcc_forward(p, cc_E);


   /* Initialize destination register.
    */
   emit_mov(p, vertexEAX, make_fn_arg(p, 3));

   /* Dereference ctx to get tnl, then vtx:
    */
   emit_mov(p, vtxESI, make_fn_arg(p, 1));
   emit_mov(p, vtxESI, make_disp(vtxESI, get_offset(ctx, &ctx->swtnl_context)));
   vtxESI = make_disp(vtxESI, get_offset(tnl, &tnl->clipspace));


   /* Possibly load vp0, vp1 for viewport calcs:
    */
   if (vtx->need_viewport) {
      emit_movups(p, vp0, make_disp(vtxESI, get_offset(vtx, &vtx->vp_scale[0])));
      emit_movups(p, vp1, make_disp(vtxESI, get_offset(vtx, &vtx->vp_xlate[0])));
   }

   /* always load, needed or not:
    */
   emit_movups(p, chan0, make_disp(vtxESI, get_offset(vtx, &vtx->chan_scale[0])));
   emit_movups(p, p->identity, make_disp(vtxESI, get_offset(vtx, &vtx->identity[0])));

   /* Note address for loop jump */
   label = get_label(p);

   /* Emit code for each of the attributes.  Currently routes
    * everything through SSE registers, even when it might be more
    * efficient to stick with regular old x86.  No optimization or
    * other tricks - enough new ground to cover here just getting
    * things working.
    */
   for (j = 0; j < vtx->attr_count; j++) {
      struct x86_reg dest = make_disp(vertexEAX, vtx->attr[j].vertoffset);
      struct x86_reg ptr_to_src = make_disp(vtxESI, get_offset(vtx, &vtx->attr[j].inputptr));

      /* Load current a[j].inputptr
       */
      emit_mov(p, srcEDI, ptr_to_src);

      /* Now, load an XMM reg from src, perhaps transform, then save.
       * Could be shortcircuited in specific cases:
       */
      switch (a[j].format) {
      case EMIT_1F:
         emit_load(p, tmp, 1, deref(srcEDI), vtx->attr[j].inputsize);
         emit_store(p, dest, 1, tmp);
         break;
      case EMIT_2F:
         emit_load(p, tmp, 2, deref(srcEDI), vtx->attr[j].inputsize);
         emit_store(p, dest, 2, tmp);
         break;
      case EMIT_3F:
         /* Potentially the worst case - hardcode 2+1 copying:
          */
         emit_load(p, tmp, 3, deref(srcEDI), vtx->attr[j].inputsize);
         emit_store(p, dest, 3, tmp);
         break;
      case EMIT_4F:
         emit_load(p, tmp, 4, deref(srcEDI), vtx->attr[j].inputsize);
         emit_store(p, dest, 4, tmp);
         break;
      case EMIT_2F_VIEWPORT:
         emit_load(p, tmp, 2, deref(srcEDI), vtx->attr[j].inputsize);
         emit_mulps(p, tmp, vp0);
         emit_addps(p, tmp, vp1);
         emit_store(p, dest, 2, tmp);
         break;
      case EMIT_3F_VIEWPORT:
         emit_load(p, tmp, 3, deref(srcEDI), vtx->attr[j].inputsize);
         emit_mulps(p, tmp, vp0);
         emit_addps(p, tmp, vp1);
         emit_store(p, dest, 3, tmp);
         break;
      case EMIT_4F_VIEWPORT:
         emit_load(p, tmp, 4, deref(srcEDI), vtx->attr[j].inputsize);
         emit_mulps(p, tmp, vp0);
         emit_addps(p, tmp, vp1);
         emit_store(p, dest, 4, tmp);
         break;
      case EMIT_3F_XYW:
         emit_load(p, tmp, 4, deref(srcEDI), vtx->attr[j].inputsize);
         emit_pshufd(p, tmp, tmp, X, Y, W, Z);
         emit_store(p, dest, 3, tmp);
         break;

      /* Try and bond 3ub + 1ub pairs into a single 4ub operation?
       */
      case EMIT_1UB_1F:
      case EMIT_3UB_3F_RGB:
      case EMIT_3UB_3F_BGR:
         _mesa_printf("unimplemented format %d\n", a[j].format);
         return GL_FALSE;	/* add this later */

      case EMIT_4UB_4F_RGBA:
         emit_load(p, tmp, 4, deref(srcEDI), vtx->attr[j].inputsize);
         emit_mulps(p, tmp, chan0);
         emit_pk4ub(p, tmp, tmp);
         emit_store(p, dest, 1, tmp);
         break;
      case EMIT_4UB_4F_BGRA:
         emit_load(p, tmp, 4, deref(srcEDI), vtx->attr[j].inputsize);
         emit_pshufd(p, tmp, tmp, Z, Y, X, W);
         emit_mulps(p, tmp, chan0);
         emit_pk4ub(p, tmp, tmp);
         emit_store(p, dest, 1, tmp);
         break;
      case EMIT_4UB_4F_ARGB:
         emit_load(p, tmp, 4, deref(srcEDI), vtx->attr[j].inputsize);
         emit_pshufd(p, tmp, tmp, W, X, Y, Z);
         emit_mulps(p, tmp, chan0);
         emit_pk4ub(p, tmp, tmp);
         emit_store(p, dest, 1, tmp);
         break;
      case EMIT_4UB_4F_ABGR:
         emit_load(p, tmp, 4, deref(srcEDI), vtx->attr[j].inputsize);
         emit_pshufd(p, tmp, tmp, W, Z, Y, X);
         emit_mulps(p, tmp, chan0);
         emit_pk4ub(p, tmp, tmp);
         emit_store(p, dest, 1, tmp);
         break;
      case EMIT_4CHAN_4F_RGBA:
         switch (CHAN_TYPE) {
         case GL_UNSIGNED_BYTE:
            emit_load(p, tmp, 4, deref(srcEDI), vtx->attr[j].inputsize);
            emit_mulps(p, tmp, chan0);
            emit_pk4ub(p, tmp, tmp);
            emit_store(p, dest, 1, tmp);
            break;
         case GL_FLOAT:
            emit_load(p, tmp, 4, deref(srcEDI), vtx->attr[j].inputsize);
            emit_store(p, dest, 4, tmp);
            break;
         case GL_UNSIGNED_SHORT:
         default:
            _mesa_printf("unknown CHAN_TYPE %s\n", _mesa_lookup_enum_by_nr(CHAN_TYPE));
            return GL_FALSE;
         }
         break;
      default:
         _mesa_printf("unknown a[%d].format %d\n", j, a[j].format);
         return GL_FALSE;	/* catch any new opcodes */
      }

      /* add a[j].inputstride (hardcoded value - could just as easily
       * pull the stride value from memory each time).
       */
      emit_add_imm(p, srcEDI, srcEDI, a[j].inputstride);

      /* save new value of a[j].inputptr
       */
      emit_mov(p, ptr_to_src, srcEDI);

   }

   /* Next vertex:
    */
   emit_add_imm(p, vertexEAX, vertexEAX, vtx->vertex_size);

   /* decr count, loop if not zero
    */
   emit_dec(p, countEBP);
   emit_test(p, countEBP, countEBP);
   emit_jcc(p, cc_NZ, label);

   /* Land forward jump here:
    */
   do_fixup(p, fixup);

   /* Pop regs and return
    */
   emit_pop(p, get_base_reg(vtxESI));
   emit_pop(p, countEBP);
   emit_pop(p, srcEDI);
   emit_ret(p);

   vtx->emit = (tnl_emit_func)p->store;
   return GL_TRUE;
}

void _tnl_generate_sse_emit( GLcontext *ctx )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct x86_program p;

   memset(&p, 0, sizeof(p));
   p.ctx = ctx;
   p.store = MALLOC(1024);

   p.inputs_safe = 1;		/* for now */
   p.outputs_safe = 1;		/* for now */
   p.identity = make_reg(file_XMM, 6);

   if (build_vertex_emit(&p)) {
      _tnl_register_fastpath( vtx, GL_TRUE );
      if (DISASSEM)
         _mesa_printf("disassemble 0x%x 0x%x\n", p.store, p.csr);
   }
   else {
      /* Note the failure:
       */
      _tnl_register_fastpath( vtx, GL_FALSE );
      FREE(p.store);
   }

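   /* Touch a few emitters, presumably to keep unused-function warnings
    * quiet while the code is still in flux:
    */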
   (void)emit_movd;
   (void)emit_inc;
   (void)emit_xor;
}