/*
 * Copyright 2003 Tungsten Graphics, inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Keith Whitwell <keithw@tungstengraphics.com>
 */
31 #include "t_context.h"
33 #include "simple_list.h"
36 #include <sys/types.h>
50 GLuint mod
:2; /* mod_REG if this is just a register */
51 GLint disp
:24; /* only +/- 23bits of offset - should be enough... */
62 GLboolean inputs_safe
;
63 GLboolean outputs_safe
;
65 struct x86_reg identity
;
73 /* There are more but these are all we'll use:
80 /* Values for mod field of modr/m byte
103 cc_NO
, /* not overflow */
104 cc_NAE
, /* not above or equal / carry */
105 cc_AE
, /* above or equal / not carry */
106 cc_E
, /* equal / zero */
107 cc_NE
/* not equal / not zero */
114 /* Create and manipulate registers and regmem values:
116 static struct x86_reg
make_reg( GLuint file
,
/* make_disp: turn a register reference into (or adjust) a
 * register+displacement memory reference, picking the smallest modr/m
 * "mod" encoding the displacement fits in.
 * NOTE(review): several interior lines are missing from this extraction
 * (the displacement assignment, the mod_DISP8 arm and the return
 * statement).  The surviving fragments are kept byte-identical below —
 * recover the full body from the original file before editing.
 */
129 static struct x86_reg
make_disp( struct x86_reg reg
,
132 assert(reg
.file
== file_REG32
);
134 if (reg
.mod
== mod_REG
)
/* NOTE(review): the assignment on this arm is missing — presumably
 * reg.disp = disp vs. reg.disp += disp; confirm against original. */
140 reg
.mod
= mod_INDIRECT
;
141 else if (reg
.disp
<= 127 && reg
.disp
>= -128)
/* NOTE(review): a reg.mod = mod_DISP8 assignment likely belongs on
 * this arm — the line is missing here; confirm. */
144 reg
.mod
= mod_DISP32
;
149 static struct x86_reg
deref( struct x86_reg reg
)
151 return make_disp(reg
, 0);
154 static struct x86_reg
get_base_reg( struct x86_reg reg
)
156 return make_reg( reg
.file
, reg
.idx
);
160 /* Retreive a reference to one of the function arguments, taking into
161 * account any push/pop activity:
163 static struct x86_reg
make_fn_arg( struct x86_program
*p
,
166 return make_disp(make_reg(file_REG32
, reg_SP
),
167 p
->stack_offset
+ arg
* 4); /* ??? */
/* Accessor for the XMM register reserved to hold the identity vector
 * (p->identity, initialized by the caller before code generation).
 * NOTE(review): the function body is missing from this extraction —
 * presumably just "return p->identity;"; confirm against original.
 */
171 static struct x86_reg
get_identity( struct x86_program
*p
)
176 static struct x86_reg
get_sse_temp( struct x86_program
*p
)
178 return make_reg(file_XMM
, 7); /* hardwired */
181 static void release_temp( struct x86_program
*p
,
184 assert(reg
.file
== file_XMM
&&
188 /* Emit bytes to the instruction stream:
190 static void emit_1b( struct x86_program
*p
, GLbyte b0
)
192 *(GLbyte
*)(p
->csr
++) = b0
;
/* Append a 32-bit immediate to the instruction stream.
 * NOTE(review): the code-pointer advance (p->csr += 4, by analogy with
 * emit_1b's p->csr++) appears to be missing from this extraction —
 * without it successive emits would overwrite each other; confirm
 * against the original file.
 */
195 static void emit_1i( struct x86_program
*p
, GLint i0
)
197 *(GLint
*)(p
->csr
) = i0
;
201 static void disassem( struct x86_program
*p
, const char *fn
)
204 static const char *last_fn
;
205 if (fn
&& fn
!= last_fn
) {
206 _mesa_printf("0x%x: %s\n", p
->csr
, fn
);
212 static void emit_1ub_fn( struct x86_program
*p
, GLubyte b0
, const char *fn
)
218 static void emit_2ub_fn( struct x86_program
*p
, GLubyte b0
, GLubyte b1
, const char *fn
)
225 static void emit_3ub_fn( struct x86_program
*p
, GLubyte b0
, GLubyte b1
, GLubyte b2
, const char *fn
)
233 #define emit_1ub(p, b0) emit_1ub_fn(p, b0, __FUNCTION__)
234 #define emit_2ub(p, b0, b1) emit_2ub_fn(p, b0, b1, __FUNCTION__)
235 #define emit_3ub(p, b0, b1, b2) emit_3ub_fn(p, b0, b1, b2, __FUNCTION__)
238 /* Labels, jumps and fixup:
240 static GLubyte
*get_label( struct x86_program
*p
)
245 static void emit_jcc( struct x86_program
*p
,
249 GLint offset
= label
- (get_label(p
) + 2);
251 if (offset
<= 127 && offset
>= -128) {
252 emit_1ub(p
, 0x70 + cc
);
253 emit_1b(p
, (GLbyte
) offset
);
256 offset
= label
- (get_label(p
) + 6);
257 emit_2ub(p
, 0x0f, 0x80 + cc
);
262 /* Always use a 32bit offset for forward jumps:
264 static GLubyte
*emit_jcc_forward( struct x86_program
*p
,
267 emit_2ub(p
, 0x0f, 0x80 + cc
);
272 /* Fixup offset from forward jump:
274 static void do_fixup( struct x86_program
*p
,
277 *(int *)(fixup
- 4) = get_label(p
) - fixup
;
280 static void emit_push( struct x86_program
*p
,
283 assert(reg
.mod
== mod_REG
);
284 emit_1ub(p
, 0x50 + reg
.idx
);
285 p
->stack_offset
+= 4;
288 static void emit_pop( struct x86_program
*p
,
291 assert(reg
.mod
== mod_REG
);
292 emit_1ub(p
, 0x58 + reg
.idx
);
293 p
->stack_offset
-= 4;
296 static void emit_inc( struct x86_program
*p
,
299 assert(reg
.mod
== mod_REG
);
300 emit_1ub(p
, 0x40 + reg
.idx
);
303 static void emit_dec( struct x86_program
*p
,
306 assert(reg
.mod
== mod_REG
);
307 emit_1ub(p
, 0x48 + reg
.idx
);
310 static void emit_ret( struct x86_program
*p
)
/* Emit the modR/M byte (mod | reg | r/m) plus any displacement bytes
 * for a (register, register-or-memory) operand pair.
 * NOTE(review): the `reg` parameter line, the declaration/zeroing of
 * `val`, the closing of the SIB `if`, and the switch case labels
 * (presumably mod_DISP8 before the 1-byte emit and mod_DISP32 before
 * the 4-byte emit) are missing from this extraction.  The surviving
 * fragments are kept byte-identical below.
 */
318 /* Build a modRM byte + possible displacement. No treatment of SIB
319 * indexing. BZZT - no way to encode an absolute address.
321 static void emit_modrm( struct x86_program
*p
,
323 struct x86_reg regmem
)
327 assert(reg
.mod
== mod_REG
);
329 val
|= regmem
.mod
<< 6; /* mod field */
330 val
|= reg
.idx
<< 3; /* reg field */
331 val
|= regmem
.idx
; /* r/m field */
333 emit_1ub_fn(p
, val
, 0);
335 /* Oh-oh we've stumbled into the SIB thing.
337 if (regmem
.idx
== reg_SP
) {
338 emit_1ub_fn(p
, 0x24, 0); /* simplistic! */
/* 8-bit displacement arm: */
341 switch (regmem
.mod
) {
346 emit_1b(p
, regmem
.disp
);
/* 32-bit displacement arm: */
349 emit_1i(p
, regmem
.disp
);
/* Select between the register-destination and memory-destination
 * opcode of a two-form instruction, then emit opcode + modrm.  For the
 * memory-destination form the operands are passed to emit_modrm
 * swapped (src in the reg field, dst as the r/m operand).
 * NOTE(review): the dst/src parameter lines and the branch structure
 * choosing between the two forms (presumably on dst.mod == mod_REG)
 * are missing from this extraction; fragments kept byte-identical.
 */
354 /* Many x86 instructions have two opcodes to cope with the situations
355 * where the destination is a register or memory reference
356 * respectively. This function selects the correct opcode based on
357 * the arguments presented.
359 static void emit_op_modrm( struct x86_program
*p
,
360 GLubyte op_dst_is_reg
,
361 GLubyte op_dst_is_mem
,
367 emit_1ub_fn(p
, op_dst_is_reg
, 0);
368 emit_modrm(p
, dst
, src
);
373 assert(src
.mod
== mod_REG
);
374 emit_1ub_fn(p
, op_dst_is_mem
, 0);
375 emit_modrm(p
, src
, dst
);
380 static void emit_mov( struct x86_program
*p
,
384 emit_op_modrm( p
, 0x8b, 0x89, dst
, src
);
387 static void emit_xor( struct x86_program
*p
,
391 emit_op_modrm( p
, 0x33, 0x31, dst
, src
);
394 static void emit_cmp( struct x86_program
*p
,
398 emit_op_modrm( p
, 0x3b, 0x39, dst
, src
);
401 static void emit_movlps( struct x86_program
*p
,
405 emit_1ub(p
, X86_TWOB
);
406 emit_op_modrm( p
, 0x12, 0x13, dst
, src
);
409 static void emit_movhps( struct x86_program
*p
,
413 emit_1ub(p
, X86_TWOB
);
414 emit_op_modrm( p
, 0x16, 0x17, dst
, src
);
417 static void emit_movd( struct x86_program
*p
,
421 emit_2ub(p
, 0x66, X86_TWOB
);
422 emit_op_modrm( p
, 0x6e, 0x7e, dst
, src
);
425 static void emit_movss( struct x86_program
*p
,
429 emit_2ub(p
, 0xF3, X86_TWOB
);
430 emit_op_modrm( p
, 0x10, 0x11, dst
, src
);
433 static void emit_movaps( struct x86_program
*p
,
437 emit_1ub(p
, X86_TWOB
);
438 emit_op_modrm( p
, 0x28, 0x29, dst
, src
);
441 static void emit_movups( struct x86_program
*p
,
445 emit_1ub(p
, X86_TWOB
);
446 emit_op_modrm( p
, 0x10, 0x11, dst
, src
);
449 /* SSE operations often only have one format, with dest constrained to
452 static void emit_mulps( struct x86_program
*p
,
456 emit_2ub(p
, X86_TWOB
, 0x59);
457 emit_modrm( p
, dst
, src
);
460 static void emit_addps( struct x86_program
*p
,
464 emit_2ub(p
, X86_TWOB
, 0x58);
465 emit_modrm( p
, dst
, src
);
468 static void emit_cvtps2dq( struct x86_program
*p
,
472 emit_3ub(p
, 0x66, X86_TWOB
, 0x5B);
473 emit_modrm( p
, dst
, src
);
476 static void emit_packssdw( struct x86_program
*p
,
480 emit_3ub(p
, 0x66, X86_TWOB
, 0x6B);
481 emit_modrm( p
, dst
, src
);
484 static void emit_packsswb( struct x86_program
*p
,
488 emit_3ub(p
, 0x66, X86_TWOB
, 0x63);
489 emit_modrm( p
, dst
, src
);
492 static void emit_packuswb( struct x86_program
*p
,
496 emit_3ub(p
, 0x66, X86_TWOB
, 0x67);
497 emit_modrm( p
, dst
, src
);
500 /* Load effective address:
/* NOTE(review): lea requires an opcode byte (0x8d on x86) before the
 * modrm; the emit of that byte and the dst/src parameter lines are
 * missing from this extraction — recover from the original file. */
502 static void emit_lea( struct x86_program
*p
,
507 emit_modrm( p
, dst
, src
);
510 static void emit_add_imm( struct x86_program
*p
,
515 emit_lea(p
, dst
, make_disp(src
, value
));
/* NOTE(review): test needs its opcode byte (0x85 for r/m32, r32)
 * before the modrm; that emit and the dst/src parameter lines are
 * missing from this extraction — recover from the original file. */
518 static void emit_test( struct x86_program
*p
,
523 emit_modrm( p
, dst
, src
);
530 * Perform a reduced swizzle:
532 static void emit_pshufd( struct x86_program
*p
,
540 emit_3ub(p
, 0x66, X86_TWOB
, 0x70);
541 emit_modrm(p
, dest
, arg0
);
542 emit_1ub(p
, (x
|(y
<<2)|(z
<<4)|w
<<6));
546 static void emit_pk4ub( struct x86_program
*p
,
548 struct x86_reg arg0
)
550 emit_cvtps2dq(p
, dest
, arg0
);
551 emit_packssdw(p
, dest
, dest
);
552 emit_packuswb(p
, dest
, dest
);
555 static void emit_load4f_4( struct x86_program
*p
,
557 struct x86_reg arg0
)
559 emit_movups(p
, dest
, arg0
);
562 static void emit_load4f_3( struct x86_program
*p
,
564 struct x86_reg arg0
)
566 /* Have to jump through some hoops:
568 * 0 0 0 1 -- skip if reg[3] preserved over loop iterations
573 emit_movups(p
, dest
, get_identity(p
));
574 emit_movss(p
, dest
, make_disp(arg0
, 8));
575 emit_pshufd(p
, dest
, dest
, Y
,Z
,X
,W
);
576 emit_movlps(p
, dest
, arg0
);
579 static void emit_load4f_2( struct x86_program
*p
,
581 struct x86_reg arg0
)
583 /* Pull in 2 dwords, then copy the top 2 dwords with 0,1 from id.
585 emit_movlps(p
, dest
, arg0
);
586 emit_movhps(p
, dest
, get_identity(p
));
589 static void emit_load4f_1( struct x86_program
*p
,
591 struct x86_reg arg0
)
593 /* Initialized with [0,0,0,1] from id, then pull in the single low
596 emit_movups(p
, dest
, get_identity(p
));
597 emit_movss(p
, dest
, arg0
);
602 static void emit_load3f_3( struct x86_program
*p
,
604 struct x86_reg arg0
)
606 /* Over-reads by 1 dword - potential SEGV if input is a vertex
609 if (p
->inputs_safe
) {
610 emit_movups(p
, dest
, arg0
);
617 emit_movss(p
, dest
, make_disp(arg0
, 8));
618 emit_pshufd(p
, dest
, dest
, X
,X
,X
,X
);
619 emit_movlps(p
, dest
, arg0
);
623 static void emit_load3f_2( struct x86_program
*p
,
625 struct x86_reg arg0
)
627 emit_load4f_2(p
, dest
, arg0
);
630 static void emit_load3f_1( struct x86_program
*p
,
632 struct x86_reg arg0
)
634 emit_load4f_1(p
, dest
, arg0
);
637 static void emit_load2f_2( struct x86_program
*p
,
639 struct x86_reg arg0
)
641 emit_movlps(p
, dest
, arg0
);
644 static void emit_load2f_1( struct x86_program
*p
,
646 struct x86_reg arg0
)
648 emit_load4f_1(p
, dest
, arg0
);
651 static void emit_load1f_1( struct x86_program
*p
,
653 struct x86_reg arg0
)
655 emit_movss(p
, dest
, arg0
);
658 static void (*load
[4][4])( struct x86_program
*p
,
660 struct x86_reg arg0
) = {
682 static void emit_load( struct x86_program
*p
,
688 _mesa_printf("load %d/%d\n", sz
, src_sz
);
689 load
[sz
-1][src_sz
-1](p
, dest
, src
);
693 static void emit_store4f( struct x86_program
*p
,
695 struct x86_reg arg0
)
697 emit_movups(p
, dest
, arg0
);
700 static void emit_store3f( struct x86_program
*p
,
702 struct x86_reg arg0
)
704 if (p
->outputs_safe
) {
705 /* Emit the extra dword anyway. This may hurt writecombining,
706 * may cause other problems.
708 emit_movups(p
, dest
, arg0
);
711 /* Alternate strategy - emit two, shuffle, emit one.
713 struct x86_reg tmp
= get_sse_temp(p
);
714 emit_movlps(p
, dest
, arg0
);
716 emit_pshufd(p
, tmp
, arg0
, Z
, Z
, Z
, Z
);
717 emit_movss(p
, make_disp(dest
,8), tmp
);
718 release_temp(p
, tmp
);
722 static void emit_store2f( struct x86_program
*p
,
724 struct x86_reg arg0
)
726 emit_movlps(p
, dest
, arg0
);
729 static void emit_store1f( struct x86_program
*p
,
731 struct x86_reg arg0
)
733 emit_movss(p
, dest
, arg0
);
737 static void (*store
[4])( struct x86_program
*p
,
739 struct x86_reg arg0
) =
747 static void emit_store( struct x86_program
*p
,
750 struct x86_reg temp
)
753 store
[sz
-1](p
, dest
, temp
);
757 static GLint
get_offset( const void *a
, const void *b
)
759 return (const char *)b
- (const char *)a
;
764 /* Lots of hardcoding
766 * EAX -- pointer to current output vertex
767 * ECX -- pointer to current attribute
770 static GLboolean
build_vertex_emit( struct x86_program
*p
)
772 GLcontext
*ctx
= p
->ctx
;
773 TNLcontext
*tnl
= TNL_CONTEXT(ctx
);
774 struct tnl_clipspace
*vtx
= GET_VERTEX_STATE(ctx
);
775 struct tnl_clipspace_attr
*a
= vtx
->attr
;
778 struct x86_reg vertexEAX
= make_reg(file_REG32
, reg_AX
);
779 struct x86_reg srcEDI
= make_reg(file_REG32
, reg_CX
);
780 struct x86_reg countEBP
= make_reg(file_REG32
, reg_BP
);
781 struct x86_reg vtxESI
= make_reg(file_REG32
, reg_SI
);
782 struct x86_reg tmp
= make_reg(file_XMM
, 0);
783 struct x86_reg vp0
= make_reg(file_XMM
, 1);
784 struct x86_reg vp1
= make_reg(file_XMM
, 2);
785 struct x86_reg chan0
= make_reg(file_XMM
, 3);
786 GLubyte
*fixup
, *label
;
792 emit_push(p
, srcEDI
);
793 emit_push(p
, countEBP
);
794 emit_push(p
, vtxESI
);
797 /* Get vertex count, compare to zero
799 emit_xor(p
, srcEDI
, srcEDI
);
800 emit_mov(p
, countEBP
, make_fn_arg(p
, 2));
801 emit_cmp(p
, countEBP
, srcEDI
);
802 fixup
= emit_jcc_forward(p
, cc_E
);
805 /* Initialize destination register.
807 emit_mov(p
, vertexEAX
, make_fn_arg(p
, 3));
809 /* Dereference ctx to get tnl, then vtx:
811 emit_mov(p
, vtxESI
, make_fn_arg(p
, 1));
812 emit_mov(p
, vtxESI
, make_disp(vtxESI
, get_offset(ctx
, &ctx
->swtnl_context
)));
813 vtxESI
= make_disp(vtxESI
, get_offset(tnl
, &tnl
->clipspace
));
816 /* Possibly load vp0, vp1 for viewport calcs:
818 if (vtx
->need_viewport
) {
819 emit_movups(p
, vp0
, make_disp(vtxESI
, get_offset(vtx
, &vtx
->vp_scale
[0])));
820 emit_movups(p
, vp1
, make_disp(vtxESI
, get_offset(vtx
, &vtx
->vp_xlate
[0])));
823 /* always load, needed or not:
825 emit_movups(p
, chan0
, make_disp(vtxESI
, get_offset(vtx
, &vtx
->chan_scale
[0])));
826 emit_movups(p
, p
->identity
, make_disp(vtxESI
, get_offset(vtx
, &vtx
->identity
[0])));
828 /* Note address for loop jump */
829 label
= get_label(p
);
831 /* Emit code for each of the attributes. Currently routes
832 * everything through SSE registers, even when it might be more
833 * efficient to stick with regular old x86. No optimization or
834 * other tricks - enough new ground to cover here just getting
837 for (j
= 0; j
< vtx
->attr_count
; j
++) {
838 struct x86_reg dest
= make_disp(vertexEAX
, vtx
->attr
[j
].vertoffset
);
839 struct x86_reg ptr_to_src
= make_disp(vtxESI
, get_offset(vtx
, &vtx
->attr
[j
].inputptr
));
841 /* Load current a[j].inputptr
843 emit_mov(p
, srcEDI
, ptr_to_src
);
845 /* Now, load an XMM reg from src, perhaps transform, then save.
846 * Could be shortcircuited in specific cases:
848 switch (a
[j
].format
) {
850 emit_load(p
, tmp
, 1, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
851 emit_store(p
, dest
, 1, tmp
);
854 emit_load(p
, tmp
, 2, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
855 emit_store(p
, dest
, 2, tmp
);
858 /* Potentially the worst case - hardcode 2+1 copying:
860 emit_load(p
, tmp
, 3, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
861 emit_store(p
, dest
, 3, tmp
);
864 emit_load(p
, tmp
, 4, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
865 emit_store(p
, dest
, 4, tmp
);
867 case EMIT_2F_VIEWPORT
:
868 emit_load(p
, tmp
, 2, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
869 emit_mulps(p
, tmp
, vp0
);
870 emit_addps(p
, tmp
, vp1
);
871 emit_store(p
, dest
, 2, tmp
);
873 case EMIT_3F_VIEWPORT
:
874 emit_load(p
, tmp
, 3, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
875 emit_mulps(p
, tmp
, vp0
);
876 emit_addps(p
, tmp
, vp1
);
877 emit_store(p
, dest
, 3, tmp
);
879 case EMIT_4F_VIEWPORT
:
880 emit_load(p
, tmp
, 4, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
881 emit_mulps(p
, tmp
, vp0
);
882 emit_addps(p
, tmp
, vp1
);
883 emit_store(p
, dest
, 4, tmp
);
886 emit_load(p
, tmp
, 4, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
887 emit_pshufd(p
, tmp
, tmp
, X
, Y
, W
, Z
);
888 emit_store(p
, dest
, 3, tmp
);
891 /* Try and bond 3ub + 1ub pairs into a single 4ub operation?
894 case EMIT_3UB_3F_RGB
:
895 case EMIT_3UB_3F_BGR
:
896 _mesa_printf("non-implemneted format %d\n", a
[j
].format
);
897 return GL_FALSE
; /* add this later */
899 case EMIT_4UB_4F_RGBA
:
900 emit_load(p
, tmp
, 4, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
901 emit_mulps(p
, tmp
, chan0
);
902 emit_pk4ub(p
, tmp
, tmp
);
903 emit_store(p
, dest
, 1, tmp
);
905 case EMIT_4UB_4F_BGRA
:
906 emit_load(p
, tmp
, 4, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
907 emit_pshufd(p
, tmp
, tmp
, Z
, Y
, X
, W
);
908 emit_mulps(p
, tmp
, chan0
);
909 emit_pk4ub(p
, tmp
, tmp
);
910 emit_store(p
, dest
, 1, tmp
);
912 case EMIT_4UB_4F_ARGB
:
913 emit_load(p
, tmp
, 4, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
914 emit_pshufd(p
, tmp
, tmp
, W
, X
, Y
, Z
);
915 emit_mulps(p
, tmp
, chan0
);
916 emit_pk4ub(p
, tmp
, tmp
);
917 emit_store(p
, dest
, 1, tmp
);
919 case EMIT_4UB_4F_ABGR
:
920 emit_load(p
, tmp
, 4, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
921 emit_pshufd(p
, tmp
, tmp
, W
, Z
, Y
, X
);
922 emit_mulps(p
, tmp
, chan0
);
923 emit_pk4ub(p
, tmp
, tmp
);
924 emit_store(p
, dest
, 1, tmp
);
926 case EMIT_4CHAN_4F_RGBA
:
928 case GL_UNSIGNED_BYTE
:
929 emit_load(p
, tmp
, 4, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
930 emit_mulps(p
, tmp
, chan0
);
931 emit_pk4ub(p
, tmp
, tmp
);
932 emit_store(p
, dest
, 1, tmp
);
935 emit_load(p
, tmp
, 4, deref(srcEDI
), vtx
->attr
[j
].inputsize
);
936 emit_store(p
, dest
, 4, tmp
);
938 case GL_UNSIGNED_SHORT
:
940 _mesa_printf("unknown CHAN_TYPE %s\n", _mesa_lookup_enum_by_nr(CHAN_TYPE
));
945 _mesa_printf("unknown a[%d].format %d\n", j
, a
[j
].format
);
946 return GL_FALSE
; /* catch any new opcodes */
949 /* add a[j].inputstride (hardcoded value - could just as easily
950 * pull the stride value from memory each time).
952 emit_add_imm(p
, srcEDI
, srcEDI
, a
[j
].inputstride
);
954 /* save new value of a[j].inputptr
956 emit_mov(p
, ptr_to_src
, srcEDI
);
962 emit_add_imm(p
, vertexEAX
, vertexEAX
, vtx
->vertex_size
);
964 /* decr count, loop if not zero
966 emit_dec(p
, countEBP
);
967 emit_test(p
, countEBP
, countEBP
);
968 emit_jcc(p
, cc_NZ
, label
);
970 /* Land forward jump here:
974 /* Pop regs and return
976 emit_pop(p
, get_base_reg(vtxESI
));
977 emit_pop(p
, countEBP
);
981 vtx
->emit
= (tnl_emit_func
)p
->store
;
985 void _tnl_generate_sse_emit( GLcontext
*ctx
)
987 struct tnl_clipspace
*vtx
= GET_VERTEX_STATE(ctx
);
988 struct x86_program p
;
990 memset(&p
, 0, sizeof(p
));
992 p
.store
= MALLOC(1024);
994 p
.inputs_safe
= 1; /* for now */
995 p
.outputs_safe
= 1; /* for now */
996 p
.identity
= make_reg(file_XMM
, 6);
998 if (build_vertex_emit(&p
)) {
999 _tnl_register_fastpath( vtx
, GL_TRUE
);
1001 _mesa_printf("disassemble 0x%x 0x%x\n", p
.store
, p
.csr
);
1004 /* Note the failure:
1006 _tnl_register_fastpath( vtx
, GL_FALSE
);