/*
 * Copyright 2003 Tungsten Graphics, inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keithw@tungstengraphics.com>
 */
#include "main/glheader.h"
#include "main/context.h"
#include "main/colormac.h"
#include "t_context.h"
#include "simple_list.h"
#include "main/enums.h"

#if defined(USE_SSE_ASM)

#include "x86/rtasm/x86sse.h"
#include "x86/common_x86_asm.h"
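

/* State carried while building the SSE vertex-emit function: the
 * function under construction plus the flags and constant registers
 * that the emit helpers below depend on.
 */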
struct x86_program {
   struct x86_function func;

   GLcontext *ctx;
   GLboolean inputs_safe;
   GLboolean outputs_safe;
   GLboolean have_sse2;

   struct x86_reg identity;
   struct x86_reg chan0;
};


static struct x86_reg get_identity( struct x86_program *p )
{
   return p->identity;
}
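

/* The emit_load<dst>f_<src> helpers load <src> floats from memory into
 * a 4-wide XMM destination, filling any missing components from the
 * identity constant kept in an XMM register (see build_vertex_emit).
 */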

static void emit_load4f_4( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}

static void emit_load4f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Have to jump through some hoops:
    *
    * c 0 0 0
    * c 0 0 1
    * 0 0 c 1
    * a b c 1
    */
   sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
   sse_shufps(&p->func, dest, dest, SHUF(Y,Z,X,W) );
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load4f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Initialize from identity, then pull in low two words:
    */
   sse_movups(&p->func, dest, get_identity(p));
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load4f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Pull in low word, then swizzle in identity */
   sse_movss(&p->func, dest, arg0);
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
}

static void emit_load3f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Over-reads by 1 dword - potential SEGV if input is a vertex
    * array.
    */
   if (p->inputs_safe) {
      sse_movups(&p->func, dest, arg0);
   }
   else {
      sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
      sse_shufps(&p->func, dest, dest, SHUF(X,X,X,X));
      sse_movlps(&p->func, dest, arg0);
   }
}

static void emit_load3f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_2(p, dest, arg0);
}

static void emit_load3f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_1(p, dest, arg0);
}

static void emit_load2f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load2f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_1(p, dest, arg0);
}

static void emit_load1f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}

static void (*load[4][4])( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 ) = {
   { emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1 },
   { emit_load2f_1,
     emit_load2f_2,
     emit_load2f_2,
     emit_load2f_2 },
   { emit_load3f_1,
     emit_load3f_2,
     emit_load3f_3,
     emit_load3f_3 },
   { emit_load4f_1,
     emit_load4f_2,
     emit_load4f_3,
     emit_load4f_4 }
};
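
/* Indexed as load[dst_sz - 1][src_sz - 1]; a source wider than the
 * destination is simply truncated to the destination width.
 */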

static void emit_load( struct x86_program *p,
                       struct x86_reg dest,
                       GLuint sz,
                       struct x86_reg src,
                       GLuint src_sz )
{
   load[sz-1][src_sz-1](p, dest, src);
}

static void emit_store4f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}

static void emit_store3f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   if (p->outputs_safe) {
      /* Emit the extra dword anyway.  This may hurt writecombining,
       * may cause other problems.
       */
      sse_movups(&p->func, dest, arg0);
   }
   else {
      /* Alternate strategy - emit two, shuffle, emit one.
       */
      sse_movlps(&p->func, dest, arg0);
      sse_shufps(&p->func, arg0, arg0, SHUF(Z,Z,Z,Z) ); /* NOTE! destructive */
      sse_movss(&p->func, x86_make_disp(dest,8), arg0);
   }
}

static void emit_store2f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}

static void emit_store1f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}

static void (*store[4])( struct x86_program *p,
                         struct x86_reg dest,
                         struct x86_reg arg0 ) =
{
   emit_store1f,
   emit_store2f,
   emit_store3f,
   emit_store4f
};
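
/* Indexed as store[sz - 1], mirroring the load table above.
 */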

static void emit_store( struct x86_program *p,
                        struct x86_reg dest,
                        GLuint sz,
                        struct x86_reg temp )
{
   store[sz-1](p, dest, temp);
}

static void emit_pack_store_4ub( struct x86_program *p,
                                 struct x86_reg dest,
                                 struct x86_reg temp )
{
   /* Scale by chan_scale (255.0 for GLubyte channels):
    */
   sse_mulps(&p->func, temp, p->chan0);

   if (p->have_sse2) {
      sse2_cvtps2dq(&p->func, temp, temp);
      sse2_packssdw(&p->func, temp, temp);
      sse2_packuswb(&p->func, temp, temp);
      sse_movss(&p->func, dest, temp);
   }
   else {
      struct x86_reg mmx0 = x86_make_reg(file_MMX, 0);
      struct x86_reg mmx1 = x86_make_reg(file_MMX, 1);
      sse_cvtps2pi(&p->func, mmx0, temp);
      sse_movhlps(&p->func, temp, temp);
      sse_cvtps2pi(&p->func, mmx1, temp);
      mmx_packssdw(&p->func, mmx0, mmx1);
      mmx_packuswb(&p->func, mmx0, mmx0);
      mmx_movd(&p->func, dest, mmx0);
   }
}
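
/* Both branches above compute the same thing: scale to the channel
 * range, convert to dwords, then saturate-pack down to 4 ubytes - in
 * XMM registers with SSE2, or split across two MMX registers on plain
 * SSE (which is what makes the emms check in build_vertex_emit
 * necessary).
 */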

/* Byte offset from a to b - used to build displacements for the
 * generated code's memory operands.
 */
static GLint get_offset( const void *a, const void *b )
{
   return (const char *)b - (const char *)a;
}

/* Not much happens here.  Eventually use this function to try and
 * avoid saving/reloading the source pointers each vertex (if some of
 * them can fit in registers).
 */
static void get_src_ptr( struct x86_program *p,
                         struct x86_reg srcREG,
                         struct x86_reg vtxREG,
                         struct tnl_clipspace_attr *a )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(p->ctx);
   struct x86_reg ptr_to_src = x86_make_disp(vtxREG, get_offset(vtx, &a->inputptr));

   /* Load current a[j].inputptr:
    */
   x86_mov(&p->func, srcREG, ptr_to_src);
}

static void update_src_ptr( struct x86_program *p,
                            struct x86_reg srcREG,
                            struct x86_reg vtxREG,
                            struct tnl_clipspace_attr *a )
{
   if (a->inputstride) {
      struct tnl_clipspace *vtx = GET_VERTEX_STATE(p->ctx);
      struct x86_reg ptr_to_src = x86_make_disp(vtxREG, get_offset(vtx, &a->inputptr));

      /* Add a[j].inputstride (hardcoded value - could just as easily
       * pull the stride value from memory each time).
       */
      x86_lea(&p->func, srcREG, x86_make_disp(srcREG, a->inputstride));

      /* Save new value of a[j].inputptr:
       */
      x86_mov(&p->func, ptr_to_src, srcREG);
   }
}

/* Lots of hardcoding:
 *
 * EAX -- pointer to current output vertex
 * ECX -- pointer to current attribute
 */
static GLboolean build_vertex_emit( struct x86_program *p )
{
   GLcontext *ctx = p->ctx;
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint j = 0;

   struct x86_reg vertexEAX = x86_make_reg(file_REG32, reg_AX);
   struct x86_reg srcECX = x86_make_reg(file_REG32, reg_CX);
   struct x86_reg countEBP = x86_make_reg(file_REG32, reg_BP);
   struct x86_reg vtxESI = x86_make_reg(file_REG32, reg_SI);
   struct x86_reg temp = x86_make_reg(file_XMM, 0);
   struct x86_reg vp0 = x86_make_reg(file_XMM, 1);
   struct x86_reg vp1 = x86_make_reg(file_XMM, 2);
   GLubyte *fixup, *label;

   /* Push the callee-saved regs we use:
    */
   x86_push(&p->func, countEBP);
   x86_push(&p->func, vtxESI);

   /* Get vertex count, compare to zero:
    */
   x86_xor(&p->func, srcECX, srcECX);
   x86_mov(&p->func, countEBP, x86_fn_arg(&p->func, 2));
   x86_cmp(&p->func, countEBP, srcECX);
   fixup = x86_jcc_forward(&p->func, cc_E);
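
   /* cc_E is taken when the incoming count is zero; the forward-jump
    * target is patched in by x86_fixup_fwd_jump() below.
    */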

   /* Initialize destination register:
    */
   x86_mov(&p->func, vertexEAX, x86_fn_arg(&p->func, 3));

   /* Dereference ctx to get tnl, then vtx:
    */
   x86_mov(&p->func, vtxESI, x86_fn_arg(&p->func, 1));
   x86_mov(&p->func, vtxESI, x86_make_disp(vtxESI, get_offset(ctx, &ctx->swtnl_context)));
   vtxESI = x86_make_disp(vtxESI, get_offset(tnl, &tnl->clipspace));
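
   /* From here on, vtxESI carries a displacement so that memory
    * operands address the tnl_clipspace struct directly.
    */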

   /* Possibly load vp0, vp1 for viewport calcs:
    */
   if (vtx->need_viewport) {
      sse_movups(&p->func, vp0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_scale[0])));
      sse_movups(&p->func, vp1, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_xlate[0])));
   }

   /* always load, needed or not:
    */
   sse_movups(&p->func, p->chan0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->chan_scale[0])));
   sse_movups(&p->func, p->identity, x86_make_disp(vtxESI, get_offset(vtx, &vtx->identity[0])));

   /* Note address for loop jump */
   label = x86_get_label(&p->func);
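
   /* Everything between this label and the x86_jcc below executes once
    * per vertex; the switch below is evaluated at codegen time, once
    * per attribute.
    */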

   /* Emit code for each of the attributes.  Currently routes
    * everything through SSE registers, even when it might be more
    * efficient to stick with regular old x86.  No optimization or
    * other tricks - enough new ground to cover here just getting
    * things working.
    */
   while (j < vtx->attr_count) {
      struct tnl_clipspace_attr *a = &vtx->attr[j];
      struct x86_reg dest = x86_make_disp(vertexEAX, a->vertoffset);

      /* Now, load an XMM reg from src, perhaps transform, then save.
       * Could be shortcircuited in specific cases:
       */
      switch (a->format) {
      case EMIT_1F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 1, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F:
         /* Potentially the worst case - hardcode 2+1 copying:
          */
         if (0) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 3, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 2, temp);
            if (a->inputsize > 2) {
               emit_load(p, temp, 1, x86_make_disp(srcECX, 8), 1);
               emit_store(p, x86_make_disp(dest,8), 1, temp);
            }
            else {
               sse_movss(&p->func, x86_make_disp(dest,8), get_identity(p));
            }
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         break;
      case EMIT_4F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_XYW:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(X,Y,W,Z));
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;

      case EMIT_1UB_1F:
         /* Test for PAD3 + 1UB:
          */
         if (j > 0 &&
             a[-1].vertoffset + a[-1].vertattrsize <= a->vertoffset - 3) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
            sse_shufps(&p->func, temp, temp, SHUF(X,X,X,X));
            emit_pack_store_4ub(p, x86_make_disp(dest, -3), temp); /* overkill! */
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            _mesa_printf("Can't emit 1ub %x %x %d\n", a->vertoffset, a[-1].vertoffset, a[-1].vertattrsize );
            return GL_FALSE;
         }
         break;
      case EMIT_3UB_3F_RGB:
      case EMIT_3UB_3F_BGR:
         /* Test for 3UB + PAD1:
          */
         if (j == vtx->attr_count - 1 ||
             a[1].vertoffset >= a->vertoffset + 4) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         /* Test for 3UB + 1UB:
          */
         else if (j < vtx->attr_count - 1 &&
                  a[1].format == EMIT_1UB_1F &&
                  a[1].vertoffset == a->vertoffset + 3) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            update_src_ptr(p, srcECX, vtxESI, a);

            /* Make room for incoming value:
             */
            sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));

            get_src_ptr(p, srcECX, vtxESI, &a[1]);
            emit_load(p, temp, 1, x86_deref(srcECX), a[1].inputsize);
            update_src_ptr(p, srcECX, vtxESI, &a[1]);

            /* Rearrange and possibly do BGR conversion:
             */
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
            else
               sse_shufps(&p->func, temp, temp, SHUF(Y,Z,W,X));

            emit_pack_store_4ub(p, dest, temp);
            j++;                /* NOTE: two attrs consumed */
         }
         else {
            _mesa_printf("Can't emit 3ub\n");
         }
         return GL_FALSE;       /* add this later */

      case EMIT_4UB_4F_RGBA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_BGRA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ARGB:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ABGR:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4CHAN_4F_RGBA:
         switch (CHAN_TYPE) {
         case GL_UNSIGNED_BYTE:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_FLOAT:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 4, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_UNSIGNED_SHORT:
         default:
            _mesa_printf("unknown CHAN_TYPE %s\n", _mesa_lookup_enum_by_nr(CHAN_TYPE));
            return GL_FALSE;
         }
         break;
      default:
         _mesa_printf("unknown a[%d].format %d\n", j, a->format);
         return GL_FALSE;       /* catch any new opcodes */
      }

      /* Increment j by at least 1 - may have been incremented above also:
       */
      j++;
   }

   /* Next vertex:
    */
   x86_lea(&p->func, vertexEAX, x86_make_disp(vertexEAX, vtx->vertex_size));

   /* decr count, loop if not zero
    */
   x86_dec(&p->func, countEBP);
   x86_test(&p->func, countEBP, countEBP);
   x86_jcc(&p->func, cc_NZ, label);

   /* Exit mmx state, if the MMX pack path was used:
    */
   if (p->func.need_emms)
      mmx_emms(&p->func);

   /* Land forward jump here:
    */
   x86_fixup_fwd_jump(&p->func, fixup);

   /* Pop regs and return:
    */
   x86_pop(&p->func, x86_get_base_reg(vtxESI));
   x86_pop(&p->func, countEBP);
   x86_ret(&p->func);

   vtx->emit = (tnl_emit_func)x86_get_func(&p->func);
   return GL_TRUE;
}
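
/* The three x86_fn_arg() references above fix the generated function's
 * calling convention: arg 1 is the GLcontext, arg 2 the vertex count,
 * arg 3 the output buffer - i.e. the tnl_emit_func signature.
 */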

void _tnl_generate_sse_emit( GLcontext *ctx )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct x86_program p;

   if (!cpu_has_xmm) {
      vtx->codegen_emit = NULL;
      return;
   }

   _mesa_memset(&p, 0, sizeof(p));

   p.ctx = ctx;
   p.inputs_safe = 0;           /* for now */
   p.outputs_safe = 0;          /* for now */
   p.have_sse2 = cpu_has_xmm2;
   p.identity = x86_make_reg(file_XMM, 6);
   p.chan0 = x86_make_reg(file_XMM, 7);

   x86_init_func(&p.func);

   if (build_vertex_emit(&p)) {
      _tnl_register_fastpath( vtx, GL_TRUE );
   }
   else {
      /* Note the failure so that we don't keep trying to codegen an
       * impossible state:
       */
      _tnl_register_fastpath( vtx, GL_FALSE );
      x86_release_func(&p.func);
   }
}

#else

void _tnl_generate_sse_emit( GLcontext *ctx )
{
   /* Dummy version for when USE_SSE_ASM not defined */
}

#endif