/*
 * Copyright 2003 Tungsten Graphics, inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keithw@tungstengraphics.com>
 */
#include "main/glheader.h"
#include "main/context.h"
#include "main/colormac.h"
#include "main/simple_list.h"
#include "main/enums.h"

#include "t_context.h"
#include "t_vertex.h"
#if defined(USE_SSE_ASM)

#include "x86/rtasm/x86sse.h"
#include "x86/common_x86_asm.h"
/**
 * Number of bytes to allocate for generated SSE functions
 */
#define MAX_SSE_CODE_SIZE 1024


#define X    0
#define Y    1
#define Z    2
#define W    3


struct x86_program {
   struct x86_function func;

   struct gl_context *ctx;
   GLboolean inputs_safe;
   GLboolean outputs_safe;
   GLboolean have_sse2;

   struct x86_reg identity;
   struct x86_reg chan0;
};
static struct x86_reg get_identity( struct x86_program *p )
{
   return p->identity;
}
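/* The identity register is loaded (in build_vertex_emit below) from
 * vtx->identity and supplies default component values whenever a
 * source attribute has fewer components than its destination --
 * presumably (0, 0, 0, 1), matching the usual GL defaults.
 */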
static void emit_load4f_4( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}
static void emit_load4f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Have to jump through some hoops:
    *
    * c 0 0 0
    * c 0 0 1
    * 0 0 c 1
    * a b c 1
    */
   sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
   sse_shufps(&p->func, dest, dest, SHUF(Y,Z,X,W) );
   sse_movlps(&p->func, dest, arg0);
}
static void emit_load4f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Initialize from identity, then pull in low two words:
    */
   sse_movups(&p->func, dest, get_identity(p));
   sse_movlps(&p->func, dest, arg0);
}
static void emit_load4f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Pull in low word, then swizzle in identity */
   sse_movss(&p->func, dest, arg0);
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
}
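/* Taken together, the emit_load4f_* helpers load a 1..4 component
 * float source into an XMM register and fill any missing components
 * from the identity vector, so downstream code can always treat the
 * register as a complete 4-component value.
 */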
static void emit_load3f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Over-reads by 1 dword - potential SEGV if input is a vertex
    * array.
    */
   if (p->inputs_safe) {
      sse_movups(&p->func, dest, arg0);
   }
   else {
      /* c . . .
       * c c c c
       * a b c c
       */
      sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
      sse_shufps(&p->func, dest, dest, SHUF(X,X,X,X));
      sse_movlps(&p->func, dest, arg0);
   }
}
static void emit_load3f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_2(p, dest, arg0);
}
static void emit_load3f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Loading from memory erases the upper bits. */
   sse_movss(&p->func, dest, arg0);
}
static void emit_load2f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}
static void emit_load2f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Loading from memory erases the upper bits. */
   sse_movss(&p->func, dest, arg0);
}
static void emit_load1f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}
static void (*load[4][4])( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 ) = {
   { emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1 },
   { emit_load2f_1,
     emit_load2f_2,
     emit_load2f_2,
     emit_load2f_2 },
   { emit_load3f_1,
     emit_load3f_2,
     emit_load3f_3,
     emit_load3f_3 },
   { emit_load4f_1,
     emit_load4f_2,
     emit_load4f_3,
     emit_load4f_4 }
};
static void emit_load( struct x86_program *p,
                       struct x86_reg dest,
                       GLuint sz,
                       struct x86_reg src,
                       GLuint src_sz )
{
   load[sz-1][src_sz-1](p, dest, src);
}
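/* Example: emit_load(p, temp, 3, x86_deref(srcECX), 2) dispatches to
 * load[2][1] == emit_load3f_2, i.e. the two source components are
 * loaded into the low half of the register and the upper components
 * come from the identity vector.
 */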
static void emit_store4f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}
static void emit_store3f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   if (p->outputs_safe) {
      /* Emit the extra dword anyway.  This may hurt writecombining,
       * may cause other problems.
       */
      sse_movups(&p->func, dest, arg0);
   }
   else {
      /* Alternate strategy - emit two, shuffle, emit one.
       */
      sse_movlps(&p->func, dest, arg0);
      sse_shufps(&p->func, arg0, arg0, SHUF(Z,Z,Z,Z) ); /* NOTE! destructive */
      sse_movss(&p->func, x86_make_disp(dest,8), arg0);
   }
}
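/* NOTE: the fallback path above clobbers arg0 (the shuffle is
 * destructive).  That is acceptable here because every caller
 * reloads its temp register before the next store.
 */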
static void emit_store2f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}
static void emit_store1f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}
static void (*store[4])( struct x86_program *p,
                         struct x86_reg dest,
                         struct x86_reg arg0 ) =
{
   emit_store1f,
   emit_store2f,
   emit_store3f,
   emit_store4f
};
static void emit_store( struct x86_program *p,
                        struct x86_reg dest,
                        GLuint sz,
                        struct x86_reg temp )
{
   store[sz-1](p, dest, temp);
}
static void emit_pack_store_4ub( struct x86_program *p,
                                 struct x86_reg dest,
                                 struct x86_reg temp )
{
   /* Scale by 255.0:
    */
   sse_mulps(&p->func, temp, p->chan0);

   if (p->have_sse2) {
      sse2_cvtps2dq(&p->func, temp, temp);
      sse2_packssdw(&p->func, temp, temp);
      sse2_packuswb(&p->func, temp, temp);
      sse_movss(&p->func, dest, temp);
   }
   else {
      struct x86_reg mmx0 = x86_make_reg(file_MMX, 0);
      struct x86_reg mmx1 = x86_make_reg(file_MMX, 1);
      sse_cvtps2pi(&p->func, mmx0, temp);
      sse_movhlps(&p->func, temp, temp);
      sse_cvtps2pi(&p->func, mmx1, temp);
      mmx_packssdw(&p->func, mmx0, mmx1);
      mmx_packuswb(&p->func, mmx0, mmx0);
      mmx_movd(&p->func, dest, mmx0);
   }
}
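/* Both paths perform the same conversion: scale the four floats by
 * chan0 (loaded from vtx->chan_scale, typically 255.0), convert to
 * dwords, then saturate down to signed words and finally unsigned
 * bytes before storing the packed 4ub result.  The MMX fallback
 * flags need_emms, which build_vertex_emit pays off with an emms at
 * the end of the generated function.
 */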
static GLint get_offset( const void *a, const void *b )
{
   return (const char *)b - (const char *)a;
}
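/* Used to turn a struct member into an addressing displacement, e.g.
 * x86_make_disp(vtxREG, get_offset(vtx, &a->inputptr)) addresses
 * a->inputptr relative to whichever register holds the vtx base.
 */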
/* Not much happens here.  Eventually use this function to try and
 * avoid saving/reloading the source pointers each vertex (if some of
 * them can fit in registers).
 */
static void get_src_ptr( struct x86_program *p,
                         struct x86_reg srcREG,
                         struct x86_reg vtxREG,
                         struct tnl_clipspace_attr *a )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(p->ctx);
   struct x86_reg ptr_to_src = x86_make_disp(vtxREG, get_offset(vtx, &a->inputptr));

   /* Load current a[j].inputptr:
    */
   x86_mov(&p->func, srcREG, ptr_to_src);
}
static void update_src_ptr( struct x86_program *p,
                            struct x86_reg srcREG,
                            struct x86_reg vtxREG,
                            struct tnl_clipspace_attr *a )
{
   if (a->inputstride) {
      struct tnl_clipspace *vtx = GET_VERTEX_STATE(p->ctx);
      struct x86_reg ptr_to_src = x86_make_disp(vtxREG, get_offset(vtx, &a->inputptr));

      /* add a[j].inputstride (hardcoded value - could just as easily
       * pull the stride value from memory each time).
       */
      x86_lea(&p->func, srcREG, x86_make_disp(srcREG, a->inputstride));

      /* save new value of a[j].inputptr:
       */
      x86_mov(&p->func, ptr_to_src, srcREG);
   }
}
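/* When inputstride is zero the stored pointer is left untouched, so
 * the same source value is re-read for every vertex (in effect a
 * constant attribute).  x86_lea serves as an add that leaves the
 * flags undisturbed.
 */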
/* Lots of hardcoding
 *
 * EAX -- pointer to current output vertex
 * ECX -- pointer to current attribute
 */
static GLboolean build_vertex_emit( struct x86_program *p )
{
   struct gl_context *ctx = p->ctx;
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint j = 0;

   struct x86_reg vertexEAX = x86_make_reg(file_REG32, reg_AX);
   struct x86_reg srcECX = x86_make_reg(file_REG32, reg_CX);
   struct x86_reg countEBP = x86_make_reg(file_REG32, reg_BP);
   struct x86_reg vtxESI = x86_make_reg(file_REG32, reg_SI);
   struct x86_reg temp = x86_make_reg(file_XMM, 0);
   struct x86_reg vp0 = x86_make_reg(file_XMM, 1);
   struct x86_reg vp1 = x86_make_reg(file_XMM, 2);
   struct x86_reg temp2 = x86_make_reg(file_XMM, 3);
   GLubyte *fixup, *label;
   /* Save the callee-saved registers we are about to use:
    */
   x86_push(&p->func, countEBP);
   x86_push(&p->func, vtxESI);
   /* Get vertex count, compare to zero
    */
   x86_xor(&p->func, srcECX, srcECX);
   x86_mov(&p->func, countEBP, x86_fn_arg(&p->func, 2));
   x86_cmp(&p->func, countEBP, srcECX);
   fixup = x86_jcc_forward(&p->func, cc_E);
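   /* x86_fn_arg() addresses the generated function's own stack
    * arguments: arg 1 is the gl_context pointer, arg 2 the vertex
    * count and arg 3 the destination buffer, matching how the
    * emitted code is invoked through vtx->emit.
    */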
   /* Initialize destination register.
    */
   x86_mov(&p->func, vertexEAX, x86_fn_arg(&p->func, 3));
   /* Dereference ctx to get tnl, then vtx:
    */
   x86_mov(&p->func, vtxESI, x86_fn_arg(&p->func, 1));
   x86_mov(&p->func, vtxESI, x86_make_disp(vtxESI, get_offset(ctx, &ctx->swtnl_context)));
   vtxESI = x86_make_disp(vtxESI, get_offset(tnl, &tnl->clipspace));
   /* Possibly load vp0, vp1 for viewport calcs:
    */
   if (vtx->need_viewport) {
      sse_movups(&p->func, vp0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_scale[0])));
      sse_movups(&p->func, vp1, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_xlate[0])));
   }
   /* always load, needed or not:
    */
   sse_movups(&p->func, p->chan0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->chan_scale[0])));
   sse_movups(&p->func, p->identity, x86_make_disp(vtxESI, get_offset(vtx, &vtx->identity[0])));
   /* Note address for loop jump */
   label = x86_get_label(&p->func);
   /* Emit code for each of the attributes.  Currently routes
    * everything through SSE registers, even when it might be more
    * efficient to stick with regular old x86.  No optimization or
    * other tricks - enough new ground to cover here just getting
    * things working.
    */
   while (j < vtx->attr_count) {
      struct tnl_clipspace_attr *a = &vtx->attr[j];
      struct x86_reg dest = x86_make_disp(vertexEAX, a->vertoffset);
      /* Now, load an XMM reg from src, perhaps transform, then save.
       * Could be shortcircuited in specific cases:
       */
      switch (a->format) {
      case EMIT_1F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 1, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F:
         /* Potentially the worst case - hardcode 2+1 copying:
          */
         if (0) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 3, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 2, temp);

            if (a->inputsize > 2) {
               emit_load(p, temp, 1, x86_make_disp(srcECX, 8), 1);
               emit_store(p, x86_make_disp(dest,8), 1, temp);
            }
            else {
               sse_movss(&p->func, x86_make_disp(dest,8), get_identity(p));
            }
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         break;
      case EMIT_4F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_XYW:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(X,Y,W,Z));
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_1UB_1F:
         /* Test for PAD3 + 1UB:
          */
         if (j > 0 &&
             a[-1].vertoffset + a[-1].vertattrsize <= a->vertoffset - 3)
         {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
            sse_shufps(&p->func, temp, temp, SHUF(X,X,X,X));
            emit_pack_store_4ub(p, x86_make_disp(dest, -3), temp); /* overkill! */
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            printf("Can't emit 1ub %x %x %d\n", a->vertoffset, a[-1].vertoffset, a[-1].vertattrsize);
            return GL_FALSE;
         }
         break;
      case EMIT_3UB_3F_RGB:
      case EMIT_3UB_3F_BGR:
         /* Test for 3UB + PAD1:
          */
         if (j == vtx->attr_count - 1 ||
             a[1].vertoffset >= a->vertoffset + 4) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         /* Test for 3UB + 1UB:
          */
         else if (j < vtx->attr_count - 1 &&
                  a[1].format == EMIT_1UB_1F &&
                  a[1].vertoffset == a->vertoffset + 3) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            update_src_ptr(p, srcECX, vtxESI, a);

            /* Make room for incoming value:
             */
            sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));

            get_src_ptr(p, srcECX, vtxESI, &a[1]);
            emit_load(p, temp2, 1, x86_deref(srcECX), a[1].inputsize);
            sse_movss(&p->func, temp, temp2);
            update_src_ptr(p, srcECX, vtxESI, &a[1]);

            /* Rearrange and possibly do BGR conversion:
             */
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
            else
               sse_shufps(&p->func, temp, temp, SHUF(Y,Z,W,X));

            emit_pack_store_4ub(p, dest, temp);
            j++;                /* NOTE: two attrs consumed */
         }
         else {
            printf("Can't emit 3ub\n");
            return GL_FALSE;    /* add this later */
         }
         break;
      case EMIT_4UB_4F_RGBA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_BGRA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ARGB:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ABGR:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4CHAN_4F_RGBA:
         switch (CHAN_TYPE) {
         case GL_UNSIGNED_BYTE:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_FLOAT:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 4, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_UNSIGNED_SHORT:
         default:
            printf("unknown CHAN_TYPE %s\n", _mesa_lookup_enum_by_nr(CHAN_TYPE));
            return GL_FALSE;
         }
         break;

      default:
         printf("unknown a[%d].format %d\n", j, a->format);
         return GL_FALSE;       /* catch any new opcodes */
      }
      /* Increment j by at least 1 - may have been incremented above also:
       */
      j++;
   }

   /* Next vertex:
    */
   x86_lea(&p->func, vertexEAX, x86_make_disp(vertexEAX, vtx->vertex_size));
   /* decr count, loop if not zero
    */
   x86_dec(&p->func, countEBP);
   x86_test(&p->func, countEBP, countEBP);
   x86_jcc(&p->func, cc_NZ, label);
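   /* Everything from the loop label to this jcc handles one vertex;
    * the dec/jcc pair iterates until the count in EBP reaches zero.
    * (x86_dec already sets ZF, so the x86_test looks redundant, but
    * it is harmless.)
    */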
   /* Exit mmx state?
    */
   if (p->func.need_emms)
      mmx_emms(&p->func);
   /* Land forward jump here:
    */
   x86_fixup_fwd_jump(&p->func, fixup);
   /* Pop regs and return
    */
   x86_pop(&p->func, x86_get_base_reg(vtxESI));
   x86_pop(&p->func, countEBP);
   x86_ret(&p->func);
   vtx->emit = (tnl_emit_func)x86_get_func(&p->func);

   assert( (char *) p->func.csr - (char *) p->func.store <= MAX_SSE_CODE_SIZE );

   return GL_TRUE;
}
void _tnl_generate_sse_emit( struct gl_context *ctx )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct x86_program p;
   if (!cpu_has_xmm) {
      vtx->codegen_emit = NULL;
      return;
   }
   memset(&p, 0, sizeof(p));
   p.ctx = ctx;
   p.inputs_safe = 0;           /* for now */
   p.outputs_safe = 0;          /* for now */
   p.have_sse2 = cpu_has_xmm2;
   p.identity = x86_make_reg(file_XMM, 6);
   p.chan0 = x86_make_reg(file_XMM, 7);
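   /* XMM register budget for the generated code: XMM0-XMM3 serve as
    * scratch (temp, vp0, vp1, temp2 in build_vertex_emit), while
    * XMM6 and XMM7 stay pinned to the identity and channel-scale
    * constants for the lifetime of the function.
    */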
   if (!x86_init_func_size(&p.func, MAX_SSE_CODE_SIZE)) {
      vtx->emit = NULL;
      return;
   }
   if (build_vertex_emit(&p)) {
      _tnl_register_fastpath( vtx, GL_TRUE );
   }
   else {
      /* Note the failure so that we don't keep trying to codegen an
       * impossible state:
       */
      _tnl_register_fastpath( vtx, GL_FALSE );
      x86_release_func(&p.func);
   }
}
#else

void _tnl_generate_sse_emit( struct gl_context *ctx )
{
   /* Dummy version for when USE_SSE_ASM not defined */
   (void) ctx;
}

#endif