/*
 * Copyright 2003 Tungsten Graphics, inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keithw@tungstengraphics.com>
 */
#include "glheader.h"
#include "context.h"
#include "t_context.h"
#include "t_vertex.h"
#include "simple_list.h"
#include "enums.h"

#if defined(USE_SSE_ASM)

#include "x86/rtasm/x86sse.h"
#include "x86/common_x86_asm.h"


/**
 * Number of bytes to allocate for generated SSE functions
 */
#define MAX_SSE_CODE_SIZE 1024
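

/* shufps immediates are built from these 2-bit lane selectors; the
 * SHUF() macro that packs four of them into an 8-bit immediate is
 * assumed to come from the x86sse.h rtasm header:
 */
#define X    0
#define Y    1
#define Z    2
#define W    3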


struct x86_program {
   struct x86_function func;     /* buffer that receives the generated code */

   GLcontext *ctx;
   GLboolean inputs_safe;        /* may the loads over-read the input arrays? */
   GLboolean outputs_safe;       /* may the stores over-write past a vertex? */
   GLboolean have_sse2;

   struct x86_reg identity;      /* xmm reg caching vtx->identity (0,0,0,1) */
   struct x86_reg chan0;         /* xmm reg caching vtx->chan_scale */
};


static struct x86_reg get_identity( struct x86_program *p )
{
   return p->identity;
}

static void emit_load4f_4( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}

static void emit_load4f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Have to jump through some hoops:
    *
    * c 0 0 0
    * c 0 0 1
    * 0 0 c 1
    * a b c 1
    */
   sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
   sse_shufps(&p->func, dest, dest, SHUF(Y,Z,X,W) );
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load4f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Initialize from identity, then pull in low two words:
    */
   sse_movups(&p->func, dest, get_identity(p));
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load4f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Pull in low word, then swizzle in identity */
   sse_movss(&p->func, dest, arg0);
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
}
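
/* Illustrative register contents for the variants above, assuming the
 * identity vector is {0,0,0,1}:
 *
 *    emit_load4f_1: dest = {a, 0, 0, 1}
 *    emit_load4f_2: dest = {a, b, 0, 1}
 *    emit_load4f_3: dest = {a, b, c, 1}
 *    emit_load4f_4: dest = {a, b, c, d}
 */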


static void emit_load3f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Over-reads by 1 dword - potential SEGV if input is a vertex
    * array.
    */
   if (p->inputs_safe) {
      sse_movups(&p->func, dest, arg0);
   }
   else {
      /* c 0 0 0
       * c c c c
       * a b c c
       */
      sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
      sse_shufps(&p->func, dest, dest, SHUF(X,X,X,X));
      sse_movlps(&p->func, dest, arg0);
   }
}

static void emit_load3f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_2(p, dest, arg0);
}

static void emit_load3f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_1(p, dest, arg0);
}

static void emit_load2f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load2f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_1(p, dest, arg0);
}

static void emit_load1f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}

static void (*load[4][4])( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 ) = {
   { emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1 },
   { emit_load2f_1,
     emit_load2f_2,
     emit_load2f_2,
     emit_load2f_2 },
   { emit_load3f_1,
     emit_load3f_2,
     emit_load3f_3,
     emit_load3f_3 },
   { emit_load4f_1,
     emit_load4f_2,
     emit_load4f_3,
     emit_load4f_4 }
};

static void emit_load( struct x86_program *p,
                       struct x86_reg dest,
                       GLuint sz,
                       struct x86_reg src,
                       GLuint src_sz )
{
   load[sz-1][src_sz-1](p, dest, src);
}
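
/* Dispatch note (illustrative): sizes are 1-based, so
 *    emit_load(p, temp, 4, x86_deref(srcECX), 2);
 * routes to emit_load4f_2 - build a 4-component register from a
 * 2-component source, filling z/w from the identity vector.
 */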

static void emit_store4f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}

static void emit_store3f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   if (p->outputs_safe) {
      /* Emit the extra dword anyway.  This may hurt writecombining,
       * may cause other problems.
       */
      sse_movups(&p->func, dest, arg0);
   }
   else {
      /* Alternate strategy - emit two, shuffle, emit one.
       */
      sse_movlps(&p->func, dest, arg0);
      sse_shufps(&p->func, arg0, arg0, SHUF(Z,Z,Z,Z) ); /* NOTE! destructive */
      sse_movss(&p->func, x86_make_disp(dest,8), arg0);
   }
}
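
/* Note: the unsafe path above shuffles arg0 in place, so the source
 * register is clobbered after emit_store3f().  The main loop below
 * reloads temp for every attribute, so this is harmless there.
 */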

static void emit_store2f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}

static void emit_store1f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}


static void (*store[4])( struct x86_program *p,
                         struct x86_reg dest,
                         struct x86_reg arg0 ) =
{
   emit_store1f,
   emit_store2f,
   emit_store3f,
   emit_store4f
};

static void emit_store( struct x86_program *p,
                        struct x86_reg dest,
                        GLuint sz,
                        struct x86_reg temp )
{
   store[sz-1](p, dest, temp);
}
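
/* Dispatch note (illustrative): emit_store(p, dest, 3, temp) routes to
 * emit_store3f, whose strategy depends on p->outputs_safe; sizes 1, 2
 * and 4 map directly onto movss / movlps / movups stores.
 */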

static void emit_pack_store_4ub( struct x86_program *p,
                                 struct x86_reg dest,
                                 struct x86_reg temp )
{
   /* Scale by the channel scale (255.0 for ubyte channels):
    */
   sse_mulps(&p->func, temp, p->chan0);

   if (p->have_sse2) {
      sse2_cvtps2dq(&p->func, temp, temp);
      sse2_packssdw(&p->func, temp, temp);
      sse2_packuswb(&p->func, temp, temp);
      sse_movss(&p->func, dest, temp);
   }
   else {
      struct x86_reg mmx0 = x86_make_reg(file_MMX, 0);
      struct x86_reg mmx1 = x86_make_reg(file_MMX, 1);
      sse_cvtps2pi(&p->func, mmx0, temp);
      sse_movhlps(&p->func, temp, temp);
      sse_cvtps2pi(&p->func, mmx1, temp);
      mmx_packssdw(&p->func, mmx0, mmx1);
      mmx_packuswb(&p->func, mmx0, mmx0);
      mmx_movd(&p->func, dest, mmx0);
   }
}
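
/* For reference, a scalar sketch of what the packing above computes
 * (hypothetical helper, disabled; the real code gets the clamping for
 * free from the saturating packssdw/packuswb instructions):
 */
#if 0
static void pack_4f_to_4ub_reference( const GLfloat in[4], GLubyte out[4] )
{
   GLuint i;
   for (i = 0; i < 4; i++) {
      GLfloat v = in[i] * 255.0F;   /* chan0 holds the scale, 255 for ubytes */
      out[i] = (GLubyte) (v <= 0.0F ? 0 : v >= 255.0F ? 255 : v);
   }
}
#endif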

static GLint get_offset( const void *a, const void *b )
{
   return (const char *)b - (const char *)a;
}
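
/* Usage (illustrative): get_offset(vtx, &a->inputptr) yields the byte
 * displacement of the inputptr field within the clipspace struct, ready
 * to be combined with a base register via x86_make_disp().
 */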


/* Not much happens here.  Eventually use this function to try and
 * avoid saving/reloading the source pointers each vertex (if some of
 * them can fit in registers).
 */
static void get_src_ptr( struct x86_program *p,
                         struct x86_reg srcREG,
                         struct x86_reg vtxREG,
                         struct tnl_clipspace_attr *a )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(p->ctx);
   struct x86_reg ptr_to_src = x86_make_disp(vtxREG, get_offset(vtx, &a->inputptr));

   /* Load current a[j].inputptr:
    */
   x86_mov(&p->func, srcREG, ptr_to_src);
}

static void update_src_ptr( struct x86_program *p,
                            struct x86_reg srcREG,
                            struct x86_reg vtxREG,
                            struct tnl_clipspace_attr *a )
{
   if (a->inputstride) {
      struct tnl_clipspace *vtx = GET_VERTEX_STATE(p->ctx);
      struct x86_reg ptr_to_src = x86_make_disp(vtxREG, get_offset(vtx, &a->inputptr));

      /* add a[j].inputstride (hardcoded value - could just as easily
       * pull the stride value from memory each time).
       */
      x86_lea(&p->func, srcREG, x86_make_disp(srcREG, a->inputstride));

      /* save new value of a[j].inputptr:
       */
      x86_mov(&p->func, ptr_to_src, srcREG);
   }
}


/* Lots of hardcoding
 *
 * EAX -- pointer to current output vertex
 * ECX -- pointer to current attribute
 */
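
/* Roughly, the generated code behaves like this C sketch (hypothetical
 * illustration only, assuming the usual tnl_emit_func signature of
 * (ctx, count, dest)):
 */
#if 0
static void generated_code_equivalent( GLcontext *ctx, GLuint count,
                                       GLubyte *dest )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint i, j;

   for (i = 0; i < count; i++, dest += vtx->vertex_size) {
      for (j = 0; j < vtx->attr_count; j++) {
         struct tnl_clipspace_attr *a = &vtx->attr[j];
         /* Load a->inputsize floats from a->inputptr, convert and
          * swizzle according to a->format, apply the viewport
          * transform where requested, then store the result at
          * dest + a->vertoffset.  Finally:
          */
         a->inputptr += a->inputstride;
      }
   }
}
#endif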
static GLboolean build_vertex_emit( struct x86_program *p )
{
   GLcontext *ctx = p->ctx;
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint j = 0;

   struct x86_reg vertexEAX = x86_make_reg(file_REG32, reg_AX);
   struct x86_reg srcECX = x86_make_reg(file_REG32, reg_CX);
   struct x86_reg countEBP = x86_make_reg(file_REG32, reg_BP);
   struct x86_reg vtxESI = x86_make_reg(file_REG32, reg_SI);
   struct x86_reg temp = x86_make_reg(file_XMM, 0);
   struct x86_reg vp0 = x86_make_reg(file_XMM, 1);
   struct x86_reg vp1 = x86_make_reg(file_XMM, 2);
   GLubyte *fixup, *label;

   /* Save the regs we'll use:
    */
   x86_push(&p->func, countEBP);
   x86_push(&p->func, vtxESI);

   /* Get vertex count, compare to zero
    */
   x86_xor(&p->func, srcECX, srcECX);
   x86_mov(&p->func, countEBP, x86_fn_arg(&p->func, 2));
   x86_cmp(&p->func, countEBP, srcECX);
   fixup = x86_jcc_forward(&p->func, cc_E);

   /* Initialize destination register.
    */
   x86_mov(&p->func, vertexEAX, x86_fn_arg(&p->func, 3));

   /* Dereference ctx to get tnl, then vtx:
    */
   x86_mov(&p->func, vtxESI, x86_fn_arg(&p->func, 1));
   x86_mov(&p->func, vtxESI, x86_make_disp(vtxESI, get_offset(ctx, &ctx->swtnl_context)));
   vtxESI = x86_make_disp(vtxESI, get_offset(tnl, &tnl->clipspace));

   /* Possibly load vp0, vp1 for viewport calcs:
    */
   if (vtx->need_viewport) {
      sse_movups(&p->func, vp0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_scale[0])));
      sse_movups(&p->func, vp1, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_xlate[0])));
   }

   /* always load, needed or not:
    */
   sse_movups(&p->func, p->chan0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->chan_scale[0])));
   sse_movups(&p->func, p->identity, x86_make_disp(vtxESI, get_offset(vtx, &vtx->identity[0])));

   /* Note address for loop jump */
   label = x86_get_label(&p->func);

   /* Emit code for each of the attributes.  Currently routes
    * everything through SSE registers, even when it might be more
    * efficient to stick with regular old x86.  No optimization or
    * other tricks - enough new ground to cover here just getting
    * things working.
    */
   while (j < vtx->attr_count) {
      struct tnl_clipspace_attr *a = &vtx->attr[j];
      struct x86_reg dest = x86_make_disp(vertexEAX, a->vertoffset);

      /* Now, load an XMM reg from src, perhaps transform, then save.
       * Could be shortcircuited in specific cases:
       */
      switch (a->format) {
      case EMIT_1F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 1, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F:
         /* Potentially the worst case - hardcode 2+1 copying:
          */
         if (0) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 3, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 2, temp);
            if (a->inputsize > 2) {
               emit_load(p, temp, 1, x86_make_disp(srcECX, 8), 1);
               emit_store(p, x86_make_disp(dest,8), 1, temp);
            }
            else {
               sse_movss(&p->func, x86_make_disp(dest,8), get_identity(p));
            }
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         break;
      case EMIT_4F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_XYW:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(X,Y,W,Z));
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_1UB_1F:
         /* Test for PAD3 + 1UB:
          */
         if (j > 0 &&
             a[-1].vertoffset + a[-1].vertattrsize <= a->vertoffset - 3) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
            sse_shufps(&p->func, temp, temp, SHUF(X,X,X,X));
            emit_pack_store_4ub(p, x86_make_disp(dest, -3), temp); /* overkill! */
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            _mesa_printf("Can't emit 1ub %x %x %d\n", a->vertoffset, a[-1].vertoffset, a[-1].vertattrsize);
            return GL_FALSE;
         }
         break;
      case EMIT_3UB_3F_RGB:
      case EMIT_3UB_3F_BGR:
         /* Test for 3UB + PAD1:
          */
         if (j == vtx->attr_count - 1 ||
             a[1].vertoffset >= a->vertoffset + 4) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         /* Test for 3UB + 1UB:
          */
         else if (j < vtx->attr_count - 1 &&
                  a[1].format == EMIT_1UB_1F &&
                  a[1].vertoffset == a->vertoffset + 3) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            update_src_ptr(p, srcECX, vtxESI, a);

            /* Make room for incoming value:
             */
            sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));

            get_src_ptr(p, srcECX, vtxESI, &a[1]);
            emit_load(p, temp, 1, x86_deref(srcECX), a[1].inputsize);
            update_src_ptr(p, srcECX, vtxESI, &a[1]);

            /* Rearrange and possibly do BGR conversion:
             */
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
            else
               sse_shufps(&p->func, temp, temp, SHUF(Y,Z,W,X));

            emit_pack_store_4ub(p, dest, temp);
            j++;               /* NOTE: two attrs consumed */
         }
         else {
            _mesa_printf("Can't emit 3ub\n");
            return GL_FALSE;   /* add this later */
         }
         break;
      case EMIT_4UB_4F_RGBA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_BGRA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ARGB:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ABGR:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4CHAN_4F_RGBA:
         switch (CHAN_TYPE) {
         case GL_UNSIGNED_BYTE:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_FLOAT:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 4, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_UNSIGNED_SHORT:
         default:
            _mesa_printf("unknown CHAN_TYPE %s\n", _mesa_lookup_enum_by_nr(CHAN_TYPE));
            return GL_FALSE;
         }
         break;
      default:
         _mesa_printf("unknown a[%d].format %d\n", j, a->format);
         return GL_FALSE;   /* catch any new opcodes */
      }

      /* Increment j by at least 1 - may have been incremented above also:
       */
      j++;
   }

   /* Next vertex:
    */
   x86_lea(&p->func, vertexEAX, x86_make_disp(vertexEAX, vtx->vertex_size));

   /* decr count, loop if not zero
    */
   x86_dec(&p->func, countEBP);
   x86_test(&p->func, countEBP, countEBP);
   x86_jcc(&p->func, cc_NZ, label);

   /* Exit mmx state, if the non-SSE2 packing path used it:
    */
   if (p->func.need_emms)
      mmx_emms(&p->func);

   /* Land forward jump here:
    */
   x86_fixup_fwd_jump(&p->func, fixup);

   /* Pop regs and return:
    */
   x86_pop(&p->func, x86_get_base_reg(vtxESI));
   x86_pop(&p->func, countEBP);
   x86_ret(&p->func);

   vtx->emit = (tnl_emit_func)x86_get_func(&p->func);

   assert( (char *) p->func.csr - (char *) p->func.store <= MAX_SSE_CODE_SIZE );
   return GL_TRUE;
}


void _tnl_generate_sse_emit( GLcontext *ctx )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct x86_program p;

   if (!cpu_has_xmm) {
      vtx->codegen_emit = NULL;
      return;
   }

   _mesa_memset(&p, 0, sizeof(p));

   p.ctx = ctx;
   p.inputs_safe = 0;      /* for now */
   p.outputs_safe = 0;     /* for now */
   p.have_sse2 = cpu_has_xmm2;
   p.identity = x86_make_reg(file_XMM, 6);
   p.chan0 = x86_make_reg(file_XMM, 7);

   if (!x86_init_func(&p.func, MAX_SSE_CODE_SIZE)) {
      vtx->emit = NULL;
      return;
   }

   if (build_vertex_emit(&p)) {
      _tnl_register_fastpath( vtx, GL_TRUE );
   }
   else {
      /* Note the failure so that we don't keep trying to codegen an
       * impossible state:
       */
      _tnl_register_fastpath( vtx, GL_FALSE );
      x86_release_func(&p.func);
   }
}

#else

void _tnl_generate_sse_emit( GLcontext *ctx )
{
   /* Dummy version for when USE_SSE_ASM not defined */
   (void) ctx;
}

#endif