src/gallium/auxiliary/draw/draw_vf_sse.c
/*
 * Copyright 2003 Tungsten Graphics, inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keithw@tungstengraphics.com>
 */


#include "simple_list.h"

#include "pipe/p_compiler.h"

#include "draw_vf.h"


#if defined(USE_SSE_ASM)

#include "x86/rtasm/x86sse.h"
#include "x86/common_x86_asm.h"


#define X 0
#define Y 1
#define Z 2
#define W 3
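
/* SHUF() (defined in x86sse.h) packs four of these component indices
 * into the 8-bit shufps immediate: x | (y << 2) | (z << 4) | (w << 6).
 */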


struct x86_program {
   struct x86_function func;

   struct draw_vertex_fetch *vf;
   boolean inputs_safe;
   boolean outputs_safe;
   boolean have_sse2;

   struct x86_reg identity;
   struct x86_reg chan0;
};
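
/* identity is an XMM reg holding the constant (0, 0, 0, 1), used to
 * fill in missing components; chan0 is expected to hold the 255.0
 * scale factors used when packing floats to unsigned bytes.
 */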


static struct x86_reg get_identity( struct x86_program *p )
{
   return p->identity;
}

static void emit_load4f_4( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}

static void emit_load4f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Have to jump through some hoops:
    *
    * c 0 0 0
    * c 0 0 1
    * 0 0 c 1
    * a b c 1
    */
   sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
   sse_shufps(&p->func, dest, dest, SHUF(Y,Z,X,W) );
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load4f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Initialize from identity, then pull in low two words:
    */
   sse_movups(&p->func, dest, get_identity(p));
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load4f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Pull in low word, then swizzle in identity */
   sse_movss(&p->func, dest, arg0);
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
}



static void emit_load3f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Over-reads by 1 dword - potential SEGV if input is a vertex
    * array.
    */
   if (p->inputs_safe) {
      sse_movups(&p->func, dest, arg0);
   }
   else {
      /* c 0 0 0
       * c c c c
       * a b c c
       */
      sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
      sse_shufps(&p->func, dest, dest, SHUF(X,X,X,X));
      sse_movlps(&p->func, dest, arg0);
   }
}

static void emit_load3f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_2(p, dest, arg0);
}

static void emit_load3f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_1(p, dest, arg0);
}

static void emit_load2f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load2f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_1(p, dest, arg0);
}

static void emit_load1f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}
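
/* Dispatch table for the loaders above, indexed as
 * load[dest_size - 1][src_size - 1].  When the source has fewer
 * components than the destination needs, the remainder is filled from
 * the identity vector (0, 0, 0, 1).
 */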

static void (*load[4][4])( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 ) = {
   { emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1 },

   { emit_load2f_1,
     emit_load2f_2,
     emit_load2f_2,
     emit_load2f_2 },

   { emit_load3f_1,
     emit_load3f_2,
     emit_load3f_3,
     emit_load3f_3 },

   { emit_load4f_1,
     emit_load4f_2,
     emit_load4f_3,
     emit_load4f_4 }
};

static void emit_load( struct x86_program *p,
                       struct x86_reg dest,
                       unsigned sz,
                       struct x86_reg src,
                       unsigned src_sz)
{
   load[sz-1][src_sz-1](p, dest, src);
}

static void emit_store4f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}

static void emit_store3f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   if (p->outputs_safe) {
      /* Emit the extra dword anyway.  This may hurt write-combining
       * and may cause other problems.
       */
      sse_movups(&p->func, dest, arg0);
   }
   else {
      /* Alternate strategy - emit two, shuffle, emit one.
       */
      sse_movlps(&p->func, dest, arg0);
      sse_shufps(&p->func, arg0, arg0, SHUF(Z,Z,Z,Z) ); /* NOTE! destructive */
      sse_movss(&p->func, x86_make_disp(dest,8), arg0);
   }
}

static void emit_store2f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}

static void emit_store1f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}

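/* Store dispatch table, indexed by the number of floats to write:
 * store[sz - 1].
 */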
static void (*store[4])( struct x86_program *p,
                         struct x86_reg dest,
                         struct x86_reg arg0 ) =
{
   emit_store1f,
   emit_store2f,
   emit_store3f,
   emit_store4f
};

static void emit_store( struct x86_program *p,
                        struct x86_reg dest,
                        unsigned sz,
                        struct x86_reg temp )
{
   store[sz-1](p, dest, temp);
}

static void emit_pack_store_4ub( struct x86_program *p,
                                 struct x86_reg dest,
                                 struct x86_reg temp )
{
   /* Scale by 255.0
    */
   sse_mulps(&p->func, temp, p->chan0);

   if (p->have_sse2) {
      sse2_cvtps2dq(&p->func, temp, temp);
      sse2_packssdw(&p->func, temp, temp);
      sse2_packuswb(&p->func, temp, temp);
      sse_movss(&p->func, dest, temp);
   }
   else {
      struct x86_reg mmx0 = x86_make_reg(file_MMX, 0);
      struct x86_reg mmx1 = x86_make_reg(file_MMX, 1);
      sse_cvtps2pi(&p->func, mmx0, temp);
      sse_movhlps(&p->func, temp, temp);
      sse_cvtps2pi(&p->func, mmx1, temp);
      mmx_packssdw(&p->func, mmx0, mmx1);
      mmx_packuswb(&p->func, mmx0, mmx0);
      mmx_movd(&p->func, dest, mmx0);
   }
}
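
/* For reference, the sequence above computes roughly the following per
 * component (an illustrative sketch - the saturation comes from the
 * pack instructions rather than an explicit clamp):
 *
 *    int   i  = (int) (f * 255.0f);        -- cvtps2dq / cvtps2pi
 *    short s  = clamp(i, -32768, 32767);   -- packssdw
 *    ubyte ub = clamp(s, 0, 255);          -- packuswb
 *
 * so four floats in [0,1] end up as four bytes written with a single
 * 32-bit store.
 */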

static int get_offset( const void *a, const void *b )
{
   return (const char *)b - (const char *)a;
}
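
/* get_offset() works like offsetof(), but on live pointers: e.g.
 * get_offset(vf, &a->inputptr) is the byte offset of that attribute's
 * inputptr field from the start of the draw_vertex_fetch struct.
 */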

/* Not much happens here.  Eventually use this function to try to
 * avoid saving/reloading the source pointers for each vertex (if some
 * of them can fit in registers).
 */
static void get_src_ptr( struct x86_program *p,
                         struct x86_reg srcREG,
                         struct x86_reg vfREG,
                         struct draw_vf_attr *a )
{
   struct draw_vertex_fetch *vf = p->vf;
   struct x86_reg ptr_to_src = x86_make_disp(vfREG, get_offset(vf, &a->inputptr));

   /* Load current a[j].inputptr
    */
   x86_mov(&p->func, srcREG, ptr_to_src);
}

static void update_src_ptr( struct x86_program *p,
                            struct x86_reg srcREG,
                            struct x86_reg vfREG,
                            struct draw_vf_attr *a )
{
   if (a->inputstride) {
      struct draw_vertex_fetch *vf = p->vf;
      struct x86_reg ptr_to_src = x86_make_disp(vfREG, get_offset(vf, &a->inputptr));

      /* add a[j].inputstride (hardcoded value - could just as easily
       * pull the stride value from memory each time).
       */
      x86_lea(&p->func, srcREG, x86_make_disp(srcREG, a->inputstride));

      /* save new value of a[j].inputptr
       */
      x86_mov(&p->func, ptr_to_src, srcREG);
   }
}
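
/* Per attribute, the generated stream therefore amounts to:
 *
 *    src = a->inputptr;                   -- get_src_ptr()
 *    ... load, convert and store one attribute ...
 *    a->inputptr = src + a->inputstride;  -- update_src_ptr()
 *
 * where constant attributes presumably have inputstride == 0 and so
 * keep re-reading the same location.
 */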


/* Lots of hardcoding
 *
 * EAX -- pointer to current output vertex
 * ECX -- pointer to current attribute's input data
 * EBP -- remaining vertex count
 * ESI -- pointer to the draw_vertex_fetch struct
 */
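/* The generated function is installed as vf->emit and called as a
 * draw_vf_emit_func (see draw_vf.h); the x86_fn_arg() indices fetched
 * below correspond to arguments ( vf, count, dest_vertices ).
 */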
static boolean build_vertex_emit( struct x86_program *p )
{
   struct draw_vertex_fetch *vf = p->vf;
   unsigned j = 0;

   struct x86_reg vertexEAX = x86_make_reg(file_REG32, reg_AX);
   struct x86_reg srcECX = x86_make_reg(file_REG32, reg_CX);
   struct x86_reg countEBP = x86_make_reg(file_REG32, reg_BP);
   struct x86_reg vfESI = x86_make_reg(file_REG32, reg_SI);
   struct x86_reg temp = x86_make_reg(file_XMM, 0);
   uint8_t *fixup, *label;

   /* Save the callee-saved regs we clobber (EBP, ESI):
    */
   x86_push(&p->func, countEBP);
   x86_push(&p->func, vfESI);


   /* Get vertex count, compare to zero
    */
   x86_xor(&p->func, srcECX, srcECX);
   x86_mov(&p->func, countEBP, x86_fn_arg(&p->func, 2));
   x86_cmp(&p->func, countEBP, srcECX);
   fixup = x86_jcc_forward(&p->func, cc_E);

   /* Initialize destination register.
    */
   x86_mov(&p->func, vertexEAX, x86_fn_arg(&p->func, 3));

   /* Move argument 1 (vf) into a reg:
    */
   x86_mov(&p->func, vfESI, x86_fn_arg(&p->func, 1));


   /* always load, needed or not:
    */
   sse_movups(&p->func, p->identity, x86_make_disp(vfESI, get_offset(vf, &vf->identity[0])));
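
   /* NOTE: chan0 (XMM7) is used by emit_pack_store_4ub() as the 255.0
    * scale vector but is not loaded here; it is assumed to be set up
    * before any of the 4UB paths run.
    */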

   /* Note address for loop jump */
   label = x86_get_label(&p->func);

   /* Emit code for each of the attributes.  Currently routes
    * everything through SSE registers, even when it might be more
    * efficient to stick with regular old x86.  No optimization or
    * other tricks - enough new ground to cover here just getting
    * things working.
    */
   while (j < vf->attr_count) {
      struct draw_vf_attr *a = &vf->attr[j];
      struct x86_reg dest = x86_make_disp(vertexEAX, a->vertoffset);

      /* Now, load an XMM reg from src, perhaps transform, then save.
       * Could be short-circuited in specific cases:
       */
      switch (a->format) {
      case DRAW_EMIT_1F:
      case DRAW_EMIT_1F_CONST:
         get_src_ptr(p, srcECX, vfESI, a);
         emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 1, temp);
         update_src_ptr(p, srcECX, vfESI, a);
         break;
      case DRAW_EMIT_2F:
      case DRAW_EMIT_2F_CONST:
         get_src_ptr(p, srcECX, vfESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vfESI, a);
         break;
      case DRAW_EMIT_3F:
      case DRAW_EMIT_3F_CONST:
         /* Potentially the worst case - hardcode 2+1 copying:
          */
         if (0) {
            get_src_ptr(p, srcECX, vfESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 3, temp);
            update_src_ptr(p, srcECX, vfESI, a);
         }
         else {
            get_src_ptr(p, srcECX, vfESI, a);
            emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 2, temp);
            if (a->inputsize > 2) {
               emit_load(p, temp, 1, x86_make_disp(srcECX, 8), 1);
               emit_store(p, x86_make_disp(dest,8), 1, temp);
            }
            else {
               sse_movss(&p->func, x86_make_disp(dest,8), get_identity(p));
            }
            update_src_ptr(p, srcECX, vfESI, a);
         }
         break;
      case DRAW_EMIT_4F:
      case DRAW_EMIT_4F_CONST:
         get_src_ptr(p, srcECX, vfESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vfESI, a);
         break;
      case DRAW_EMIT_3F_XYW:
         get_src_ptr(p, srcECX, vfESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(X,Y,W,Z));
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vfESI, a);
         break;

      case DRAW_EMIT_1UB_1F:
         /* Test for PAD3 + 1UB:
          */
         if (j > 0 &&
             a[-1].vertoffset + a[-1].vertattrsize <= a->vertoffset - 3)
         {
            get_src_ptr(p, srcECX, vfESI, a);
            emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
            sse_shufps(&p->func, temp, temp, SHUF(X,X,X,X));
            emit_pack_store_4ub(p, x86_make_disp(dest, -3), temp); /* overkill! */
            update_src_ptr(p, srcECX, vfESI, a);
         }
         else {
            debug_printf("Can't emit 1ub %x %x %d\n",
                         a->vertoffset, a[-1].vertoffset, a[-1].vertattrsize );
            return FALSE;
         }
         break;
      case DRAW_EMIT_3UB_3F_RGB:
      case DRAW_EMIT_3UB_3F_BGR:
         /* Test for 3UB + PAD1:
          */
         if (j == vf->attr_count - 1 ||
             a[1].vertoffset >= a->vertoffset + 4) {
            get_src_ptr(p, srcECX, vfESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            if (a->format == DRAW_EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vfESI, a);
         }
         /* Test for 3UB + 1UB:
          */
         else if (j < vf->attr_count - 1 &&
                  a[1].format == DRAW_EMIT_1UB_1F &&
                  a[1].vertoffset == a->vertoffset + 3) {
            get_src_ptr(p, srcECX, vfESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            update_src_ptr(p, srcECX, vfESI, a);

            /* Make room for incoming value:
             */
            sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));

            get_src_ptr(p, srcECX, vfESI, &a[1]);
            emit_load(p, temp, 1, x86_deref(srcECX), a[1].inputsize);
            update_src_ptr(p, srcECX, vfESI, &a[1]);

            /* Rearrange and possibly do BGR conversion:
             */
            if (a->format == DRAW_EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
            else
               sse_shufps(&p->func, temp, temp, SHUF(Y,Z,W,X));

            emit_pack_store_4ub(p, dest, temp);
            j++;   /* NOTE: two attrs consumed */
         }
         else {
            debug_printf("Can't emit 3ub\n");
         }
         /* NOTE: the whole 3UB path above is currently disabled - even
          * the successful branches fall through to this failure return
          * ("add this later").
          */
         return FALSE;
         break;

      case DRAW_EMIT_4UB_4F_RGBA:
         get_src_ptr(p, srcECX, vfESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vfESI, a);
         break;
      case DRAW_EMIT_4UB_4F_BGRA:
         get_src_ptr(p, srcECX, vfESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vfESI, a);
         break;
      case DRAW_EMIT_4UB_4F_ARGB:
         get_src_ptr(p, srcECX, vfESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vfESI, a);
         break;
      case DRAW_EMIT_4UB_4F_ABGR:
         get_src_ptr(p, srcECX, vfESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vfESI, a);
         break;
      default:
         debug_printf("unknown a[%d].format %d\n", j, a->format);
         return FALSE;   /* catch any new opcodes */
      }

      /* Increment j by at least 1 - may have been incremented above also:
       */
      j++;
   }

   /* Next vertex:
    */
   x86_lea(&p->func, vertexEAX, x86_make_disp(vertexEAX, vf->vertex_stride));

   /* decr count, loop if not zero
    */
   x86_dec(&p->func, countEBP);
   x86_test(&p->func, countEBP, countEBP);
   x86_jcc(&p->func, cc_NZ, label);

   /* Exit mmx state?
    */
   if (p->func.need_emms)
      mmx_emms(&p->func);

   /* Land forward jump here:
    */
   x86_fixup_fwd_jump(&p->func, fixup);

   /* Pop regs and return
    */
   x86_pop(&p->func, x86_get_base_reg(vfESI));
   x86_pop(&p->func, countEBP);
   x86_ret(&p->func);

   vf->emit = (draw_vf_emit_func)x86_get_func(&p->func);
   return TRUE;
}



void draw_vf_generate_sse_emit( struct draw_vertex_fetch *vf )
{
   struct x86_program p;

   if (!cpu_has_xmm) {
      vf->codegen_emit = NULL;
      return;
   }

   memset(&p, 0, sizeof(p));

   p.vf = vf;
   p.inputs_safe = 0;    /* for now */
   p.outputs_safe = 1;   /* for now */
   p.have_sse2 = cpu_has_xmm2;
   p.identity = x86_make_reg(file_XMM, 6);
   p.chan0 = x86_make_reg(file_XMM, 7);

   x86_init_func(&p.func);

   if (build_vertex_emit(&p)) {
      draw_vf_register_fastpath( vf, TRUE );
   }
   else {
      /* Note the failure so that we don't keep trying to codegen an
       * impossible state:
       */
      draw_vf_register_fastpath( vf, FALSE );
      x86_release_func(&p.func);
   }
}

#else

void draw_vf_generate_sse_emit( struct draw_vertex_fetch *vf )
{
   /* Dummy version for when USE_SSE_ASM is not defined */
}

#endif