/*
 * Copyright 2003 Tungsten Graphics, inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keithw@tungstengraphics.com>
 */

#include "main/glheader.h"
#include "main/context.h"
#include "main/colormac.h"
#include "main/simple_list.h"
#include "main/enums.h"
#include "t_context.h"
#include "t_vertex.h"

#if defined(USE_SSE_ASM)

#include "x86/rtasm/x86sse.h"
#include "x86/common_x86_asm.h"


/**
 * Number of bytes to allocate for generated SSE functions.  An assert
 * at the end of build_vertex_emit() verifies the emitted code fits.
 */
#define MAX_SSE_CODE_SIZE 1024

#define X    0
#define Y    1
#define Z    2
#define W    3

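/* SHUF() comes from x86/rtasm/x86sse.h.  As a sketch of the assumed
 * definition, it packs four 2-bit lane selectors into the 8-bit
 * immediate consumed by shufps:
 *
 *    #define SHUF(x,y,z,w)  (((x)<<0) | ((y)<<2) | ((z)<<4) | ((w)<<6))
 *
 * so SHUF(X,Y,Z,W) is the identity shuffle and SHUF(Z,Y,X,W) swaps the
 * first and third components (e.g. RGBA <-> BGRA).
 */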

struct x86_program {
   struct x86_function func;

   GLcontext *ctx;
   GLboolean inputs_safe;
   GLboolean outputs_safe;
   GLboolean have_sse2;

   struct x86_reg identity;   /* xmm reg preloaded with {0,0,0,1} */
   struct x86_reg chan0;      /* xmm reg preloaded with vtx->chan_scale */
};


static struct x86_reg get_identity( struct x86_program *p )
{
   return p->identity;
}

static void emit_load4f_4( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}

static void emit_load4f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Have to jump through some hoops.  Contents of dest after each
    * instruction below:
    *
    *   c 0 0 0
    *   c 0 0 1
    *   0 0 c 1
    *   a b c 1
    */
   sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
   sse_shufps(&p->func, dest, dest, SHUF(Y,Z,X,W) );
   sse_movlps(&p->func, dest, arg0);
}
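/* A note on the shufps idiom above (general SSE semantics, not
 * anything specific to this file): shufps dst,src,imm fills the two
 * low lanes of the result from dst and the two high lanes from src.
 * Shuffling dest against the identity vector with SHUF(X,Y,Z,W)
 * therefore keeps dest.xy and pulls in identity.zw = {0,1}, which is
 * how the loaders below synthesize missing Z and W components.
 */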

static void emit_load4f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Initialize from identity, then pull in the low two floats:
    */
   sse_movups(&p->func, dest, get_identity(p));
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load4f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Pull in the low float, then swizzle in identity for YZW:
    */
   sse_movss(&p->func, dest, arg0);
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
}


static void emit_load3f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Over-reads by 1 dword - potential SEGV if the input is the tail
    * of a vertex array.
    */
   if (p->inputs_safe) {
      sse_movups(&p->func, dest, arg0);
   }
   else {
      /* Contents of dest after each instruction:
       *
       *   c 0 0 0
       *   c c c c
       *   a b c c
       */
      sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
      sse_shufps(&p->func, dest, dest, SHUF(X,X,X,X));
      sse_movlps(&p->func, dest, arg0);
   }
}

static void emit_load3f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_2(p, dest, arg0);
}

static void emit_load3f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* movss from memory zeroes the three upper components, so the
    * unused lanes hold defined values.
    */
   sse_movss(&p->func, dest, arg0);
}

static void emit_load2f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load2f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* movss from memory zeroes the three upper components.
    */
   sse_movss(&p->func, dest, arg0);
}

static void emit_load1f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}

static void (*load[4][4])( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 ) = {
   { emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1 },

   { emit_load2f_1,
     emit_load2f_2,
     emit_load2f_2,
     emit_load2f_2 },

   { emit_load3f_1,
     emit_load3f_2,
     emit_load3f_3,
     emit_load3f_3 },

   { emit_load4f_1,
     emit_load4f_2,
     emit_load4f_3,
     emit_load4f_4 }
};

static void emit_load( struct x86_program *p,
                       struct x86_reg dest,
                       GLuint sz,
                       struct x86_reg src,
                       GLuint src_sz)
{
   load[sz-1][src_sz-1](p, dest, src);
}
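/* Rows of the table index the destination size, columns the source
 * size.  For example, emit_load(p, temp, 4, src, 2) dispatches to
 * emit_load4f_2: load the two floats that exist and fill Z,W from the
 * identity vector.  Sources wider than the destination simply use the
 * largest loader in that row.
 */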

static void emit_store4f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}

static void emit_store3f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   if (p->outputs_safe) {
      /* Emit the extra dword anyway.  This may hurt write-combining
       * and may cause other problems.
       */
      sse_movups(&p->func, dest, arg0);
   }
   else {
      /* Alternate strategy - emit two, shuffle, emit one.
       */
      sse_movlps(&p->func, dest, arg0);
      sse_shufps(&p->func, arg0, arg0, SHUF(Z,Z,Z,Z) ); /* NOTE!  destructive */
      sse_movss(&p->func, x86_make_disp(dest,8), arg0);
   }
}

static void emit_store2f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}

static void emit_store1f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}


static void (*store[4])( struct x86_program *p,
                         struct x86_reg dest,
                         struct x86_reg arg0 ) =
{
   emit_store1f,
   emit_store2f,
   emit_store3f,
   emit_store4f
};

static void emit_store( struct x86_program *p,
                        struct x86_reg dest,
                        GLuint sz,
                        struct x86_reg temp )
{
   store[sz-1](p, dest, temp);
}

static void emit_pack_store_4ub( struct x86_program *p,
                                 struct x86_reg dest,
                                 struct x86_reg temp )
{
   /* Scale by 255.0 (chan0 is preloaded with vtx->chan_scale):
    */
   sse_mulps(&p->func, temp, p->chan0);

   if (p->have_sse2) {
      sse2_cvtps2dq(&p->func, temp, temp);
      sse2_packssdw(&p->func, temp, temp);
      sse2_packuswb(&p->func, temp, temp);
      sse_movss(&p->func, dest, temp);
   }
   else {
      struct x86_reg mmx0 = x86_make_reg(file_MMX, 0);
      struct x86_reg mmx1 = x86_make_reg(file_MMX, 1);
      sse_cvtps2pi(&p->func, mmx0, temp);
      sse_movhlps(&p->func, temp, temp);
      sse_cvtps2pi(&p->func, mmx1, temp);
      mmx_packssdw(&p->func, mmx0, mmx1);
      mmx_packuswb(&p->func, mmx0, mmx0);
      mmx_movd(&p->func, dest, mmx0);
   }
}
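/* A rough scalar equivalent of the sequence above (illustrative
 * sketch only): the converts round to the nearest integer and the
 * pack instructions saturate, so out-of-range inputs clamp to 0/255.
 *
 *    for (i = 0; i < 4; i++) {
 *       GLint k = IROUND(in[i] * 255.0F);     |* round to nearest *|
 *       out[i] = (GLubyte) CLAMP(k, 0, 255);  |* packs saturate   *|
 *    }
 */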

static GLint get_offset( const void *a, const void *b )
{
   return (const char *)b - (const char *)a;
}
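/* E.g. get_offset(vtx, &a->inputptr) is the byte offset of this
 * attribute's inputptr field from the start of the tnl_clipspace
 * struct, usable as a displacement off a register pointing at vtx.
 */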

/* Not much happens here.  Eventually use this function to try and
 * avoid saving/reloading the source pointers for each vertex (if some
 * of them can fit in registers).
 */
static void get_src_ptr( struct x86_program *p,
                         struct x86_reg srcREG,
                         struct x86_reg vtxREG,
                         struct tnl_clipspace_attr *a )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(p->ctx);
   struct x86_reg ptr_to_src = x86_make_disp(vtxREG, get_offset(vtx, &a->inputptr));

   /* Load current a[j].inputptr
    */
   x86_mov(&p->func, srcREG, ptr_to_src);
}

static void update_src_ptr( struct x86_program *p,
                            struct x86_reg srcREG,
                            struct x86_reg vtxREG,
                            struct tnl_clipspace_attr *a )
{
   if (a->inputstride) {
      struct tnl_clipspace *vtx = GET_VERTEX_STATE(p->ctx);
      struct x86_reg ptr_to_src = x86_make_disp(vtxREG, get_offset(vtx, &a->inputptr));

      /* Add a[j].inputstride (hardcoded value - could just as easily
       * pull the stride value from memory each time).
       */
      x86_lea(&p->func, srcREG, x86_make_disp(srcREG, a->inputstride));

      /* Save new value of a[j].inputptr.
       */
      x86_mov(&p->func, ptr_to_src, srcREG);
   }
}
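/* Together these implement a per-attribute pointer walk; in C terms
 * (sketch): src = a->inputptr; ...use src...; a->inputptr += a->inputstride;
 * A stride of zero leaves the pointer alone, so a constant attribute
 * is re-read from the same location for every vertex.
 */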


/* Lots of hardcoding
 *
 * EAX -- pointer to current output vertex
 * ECX -- pointer to current attribute's source data
 * EBP -- remaining vertex count
 * ESI -- pointer to the tnl_clipspace struct
 */
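/* The generated function has the tnl_emit_func signature
 * (ctx, count, dest).  Roughly equivalent C for the emitted loop --
 * an illustrative sketch, not code that is compiled here:
 *
 *    void emit( GLcontext *ctx, GLuint count, GLubyte *dest )
 *    {
 *       struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
 *       while (count--) {
 *          GLuint j;
 *          for (j = 0; j < vtx->attr_count; j++) {
 *             struct tnl_clipspace_attr *a = &vtx->attr[j];
 *             load from a->inputptr, convert per a->format,
 *                store at dest + a->vertoffset;
 *             a->inputptr += a->inputstride;
 *          }
 *          dest += vtx->vertex_size;
 *       }
 *    }
 */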
static GLboolean build_vertex_emit( struct x86_program *p )
{
   GLcontext *ctx = p->ctx;
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint j = 0;

   struct x86_reg vertexEAX = x86_make_reg(file_REG32, reg_AX);
   struct x86_reg srcECX = x86_make_reg(file_REG32, reg_CX);
   struct x86_reg countEBP = x86_make_reg(file_REG32, reg_BP);
   struct x86_reg vtxESI = x86_make_reg(file_REG32, reg_SI);
   struct x86_reg temp = x86_make_reg(file_XMM, 0);
   struct x86_reg vp0 = x86_make_reg(file_XMM, 1);
   struct x86_reg vp1 = x86_make_reg(file_XMM, 2);
   struct x86_reg temp2 = x86_make_reg(file_XMM, 3);
   GLubyte *fixup, *label;

   /* Push the callee-saved regs we use:
    */
   x86_push(&p->func, countEBP);
   x86_push(&p->func, vtxESI);

   /* Get vertex count, compare to zero:
    */
   x86_xor(&p->func, srcECX, srcECX);
   x86_mov(&p->func, countEBP, x86_fn_arg(&p->func, 2));
   x86_cmp(&p->func, countEBP, srcECX);
   fixup = x86_jcc_forward(&p->func, cc_E);

   /* Initialize destination register.
    */
   x86_mov(&p->func, vertexEAX, x86_fn_arg(&p->func, 3));

   /* Dereference ctx to get tnl, then vtx:
    */
   x86_mov(&p->func, vtxESI, x86_fn_arg(&p->func, 1));
   x86_mov(&p->func, vtxESI, x86_make_disp(vtxESI, get_offset(ctx, &ctx->swtnl_context)));
   vtxESI = x86_make_disp(vtxESI, get_offset(tnl, &tnl->clipspace));

   /* Possibly load vp0, vp1 for viewport calcs:
    */
   if (vtx->need_viewport) {
      sse_movups(&p->func, vp0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_scale[0])));
      sse_movups(&p->func, vp1, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_xlate[0])));
   }

   /* Always load these, needed or not:
    */
   sse_movups(&p->func, p->chan0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->chan_scale[0])));
   sse_movups(&p->func, p->identity, x86_make_disp(vtxESI, get_offset(vtx, &vtx->identity[0])));

   /* Note address for loop jump */
   label = x86_get_label(&p->func);

   /* Emit code for each of the attributes.  Currently routes
    * everything through SSE registers, even when it might be more
    * efficient to stick with regular old x86.  No optimization or
    * other tricks - enough new ground to cover here just getting
    * things working.
    */
   while (j < vtx->attr_count) {
      struct tnl_clipspace_attr *a = &vtx->attr[j];
      struct x86_reg dest = x86_make_disp(vertexEAX, a->vertoffset);

      /* Now, load an XMM reg from src, perhaps transform, then save.
       * Could be shortcircuited in specific cases:
       */
      switch (a->format) {
      case EMIT_1F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 1, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F:
         /* Potentially the worst case - hardcode 2+1 copying:
          */
         if (0) {               /* disabled: straight 3-wide copy */
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 3, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 2, temp);
            if (a->inputsize > 2) {
               emit_load(p, temp, 1, x86_make_disp(srcECX, 8), 1);
               emit_store(p, x86_make_disp(dest,8), 1, temp);
            }
            else {
               sse_movss(&p->func, x86_make_disp(dest,8), get_identity(p));
            }
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         break;
      case EMIT_4F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_XYW:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(X,Y,W,Z));
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;

      case EMIT_1UB_1F:
         /* Test for PAD3 + 1UB, i.e. at least three unused pad bytes
          * precede this attribute, so the 4-byte store below cannot
          * clobber the previous one:
          */
         if (j > 0 &&
             a[-1].vertoffset + a[-1].vertattrsize <= a->vertoffset - 3) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
            sse_shufps(&p->func, temp, temp, SHUF(X,X,X,X));
            emit_pack_store_4ub(p, x86_make_disp(dest, -3), temp); /* overkill! */
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            if (j == 0)
               _mesa_printf("Can't emit 1ub as the first attribute\n");
            else
               _mesa_printf("Can't emit 1ub %x %x %d\n", a->vertoffset,
                            a[-1].vertoffset, a[-1].vertattrsize );
            return GL_FALSE;
         }
         break;
      case EMIT_3UB_3F_RGB:
      case EMIT_3UB_3F_BGR:
         /* Test for 3UB + PAD1, i.e. a free byte follows, so a 4-byte
          * store is safe:
          */
         if (j == vtx->attr_count - 1 ||
             a[1].vertoffset >= a->vertoffset + 4) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         /* Test for 3UB + 1UB:
          */
         else if (j < vtx->attr_count - 1 &&
                  a[1].format == EMIT_1UB_1F &&
                  a[1].vertoffset == a->vertoffset + 3) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            update_src_ptr(p, srcECX, vtxESI, a);

            /* Make room for the incoming value:
             */
            sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));

            get_src_ptr(p, srcECX, vtxESI, &a[1]);
            emit_load(p, temp2, 1, x86_deref(srcECX), a[1].inputsize);
            sse_movss(&p->func, temp, temp2);
            update_src_ptr(p, srcECX, vtxESI, &a[1]);

            /* Rearrange and possibly do BGR conversion:
             */
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
            else
               sse_shufps(&p->func, temp, temp, SHUF(Y,Z,W,X));

            emit_pack_store_4ub(p, dest, temp);
            j++;                /* NOTE: two attrs consumed */
         }
         else {
            _mesa_printf("Can't emit 3ub\n");
            return GL_FALSE;    /* add this later */
         }
         break;

      case EMIT_4UB_4F_RGBA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_BGRA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ARGB:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ABGR:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4CHAN_4F_RGBA:
         switch (CHAN_TYPE) {
         case GL_UNSIGNED_BYTE:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_FLOAT:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 4, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_UNSIGNED_SHORT:
         default:
            _mesa_printf("unsupported CHAN_TYPE %s\n",
                         _mesa_lookup_enum_by_nr(CHAN_TYPE));
            return GL_FALSE;
         }
         break;
      default:
         _mesa_printf("unknown a[%d].format %d\n", j, a->format);
         return GL_FALSE;       /* catch any new opcodes */
      }

      /* Advance to the next attribute - j may already have been
       * incremented above when two attrs were consumed:
       */
      j++;
   }

   /* Next vertex:
    */
   x86_lea(&p->func, vertexEAX, x86_make_disp(vertexEAX, vtx->vertex_size));

   /* Decrement count, loop if not zero:
    */
   x86_dec(&p->func, countEBP);
   x86_test(&p->func, countEBP, countEBP);
   x86_jcc(&p->func, cc_NZ, label);

   /* Exit mmx state, if the non-SSE2 pack path used it:
    */
   if (p->func.need_emms)
      mmx_emms(&p->func);

   /* Land forward jump here:
    */
   x86_fixup_fwd_jump(&p->func, fixup);

   /* Pop regs and return:
    */
   x86_pop(&p->func, x86_get_base_reg(vtxESI));
   x86_pop(&p->func, countEBP);
   x86_ret(&p->func);

   assert(!vtx->emit);
   vtx->emit = (tnl_emit_func)x86_get_func(&p->func);

   assert( (char *) p->func.csr - (char *) p->func.store <= MAX_SSE_CODE_SIZE );
   return GL_TRUE;
}


void _tnl_generate_sse_emit( GLcontext *ctx )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct x86_program p;

   if (!cpu_has_xmm) {
      vtx->codegen_emit = NULL;
      return;
   }

   _mesa_memset(&p, 0, sizeof(p));

   p.ctx = ctx;
   p.inputs_safe = 0;           /* for now */
   p.outputs_safe = 0;          /* for now */
   p.have_sse2 = cpu_has_xmm2;
   p.identity = x86_make_reg(file_XMM, 6);
   p.chan0 = x86_make_reg(file_XMM, 7);

   if (!x86_init_func_size(&p.func, MAX_SSE_CODE_SIZE)) {
      vtx->emit = NULL;
      return;
   }

   if (build_vertex_emit(&p)) {
      _tnl_register_fastpath( vtx, GL_TRUE );
   }
   else {
      /* Note the failure so that we don't keep trying to codegen an
       * impossible state:
       */
      _tnl_register_fastpath( vtx, GL_FALSE );
      x86_release_func(&p.func);
   }
}
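/* This entry point is installed as vtx->codegen_emit by t_vertex.c
 * and invoked when a new vertex layout is built; on success the
 * generated function has already been stored in vtx->emit above.
 */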

#else

void _tnl_generate_sse_emit( GLcontext *ctx )
{
   /* Dummy version for when USE_SSE_ASM is not defined.
    */
   (void) ctx;
}

#endif