Merge branch 'mesa_7_5_branch'
[mesa.git] / src / gallium / drivers / cell / spu / spu_exec.c
1 /**************************************************************************
2 *
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * TGSI interpretor/executor.
30 *
31 * Flow control information:
32 *
33 * Since we operate on 'quads' (4 pixels or 4 vertices in parallel)
34 * flow control statements (IF/ELSE/ENDIF, LOOP/ENDLOOP) require special
35 * care since a condition may be true for some quad components but false
36 * for other components.
37 *
38 * We basically execute all statements (even if they're in the part of
39 * an IF/ELSE clause that's "not taken") and use a special mask to
40 * control writing to destination registers. This is the ExecMask.
41 * See store_dest().
42 *
43 * The ExecMask is computed from three other masks (CondMask, LoopMask and
44 * ContMask) which are controlled by the flow control instructions (namely:
45 * (IF/ELSE/ENDIF, LOOP/ENDLOOP and CONT).
46 *
47 *
48 * Authors:
49 * Michal Krol
50 * Brian Paul
51 */
52
53 #include <transpose_matrix4x4.h>
54 #include <simdmath/ceilf4.h>
55 #include <simdmath/cosf4.h>
56 #include <simdmath/divf4.h>
57 #include <simdmath/floorf4.h>
58 #include <simdmath/log2f4.h>
59 #include <simdmath/powf4.h>
60 #include <simdmath/sinf4.h>
61 #include <simdmath/sqrtf4.h>
62 #include <simdmath/truncf4.h>
63
64 #include "pipe/p_compiler.h"
65 #include "pipe/p_state.h"
66 #include "pipe/p_shader_tokens.h"
67 #include "tgsi/tgsi_parse.h"
68 #include "tgsi/tgsi_util.h"
69 #include "spu_exec.h"
70 #include "spu_main.h"
71 #include "spu_vertex_shader.h"
72 #include "spu_dcache.h"
73 #include "cell/common.h"
74
/* Indices of the four pixels of the 2x2 quad. */
#define TILE_TOP_LEFT 0
#define TILE_TOP_RIGHT 1
#define TILE_BOTTOM_LEFT 2
#define TILE_BOTTOM_RIGHT 3

/*
 * Shorthand locations of various utility registers (_I = Index, _C = Channel)
 */
#define TEMP_0_I           TGSI_EXEC_TEMP_00000000_I
#define TEMP_0_C           TGSI_EXEC_TEMP_00000000_C
#define TEMP_7F_I          TGSI_EXEC_TEMP_7FFFFFFF_I
#define TEMP_7F_C          TGSI_EXEC_TEMP_7FFFFFFF_C
#define TEMP_80_I          TGSI_EXEC_TEMP_80000000_I
#define TEMP_80_C          TGSI_EXEC_TEMP_80000000_C
#define TEMP_FF_I          TGSI_EXEC_TEMP_FFFFFFFF_I
#define TEMP_FF_C          TGSI_EXEC_TEMP_FFFFFFFF_C
#define TEMP_1_I           TGSI_EXEC_TEMP_ONE_I
#define TEMP_1_C           TGSI_EXEC_TEMP_ONE_C
#define TEMP_2_I           TGSI_EXEC_TEMP_TWO_I
#define TEMP_2_C           TGSI_EXEC_TEMP_TWO_C
#define TEMP_128_I         TGSI_EXEC_TEMP_128_I
#define TEMP_128_C         TGSI_EXEC_TEMP_128_C
#define TEMP_M128_I        TGSI_EXEC_TEMP_MINUS_128_I
#define TEMP_M128_C        TGSI_EXEC_TEMP_MINUS_128_C
#define TEMP_KILMASK_I     TGSI_EXEC_TEMP_KILMASK_I
#define TEMP_KILMASK_C     TGSI_EXEC_TEMP_KILMASK_C
#define TEMP_OUTPUT_I      TGSI_EXEC_TEMP_OUTPUT_I
#define TEMP_OUTPUT_C      TGSI_EXEC_TEMP_OUTPUT_C
#define TEMP_PRIMITIVE_I   TGSI_EXEC_TEMP_PRIMITIVE_I
#define TEMP_PRIMITIVE_C   TGSI_EXEC_TEMP_PRIMITIVE_C
#define TEMP_R0            TGSI_EXEC_TEMP_R0

/* Iterate CHAN over the four vector components (x, y, z, w). */
#define FOR_EACH_CHANNEL(CHAN)\
   for (CHAN = 0; CHAN < 4; CHAN++)

/* Test whether destination register 0 of INST writes component CHAN. */
#define IS_CHANNEL_ENABLED(INST, CHAN)\
   ((INST).FullDstRegisters[0].DstRegister.WriteMask & (1 << (CHAN)))

/* Same, for destination register 1 (two-destination instructions). */
#define IS_CHANNEL_ENABLED2(INST, CHAN)\
   ((INST).FullDstRegisters[1].DstRegister.WriteMask & (1 << (CHAN)))

/* Iterate only over the components enabled in INST's write mask. */
#define FOR_EACH_ENABLED_CHANNEL(INST, CHAN)\
   FOR_EACH_CHANNEL( CHAN )\
      if (IS_CHANNEL_ENABLED( INST, CHAN ))

#define FOR_EACH_ENABLED_CHANNEL2(INST, CHAN)\
   FOR_EACH_CHANNEL( CHAN )\
      if (IS_CHANNEL_ENABLED2( INST, CHAN ))


/** The execution mask depends on the conditional mask and the loop mask */
#define UPDATE_EXEC_MASK(MACH) \
      MACH->ExecMask = MACH->CondMask & MACH->LoopMask & MACH->ContMask & MACH->FuncMask


/* Vector component indices. */
#define CHAN_X  0
#define CHAN_Y  1
#define CHAN_Z  2
#define CHAN_W  3
134
135
136
137 /**
138 * Initialize machine state by expanding tokens to full instructions,
139 * allocating temporary storage, setting up constants, etc.
140 * After this, we can call spu_exec_machine_run() many times.
141 */
142 void
143 spu_exec_machine_init(struct spu_exec_machine *mach,
144 uint numSamplers,
145 struct spu_sampler *samplers,
146 unsigned processor)
147 {
148 const qword zero = si_il(0);
149 const qword not_zero = si_il(~0);
150
151 (void) numSamplers;
152 mach->Samplers = samplers;
153 mach->Processor = processor;
154 mach->Addrs = &mach->Temps[TGSI_EXEC_NUM_TEMPS];
155
156 /* Setup constants. */
157 mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q = zero;
158 mach->Temps[TEMP_FF_I].xyzw[TEMP_FF_C].q = not_zero;
159 mach->Temps[TEMP_7F_I].xyzw[TEMP_7F_C].q = si_shli(not_zero, -1);
160 mach->Temps[TEMP_80_I].xyzw[TEMP_80_C].q = si_shli(not_zero, 31);
161
162 mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q = (qword) spu_splats(1.0f);
163 mach->Temps[TEMP_2_I].xyzw[TEMP_2_C].q = (qword) spu_splats(2.0f);
164 mach->Temps[TEMP_128_I].xyzw[TEMP_128_C].q = (qword) spu_splats(128.0f);
165 mach->Temps[TEMP_M128_I].xyzw[TEMP_M128_C].q = (qword) spu_splats(-128.0f);
166 }
167
168
169 static INLINE qword
170 micro_abs(qword src)
171 {
172 return si_rotmi(si_shli(src, 1), -1);
173 }
174
175 static INLINE qword
176 micro_ceil(qword src)
177 {
178 return (qword) _ceilf4((vec_float4) src);
179 }
180
181 static INLINE qword
182 micro_cos(qword src)
183 {
184 return (qword) _cosf4((vec_float4) src);
185 }
186
/* si_shufb() control patterns.  Each byte of a pattern selects one byte of
 * the concatenated source registers; the patterns below replicate the four
 * bytes starting at TILE_* into every 4-byte slot, broadcasting one pixel's
 * value across the whole quad.
 *
 * NOTE(review): the starting byte is TILE_* (0..3) rather than TILE_* * 4,
 * so for pixels other than top-left the selected four bytes do not align
 * with a 32-bit element boundary -- verify the intended byte offsets
 * against the quad layout before relying on these derivatives.
 */
static const qword br_shuf = {
   TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
   TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
   TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
   TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
   TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
   TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
   TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
   TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
};

static const qword bl_shuf = {
   TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
   TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
   TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
   TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
   TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
   TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
   TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
   TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
};

static const qword tl_shuf = {
   TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
   TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
   TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
   TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
   TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
   TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
   TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
   TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
};
219
220 static qword
221 micro_ddx(qword src)
222 {
223 qword bottom_right = si_shufb(src, src, br_shuf);
224 qword bottom_left = si_shufb(src, src, bl_shuf);
225
226 return si_fs(bottom_right, bottom_left);
227 }
228
229 static qword
230 micro_ddy(qword src)
231 {
232 qword top_left = si_shufb(src, src, tl_shuf);
233 qword bottom_left = si_shufb(src, src, bl_shuf);
234
235 return si_fs(top_left, bottom_left);
236 }
237
238 static INLINE qword
239 micro_div(qword src0, qword src1)
240 {
241 return (qword) _divf4((vec_float4) src0, (vec_float4) src1);
242 }
243
244 static qword
245 micro_flr(qword src)
246 {
247 return (qword) _floorf4((vec_float4) src);
248 }
249
250 static qword
251 micro_frc(qword src)
252 {
253 return si_fs(src, (qword) _floorf4((vec_float4) src));
254 }
255
256 static INLINE qword
257 micro_ge(qword src0, qword src1)
258 {
259 return si_or(si_fceq(src0, src1), si_fcgt(src0, src1));
260 }
261
262 static qword
263 micro_lg2(qword src)
264 {
265 return (qword) _log2f4((vec_float4) src);
266 }
267
/**
 * Component-wise "less than" mask: compute GE (equal OR greater), then
 * invert it.
 *
 * NOTE(review): the inversion uses si_xori(tmp, 0xff), which XORs each
 * 32-bit element with 0x000000ff and so only flips the low byte of the
 * ~0/0 compare masks; si_xori(tmp, -1) looks like the intended full
 * complement.  Verify against the SPU ISA / consumers of this mask.
 */
static INLINE qword
micro_lt(qword src0, qword src1)
{
   const qword tmp = si_or(si_fceq(src0, src1), si_fcgt(src0, src1));

   return si_xori(tmp, 0xff);
}
275
276 static INLINE qword
277 micro_max(qword src0, qword src1)
278 {
279 return si_selb(src1, src0, si_fcgt(src0, src1));
280 }
281
282 static INLINE qword
283 micro_min(qword src0, qword src1)
284 {
285 return si_selb(src0, src1, si_fcgt(src0, src1));
286 }
287
288 static qword
289 micro_neg(qword src)
290 {
291 return si_xor(src, (qword) spu_splats(0x80000000));
292 }
293
294 static qword
295 micro_set_sign(qword src)
296 {
297 return si_or(src, (qword) spu_splats(0x80000000));
298 }
299
300 static qword
301 micro_pow(qword src0, qword src1)
302 {
303 return (qword) _powf4((vec_float4) src0, (vec_float4) src1);
304 }
305
306 static qword
307 micro_rnd(qword src)
308 {
309 const qword half = (qword) spu_splats(0.5f);
310
311 /* May be able to use _roundf4. There may be some difference, though.
312 */
313 return (qword) _floorf4((vec_float4) si_fa(src, half));
314 }
315
/**
 * Per-element arithmetic (sign-extending) shift right: src0 >> src1.
 * si_rotma() shifts right by the *negated* count, so the counts in src1
 * are negated first: si_sfi(a, 0) computes 0 - a.
 */
static INLINE qword
micro_ishr(qword src0, qword src1)
{
   return si_rotma(src0, si_sfi(src1, 0));
}
321
322 static qword
323 micro_trunc(qword src)
324 {
325 return (qword) _truncf4((vec_float4) src);
326 }
327
328 static qword
329 micro_sin(qword src)
330 {
331 return (qword) _sinf4((vec_float4) src);
332 }
333
334 static INLINE qword
335 micro_sqrt(qword src)
336 {
337 return (qword) _sqrtf4((vec_float4) src);
338 }
339
/**
 * Fetch one channel of a source operand for all four pixels/vertices of
 * the quad, given an (extended) swizzle and per-pixel register indices.
 *
 * \param mach     virtual machine state
 * \param file     register file (TGSI_FILE_x) to read from
 * \param swizzle  TGSI_EXTSWIZZLE_x selecting the component (or 0/1)
 * \param index    per-pixel register indices (one per quad slot)
 * \param chan     receives the four fetched values
 */
static void
fetch_src_file_channel(
   const struct spu_exec_machine *mach,
   const uint file,
   const uint swizzle,
   const union spu_exec_channel *index,
   union spu_exec_channel *chan )
{
   switch( swizzle ) {
   case TGSI_EXTSWIZZLE_X:
   case TGSI_EXTSWIZZLE_Y:
   case TGSI_EXTSWIZZLE_Z:
   case TGSI_EXTSWIZZLE_W:
      switch( file ) {
      case TGSI_FILE_CONSTANT: {
         unsigned i;

         /* Constants live in main memory; pull each pixel's value in
          * through the software data cache, one float at a time.
          */
         for (i = 0; i < 4; i++) {
            const float *ptr = mach->Consts[index->i[i]];
            float tmp[4];

            spu_dcache_fetch_unaligned((qword *) tmp,
                                       (uintptr_t)(ptr + swizzle),
                                       sizeof(float));

            chan->f[i] = tmp[0];
         }
         break;
      }

      case TGSI_FILE_INPUT:
         /* Each quad slot may address a different register (indirect
          * addressing); slot i reads lane i of its own register.
          */
         chan->u[0] = mach->Inputs[index->i[0]].xyzw[swizzle].u[0];
         chan->u[1] = mach->Inputs[index->i[1]].xyzw[swizzle].u[1];
         chan->u[2] = mach->Inputs[index->i[2]].xyzw[swizzle].u[2];
         chan->u[3] = mach->Inputs[index->i[3]].xyzw[swizzle].u[3];
         break;

      case TGSI_FILE_TEMPORARY:
         chan->u[0] = mach->Temps[index->i[0]].xyzw[swizzle].u[0];
         chan->u[1] = mach->Temps[index->i[1]].xyzw[swizzle].u[1];
         chan->u[2] = mach->Temps[index->i[2]].xyzw[swizzle].u[2];
         chan->u[3] = mach->Temps[index->i[3]].xyzw[swizzle].u[3];
         break;

      case TGSI_FILE_IMMEDIATE:
         ASSERT( index->i[0] < (int) mach->ImmLimit );
         ASSERT( index->i[1] < (int) mach->ImmLimit );
         ASSERT( index->i[2] < (int) mach->ImmLimit );
         ASSERT( index->i[3] < (int) mach->ImmLimit );

         chan->f[0] = mach->Imms[index->i[0]][swizzle];
         chan->f[1] = mach->Imms[index->i[1]][swizzle];
         chan->f[2] = mach->Imms[index->i[2]][swizzle];
         chan->f[3] = mach->Imms[index->i[3]][swizzle];
         break;

      case TGSI_FILE_ADDRESS:
         chan->u[0] = mach->Addrs[index->i[0]].xyzw[swizzle].u[0];
         chan->u[1] = mach->Addrs[index->i[1]].xyzw[swizzle].u[1];
         chan->u[2] = mach->Addrs[index->i[2]].xyzw[swizzle].u[2];
         chan->u[3] = mach->Addrs[index->i[3]].xyzw[swizzle].u[3];
         break;

      case TGSI_FILE_OUTPUT:
         /* vertex/fragment output vars can be read too */
         chan->u[0] = mach->Outputs[index->i[0]].xyzw[swizzle].u[0];
         chan->u[1] = mach->Outputs[index->i[1]].xyzw[swizzle].u[1];
         chan->u[2] = mach->Outputs[index->i[2]].xyzw[swizzle].u[2];
         chan->u[3] = mach->Outputs[index->i[3]].xyzw[swizzle].u[3];
         break;

      default:
         ASSERT( 0 );
      }
      break;

   case TGSI_EXTSWIZZLE_ZERO:
      /* Extended swizzle: the constant 0.0 in every lane. */
      *chan = mach->Temps[TEMP_0_I].xyzw[TEMP_0_C];
      break;

   case TGSI_EXTSWIZZLE_ONE:
      /* Extended swizzle: the constant 1.0 in every lane. */
      *chan = mach->Temps[TEMP_1_I].xyzw[TEMP_1_C];
      break;

   default:
      ASSERT( 0 );
   }
}
428
/**
 * Fetch one channel of a full source operand, resolving indirect
 * addressing, the optional second register dimension, the swizzle and
 * the sign mode (abs / negate / set-sign / complement).
 *
 * \param mach        virtual machine state
 * \param chan        receives the four fetched values
 * \param reg         full source register description from the parser
 * \param chan_index  which channel (CHAN_X..CHAN_W) of the operand
 */
static void
fetch_source(
   const struct spu_exec_machine *mach,
   union spu_exec_channel *chan,
   const struct tgsi_full_src_register *reg,
   const uint chan_index )
{
   union spu_exec_channel index;
   uint swizzle;

   /* Start with the same register index in all four quad slots. */
   index.i[0] =
   index.i[1] =
   index.i[2] =
   index.i[3] = reg->SrcRegister.Index;

   if (reg->SrcRegister.Indirect) {
      /* Add the per-pixel value of the indirect-address register
       * (e.g. "TEMP[ADDR[0].x + 1]").
       */
      union spu_exec_channel index2;
      union spu_exec_channel indir_index;

      index2.i[0] =
      index2.i[1] =
      index2.i[2] =
      index2.i[3] = reg->SrcRegisterInd.Index;

      swizzle = tgsi_util_get_src_register_swizzle(&reg->SrcRegisterInd,
                                                   CHAN_X);
      fetch_src_file_channel(
         mach,
         reg->SrcRegisterInd.File,
         swizzle,
         &index2,
         &indir_index );

      index.q = si_a(index.q, indir_index.q);
   }

   if( reg->SrcRegister.Dimension ) {
      /* Two-dimensional access: scale the first index by the row size
       * of the file before adding the second-dimension offset.
       */
      switch( reg->SrcRegister.File ) {
      case TGSI_FILE_INPUT:
         /* NOTE(review): 17 is presumably the per-row input stride --
          * confirm against the input layout used by the Cell driver.
          */
         index.q = si_mpyi(index.q, 17);
         break;
      case TGSI_FILE_CONSTANT:
         /* NOTE(review): << 12 implies 4096 constants per buffer --
          * confirm.
          */
         index.q = si_shli(index.q, 12);
         break;
      default:
         ASSERT( 0 );
      }

      index.i[0] += reg->SrcRegisterDim.Index;
      index.i[1] += reg->SrcRegisterDim.Index;
      index.i[2] += reg->SrcRegisterDim.Index;
      index.i[3] += reg->SrcRegisterDim.Index;

      if (reg->SrcRegisterDim.Indirect) {
         /* The second dimension may itself be indirectly addressed. */
         union spu_exec_channel index2;
         union spu_exec_channel indir_index;

         index2.i[0] =
         index2.i[1] =
         index2.i[2] =
         index2.i[3] = reg->SrcRegisterDimInd.Index;

         swizzle = tgsi_util_get_src_register_swizzle( &reg->SrcRegisterDimInd, CHAN_X );
         fetch_src_file_channel(
            mach,
            reg->SrcRegisterDimInd.File,
            swizzle,
            &index2,
            &indir_index );

         index.q = si_a(index.q, indir_index.q);
      }
   }

   /* Do the actual fetch through the (extended) swizzle for this channel. */
   swizzle = tgsi_util_get_full_src_register_extswizzle( reg, chan_index );
   fetch_src_file_channel(
      mach,
      reg->SrcRegister.File,
      swizzle,
      &index,
      chan );

   /* Apply the operand's sign mode. */
   switch (tgsi_util_get_full_src_register_sign_mode( reg, chan_index )) {
   case TGSI_UTIL_SIGN_CLEAR:
      chan->q = micro_abs(chan->q);
      break;

   case TGSI_UTIL_SIGN_SET:
      chan->q = micro_set_sign(chan->q);
      break;

   case TGSI_UTIL_SIGN_TOGGLE:
      chan->q = micro_neg(chan->q);
      break;

   case TGSI_UTIL_SIGN_KEEP:
      break;
   }

   if (reg->SrcRegisterExtMod.Complement) {
      /* Complement modifier: 1.0 - value. */
      chan->q = si_fs(mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q, chan->q);
   }
}
532
533 static void
534 store_dest(
535 struct spu_exec_machine *mach,
536 const union spu_exec_channel *chan,
537 const struct tgsi_full_dst_register *reg,
538 const struct tgsi_full_instruction *inst,
539 uint chan_index )
540 {
541 union spu_exec_channel *dst;
542
543 switch( reg->DstRegister.File ) {
544 case TGSI_FILE_NULL:
545 return;
546
547 case TGSI_FILE_OUTPUT:
548 dst = &mach->Outputs[mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0]
549 + reg->DstRegister.Index].xyzw[chan_index];
550 break;
551
552 case TGSI_FILE_TEMPORARY:
553 dst = &mach->Temps[reg->DstRegister.Index].xyzw[chan_index];
554 break;
555
556 case TGSI_FILE_ADDRESS:
557 dst = &mach->Addrs[reg->DstRegister.Index].xyzw[chan_index];
558 break;
559
560 default:
561 ASSERT( 0 );
562 return;
563 }
564
565 switch (inst->Instruction.Saturate)
566 {
567 case TGSI_SAT_NONE:
568 if (mach->ExecMask & 0x1)
569 dst->i[0] = chan->i[0];
570 if (mach->ExecMask & 0x2)
571 dst->i[1] = chan->i[1];
572 if (mach->ExecMask & 0x4)
573 dst->i[2] = chan->i[2];
574 if (mach->ExecMask & 0x8)
575 dst->i[3] = chan->i[3];
576 break;
577
578 case TGSI_SAT_ZERO_ONE:
579 /* XXX need to obey ExecMask here */
580 dst->q = micro_max(chan->q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
581 dst->q = micro_min(dst->q, mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q);
582 break;
583
584 case TGSI_SAT_MINUS_PLUS_ONE:
585 ASSERT( 0 );
586 break;
587
588 default:
589 ASSERT( 0 );
590 }
591 }
592
/* Fetch channel CHAN of source operand INDEX of the current instruction
 * into VAL.
 */
#define FETCH(VAL,INDEX,CHAN)\
   fetch_source (mach, VAL, &inst->FullSrcRegisters[INDEX], CHAN)

/* Store VAL into channel CHAN of destination operand INDEX, applying the
 * execution mask and saturation mode (see store_dest()).
 */
#define STORE(VAL,INDEX,CHAN)\
   store_dest (mach, VAL, &inst->FullDstRegisters[INDEX], inst, CHAN )
598
599
/**
 * Execute ARB-style KIL which is predicated by a src register.
 * Kill fragment if any of the four values is less than zero.
 */
static void
exec_kil(struct spu_exec_machine *mach,
         const struct tgsi_full_instruction *inst)
{
   uint uniquemask;
   uint chan_index;
   uint kilmask = 0; /* bit 0 = pixel 0, bit 1 = pixel 1, etc */
   union spu_exec_channel r[1];

   /* This mask stores component bits that were already tested. Note that
    * we test if the value is less than zero, so 1.0 and 0.0 need not to be
    * tested. */
   uniquemask = (1 << TGSI_EXTSWIZZLE_ZERO) | (1 << TGSI_EXTSWIZZLE_ONE);

   for (chan_index = 0; chan_index < 4; chan_index++)
   {
      uint swizzle;
      uint i;

      /* unswizzle channel */
      swizzle = tgsi_util_get_full_src_register_extswizzle (
                        &inst->FullSrcRegisters[0],
                        chan_index);

      /* check if the component has not been already tested */
      if (uniquemask & (1 << swizzle))
         continue;
      uniquemask |= 1 << swizzle;

      /* A negative value in any pixel kills that pixel. */
      FETCH(&r[0], 0, chan_index);
      for (i = 0; i < 4; i++)
         if (r[0].f[i] < 0.0f)
            kilmask |= 1 << i;
   }

   /* Accumulate into the persistent kill-mask utility register. */
   mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] |= kilmask;
}
641
642 /**
643 * Execute NVIDIA-style KIL which is predicated by a condition code.
644 * Kill fragment if the condition code is TRUE.
645 */
646 static void
647 exec_kilp(struct tgsi_exec_machine *mach,
648 const struct tgsi_full_instruction *inst)
649 {
650 uint kilmask = 0; /* bit 0 = pixel 0, bit 1 = pixel 1, etc */
651
652 /* TODO: build kilmask from CC mask */
653
654 mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] |= kilmask;
655 }
656
/*
 * Fetch a texel using STR texture coordinates.
 *
 * \param sampler  the sampler unit to read from
 * \param s,t,p    per-pixel texture coordinates (t/p may be NULL for 1D)
 * \param lodbias  level-of-detail bias   XXX should be float[4]
 * \param r,g,b,a  receive one color component each, for all four pixels
 */
static void
fetch_texel( struct spu_sampler *sampler,
             const union spu_exec_channel *s,
             const union spu_exec_channel *t,
             const union spu_exec_channel *p,
             float lodbias,  /* XXX should be float[4] */
             union spu_exec_channel *r,
             union spu_exec_channel *g,
             union spu_exec_channel *b,
             union spu_exec_channel *a )
{
   qword rgba[4];
   qword out[4];

   /* Sample the four pixels of the quad through the sampler callback. */
   sampler->get_samples(sampler, s->f, t->f, p->f, lodbias,
                        (float (*)[4]) rgba);

   /* Transpose the 4x4 result so each output register holds one color
    * component for all four pixels (presumably get_samples() returns
    * the data pixel-major -- confirm against the sampler code).
    */
   _transpose_matrix4x4((vec_float4 *) out, (vec_float4 *) rgba);
   r->q = out[0];
   g->q = out[1];
   b->q = out[2];
   a->q = out[3];
}
683
684
685 static void
686 exec_tex(struct spu_exec_machine *mach,
687 const struct tgsi_full_instruction *inst,
688 boolean biasLod, boolean projected)
689 {
690 const uint unit = inst->FullSrcRegisters[1].SrcRegister.Index;
691 union spu_exec_channel r[8];
692 uint chan_index;
693 float lodBias;
694
695 /* printf("Sampler %u unit %u\n", sampler, unit); */
696
697 switch (inst->InstructionExtTexture.Texture) {
698 case TGSI_TEXTURE_1D:
699
700 FETCH(&r[0], 0, CHAN_X);
701
702 if (projected) {
703 FETCH(&r[1], 0, CHAN_W);
704 r[0].q = micro_div(r[0].q, r[1].q);
705 }
706
707 if (biasLod) {
708 FETCH(&r[1], 0, CHAN_W);
709 lodBias = r[2].f[0];
710 }
711 else
712 lodBias = 0.0;
713
714 fetch_texel(&mach->Samplers[unit],
715 &r[0], NULL, NULL, lodBias, /* S, T, P, BIAS */
716 &r[0], &r[1], &r[2], &r[3]); /* R, G, B, A */
717 break;
718
719 case TGSI_TEXTURE_2D:
720 case TGSI_TEXTURE_RECT:
721
722 FETCH(&r[0], 0, CHAN_X);
723 FETCH(&r[1], 0, CHAN_Y);
724 FETCH(&r[2], 0, CHAN_Z);
725
726 if (projected) {
727 FETCH(&r[3], 0, CHAN_W);
728 r[0].q = micro_div(r[0].q, r[3].q);
729 r[1].q = micro_div(r[1].q, r[3].q);
730 r[2].q = micro_div(r[2].q, r[3].q);
731 }
732
733 if (biasLod) {
734 FETCH(&r[3], 0, CHAN_W);
735 lodBias = r[3].f[0];
736 }
737 else
738 lodBias = 0.0;
739
740 fetch_texel(&mach->Samplers[unit],
741 &r[0], &r[1], &r[2], lodBias, /* inputs */
742 &r[0], &r[1], &r[2], &r[3]); /* outputs */
743 break;
744
745 case TGSI_TEXTURE_3D:
746 case TGSI_TEXTURE_CUBE:
747
748 FETCH(&r[0], 0, CHAN_X);
749 FETCH(&r[1], 0, CHAN_Y);
750 FETCH(&r[2], 0, CHAN_Z);
751
752 if (projected) {
753 FETCH(&r[3], 0, CHAN_W);
754 r[0].q = micro_div(r[0].q, r[3].q);
755 r[1].q = micro_div(r[1].q, r[3].q);
756 r[2].q = micro_div(r[2].q, r[3].q);
757 }
758
759 if (biasLod) {
760 FETCH(&r[3], 0, CHAN_W);
761 lodBias = r[3].f[0];
762 }
763 else
764 lodBias = 0.0;
765
766 fetch_texel(&mach->Samplers[unit],
767 &r[0], &r[1], &r[2], lodBias,
768 &r[0], &r[1], &r[2], &r[3]);
769 break;
770
771 default:
772 ASSERT (0);
773 }
774
775 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
776 STORE( &r[chan_index], 0, chan_index );
777 }
778 }
779
780
781
782 static void
783 constant_interpolation(
784 struct spu_exec_machine *mach,
785 unsigned attrib,
786 unsigned chan )
787 {
788 unsigned i;
789
790 for( i = 0; i < QUAD_SIZE; i++ ) {
791 mach->Inputs[attrib].xyzw[chan].f[i] = mach->InterpCoefs[attrib].a0[chan];
792 }
793 }
794
795 static void
796 linear_interpolation(
797 struct spu_exec_machine *mach,
798 unsigned attrib,
799 unsigned chan )
800 {
801 const float x = mach->QuadPos.xyzw[0].f[0];
802 const float y = mach->QuadPos.xyzw[1].f[0];
803 const float dadx = mach->InterpCoefs[attrib].dadx[chan];
804 const float dady = mach->InterpCoefs[attrib].dady[chan];
805 const float a0 = mach->InterpCoefs[attrib].a0[chan] + dadx * x + dady * y;
806 mach->Inputs[attrib].xyzw[chan].f[0] = a0;
807 mach->Inputs[attrib].xyzw[chan].f[1] = a0 + dadx;
808 mach->Inputs[attrib].xyzw[chan].f[2] = a0 + dady;
809 mach->Inputs[attrib].xyzw[chan].f[3] = a0 + dadx + dady;
810 }
811
812 static void
813 perspective_interpolation(
814 struct spu_exec_machine *mach,
815 unsigned attrib,
816 unsigned chan )
817 {
818 const float x = mach->QuadPos.xyzw[0].f[0];
819 const float y = mach->QuadPos.xyzw[1].f[0];
820 const float dadx = mach->InterpCoefs[attrib].dadx[chan];
821 const float dady = mach->InterpCoefs[attrib].dady[chan];
822 const float a0 = mach->InterpCoefs[attrib].a0[chan] + dadx * x + dady * y;
823 const float *w = mach->QuadPos.xyzw[3].f;
824 /* divide by W here */
825 mach->Inputs[attrib].xyzw[chan].f[0] = a0 / w[0];
826 mach->Inputs[attrib].xyzw[chan].f[1] = (a0 + dadx) / w[1];
827 mach->Inputs[attrib].xyzw[chan].f[2] = (a0 + dady) / w[2];
828 mach->Inputs[attrib].xyzw[chan].f[3] = (a0 + dadx + dady) / w[3];
829 }
830
831
/** Signature shared by the three per-attribute interpolation routines above. */
typedef void (* interpolation_func)(
   struct spu_exec_machine *mach,
   unsigned attrib,
   unsigned chan );
836
837 static void
838 exec_declaration(struct spu_exec_machine *mach,
839 const struct tgsi_full_declaration *decl)
840 {
841 if( mach->Processor == TGSI_PROCESSOR_FRAGMENT ) {
842 if( decl->Declaration.File == TGSI_FILE_INPUT ) {
843 unsigned first, last, mask;
844 interpolation_func interp;
845
846 first = decl->DeclarationRange.First;
847 last = decl->DeclarationRange.Last;
848 mask = decl->Declaration.UsageMask;
849
850 switch( decl->Declaration.Interpolate ) {
851 case TGSI_INTERPOLATE_CONSTANT:
852 interp = constant_interpolation;
853 break;
854
855 case TGSI_INTERPOLATE_LINEAR:
856 interp = linear_interpolation;
857 break;
858
859 case TGSI_INTERPOLATE_PERSPECTIVE:
860 interp = perspective_interpolation;
861 break;
862
863 default:
864 ASSERT( 0 );
865 }
866
867 if( mask == TGSI_WRITEMASK_XYZW ) {
868 unsigned i, j;
869
870 for( i = first; i <= last; i++ ) {
871 for( j = 0; j < NUM_CHANNELS; j++ ) {
872 interp( mach, i, j );
873 }
874 }
875 }
876 else {
877 unsigned i, j;
878
879 for( j = 0; j < NUM_CHANNELS; j++ ) {
880 if( mask & (1 << j) ) {
881 for( i = first; i <= last; i++ ) {
882 interp( mach, i, j );
883 }
884 }
885 }
886 }
887 }
888 }
889 }
890
891 static void
892 exec_instruction(
893 struct spu_exec_machine *mach,
894 const struct tgsi_full_instruction *inst,
895 int *pc )
896 {
897 uint chan_index;
898 union spu_exec_channel r[8];
899
900 (*pc)++;
901
902 switch (inst->Instruction.Opcode) {
903 case TGSI_OPCODE_ARL:
904 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
905 FETCH( &r[0], 0, chan_index );
906 r[0].q = si_cflts(r[0].q, 0);
907 STORE( &r[0], 0, chan_index );
908 }
909 break;
910
911 case TGSI_OPCODE_MOV:
912 case TGSI_OPCODE_SWZ:
913 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
914 FETCH( &r[0], 0, chan_index );
915 STORE( &r[0], 0, chan_index );
916 }
917 break;
918
919 case TGSI_OPCODE_LIT:
920 if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
921 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_X );
922 }
923
924 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y ) || IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
925 FETCH( &r[0], 0, CHAN_X );
926 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
927 r[0].q = micro_max(r[0].q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
928 STORE( &r[0], 0, CHAN_Y );
929 }
930
931 if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
932 FETCH( &r[1], 0, CHAN_Y );
933 r[1].q = micro_max(r[1].q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
934
935 FETCH( &r[2], 0, CHAN_W );
936 r[2].q = micro_min(r[2].q, mach->Temps[TEMP_128_I].xyzw[TEMP_128_C].q);
937 r[2].q = micro_max(r[2].q, mach->Temps[TEMP_M128_I].xyzw[TEMP_M128_C].q);
938 r[1].q = micro_pow(r[1].q, r[2].q);
939
940 /* r0 = (r0 > 0.0) ? r1 : 0.0
941 */
942 r[0].q = si_fcgt(r[0].q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
943 r[0].q = si_selb(mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q, r[1].q,
944 r[0].q);
945 STORE( &r[0], 0, CHAN_Z );
946 }
947 }
948
949 if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
950 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
951 }
952 break;
953
954 case TGSI_OPCODE_RCP:
955 FETCH( &r[0], 0, CHAN_X );
956 r[0].q = micro_div(mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q, r[0].q);
957 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
958 STORE( &r[0], 0, chan_index );
959 }
960 break;
961
962 case TGSI_OPCODE_RSQ:
963 FETCH( &r[0], 0, CHAN_X );
964 r[0].q = micro_sqrt(r[0].q);
965 r[0].q = micro_div(mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q, r[0].q);
966 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
967 STORE( &r[0], 0, chan_index );
968 }
969 break;
970
971 case TGSI_OPCODE_EXP:
972 ASSERT (0);
973 break;
974
975 case TGSI_OPCODE_LOG:
976 ASSERT (0);
977 break;
978
979 case TGSI_OPCODE_MUL:
980 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index )
981 {
982 FETCH(&r[0], 0, chan_index);
983 FETCH(&r[1], 1, chan_index);
984
985 r[0].q = si_fm(r[0].q, r[1].q);
986
987 STORE(&r[0], 0, chan_index);
988 }
989 break;
990
991 case TGSI_OPCODE_ADD:
992 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
993 FETCH( &r[0], 0, chan_index );
994 FETCH( &r[1], 1, chan_index );
995 r[0].q = si_fa(r[0].q, r[1].q);
996 STORE( &r[0], 0, chan_index );
997 }
998 break;
999
1000 case TGSI_OPCODE_DP3:
1001 /* TGSI_OPCODE_DOT3 */
1002 FETCH( &r[0], 0, CHAN_X );
1003 FETCH( &r[1], 1, CHAN_X );
1004 r[0].q = si_fm(r[0].q, r[1].q);
1005
1006 FETCH( &r[1], 0, CHAN_Y );
1007 FETCH( &r[2], 1, CHAN_Y );
1008 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1009
1010
1011 FETCH( &r[1], 0, CHAN_Z );
1012 FETCH( &r[2], 1, CHAN_Z );
1013 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1014
1015 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1016 STORE( &r[0], 0, chan_index );
1017 }
1018 break;
1019
1020 case TGSI_OPCODE_DP4:
1021 /* TGSI_OPCODE_DOT4 */
1022 FETCH(&r[0], 0, CHAN_X);
1023 FETCH(&r[1], 1, CHAN_X);
1024
1025 r[0].q = si_fm(r[0].q, r[1].q);
1026
1027 FETCH(&r[1], 0, CHAN_Y);
1028 FETCH(&r[2], 1, CHAN_Y);
1029
1030 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1031
1032 FETCH(&r[1], 0, CHAN_Z);
1033 FETCH(&r[2], 1, CHAN_Z);
1034
1035 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1036
1037 FETCH(&r[1], 0, CHAN_W);
1038 FETCH(&r[2], 1, CHAN_W);
1039
1040 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1041
1042 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1043 STORE( &r[0], 0, chan_index );
1044 }
1045 break;
1046
1047 case TGSI_OPCODE_DST:
1048 if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
1049 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_X );
1050 }
1051
1052 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
1053 FETCH( &r[0], 0, CHAN_Y );
1054 FETCH( &r[1], 1, CHAN_Y);
1055 r[0].q = si_fm(r[0].q, r[1].q);
1056 STORE( &r[0], 0, CHAN_Y );
1057 }
1058
1059 if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
1060 FETCH( &r[0], 0, CHAN_Z );
1061 STORE( &r[0], 0, CHAN_Z );
1062 }
1063
1064 if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
1065 FETCH( &r[0], 1, CHAN_W );
1066 STORE( &r[0], 0, CHAN_W );
1067 }
1068 break;
1069
1070 case TGSI_OPCODE_MIN:
1071 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1072 FETCH(&r[0], 0, chan_index);
1073 FETCH(&r[1], 1, chan_index);
1074
1075 r[0].q = micro_min(r[0].q, r[1].q);
1076
1077 STORE(&r[0], 0, chan_index);
1078 }
1079 break;
1080
1081 case TGSI_OPCODE_MAX:
1082 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1083 FETCH(&r[0], 0, chan_index);
1084 FETCH(&r[1], 1, chan_index);
1085
1086 r[0].q = micro_max(r[0].q, r[1].q);
1087
1088 STORE(&r[0], 0, chan_index );
1089 }
1090 break;
1091
1092 case TGSI_OPCODE_SLT:
1093 /* TGSI_OPCODE_SETLT */
1094 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1095 FETCH( &r[0], 0, chan_index );
1096 FETCH( &r[1], 1, chan_index );
1097
1098 r[0].q = micro_ge(r[0].q, r[1].q);
1099 r[0].q = si_xori(r[0].q, 0xff);
1100
1101 STORE( &r[0], 0, chan_index );
1102 }
1103 break;
1104
1105 case TGSI_OPCODE_SGE:
1106 /* TGSI_OPCODE_SETGE */
1107 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1108 FETCH( &r[0], 0, chan_index );
1109 FETCH( &r[1], 1, chan_index );
1110 r[0].q = micro_ge(r[0].q, r[1].q);
1111 STORE( &r[0], 0, chan_index );
1112 }
1113 break;
1114
1115 case TGSI_OPCODE_MAD:
1116 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1117 FETCH( &r[0], 0, chan_index );
1118 FETCH( &r[1], 1, chan_index );
1119 FETCH( &r[2], 2, chan_index );
1120 r[0].q = si_fma(r[0].q, r[1].q, r[2].q);
1121 STORE( &r[0], 0, chan_index );
1122 }
1123 break;
1124
1125 case TGSI_OPCODE_SUB:
1126 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1127 FETCH(&r[0], 0, chan_index);
1128 FETCH(&r[1], 1, chan_index);
1129
1130 r[0].q = si_fs(r[0].q, r[1].q);
1131
1132 STORE(&r[0], 0, chan_index);
1133 }
1134 break;
1135
1136 case TGSI_OPCODE_LRP:
1137 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1138 FETCH(&r[0], 0, chan_index);
1139 FETCH(&r[1], 1, chan_index);
1140 FETCH(&r[2], 2, chan_index);
1141
1142 r[1].q = si_fs(r[1].q, r[2].q);
1143 r[0].q = si_fma(r[0].q, r[1].q, r[2].q);
1144
1145 STORE(&r[0], 0, chan_index);
1146 }
1147 break;
1148
1149 case TGSI_OPCODE_CND:
1150 ASSERT (0);
1151 break;
1152
1153 case TGSI_OPCODE_CND0:
1154 ASSERT (0);
1155 break;
1156
1157 case TGSI_OPCODE_DP2A:
1158 ASSERT (0);
1159 break;
1160
1161 case TGSI_OPCODE_FRC:
1162 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1163 FETCH( &r[0], 0, chan_index );
1164 r[0].q = micro_frc(r[0].q);
1165 STORE( &r[0], 0, chan_index );
1166 }
1167 break;
1168
1169 case TGSI_OPCODE_CLAMP:
1170 ASSERT (0);
1171 break;
1172
1173 case TGSI_OPCODE_FLR:
1174 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1175 FETCH( &r[0], 0, chan_index );
1176 r[0].q = micro_flr(r[0].q);
1177 STORE( &r[0], 0, chan_index );
1178 }
1179 break;
1180
1181 case TGSI_OPCODE_ROUND:
1182 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1183 FETCH( &r[0], 0, chan_index );
1184 r[0].q = micro_rnd(r[0].q);
1185 STORE( &r[0], 0, chan_index );
1186 }
1187 break;
1188
1189 case TGSI_OPCODE_EX2:
1190 FETCH(&r[0], 0, CHAN_X);
1191
1192 r[0].q = micro_pow(mach->Temps[TEMP_2_I].xyzw[TEMP_2_C].q, r[0].q);
1193
1194 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1195 STORE( &r[0], 0, chan_index );
1196 }
1197 break;
1198
1199 case TGSI_OPCODE_LG2:
1200 FETCH( &r[0], 0, CHAN_X );
1201 r[0].q = micro_lg2(r[0].q);
1202 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1203 STORE( &r[0], 0, chan_index );
1204 }
1205 break;
1206
1207 case TGSI_OPCODE_POW:
1208 FETCH(&r[0], 0, CHAN_X);
1209 FETCH(&r[1], 1, CHAN_X);
1210
1211 r[0].q = micro_pow(r[0].q, r[1].q);
1212
1213 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1214 STORE( &r[0], 0, chan_index );
1215 }
1216 break;
1217
1218 case TGSI_OPCODE_XPD:
1219 /* TGSI_OPCODE_XPD */
1220 FETCH(&r[0], 0, CHAN_Y);
1221 FETCH(&r[1], 1, CHAN_Z);
1222 FETCH(&r[3], 0, CHAN_Z);
1223 FETCH(&r[4], 1, CHAN_Y);
1224
1225 /* r2 = (r0 * r1) - (r3 * r5)
1226 */
1227 r[2].q = si_fm(r[3].q, r[5].q);
1228 r[2].q = si_fms(r[0].q, r[1].q, r[2].q);
1229
1230 if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
1231 STORE( &r[2], 0, CHAN_X );
1232 }
1233
1234 FETCH(&r[2], 1, CHAN_X);
1235 FETCH(&r[5], 0, CHAN_X);
1236
1237 /* r3 = (r3 * r2) - (r1 * r5)
1238 */
1239 r[1].q = si_fm(r[1].q, r[5].q);
1240 r[3].q = si_fms(r[3].q, r[2].q, r[1].q);
1241
1242 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
1243 STORE( &r[3], 0, CHAN_Y );
1244 }
1245
1246 /* r5 = (r5 * r4) - (r0 * r2)
1247 */
1248 r[0].q = si_fm(r[0].q, r[2].q);
1249 r[5].q = si_fms(r[5].q, r[4].q, r[0].q);
1250
1251 if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
1252 STORE( &r[5], 0, CHAN_Z );
1253 }
1254
1255 if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
1256 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
1257 }
1258 break;
1259
1260 case TGSI_OPCODE_ABS:
1261 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1262 FETCH(&r[0], 0, chan_index);
1263
1264 r[0].q = micro_abs(r[0].q);
1265
1266 STORE(&r[0], 0, chan_index);
1267 }
1268 break;
1269
1270 case TGSI_OPCODE_RCC:
1271 ASSERT (0);
1272 break;
1273
1274 case TGSI_OPCODE_DPH:
1275 FETCH(&r[0], 0, CHAN_X);
1276 FETCH(&r[1], 1, CHAN_X);
1277
1278 r[0].q = si_fm(r[0].q, r[1].q);
1279
1280 FETCH(&r[1], 0, CHAN_Y);
1281 FETCH(&r[2], 1, CHAN_Y);
1282
1283 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1284
1285 FETCH(&r[1], 0, CHAN_Z);
1286 FETCH(&r[2], 1, CHAN_Z);
1287
1288 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1289
1290 FETCH(&r[1], 1, CHAN_W);
1291
1292 r[0].q = si_fa(r[0].q, r[1].q);
1293
1294 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1295 STORE( &r[0], 0, chan_index );
1296 }
1297 break;
1298
1299 case TGSI_OPCODE_COS:
1300 FETCH(&r[0], 0, CHAN_X);
1301
1302 r[0].q = micro_cos(r[0].q);
1303
1304 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1305 STORE( &r[0], 0, chan_index );
1306 }
1307 break;
1308
1309 case TGSI_OPCODE_DDX:
1310 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1311 FETCH( &r[0], 0, chan_index );
1312 r[0].q = micro_ddx(r[0].q);
1313 STORE( &r[0], 0, chan_index );
1314 }
1315 break;
1316
1317 case TGSI_OPCODE_DDY:
1318 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1319 FETCH( &r[0], 0, chan_index );
1320 r[0].q = micro_ddy(r[0].q);
1321 STORE( &r[0], 0, chan_index );
1322 }
1323 break;
1324
1325 case TGSI_OPCODE_KILP:
1326 exec_kilp (mach, inst);
1327 break;
1328
1329 case TGSI_OPCODE_KIL:
1330 exec_kil (mach, inst);
1331 break;
1332
1333 case TGSI_OPCODE_PK2H:
1334 ASSERT (0);
1335 break;
1336
1337 case TGSI_OPCODE_PK2US:
1338 ASSERT (0);
1339 break;
1340
1341 case TGSI_OPCODE_PK4B:
1342 ASSERT (0);
1343 break;
1344
1345 case TGSI_OPCODE_PK4UB:
1346 ASSERT (0);
1347 break;
1348
1349 case TGSI_OPCODE_RFL:
1350 ASSERT (0);
1351 break;
1352
1353 case TGSI_OPCODE_SEQ:
1354 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1355 FETCH( &r[0], 0, chan_index );
1356 FETCH( &r[1], 1, chan_index );
1357
1358 r[0].q = si_fceq(r[0].q, r[1].q);
1359
1360 STORE( &r[0], 0, chan_index );
1361 }
1362 break;
1363
1364 case TGSI_OPCODE_SFL:
1365 ASSERT (0);
1366 break;
1367
1368 case TGSI_OPCODE_SGT:
1369 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1370 FETCH( &r[0], 0, chan_index );
1371 FETCH( &r[1], 1, chan_index );
1372 r[0].q = si_fcgt(r[0].q, r[1].q);
1373 STORE( &r[0], 0, chan_index );
1374 }
1375 break;
1376
1377 case TGSI_OPCODE_SIN:
1378 FETCH( &r[0], 0, CHAN_X );
1379 r[0].q = micro_sin(r[0].q);
1380 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1381 STORE( &r[0], 0, chan_index );
1382 }
1383 break;
1384
1385 case TGSI_OPCODE_SLE:
1386 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1387 FETCH( &r[0], 0, chan_index );
1388 FETCH( &r[1], 1, chan_index );
1389
1390 r[0].q = si_fcgt(r[0].q, r[1].q);
1391 r[0].q = si_xori(r[0].q, 0xff);
1392
1393 STORE( &r[0], 0, chan_index );
1394 }
1395 break;
1396
1397 case TGSI_OPCODE_SNE:
1398 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1399 FETCH( &r[0], 0, chan_index );
1400 FETCH( &r[1], 1, chan_index );
1401
1402 r[0].q = si_fceq(r[0].q, r[1].q);
1403 r[0].q = si_xori(r[0].q, 0xff);
1404
1405 STORE( &r[0], 0, chan_index );
1406 }
1407 break;
1408
1409 case TGSI_OPCODE_STR:
1410 ASSERT (0);
1411 break;
1412
1413 case TGSI_OPCODE_TEX:
1414 /* simple texture lookup */
1415 /* src[0] = texcoord */
1416 /* src[1] = sampler unit */
1417 exec_tex(mach, inst, FALSE, FALSE);
1418 break;
1419
1420 case TGSI_OPCODE_TXB:
1421 /* Texture lookup with lod bias */
      /* src[0] = texcoord (src[0].w = lod bias) */
1423 /* src[1] = sampler unit */
1424 exec_tex(mach, inst, TRUE, FALSE);
1425 break;
1426
1427 case TGSI_OPCODE_TXD:
      /* Texture lookup with explicit partial derivatives */
1429 /* src[0] = texcoord */
1430 /* src[1] = d[strq]/dx */
1431 /* src[2] = d[strq]/dy */
1432 /* src[3] = sampler unit */
1433 ASSERT (0);
1434 break;
1435
1436 case TGSI_OPCODE_TXL:
      /* Texture lookup with explicit LOD */
      /* src[0] = texcoord (src[0].w = LOD) */
1439 /* src[1] = sampler unit */
1440 exec_tex(mach, inst, TRUE, FALSE);
1441 break;
1442
1443 case TGSI_OPCODE_TXP:
1444 /* Texture lookup with projection */
1445 /* src[0] = texcoord (src[0].w = projection) */
1446 /* src[1] = sampler unit */
1447 exec_tex(mach, inst, TRUE, TRUE);
1448 break;
1449
1450 case TGSI_OPCODE_UP2H:
1451 ASSERT (0);
1452 break;
1453
1454 case TGSI_OPCODE_UP2US:
1455 ASSERT (0);
1456 break;
1457
1458 case TGSI_OPCODE_UP4B:
1459 ASSERT (0);
1460 break;
1461
1462 case TGSI_OPCODE_UP4UB:
1463 ASSERT (0);
1464 break;
1465
1466 case TGSI_OPCODE_X2D:
1467 ASSERT (0);
1468 break;
1469
1470 case TGSI_OPCODE_ARA:
1471 ASSERT (0);
1472 break;
1473
1474 case TGSI_OPCODE_ARR:
1475 ASSERT (0);
1476 break;
1477
1478 case TGSI_OPCODE_BRA:
1479 ASSERT (0);
1480 break;
1481
1482 case TGSI_OPCODE_CAL:
1483 /* skip the call if no execution channels are enabled */
1484 if (mach->ExecMask) {
1485 /* do the call */
1486
1487 /* push the Cond, Loop, Cont stacks */
1488 ASSERT(mach->CondStackTop < TGSI_EXEC_MAX_COND_NESTING);
1489 mach->CondStack[mach->CondStackTop++] = mach->CondMask;
1490 ASSERT(mach->LoopStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
1491 mach->LoopStack[mach->LoopStackTop++] = mach->LoopMask;
1492 ASSERT(mach->ContStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
1493 mach->ContStack[mach->ContStackTop++] = mach->ContMask;
1494
1495 ASSERT(mach->FuncStackTop < TGSI_EXEC_MAX_CALL_NESTING);
1496 mach->FuncStack[mach->FuncStackTop++] = mach->FuncMask;
1497
1498 /* note that PC was already incremented above */
1499 mach->CallStack[mach->CallStackTop++] = *pc;
1500 *pc = inst->InstructionExtLabel.Label;
1501 }
1502 break;
1503
1504 case TGSI_OPCODE_RET:
1505 mach->FuncMask &= ~mach->ExecMask;
1506 UPDATE_EXEC_MASK(mach);
1507
1508 if (mach->ExecMask == 0x0) {
         /* really return now (otherwise, keep executing) */
1510
1511 if (mach->CallStackTop == 0) {
1512 /* returning from main() */
1513 *pc = -1;
1514 return;
1515 }
1516 *pc = mach->CallStack[--mach->CallStackTop];
1517
1518 /* pop the Cond, Loop, Cont stacks */
1519 ASSERT(mach->CondStackTop > 0);
1520 mach->CondMask = mach->CondStack[--mach->CondStackTop];
1521 ASSERT(mach->LoopStackTop > 0);
1522 mach->LoopMask = mach->LoopStack[--mach->LoopStackTop];
1523 ASSERT(mach->ContStackTop > 0);
1524 mach->ContMask = mach->ContStack[--mach->ContStackTop];
1525 ASSERT(mach->FuncStackTop > 0);
1526 mach->FuncMask = mach->FuncStack[--mach->FuncStackTop];
1527
1528 UPDATE_EXEC_MASK(mach);
1529 }
1530 break;
1531
1532 case TGSI_OPCODE_SSG:
1533 ASSERT (0);
1534 break;
1535
1536 case TGSI_OPCODE_CMP:
1537 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1538 FETCH(&r[0], 0, chan_index);
1539 FETCH(&r[1], 1, chan_index);
1540 FETCH(&r[2], 2, chan_index);
1541
1542 /* r0 = (r0 < 0.0) ? r1 : r2
1543 */
1544 r[3].q = si_xor(r[3].q, r[3].q);
1545 r[0].q = micro_lt(r[0].q, r[3].q);
1546 r[0].q = si_selb(r[1].q, r[2].q, r[0].q);
1547
1548 STORE(&r[0], 0, chan_index);
1549 }
1550 break;
1551
1552 case TGSI_OPCODE_SCS:
1553 if( IS_CHANNEL_ENABLED( *inst, CHAN_X ) || IS_CHANNEL_ENABLED( *inst, CHAN_Y ) ) {
1554 FETCH( &r[0], 0, CHAN_X );
1555 }
1556 if( IS_CHANNEL_ENABLED( *inst, CHAN_X ) ) {
1557 r[1].q = micro_cos(r[0].q);
1558 STORE( &r[1], 0, CHAN_X );
1559 }
1560 if( IS_CHANNEL_ENABLED( *inst, CHAN_Y ) ) {
1561 r[1].q = micro_sin(r[0].q);
1562 STORE( &r[1], 0, CHAN_Y );
1563 }
1564 if( IS_CHANNEL_ENABLED( *inst, CHAN_Z ) ) {
1565 STORE( &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], 0, CHAN_Z );
1566 }
1567 if( IS_CHANNEL_ENABLED( *inst, CHAN_W ) ) {
1568 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
1569 }
1570 break;
1571
1572 case TGSI_OPCODE_NRM:
1573 ASSERT (0);
1574 break;
1575
1576 case TGSI_OPCODE_DIV:
1577 ASSERT( 0 );
1578 break;
1579
1580 case TGSI_OPCODE_DP2:
1581 FETCH( &r[0], 0, CHAN_X );
1582 FETCH( &r[1], 1, CHAN_X );
1583 r[0].q = si_fm(r[0].q, r[1].q);
1584
1585 FETCH( &r[1], 0, CHAN_Y );
1586 FETCH( &r[2], 1, CHAN_Y );
1587 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1588
1589 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1590 STORE( &r[0], 0, chan_index );
1591 }
1592 break;
1593
1594 case TGSI_OPCODE_IF:
1595 /* push CondMask */
1596 ASSERT(mach->CondStackTop < TGSI_EXEC_MAX_COND_NESTING);
1597 mach->CondStack[mach->CondStackTop++] = mach->CondMask;
1598 FETCH( &r[0], 0, CHAN_X );
1599 /* update CondMask */
1600 if( ! r[0].u[0] ) {
1601 mach->CondMask &= ~0x1;
1602 }
1603 if( ! r[0].u[1] ) {
1604 mach->CondMask &= ~0x2;
1605 }
1606 if( ! r[0].u[2] ) {
1607 mach->CondMask &= ~0x4;
1608 }
1609 if( ! r[0].u[3] ) {
1610 mach->CondMask &= ~0x8;
1611 }
1612 UPDATE_EXEC_MASK(mach);
1613 /* Todo: If CondMask==0, jump to ELSE */
1614 break;
1615
1616 case TGSI_OPCODE_ELSE:
1617 /* invert CondMask wrt previous mask */
1618 {
1619 uint prevMask;
1620 ASSERT(mach->CondStackTop > 0);
1621 prevMask = mach->CondStack[mach->CondStackTop - 1];
1622 mach->CondMask = ~mach->CondMask & prevMask;
1623 UPDATE_EXEC_MASK(mach);
1624 /* Todo: If CondMask==0, jump to ENDIF */
1625 }
1626 break;
1627
1628 case TGSI_OPCODE_ENDIF:
1629 /* pop CondMask */
1630 ASSERT(mach->CondStackTop > 0);
1631 mach->CondMask = mach->CondStack[--mach->CondStackTop];
1632 UPDATE_EXEC_MASK(mach);
1633 break;
1634
1635 case TGSI_OPCODE_END:
1636 /* halt execution */
1637 *pc = -1;
1638 break;
1639
1640 case TGSI_OPCODE_REP:
1641 ASSERT (0);
1642 break;
1643
1644 case TGSI_OPCODE_ENDREP:
1645 ASSERT (0);
1646 break;
1647
1648 case TGSI_OPCODE_PUSHA:
1649 ASSERT (0);
1650 break;
1651
1652 case TGSI_OPCODE_POPA:
1653 ASSERT (0);
1654 break;
1655
1656 case TGSI_OPCODE_CEIL:
1657 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1658 FETCH( &r[0], 0, chan_index );
1659 r[0].q = micro_ceil(r[0].q);
1660 STORE( &r[0], 0, chan_index );
1661 }
1662 break;
1663
1664 case TGSI_OPCODE_I2F:
1665 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1666 FETCH( &r[0], 0, chan_index );
1667 r[0].q = si_csflt(r[0].q, 0);
1668 STORE( &r[0], 0, chan_index );
1669 }
1670 break;
1671
1672 case TGSI_OPCODE_NOT:
1673 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1674 FETCH( &r[0], 0, chan_index );
1675 r[0].q = si_xorbi(r[0].q, 0xff);
1676 STORE( &r[0], 0, chan_index );
1677 }
1678 break;
1679
1680 case TGSI_OPCODE_TRUNC:
1681 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1682 FETCH( &r[0], 0, chan_index );
1683 r[0].q = micro_trunc(r[0].q);
1684 STORE( &r[0], 0, chan_index );
1685 }
1686 break;
1687
1688 case TGSI_OPCODE_SHL:
1689 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1690 FETCH( &r[0], 0, chan_index );
1691 FETCH( &r[1], 1, chan_index );
1692
1693 r[0].q = si_shl(r[0].q, r[1].q);
1694
1695 STORE( &r[0], 0, chan_index );
1696 }
1697 break;
1698
1699 case TGSI_OPCODE_SHR:
1700 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1701 FETCH( &r[0], 0, chan_index );
1702 FETCH( &r[1], 1, chan_index );
1703 r[0].q = micro_ishr(r[0].q, r[1].q);
1704 STORE( &r[0], 0, chan_index );
1705 }
1706 break;
1707
1708 case TGSI_OPCODE_AND:
1709 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1710 FETCH( &r[0], 0, chan_index );
1711 FETCH( &r[1], 1, chan_index );
1712 r[0].q = si_and(r[0].q, r[1].q);
1713 STORE( &r[0], 0, chan_index );
1714 }
1715 break;
1716
1717 case TGSI_OPCODE_OR:
1718 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1719 FETCH( &r[0], 0, chan_index );
1720 FETCH( &r[1], 1, chan_index );
1721 r[0].q = si_or(r[0].q, r[1].q);
1722 STORE( &r[0], 0, chan_index );
1723 }
1724 break;
1725
1726 case TGSI_OPCODE_MOD:
1727 ASSERT (0);
1728 break;
1729
1730 case TGSI_OPCODE_XOR:
1731 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1732 FETCH( &r[0], 0, chan_index );
1733 FETCH( &r[1], 1, chan_index );
1734 r[0].q = si_xor(r[0].q, r[1].q);
1735 STORE( &r[0], 0, chan_index );
1736 }
1737 break;
1738
1739 case TGSI_OPCODE_SAD:
1740 ASSERT (0);
1741 break;
1742
1743 case TGSI_OPCODE_TXF:
1744 ASSERT (0);
1745 break;
1746
1747 case TGSI_OPCODE_TXQ:
1748 ASSERT (0);
1749 break;
1750
1751 case TGSI_OPCODE_EMIT:
1752 mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0] += 16;
1753 mach->Primitives[mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]]++;
1754 break;
1755
1756 case TGSI_OPCODE_ENDPRIM:
1757 mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]++;
1758 mach->Primitives[mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]] = 0;
1759 break;
1760
1761 case TGSI_OPCODE_BGNFOR:
1762 /* fall-through (for now) */
1763 case TGSI_OPCODE_BGNLOOP:
1764 /* push LoopMask and ContMasks */
1765 ASSERT(mach->LoopStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
1766 mach->LoopStack[mach->LoopStackTop++] = mach->LoopMask;
1767 ASSERT(mach->ContStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
1768 mach->ContStack[mach->ContStackTop++] = mach->ContMask;
1769 break;
1770
1771 case TGSI_OPCODE_ENDFOR:
1772 /* fall-through (for now at least) */
1773 case TGSI_OPCODE_ENDLOOP:
1774 /* Restore ContMask, but don't pop */
1775 ASSERT(mach->ContStackTop > 0);
1776 mach->ContMask = mach->ContStack[mach->ContStackTop - 1];
1777 if (mach->LoopMask) {
1778 /* repeat loop: jump to instruction just past BGNLOOP */
1779 *pc = inst->InstructionExtLabel.Label + 1;
1780 }
1781 else {
1782 /* exit loop: pop LoopMask */
1783 ASSERT(mach->LoopStackTop > 0);
1784 mach->LoopMask = mach->LoopStack[--mach->LoopStackTop];
1785 /* pop ContMask */
1786 ASSERT(mach->ContStackTop > 0);
1787 mach->ContMask = mach->ContStack[--mach->ContStackTop];
1788 }
1789 UPDATE_EXEC_MASK(mach);
1790 break;
1791
1792 case TGSI_OPCODE_BRK:
1793 /* turn off loop channels for each enabled exec channel */
1794 mach->LoopMask &= ~mach->ExecMask;
1795 /* Todo: if mach->LoopMask == 0, jump to end of loop */
1796 UPDATE_EXEC_MASK(mach);
1797 break;
1798
1799 case TGSI_OPCODE_CONT:
1800 /* turn off cont channels for each enabled exec channel */
1801 mach->ContMask &= ~mach->ExecMask;
      /* Todo: if mach->ContMask == 0, jump to end of loop */
1803 UPDATE_EXEC_MASK(mach);
1804 break;
1805
1806 case TGSI_OPCODE_BGNSUB:
1807 /* no-op */
1808 break;
1809
1810 case TGSI_OPCODE_ENDSUB:
1811 /* no-op */
1812 break;
1813
1814 case TGSI_OPCODE_NOISE1:
1815 ASSERT( 0 );
1816 break;
1817
1818 case TGSI_OPCODE_NOISE2:
1819 ASSERT( 0 );
1820 break;
1821
1822 case TGSI_OPCODE_NOISE3:
1823 ASSERT( 0 );
1824 break;
1825
1826 case TGSI_OPCODE_NOISE4:
1827 ASSERT( 0 );
1828 break;
1829
1830 case TGSI_OPCODE_NOP:
1831 break;
1832
1833 default:
1834 ASSERT( 0 );
1835 }
1836 }
1837
1838
1839 /**
1840 * Run TGSI interpreter.
1841 * \return bitmask of "alive" quad components
1842 */
1843 uint
1844 spu_exec_machine_run( struct spu_exec_machine *mach )
1845 {
1846 uint i;
1847 int pc = 0;
1848
1849 mach->CondMask = 0xf;
1850 mach->LoopMask = 0xf;
1851 mach->ContMask = 0xf;
1852 mach->FuncMask = 0xf;
1853 mach->ExecMask = 0xf;
1854
1855 mach->CondStackTop = 0; /* temporarily subvert this ASSERTion */
1856 ASSERT(mach->CondStackTop == 0);
1857 ASSERT(mach->LoopStackTop == 0);
1858 ASSERT(mach->ContStackTop == 0);
1859 ASSERT(mach->CallStackTop == 0);
1860
1861 mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] = 0;
1862 mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0] = 0;
1863
1864 if( mach->Processor == TGSI_PROCESSOR_GEOMETRY ) {
1865 mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0] = 0;
1866 mach->Primitives[0] = 0;
1867 }
1868
1869
1870 /* execute declarations (interpolants) */
1871 if( mach->Processor == TGSI_PROCESSOR_FRAGMENT ) {
1872 for (i = 0; i < mach->NumDeclarations; i++) {
1873 union {
1874 struct tgsi_full_declaration decl;
1875 qword buffer[ROUNDUP16(sizeof(struct tgsi_full_declaration)) / 16];
1876 } d ALIGN16_ATTRIB;
1877 unsigned ea = (unsigned) (mach->Declarations + pc);
1878
1879 spu_dcache_fetch_unaligned(d.buffer, ea, sizeof(d.decl));
1880
1881 exec_declaration( mach, &d.decl );
1882 }
1883 }
1884
1885 /* execute instructions, until pc is set to -1 */
1886 while (pc != -1) {
1887 union {
1888 struct tgsi_full_instruction inst;
1889 qword buffer[ROUNDUP16(sizeof(struct tgsi_full_instruction)) / 16];
1890 } i ALIGN16_ATTRIB;
1891 unsigned ea = (unsigned) (mach->Instructions + pc);
1892
1893 spu_dcache_fetch_unaligned(i.buffer, ea, sizeof(i.inst));
1894 exec_instruction( mach, & i.inst, &pc );
1895 }
1896
1897 #if 0
1898 /* we scale from floats in [0,1] to Zbuffer ints in sp_quad_depth_test.c */
1899 if (mach->Processor == TGSI_PROCESSOR_FRAGMENT) {
1900 /*
1901 * Scale back depth component.
1902 */
1903 for (i = 0; i < 4; i++)
1904 mach->Outputs[0].xyzw[2].f[i] *= ctx->DrawBuffer->_DepthMaxF;
1905 }
1906 #endif
1907
1908 return ~mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0];
1909 }
1910
1911