725a72b326aef915d1625c1dbc7019503a7fd7b2
[mesa.git] / src / gallium / drivers / cell / spu / spu_exec.c
1 /**************************************************************************
2 *
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
 29  * TGSI interpreter/executor.
30 *
31 * Flow control information:
32 *
33 * Since we operate on 'quads' (4 pixels or 4 vertices in parallel)
34 * flow control statements (IF/ELSE/ENDIF, LOOP/ENDLOOP) require special
35 * care since a condition may be true for some quad components but false
36 * for other components.
37 *
38 * We basically execute all statements (even if they're in the part of
39 * an IF/ELSE clause that's "not taken") and use a special mask to
40 * control writing to destination registers. This is the ExecMask.
41 * See store_dest().
42 *
43 * The ExecMask is computed from three other masks (CondMask, LoopMask and
44 * ContMask) which are controlled by the flow control instructions (namely:
45 * (IF/ELSE/ENDIF, LOOP/ENDLOOP and CONT).
46 *
47 *
48 * Authors:
49 * Michal Krol
50 * Brian Paul
51 */
52
53 #include <transpose_matrix4x4.h>
54 #include <simdmath/ceilf4.h>
55 #include <simdmath/cosf4.h>
56 #include <simdmath/divf4.h>
57 #include <simdmath/floorf4.h>
58 #include <simdmath/log2f4.h>
59 #include <simdmath/powf4.h>
60 #include <simdmath/sinf4.h>
61 #include <simdmath/sqrtf4.h>
62 #include <simdmath/truncf4.h>
63
64 #include "pipe/p_compiler.h"
65 #include "pipe/p_state.h"
66 #include "pipe/p_shader_tokens.h"
67 #include "tgsi/tgsi_parse.h"
68 #include "tgsi/tgsi_util.h"
69 #include "spu_exec.h"
70 #include "spu_main.h"
71 #include "spu_vertex_shader.h"
72 #include "spu_dcache.h"
73 #include "cell/common.h"
74
/* Index of each pixel within a 2x2 quad (one 32-bit element per pixel):
 *
 *    +---+---+
 *    | 0 | 1 |    0 = top-left,     1 = top-right
 *    +---+---+
 *    | 2 | 3 |    2 = bottom-left,  3 = bottom-right
 *    +---+---+
 */
#define TILE_TOP_LEFT     0
#define TILE_TOP_RIGHT    1
#define TILE_BOTTOM_LEFT  2
#define TILE_BOTTOM_RIGHT 3

/*
 * Shorthand locations of various utility registers (_I = Index, _C = Channel)
 */
#define TEMP_0_I           TGSI_EXEC_TEMP_00000000_I
#define TEMP_0_C           TGSI_EXEC_TEMP_00000000_C
#define TEMP_7F_I          TGSI_EXEC_TEMP_7FFFFFFF_I
#define TEMP_7F_C          TGSI_EXEC_TEMP_7FFFFFFF_C
#define TEMP_80_I          TGSI_EXEC_TEMP_80000000_I
#define TEMP_80_C          TGSI_EXEC_TEMP_80000000_C
#define TEMP_FF_I          TGSI_EXEC_TEMP_FFFFFFFF_I
#define TEMP_FF_C          TGSI_EXEC_TEMP_FFFFFFFF_C
#define TEMP_1_I           TGSI_EXEC_TEMP_ONE_I
#define TEMP_1_C           TGSI_EXEC_TEMP_ONE_C
#define TEMP_2_I           TGSI_EXEC_TEMP_TWO_I
#define TEMP_2_C           TGSI_EXEC_TEMP_TWO_C
#define TEMP_128_I         TGSI_EXEC_TEMP_128_I
#define TEMP_128_C         TGSI_EXEC_TEMP_128_C
#define TEMP_M128_I        TGSI_EXEC_TEMP_MINUS_128_I
#define TEMP_M128_C        TGSI_EXEC_TEMP_MINUS_128_C
#define TEMP_KILMASK_I     TGSI_EXEC_TEMP_KILMASK_I
#define TEMP_KILMASK_C     TGSI_EXEC_TEMP_KILMASK_C
#define TEMP_OUTPUT_I      TGSI_EXEC_TEMP_OUTPUT_I
#define TEMP_OUTPUT_C      TGSI_EXEC_TEMP_OUTPUT_C
#define TEMP_PRIMITIVE_I   TGSI_EXEC_TEMP_PRIMITIVE_I
#define TEMP_PRIMITIVE_C   TGSI_EXEC_TEMP_PRIMITIVE_C
#define TEMP_R0            TGSI_EXEC_TEMP_R0

/* Iterate CHAN over the four register components (X, Y, Z, W). */
#define FOR_EACH_CHANNEL(CHAN)\
   for (CHAN = 0; CHAN < 4; CHAN++)

/* Test whether component CHAN is enabled in the writemask of
 * destination register 0 (resp. 1).
 */
#define IS_CHANNEL_ENABLED(INST, CHAN)\
   ((INST).FullDstRegisters[0].DstRegister.WriteMask & (1 << (CHAN)))

#define IS_CHANNEL_ENABLED2(INST, CHAN)\
   ((INST).FullDstRegisters[1].DstRegister.WriteMask & (1 << (CHAN)))

/* Iterate only over the writemask-enabled components. */
#define FOR_EACH_ENABLED_CHANNEL(INST, CHAN)\
   FOR_EACH_CHANNEL( CHAN )\
      if (IS_CHANNEL_ENABLED( INST, CHAN ))

#define FOR_EACH_ENABLED_CHANNEL2(INST, CHAN)\
   FOR_EACH_CHANNEL( CHAN )\
      if (IS_CHANNEL_ENABLED2( INST, CHAN ))


/** The execution mask depends on the conditional mask and the loop mask */
#define UPDATE_EXEC_MASK(MACH) \
      MACH->ExecMask = MACH->CondMask & MACH->LoopMask & MACH->ContMask & MACH->FuncMask


/* Component indexes within a register. */
#define CHAN_X  0
#define CHAN_Y  1
#define CHAN_Z  2
#define CHAN_W  3
134
135
136
137 /**
138 * Initialize machine state by expanding tokens to full instructions,
139 * allocating temporary storage, setting up constants, etc.
140 * After this, we can call spu_exec_machine_run() many times.
141 */
142 void
143 spu_exec_machine_init(struct spu_exec_machine *mach,
144 uint numSamplers,
145 struct spu_sampler *samplers,
146 unsigned processor)
147 {
148 const qword zero = si_il(0);
149 const qword not_zero = si_il(~0);
150
151 (void) numSamplers;
152 mach->Samplers = samplers;
153 mach->Processor = processor;
154 mach->Addrs = &mach->Temps[TGSI_EXEC_NUM_TEMPS];
155
156 /* Setup constants. */
157 mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q = zero;
158 mach->Temps[TEMP_FF_I].xyzw[TEMP_FF_C].q = not_zero;
159 mach->Temps[TEMP_7F_I].xyzw[TEMP_7F_C].q = si_shli(not_zero, -1);
160 mach->Temps[TEMP_80_I].xyzw[TEMP_80_C].q = si_shli(not_zero, 31);
161
162 mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q = (qword) spu_splats(1.0f);
163 mach->Temps[TEMP_2_I].xyzw[TEMP_2_C].q = (qword) spu_splats(2.0f);
164 mach->Temps[TEMP_128_I].xyzw[TEMP_128_C].q = (qword) spu_splats(128.0f);
165 mach->Temps[TEMP_M128_I].xyzw[TEMP_M128_C].q = (qword) spu_splats(-128.0f);
166 }
167
168
/** Per-element |x|: shift the sign bit out to the left, then logical
 *  shift right one, leaving the sign bit clear. */
static INLINE qword
micro_abs(qword src)
{
   return si_rotmi(si_shli(src, 1), -1);
}
174
175 static INLINE qword
176 micro_ceil(qword src)
177 {
178 return (qword) _ceilf4((vec_float4) src);
179 }
180
181 static INLINE qword
182 micro_cos(qword src)
183 {
184 return (qword) _cosf4((vec_float4) src);
185 }
186
187 static const qword br_shuf = {
188 TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
189 TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
190 TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
191 TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
192 TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
193 TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
194 TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
195 TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
196 };
197
198 static const qword bl_shuf = {
199 TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
200 TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
201 TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
202 TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
203 TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
204 TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
205 TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
206 TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
207 };
208
209 static const qword tl_shuf = {
210 TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
211 TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
212 TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
213 TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
214 TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
215 TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
216 TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
217 TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
218 };
219
220 static qword
221 micro_ddx(qword src)
222 {
223 qword bottom_right = si_shufb(src, src, br_shuf);
224 qword bottom_left = si_shufb(src, src, bl_shuf);
225
226 return si_fs(bottom_right, bottom_left);
227 }
228
229 static qword
230 micro_ddy(qword src)
231 {
232 qword top_left = si_shufb(src, src, tl_shuf);
233 qword bottom_left = si_shufb(src, src, bl_shuf);
234
235 return si_fs(top_left, bottom_left);
236 }
237
238 static INLINE qword
239 micro_div(qword src0, qword src1)
240 {
241 return (qword) _divf4((vec_float4) src0, (vec_float4) src1);
242 }
243
244 static qword
245 micro_flr(qword src)
246 {
247 return (qword) _floorf4((vec_float4) src);
248 }
249
250 static qword
251 micro_frc(qword src)
252 {
253 return si_fs(src, (qword) _floorf4((vec_float4) src));
254 }
255
256 static INLINE qword
257 micro_ge(qword src0, qword src1)
258 {
259 return si_or(si_fceq(src0, src1), si_fcgt(src0, src1));
260 }
261
262 static qword
263 micro_lg2(qword src)
264 {
265 return (qword) _log2f4((vec_float4) src);
266 }
267
268 static INLINE qword
269 micro_lt(qword src0, qword src1)
270 {
271 const qword tmp = si_or(si_fceq(src0, src1), si_fcgt(src0, src1));
272
273 return si_xori(tmp, 0xff);
274 }
275
276 static INLINE qword
277 micro_max(qword src0, qword src1)
278 {
279 return si_selb(src1, src0, si_fcgt(src0, src1));
280 }
281
282 static INLINE qword
283 micro_min(qword src0, qword src1)
284 {
285 return si_selb(src0, src1, si_fcgt(src0, src1));
286 }
287
288 static qword
289 micro_neg(qword src)
290 {
291 return si_xor(src, (qword) spu_splats(0x80000000));
292 }
293
294 static qword
295 micro_set_sign(qword src)
296 {
297 return si_or(src, (qword) spu_splats(0x80000000));
298 }
299
300 static qword
301 micro_pow(qword src0, qword src1)
302 {
303 return (qword) _powf4((vec_float4) src0, (vec_float4) src1);
304 }
305
306 static qword
307 micro_rnd(qword src)
308 {
309 const qword half = (qword) spu_splats(0.5f);
310
311 /* May be able to use _roundf4. There may be some difference, though.
312 */
313 return (qword) _floorf4((vec_float4) si_fa(src, half));
314 }
315
/** Per-element arithmetic right shift: src0 >> src1.
 *  si_rotma shifts right by the negated count, so negate src1 first
 *  (si_sfi computes 0 - src1). */
static INLINE qword
micro_ishr(qword src0, qword src1)
{
   return si_rotma(src0, si_sfi(src1, 0));
}
321
322 static qword
323 micro_trunc(qword src)
324 {
325 return (qword) _truncf4((vec_float4) src);
326 }
327
328 static qword
329 micro_sin(qword src)
330 {
331 return (qword) _sinf4((vec_float4) src);
332 }
333
334 static INLINE qword
335 micro_sqrt(qword src)
336 {
337 return (qword) _sqrtf4((vec_float4) src);
338 }
339
/**
 * Fetch one channel's value (four quad elements) from the given register
 * file.  Each of the four quad elements may use a different register
 * index, as required by indirect addressing.
 *
 * \param mach     interpreter state
 * \param file     source register file (TGSI_FILE_x)
 * \param swizzle  component selector; the extended swizzles ZERO / ONE
 *                 yield the constant 0.0 / 1.0
 * \param index    per-element register indexes
 * \param chan     returns the fetched value
 */
static void
fetch_src_file_channel(
   const struct spu_exec_machine *mach,
   const uint file,
   const uint swizzle,
   const union spu_exec_channel *index,
   union spu_exec_channel *chan )
{
   switch( swizzle ) {
   case TGSI_EXTSWIZZLE_X:
   case TGSI_EXTSWIZZLE_Y:
   case TGSI_EXTSWIZZLE_Z:
   case TGSI_EXTSWIZZLE_W:
      switch( file ) {
      case TGSI_FILE_CONSTANT: {
         unsigned i;

         /* Constants live in main memory; pull each element through the
          * SPU software data cache, one float at a time.
          */
         for (i = 0; i < 4; i++) {
            const float *ptr = mach->Consts[index->i[i]];
            float tmp[4];

            spu_dcache_fetch_unaligned((qword *) tmp,
                                       (uintptr_t)(ptr + swizzle),
                                       sizeof(float));

            chan->f[i] = tmp[0];
         }
         break;
      }

      case TGSI_FILE_INPUT:
         chan->u[0] = mach->Inputs[index->i[0]].xyzw[swizzle].u[0];
         chan->u[1] = mach->Inputs[index->i[1]].xyzw[swizzle].u[1];
         chan->u[2] = mach->Inputs[index->i[2]].xyzw[swizzle].u[2];
         chan->u[3] = mach->Inputs[index->i[3]].xyzw[swizzle].u[3];
         break;

      case TGSI_FILE_TEMPORARY:
         chan->u[0] = mach->Temps[index->i[0]].xyzw[swizzle].u[0];
         chan->u[1] = mach->Temps[index->i[1]].xyzw[swizzle].u[1];
         chan->u[2] = mach->Temps[index->i[2]].xyzw[swizzle].u[2];
         chan->u[3] = mach->Temps[index->i[3]].xyzw[swizzle].u[3];
         break;

      case TGSI_FILE_IMMEDIATE:
         ASSERT( index->i[0] < (int) mach->ImmLimit );
         ASSERT( index->i[1] < (int) mach->ImmLimit );
         ASSERT( index->i[2] < (int) mach->ImmLimit );
         ASSERT( index->i[3] < (int) mach->ImmLimit );

         chan->f[0] = mach->Imms[index->i[0]][swizzle];
         chan->f[1] = mach->Imms[index->i[1]][swizzle];
         chan->f[2] = mach->Imms[index->i[2]][swizzle];
         chan->f[3] = mach->Imms[index->i[3]][swizzle];
         break;

      case TGSI_FILE_ADDRESS:
         chan->u[0] = mach->Addrs[index->i[0]].xyzw[swizzle].u[0];
         chan->u[1] = mach->Addrs[index->i[1]].xyzw[swizzle].u[1];
         chan->u[2] = mach->Addrs[index->i[2]].xyzw[swizzle].u[2];
         chan->u[3] = mach->Addrs[index->i[3]].xyzw[swizzle].u[3];
         break;

      case TGSI_FILE_OUTPUT:
         /* vertex/fragment output vars can be read too */
         chan->u[0] = mach->Outputs[index->i[0]].xyzw[swizzle].u[0];
         chan->u[1] = mach->Outputs[index->i[1]].xyzw[swizzle].u[1];
         chan->u[2] = mach->Outputs[index->i[2]].xyzw[swizzle].u[2];
         chan->u[3] = mach->Outputs[index->i[3]].xyzw[swizzle].u[3];
         break;

      default:
         ASSERT( 0 );
      }
      break;

   case TGSI_EXTSWIZZLE_ZERO:
      *chan = mach->Temps[TEMP_0_I].xyzw[TEMP_0_C];
      break;

   case TGSI_EXTSWIZZLE_ONE:
      *chan = mach->Temps[TEMP_1_I].xyzw[TEMP_1_C];
      break;

   default:
      ASSERT( 0 );
   }
}
428
/**
 * Fetch a full source operand channel, resolving indirect and
 * two-dimensional addressing and applying the register's sign modifiers.
 *
 * \param mach        interpreter state
 * \param chan        returns the fetched (and modified) value
 * \param reg         full source register description
 * \param chan_index  which destination channel is being computed
 */
static void
fetch_source(
   const struct spu_exec_machine *mach,
   union spu_exec_channel *chan,
   const struct tgsi_full_src_register *reg,
   const uint chan_index )
{
   union spu_exec_channel index;
   uint swizzle;

   /* Start with the same static register index in all four elements. */
   index.i[0] =
   index.i[1] =
   index.i[2] =
   index.i[3] = reg->SrcRegister.Index;

   if (reg->SrcRegister.Indirect) {
      union spu_exec_channel index2;
      union spu_exec_channel indir_index;

      /* Fetch the address register's selected channel and add it to the
       * static index, per element.
       */
      index2.i[0] =
      index2.i[1] =
      index2.i[2] =
      index2.i[3] = reg->SrcRegisterInd.Index;

      swizzle = tgsi_util_get_src_register_swizzle(&reg->SrcRegisterInd,
                                                   CHAN_X);
      fetch_src_file_channel(
         mach,
         reg->SrcRegisterInd.File,
         swizzle,
         &index2,
         &indir_index );

      index.q = si_a(index.q, indir_index.q);
   }

   if( reg->SrcRegister.Dimension ) {
      /* Two-dimensional addressing: scale the first index by the file's
       * row stride before adding the second dimension's index.
       */
      switch( reg->SrcRegister.File ) {
      case TGSI_FILE_INPUT:
         /* NOTE(review): 17 is presumably the per-row input stride --
          * confirm against the input layout. */
         index.q = si_mpyi(index.q, 17);
         break;
      case TGSI_FILE_CONSTANT:
         /* NOTE(review): <<12 multiplies by 4096 -- verify this matches
          * the constant-buffer stride. */
         index.q = si_shli(index.q, 12);
         break;
      default:
         ASSERT( 0 );
      }

      index.i[0] += reg->SrcRegisterDim.Index;
      index.i[1] += reg->SrcRegisterDim.Index;
      index.i[2] += reg->SrcRegisterDim.Index;
      index.i[3] += reg->SrcRegisterDim.Index;

      if (reg->SrcRegisterDim.Indirect) {
         union spu_exec_channel index2;
         union spu_exec_channel indir_index;

         index2.i[0] =
         index2.i[1] =
         index2.i[2] =
         index2.i[3] = reg->SrcRegisterDimInd.Index;

         swizzle = tgsi_util_get_src_register_swizzle( &reg->SrcRegisterDimInd, CHAN_X );
         fetch_src_file_channel(
            mach,
            reg->SrcRegisterDimInd.File,
            swizzle,
            &index2,
            &indir_index );

         index.q = si_a(index.q, indir_index.q);
      }
   }

   /* Fetch the value itself, then apply sign modifiers. */
   swizzle = tgsi_util_get_full_src_register_extswizzle( reg, chan_index );
   fetch_src_file_channel(
      mach,
      reg->SrcRegister.File,
      swizzle,
      &index,
      chan );

   switch (tgsi_util_get_full_src_register_sign_mode( reg, chan_index )) {
   case TGSI_UTIL_SIGN_CLEAR:
      chan->q = micro_abs(chan->q);
      break;

   case TGSI_UTIL_SIGN_SET:
      chan->q = micro_set_sign(chan->q);
      break;

   case TGSI_UTIL_SIGN_TOGGLE:
      chan->q = micro_neg(chan->q);
      break;

   case TGSI_UTIL_SIGN_KEEP:
      break;
   }

   /* The complement modifier computes 1.0 - x. */
   if (reg->SrcRegisterExtMod.Complement) {
      chan->q = si_fs(mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q, chan->q);
   }
}
532
533 static void
534 store_dest(
535 struct spu_exec_machine *mach,
536 const union spu_exec_channel *chan,
537 const struct tgsi_full_dst_register *reg,
538 const struct tgsi_full_instruction *inst,
539 uint chan_index )
540 {
541 union spu_exec_channel *dst;
542
543 switch( reg->DstRegister.File ) {
544 case TGSI_FILE_NULL:
545 return;
546
547 case TGSI_FILE_OUTPUT:
548 dst = &mach->Outputs[mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0]
549 + reg->DstRegister.Index].xyzw[chan_index];
550 break;
551
552 case TGSI_FILE_TEMPORARY:
553 dst = &mach->Temps[reg->DstRegister.Index].xyzw[chan_index];
554 break;
555
556 case TGSI_FILE_ADDRESS:
557 dst = &mach->Addrs[reg->DstRegister.Index].xyzw[chan_index];
558 break;
559
560 default:
561 ASSERT( 0 );
562 return;
563 }
564
565 switch (inst->Instruction.Saturate)
566 {
567 case TGSI_SAT_NONE:
568 if (mach->ExecMask & 0x1)
569 dst->i[0] = chan->i[0];
570 if (mach->ExecMask & 0x2)
571 dst->i[1] = chan->i[1];
572 if (mach->ExecMask & 0x4)
573 dst->i[2] = chan->i[2];
574 if (mach->ExecMask & 0x8)
575 dst->i[3] = chan->i[3];
576 break;
577
578 case TGSI_SAT_ZERO_ONE:
579 /* XXX need to obey ExecMask here */
580 dst->q = micro_max(chan->q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
581 dst->q = micro_min(dst->q, mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q);
582 break;
583
584 case TGSI_SAT_MINUS_PLUS_ONE:
585 ASSERT( 0 );
586 break;
587
588 default:
589 ASSERT( 0 );
590 }
591 }
592
/* Fetch source operand INDEX's channel CHAN of the current instruction
 * into VAL. */
#define FETCH(VAL,INDEX,CHAN)\
   fetch_source (mach, VAL, &inst->FullSrcRegisters[INDEX], CHAN)

/* Store VAL to channel CHAN of destination operand INDEX (applies
 * ExecMask and saturation in store_dest). */
#define STORE(VAL,INDEX,CHAN)\
   store_dest (mach, VAL, &inst->FullDstRegisters[INDEX], inst, CHAN )
598
599
600 /**
601 * Execute ARB-style KIL which is predicated by a src register.
602 * Kill fragment if any of the four values is less than zero.
603 */
604 static void
605 exec_kil(struct spu_exec_machine *mach,
606 const struct tgsi_full_instruction *inst)
607 {
608 uint uniquemask;
609 uint chan_index;
610 uint kilmask = 0; /* bit 0 = pixel 0, bit 1 = pixel 1, etc */
611 union spu_exec_channel r[1];
612
613 /* This mask stores component bits that were already tested. Note that
614 * we test if the value is less than zero, so 1.0 and 0.0 need not to be
615 * tested. */
616 uniquemask = (1 << TGSI_EXTSWIZZLE_ZERO) | (1 << TGSI_EXTSWIZZLE_ONE);
617
618 for (chan_index = 0; chan_index < 4; chan_index++)
619 {
620 uint swizzle;
621 uint i;
622
623 /* unswizzle channel */
624 swizzle = tgsi_util_get_full_src_register_extswizzle (
625 &inst->FullSrcRegisters[0],
626 chan_index);
627
628 /* check if the component has not been already tested */
629 if (uniquemask & (1 << swizzle))
630 continue;
631 uniquemask |= 1 << swizzle;
632
633 FETCH(&r[0], 0, chan_index);
634 for (i = 0; i < 4; i++)
635 if (r[0].f[i] < 0.0f)
636 kilmask |= 1 << i;
637 }
638
639 mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] |= kilmask;
640 }
641
642 /**
643 * Execute NVIDIA-style KIL which is predicated by a condition code.
644 * Kill fragment if the condition code is TRUE.
645 */
646 static void
647 exec_kilp(struct tgsi_exec_machine *mach,
648 const struct tgsi_full_instruction *inst)
649 {
650 uint kilmask = 0; /* bit 0 = pixel 0, bit 1 = pixel 1, etc */
651
652 /* TODO: build kilmask from CC mask */
653
654 mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] |= kilmask;
655 }
656
657 /*
658 * Fetch a texel using STR texture coordinates.
659 */
660 static void
661 fetch_texel( struct spu_sampler *sampler,
662 const union spu_exec_channel *s,
663 const union spu_exec_channel *t,
664 const union spu_exec_channel *p,
665 float lodbias, /* XXX should be float[4] */
666 union spu_exec_channel *r,
667 union spu_exec_channel *g,
668 union spu_exec_channel *b,
669 union spu_exec_channel *a )
670 {
671 qword rgba[4];
672 qword out[4];
673
674 sampler->get_samples(sampler, s->f, t->f, p->f, lodbias,
675 (float (*)[4]) rgba);
676
677 _transpose_matrix4x4((vec_float4 *) out, (vec_float4 *) rgba);
678 r->q = out[0];
679 g->q = out[1];
680 b->q = out[2];
681 a->q = out[3];
682 }
683
684
685 static void
686 exec_tex(struct spu_exec_machine *mach,
687 const struct tgsi_full_instruction *inst,
688 boolean biasLod, boolean projected)
689 {
690 const uint unit = inst->FullSrcRegisters[1].SrcRegister.Index;
691 union spu_exec_channel r[8];
692 uint chan_index;
693 float lodBias;
694
695 /* printf("Sampler %u unit %u\n", sampler, unit); */
696
697 switch (inst->InstructionExtTexture.Texture) {
698 case TGSI_TEXTURE_1D:
699
700 FETCH(&r[0], 0, CHAN_X);
701
702 if (projected) {
703 FETCH(&r[1], 0, CHAN_W);
704 r[0].q = micro_div(r[0].q, r[1].q);
705 }
706
707 if (biasLod) {
708 FETCH(&r[1], 0, CHAN_W);
709 lodBias = r[2].f[0];
710 }
711 else
712 lodBias = 0.0;
713
714 fetch_texel(&mach->Samplers[unit],
715 &r[0], NULL, NULL, lodBias, /* S, T, P, BIAS */
716 &r[0], &r[1], &r[2], &r[3]); /* R, G, B, A */
717 break;
718
719 case TGSI_TEXTURE_2D:
720 case TGSI_TEXTURE_RECT:
721
722 FETCH(&r[0], 0, CHAN_X);
723 FETCH(&r[1], 0, CHAN_Y);
724 FETCH(&r[2], 0, CHAN_Z);
725
726 if (projected) {
727 FETCH(&r[3], 0, CHAN_W);
728 r[0].q = micro_div(r[0].q, r[3].q);
729 r[1].q = micro_div(r[1].q, r[3].q);
730 r[2].q = micro_div(r[2].q, r[3].q);
731 }
732
733 if (biasLod) {
734 FETCH(&r[3], 0, CHAN_W);
735 lodBias = r[3].f[0];
736 }
737 else
738 lodBias = 0.0;
739
740 fetch_texel(&mach->Samplers[unit],
741 &r[0], &r[1], &r[2], lodBias, /* inputs */
742 &r[0], &r[1], &r[2], &r[3]); /* outputs */
743 break;
744
745 case TGSI_TEXTURE_3D:
746 case TGSI_TEXTURE_CUBE:
747
748 FETCH(&r[0], 0, CHAN_X);
749 FETCH(&r[1], 0, CHAN_Y);
750 FETCH(&r[2], 0, CHAN_Z);
751
752 if (projected) {
753 FETCH(&r[3], 0, CHAN_W);
754 r[0].q = micro_div(r[0].q, r[3].q);
755 r[1].q = micro_div(r[1].q, r[3].q);
756 r[2].q = micro_div(r[2].q, r[3].q);
757 }
758
759 if (biasLod) {
760 FETCH(&r[3], 0, CHAN_W);
761 lodBias = r[3].f[0];
762 }
763 else
764 lodBias = 0.0;
765
766 fetch_texel(&mach->Samplers[unit],
767 &r[0], &r[1], &r[2], lodBias,
768 &r[0], &r[1], &r[2], &r[3]);
769 break;
770
771 default:
772 ASSERT (0);
773 }
774
775 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
776 STORE( &r[chan_index], 0, chan_index );
777 }
778 }
779
780
781
782 static void
783 constant_interpolation(
784 struct spu_exec_machine *mach,
785 unsigned attrib,
786 unsigned chan )
787 {
788 unsigned i;
789
790 for( i = 0; i < QUAD_SIZE; i++ ) {
791 mach->Inputs[attrib].xyzw[chan].f[i] = mach->InterpCoefs[attrib].a0[chan];
792 }
793 }
794
795 static void
796 linear_interpolation(
797 struct spu_exec_machine *mach,
798 unsigned attrib,
799 unsigned chan )
800 {
801 const float x = mach->QuadPos.xyzw[0].f[0];
802 const float y = mach->QuadPos.xyzw[1].f[0];
803 const float dadx = mach->InterpCoefs[attrib].dadx[chan];
804 const float dady = mach->InterpCoefs[attrib].dady[chan];
805 const float a0 = mach->InterpCoefs[attrib].a0[chan] + dadx * x + dady * y;
806 mach->Inputs[attrib].xyzw[chan].f[0] = a0;
807 mach->Inputs[attrib].xyzw[chan].f[1] = a0 + dadx;
808 mach->Inputs[attrib].xyzw[chan].f[2] = a0 + dady;
809 mach->Inputs[attrib].xyzw[chan].f[3] = a0 + dadx + dady;
810 }
811
812 static void
813 perspective_interpolation(
814 struct spu_exec_machine *mach,
815 unsigned attrib,
816 unsigned chan )
817 {
818 const float x = mach->QuadPos.xyzw[0].f[0];
819 const float y = mach->QuadPos.xyzw[1].f[0];
820 const float dadx = mach->InterpCoefs[attrib].dadx[chan];
821 const float dady = mach->InterpCoefs[attrib].dady[chan];
822 const float a0 = mach->InterpCoefs[attrib].a0[chan] + dadx * x + dady * y;
823 const float *w = mach->QuadPos.xyzw[3].f;
824 /* divide by W here */
825 mach->Inputs[attrib].xyzw[chan].f[0] = a0 / w[0];
826 mach->Inputs[attrib].xyzw[chan].f[1] = (a0 + dadx) / w[1];
827 mach->Inputs[attrib].xyzw[chan].f[2] = (a0 + dady) / w[2];
828 mach->Inputs[attrib].xyzw[chan].f[3] = (a0 + dadx + dady) / w[3];
829 }
830
831
/** Signature shared by the three interpolation routines above. */
typedef void (* interpolation_func)(
   struct spu_exec_machine *mach,
   unsigned attrib,
   unsigned chan );
836
837 static void
838 exec_declaration(struct spu_exec_machine *mach,
839 const struct tgsi_full_declaration *decl)
840 {
841 if( mach->Processor == TGSI_PROCESSOR_FRAGMENT ) {
842 if( decl->Declaration.File == TGSI_FILE_INPUT ) {
843 unsigned first, last, mask;
844 interpolation_func interp;
845
846 first = decl->DeclarationRange.First;
847 last = decl->DeclarationRange.Last;
848 mask = decl->Declaration.UsageMask;
849
850 switch( decl->Declaration.Interpolate ) {
851 case TGSI_INTERPOLATE_CONSTANT:
852 interp = constant_interpolation;
853 break;
854
855 case TGSI_INTERPOLATE_LINEAR:
856 interp = linear_interpolation;
857 break;
858
859 case TGSI_INTERPOLATE_PERSPECTIVE:
860 interp = perspective_interpolation;
861 break;
862
863 default:
864 ASSERT( 0 );
865 }
866
867 if( mask == TGSI_WRITEMASK_XYZW ) {
868 unsigned i, j;
869
870 for( i = first; i <= last; i++ ) {
871 for( j = 0; j < NUM_CHANNELS; j++ ) {
872 interp( mach, i, j );
873 }
874 }
875 }
876 else {
877 unsigned i, j;
878
879 for( j = 0; j < NUM_CHANNELS; j++ ) {
880 if( mask & (1 << j) ) {
881 for( i = first; i <= last; i++ ) {
882 interp( mach, i, j );
883 }
884 }
885 }
886 }
887 }
888 }
889 }
890
891 static void
892 exec_instruction(
893 struct spu_exec_machine *mach,
894 const struct tgsi_full_instruction *inst,
895 int *pc )
896 {
897 uint chan_index;
898 union spu_exec_channel r[8];
899
900 (*pc)++;
901
902 switch (inst->Instruction.Opcode) {
903 case TGSI_OPCODE_ARL:
904 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
905 FETCH( &r[0], 0, chan_index );
906 r[0].q = si_cflts(r[0].q, 0);
907 STORE( &r[0], 0, chan_index );
908 }
909 break;
910
911 case TGSI_OPCODE_MOV:
912 case TGSI_OPCODE_SWZ:
913 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
914 FETCH( &r[0], 0, chan_index );
915 STORE( &r[0], 0, chan_index );
916 }
917 break;
918
919 case TGSI_OPCODE_LIT:
920 if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
921 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_X );
922 }
923
924 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y ) || IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
925 FETCH( &r[0], 0, CHAN_X );
926 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
927 r[0].q = micro_max(r[0].q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
928 STORE( &r[0], 0, CHAN_Y );
929 }
930
931 if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
932 FETCH( &r[1], 0, CHAN_Y );
933 r[1].q = micro_max(r[1].q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
934
935 FETCH( &r[2], 0, CHAN_W );
936 r[2].q = micro_min(r[2].q, mach->Temps[TEMP_128_I].xyzw[TEMP_128_C].q);
937 r[2].q = micro_max(r[2].q, mach->Temps[TEMP_M128_I].xyzw[TEMP_M128_C].q);
938 r[1].q = micro_pow(r[1].q, r[2].q);
939
940 /* r0 = (r0 > 0.0) ? r1 : 0.0
941 */
942 r[0].q = si_fcgt(r[0].q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
943 r[0].q = si_selb(mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q, r[1].q,
944 r[0].q);
945 STORE( &r[0], 0, CHAN_Z );
946 }
947 }
948
949 if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
950 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
951 }
952 break;
953
954 case TGSI_OPCODE_RCP:
955 FETCH( &r[0], 0, CHAN_X );
956 r[0].q = micro_div(mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q, r[0].q);
957 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
958 STORE( &r[0], 0, chan_index );
959 }
960 break;
961
962 case TGSI_OPCODE_RSQ:
963 FETCH( &r[0], 0, CHAN_X );
964 r[0].q = micro_sqrt(r[0].q);
965 r[0].q = micro_div(mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q, r[0].q);
966 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
967 STORE( &r[0], 0, chan_index );
968 }
969 break;
970
971 case TGSI_OPCODE_EXP:
972 ASSERT (0);
973 break;
974
975 case TGSI_OPCODE_LOG:
976 ASSERT (0);
977 break;
978
979 case TGSI_OPCODE_MUL:
980 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index )
981 {
982 FETCH(&r[0], 0, chan_index);
983 FETCH(&r[1], 1, chan_index);
984
985 r[0].q = si_fm(r[0].q, r[1].q);
986
987 STORE(&r[0], 0, chan_index);
988 }
989 break;
990
991 case TGSI_OPCODE_ADD:
992 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
993 FETCH( &r[0], 0, chan_index );
994 FETCH( &r[1], 1, chan_index );
995 r[0].q = si_fa(r[0].q, r[1].q);
996 STORE( &r[0], 0, chan_index );
997 }
998 break;
999
1000 case TGSI_OPCODE_DP3:
1001 /* TGSI_OPCODE_DOT3 */
1002 FETCH( &r[0], 0, CHAN_X );
1003 FETCH( &r[1], 1, CHAN_X );
1004 r[0].q = si_fm(r[0].q, r[1].q);
1005
1006 FETCH( &r[1], 0, CHAN_Y );
1007 FETCH( &r[2], 1, CHAN_Y );
1008 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1009
1010
1011 FETCH( &r[1], 0, CHAN_Z );
1012 FETCH( &r[2], 1, CHAN_Z );
1013 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1014
1015 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1016 STORE( &r[0], 0, chan_index );
1017 }
1018 break;
1019
1020 case TGSI_OPCODE_DP4:
1021 /* TGSI_OPCODE_DOT4 */
1022 FETCH(&r[0], 0, CHAN_X);
1023 FETCH(&r[1], 1, CHAN_X);
1024
1025 r[0].q = si_fm(r[0].q, r[1].q);
1026
1027 FETCH(&r[1], 0, CHAN_Y);
1028 FETCH(&r[2], 1, CHAN_Y);
1029
1030 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1031
1032 FETCH(&r[1], 0, CHAN_Z);
1033 FETCH(&r[2], 1, CHAN_Z);
1034
1035 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1036
1037 FETCH(&r[1], 0, CHAN_W);
1038 FETCH(&r[2], 1, CHAN_W);
1039
1040 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1041
1042 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1043 STORE( &r[0], 0, chan_index );
1044 }
1045 break;
1046
1047 case TGSI_OPCODE_DST:
1048 if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
1049 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_X );
1050 }
1051
1052 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
1053 FETCH( &r[0], 0, CHAN_Y );
1054 FETCH( &r[1], 1, CHAN_Y);
1055 r[0].q = si_fm(r[0].q, r[1].q);
1056 STORE( &r[0], 0, CHAN_Y );
1057 }
1058
1059 if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
1060 FETCH( &r[0], 0, CHAN_Z );
1061 STORE( &r[0], 0, CHAN_Z );
1062 }
1063
1064 if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
1065 FETCH( &r[0], 1, CHAN_W );
1066 STORE( &r[0], 0, CHAN_W );
1067 }
1068 break;
1069
1070 case TGSI_OPCODE_MIN:
1071 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1072 FETCH(&r[0], 0, chan_index);
1073 FETCH(&r[1], 1, chan_index);
1074
1075 r[0].q = micro_min(r[0].q, r[1].q);
1076
1077 STORE(&r[0], 0, chan_index);
1078 }
1079 break;
1080
1081 case TGSI_OPCODE_MAX:
1082 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1083 FETCH(&r[0], 0, chan_index);
1084 FETCH(&r[1], 1, chan_index);
1085
1086 r[0].q = micro_max(r[0].q, r[1].q);
1087
1088 STORE(&r[0], 0, chan_index );
1089 }
1090 break;
1091
1092 case TGSI_OPCODE_SLT:
1093 /* TGSI_OPCODE_SETLT */
1094 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1095 FETCH( &r[0], 0, chan_index );
1096 FETCH( &r[1], 1, chan_index );
1097
1098 r[0].q = micro_ge(r[0].q, r[1].q);
1099 r[0].q = si_xori(r[0].q, 0xff);
1100
1101 STORE( &r[0], 0, chan_index );
1102 }
1103 break;
1104
1105 case TGSI_OPCODE_SGE:
1106 /* TGSI_OPCODE_SETGE */
1107 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1108 FETCH( &r[0], 0, chan_index );
1109 FETCH( &r[1], 1, chan_index );
1110 r[0].q = micro_ge(r[0].q, r[1].q);
1111 STORE( &r[0], 0, chan_index );
1112 }
1113 break;
1114
1115 case TGSI_OPCODE_MAD:
1116 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1117 FETCH( &r[0], 0, chan_index );
1118 FETCH( &r[1], 1, chan_index );
1119 FETCH( &r[2], 2, chan_index );
1120 r[0].q = si_fma(r[0].q, r[1].q, r[2].q);
1121 STORE( &r[0], 0, chan_index );
1122 }
1123 break;
1124
1125 case TGSI_OPCODE_SUB:
1126 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1127 FETCH(&r[0], 0, chan_index);
1128 FETCH(&r[1], 1, chan_index);
1129
1130 r[0].q = si_fs(r[0].q, r[1].q);
1131
1132 STORE(&r[0], 0, chan_index);
1133 }
1134 break;
1135
1136 case TGSI_OPCODE_LRP:
1137 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1138 FETCH(&r[0], 0, chan_index);
1139 FETCH(&r[1], 1, chan_index);
1140 FETCH(&r[2], 2, chan_index);
1141
1142 r[1].q = si_fs(r[1].q, r[2].q);
1143 r[0].q = si_fma(r[0].q, r[1].q, r[2].q);
1144
1145 STORE(&r[0], 0, chan_index);
1146 }
1147 break;
1148
1149 case TGSI_OPCODE_CND:
1150 ASSERT (0);
1151 break;
1152
1153 case TGSI_OPCODE_DP2A:
1154 ASSERT (0);
1155 break;
1156
1157 case TGSI_OPCODE_FRC:
1158 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1159 FETCH( &r[0], 0, chan_index );
1160 r[0].q = micro_frc(r[0].q);
1161 STORE( &r[0], 0, chan_index );
1162 }
1163 break;
1164
1165 case TGSI_OPCODE_CLAMP:
1166 ASSERT (0);
1167 break;
1168
1169 case TGSI_OPCODE_FLR:
1170 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1171 FETCH( &r[0], 0, chan_index );
1172 r[0].q = micro_flr(r[0].q);
1173 STORE( &r[0], 0, chan_index );
1174 }
1175 break;
1176
1177 case TGSI_OPCODE_ROUND:
1178 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1179 FETCH( &r[0], 0, chan_index );
1180 r[0].q = micro_rnd(r[0].q);
1181 STORE( &r[0], 0, chan_index );
1182 }
1183 break;
1184
1185 case TGSI_OPCODE_EX2:
1186 FETCH(&r[0], 0, CHAN_X);
1187
1188 r[0].q = micro_pow(mach->Temps[TEMP_2_I].xyzw[TEMP_2_C].q, r[0].q);
1189
1190 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1191 STORE( &r[0], 0, chan_index );
1192 }
1193 break;
1194
1195 case TGSI_OPCODE_LG2:
1196 FETCH( &r[0], 0, CHAN_X );
1197 r[0].q = micro_lg2(r[0].q);
1198 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1199 STORE( &r[0], 0, chan_index );
1200 }
1201 break;
1202
1203 case TGSI_OPCODE_POW:
1204 FETCH(&r[0], 0, CHAN_X);
1205 FETCH(&r[1], 1, CHAN_X);
1206
1207 r[0].q = micro_pow(r[0].q, r[1].q);
1208
1209 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1210 STORE( &r[0], 0, chan_index );
1211 }
1212 break;
1213
1214 case TGSI_OPCODE_XPD:
1215 /* TGSI_OPCODE_XPD */
1216 FETCH(&r[0], 0, CHAN_Y);
1217 FETCH(&r[1], 1, CHAN_Z);
1218 FETCH(&r[3], 0, CHAN_Z);
1219 FETCH(&r[4], 1, CHAN_Y);
1220
1221 /* r2 = (r0 * r1) - (r3 * r5)
1222 */
1223 r[2].q = si_fm(r[3].q, r[5].q);
1224 r[2].q = si_fms(r[0].q, r[1].q, r[2].q);
1225
1226 if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
1227 STORE( &r[2], 0, CHAN_X );
1228 }
1229
1230 FETCH(&r[2], 1, CHAN_X);
1231 FETCH(&r[5], 0, CHAN_X);
1232
1233 /* r3 = (r3 * r2) - (r1 * r5)
1234 */
1235 r[1].q = si_fm(r[1].q, r[5].q);
1236 r[3].q = si_fms(r[3].q, r[2].q, r[1].q);
1237
1238 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
1239 STORE( &r[3], 0, CHAN_Y );
1240 }
1241
1242 /* r5 = (r5 * r4) - (r0 * r2)
1243 */
1244 r[0].q = si_fm(r[0].q, r[2].q);
1245 r[5].q = si_fms(r[5].q, r[4].q, r[0].q);
1246
1247 if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
1248 STORE( &r[5], 0, CHAN_Z );
1249 }
1250
1251 if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
1252 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
1253 }
1254 break;
1255
1256 case TGSI_OPCODE_ABS:
1257 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1258 FETCH(&r[0], 0, chan_index);
1259
1260 r[0].q = micro_abs(r[0].q);
1261
1262 STORE(&r[0], 0, chan_index);
1263 }
1264 break;
1265
1266 case TGSI_OPCODE_RCC:
1267 ASSERT (0);
1268 break;
1269
1270 case TGSI_OPCODE_DPH:
1271 FETCH(&r[0], 0, CHAN_X);
1272 FETCH(&r[1], 1, CHAN_X);
1273
1274 r[0].q = si_fm(r[0].q, r[1].q);
1275
1276 FETCH(&r[1], 0, CHAN_Y);
1277 FETCH(&r[2], 1, CHAN_Y);
1278
1279 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1280
1281 FETCH(&r[1], 0, CHAN_Z);
1282 FETCH(&r[2], 1, CHAN_Z);
1283
1284 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1285
1286 FETCH(&r[1], 1, CHAN_W);
1287
1288 r[0].q = si_fa(r[0].q, r[1].q);
1289
1290 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1291 STORE( &r[0], 0, chan_index );
1292 }
1293 break;
1294
1295 case TGSI_OPCODE_COS:
1296 FETCH(&r[0], 0, CHAN_X);
1297
1298 r[0].q = micro_cos(r[0].q);
1299
1300 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1301 STORE( &r[0], 0, chan_index );
1302 }
1303 break;
1304
1305 case TGSI_OPCODE_DDX:
1306 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1307 FETCH( &r[0], 0, chan_index );
1308 r[0].q = micro_ddx(r[0].q);
1309 STORE( &r[0], 0, chan_index );
1310 }
1311 break;
1312
1313 case TGSI_OPCODE_DDY:
1314 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1315 FETCH( &r[0], 0, chan_index );
1316 r[0].q = micro_ddy(r[0].q);
1317 STORE( &r[0], 0, chan_index );
1318 }
1319 break;
1320
1321 case TGSI_OPCODE_KILP:
1322 exec_kilp (mach, inst);
1323 break;
1324
1325 case TGSI_OPCODE_KIL:
1326 exec_kil (mach, inst);
1327 break;
1328
1329 case TGSI_OPCODE_PK2H:
1330 ASSERT (0);
1331 break;
1332
1333 case TGSI_OPCODE_PK2US:
1334 ASSERT (0);
1335 break;
1336
1337 case TGSI_OPCODE_PK4B:
1338 ASSERT (0);
1339 break;
1340
1341 case TGSI_OPCODE_PK4UB:
1342 ASSERT (0);
1343 break;
1344
1345 case TGSI_OPCODE_RFL:
1346 ASSERT (0);
1347 break;
1348
1349 case TGSI_OPCODE_SEQ:
1350 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1351 FETCH( &r[0], 0, chan_index );
1352 FETCH( &r[1], 1, chan_index );
1353
1354 r[0].q = si_fceq(r[0].q, r[1].q);
1355
1356 STORE( &r[0], 0, chan_index );
1357 }
1358 break;
1359
1360 case TGSI_OPCODE_SFL:
1361 ASSERT (0);
1362 break;
1363
1364 case TGSI_OPCODE_SGT:
1365 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1366 FETCH( &r[0], 0, chan_index );
1367 FETCH( &r[1], 1, chan_index );
1368 r[0].q = si_fcgt(r[0].q, r[1].q);
1369 STORE( &r[0], 0, chan_index );
1370 }
1371 break;
1372
1373 case TGSI_OPCODE_SIN:
1374 FETCH( &r[0], 0, CHAN_X );
1375 r[0].q = micro_sin(r[0].q);
1376 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1377 STORE( &r[0], 0, chan_index );
1378 }
1379 break;
1380
1381 case TGSI_OPCODE_SLE:
1382 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1383 FETCH( &r[0], 0, chan_index );
1384 FETCH( &r[1], 1, chan_index );
1385
1386 r[0].q = si_fcgt(r[0].q, r[1].q);
1387 r[0].q = si_xori(r[0].q, 0xff);
1388
1389 STORE( &r[0], 0, chan_index );
1390 }
1391 break;
1392
1393 case TGSI_OPCODE_SNE:
1394 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1395 FETCH( &r[0], 0, chan_index );
1396 FETCH( &r[1], 1, chan_index );
1397
1398 r[0].q = si_fceq(r[0].q, r[1].q);
1399 r[0].q = si_xori(r[0].q, 0xff);
1400
1401 STORE( &r[0], 0, chan_index );
1402 }
1403 break;
1404
1405 case TGSI_OPCODE_STR:
1406 ASSERT (0);
1407 break;
1408
1409 case TGSI_OPCODE_TEX:
1410 /* simple texture lookup */
1411 /* src[0] = texcoord */
1412 /* src[1] = sampler unit */
1413 exec_tex(mach, inst, FALSE, FALSE);
1414 break;
1415
1416 case TGSI_OPCODE_TXB:
1417 /* Texture lookup with lod bias */
1418 /* src[0] = texcoord (src[0].w = lod bias) */
1419 /* src[1] = sampler unit */
1420 exec_tex(mach, inst, TRUE, FALSE);
1421 break;
1422
1423 case TGSI_OPCODE_TXD:
1424 /* Texture lookup with explicit partial derivatives */
1425 /* src[0] = texcoord */
1426 /* src[1] = d[strq]/dx */
1427 /* src[2] = d[strq]/dy */
1428 /* src[3] = sampler unit */
1429 ASSERT (0);
1430 break;
1431
1432 case TGSI_OPCODE_TXL:
1433 /* Texture lookup with explicit LOD */
1434 /* src[0] = texcoord (src[0].w = LOD) */
1435 /* src[1] = sampler unit */
1436 exec_tex(mach, inst, TRUE, FALSE);
1437 break;
1438
1439 case TGSI_OPCODE_TXP:
1440 /* Texture lookup with projection */
1441 /* src[0] = texcoord (src[0].w = projection) */
1442 /* src[1] = sampler unit */
1443 exec_tex(mach, inst, TRUE, TRUE);
1444 break;
1445
1446 case TGSI_OPCODE_UP2H:
1447 ASSERT (0);
1448 break;
1449
1450 case TGSI_OPCODE_UP2US:
1451 ASSERT (0);
1452 break;
1453
1454 case TGSI_OPCODE_UP4B:
1455 ASSERT (0);
1456 break;
1457
1458 case TGSI_OPCODE_UP4UB:
1459 ASSERT (0);
1460 break;
1461
1462 case TGSI_OPCODE_X2D:
1463 ASSERT (0);
1464 break;
1465
1466 case TGSI_OPCODE_ARA:
1467 ASSERT (0);
1468 break;
1469
1470 case TGSI_OPCODE_ARR:
1471 ASSERT (0);
1472 break;
1473
1474 case TGSI_OPCODE_BRA:
1475 ASSERT (0);
1476 break;
1477
1478 case TGSI_OPCODE_CAL:
1479 /* skip the call if no execution channels are enabled */
1480 if (mach->ExecMask) {
1481 /* do the call */
1482
1483 /* push the Cond, Loop, Cont stacks */
1484 ASSERT(mach->CondStackTop < TGSI_EXEC_MAX_COND_NESTING);
1485 mach->CondStack[mach->CondStackTop++] = mach->CondMask;
1486 ASSERT(mach->LoopStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
1487 mach->LoopStack[mach->LoopStackTop++] = mach->LoopMask;
1488 ASSERT(mach->ContStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
1489 mach->ContStack[mach->ContStackTop++] = mach->ContMask;
1490
1491 ASSERT(mach->FuncStackTop < TGSI_EXEC_MAX_CALL_NESTING);
1492 mach->FuncStack[mach->FuncStackTop++] = mach->FuncMask;
1493
1494 /* note that PC was already incremented above */
1495 mach->CallStack[mach->CallStackTop++] = *pc;
1496 *pc = inst->InstructionExtLabel.Label;
1497 }
1498 break;
1499
1500 case TGSI_OPCODE_RET:
1501 mach->FuncMask &= ~mach->ExecMask;
1502 UPDATE_EXEC_MASK(mach);
1503
1504 if (mach->ExecMask == 0x0) {
1505 /* really return now (otherwise, keep executing) */
1506
1507 if (mach->CallStackTop == 0) {
1508 /* returning from main() */
1509 *pc = -1;
1510 return;
1511 }
1512 *pc = mach->CallStack[--mach->CallStackTop];
1513
1514 /* pop the Cond, Loop, Cont stacks */
1515 ASSERT(mach->CondStackTop > 0);
1516 mach->CondMask = mach->CondStack[--mach->CondStackTop];
1517 ASSERT(mach->LoopStackTop > 0);
1518 mach->LoopMask = mach->LoopStack[--mach->LoopStackTop];
1519 ASSERT(mach->ContStackTop > 0);
1520 mach->ContMask = mach->ContStack[--mach->ContStackTop];
1521 ASSERT(mach->FuncStackTop > 0);
1522 mach->FuncMask = mach->FuncStack[--mach->FuncStackTop];
1523
1524 UPDATE_EXEC_MASK(mach);
1525 }
1526 break;
1527
1528 case TGSI_OPCODE_SSG:
1529 ASSERT (0);
1530 break;
1531
1532 case TGSI_OPCODE_CMP:
1533 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1534 FETCH(&r[0], 0, chan_index);
1535 FETCH(&r[1], 1, chan_index);
1536 FETCH(&r[2], 2, chan_index);
1537
1538 /* r0 = (r0 < 0.0) ? r1 : r2
1539 */
1540 r[3].q = si_xor(r[3].q, r[3].q);
1541 r[0].q = micro_lt(r[0].q, r[3].q);
1542 r[0].q = si_selb(r[1].q, r[2].q, r[0].q);
1543
1544 STORE(&r[0], 0, chan_index);
1545 }
1546 break;
1547
1548 case TGSI_OPCODE_SCS:
1549 if( IS_CHANNEL_ENABLED( *inst, CHAN_X ) || IS_CHANNEL_ENABLED( *inst, CHAN_Y ) ) {
1550 FETCH( &r[0], 0, CHAN_X );
1551 }
1552 if( IS_CHANNEL_ENABLED( *inst, CHAN_X ) ) {
1553 r[1].q = micro_cos(r[0].q);
1554 STORE( &r[1], 0, CHAN_X );
1555 }
1556 if( IS_CHANNEL_ENABLED( *inst, CHAN_Y ) ) {
1557 r[1].q = micro_sin(r[0].q);
1558 STORE( &r[1], 0, CHAN_Y );
1559 }
1560 if( IS_CHANNEL_ENABLED( *inst, CHAN_Z ) ) {
1561 STORE( &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], 0, CHAN_Z );
1562 }
1563 if( IS_CHANNEL_ENABLED( *inst, CHAN_W ) ) {
1564 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
1565 }
1566 break;
1567
1568 case TGSI_OPCODE_NRM:
1569 ASSERT (0);
1570 break;
1571
1572 case TGSI_OPCODE_DIV:
1573 ASSERT( 0 );
1574 break;
1575
1576 case TGSI_OPCODE_DP2:
1577 FETCH( &r[0], 0, CHAN_X );
1578 FETCH( &r[1], 1, CHAN_X );
1579 r[0].q = si_fm(r[0].q, r[1].q);
1580
1581 FETCH( &r[1], 0, CHAN_Y );
1582 FETCH( &r[2], 1, CHAN_Y );
1583 r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
1584
1585 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1586 STORE( &r[0], 0, chan_index );
1587 }
1588 break;
1589
1590 case TGSI_OPCODE_IF:
1591 /* push CondMask */
1592 ASSERT(mach->CondStackTop < TGSI_EXEC_MAX_COND_NESTING);
1593 mach->CondStack[mach->CondStackTop++] = mach->CondMask;
1594 FETCH( &r[0], 0, CHAN_X );
1595 /* update CondMask */
1596 if( ! r[0].u[0] ) {
1597 mach->CondMask &= ~0x1;
1598 }
1599 if( ! r[0].u[1] ) {
1600 mach->CondMask &= ~0x2;
1601 }
1602 if( ! r[0].u[2] ) {
1603 mach->CondMask &= ~0x4;
1604 }
1605 if( ! r[0].u[3] ) {
1606 mach->CondMask &= ~0x8;
1607 }
1608 UPDATE_EXEC_MASK(mach);
1609 /* Todo: If CondMask==0, jump to ELSE */
1610 break;
1611
1612 case TGSI_OPCODE_ELSE:
1613 /* invert CondMask wrt previous mask */
1614 {
1615 uint prevMask;
1616 ASSERT(mach->CondStackTop > 0);
1617 prevMask = mach->CondStack[mach->CondStackTop - 1];
1618 mach->CondMask = ~mach->CondMask & prevMask;
1619 UPDATE_EXEC_MASK(mach);
1620 /* Todo: If CondMask==0, jump to ENDIF */
1621 }
1622 break;
1623
1624 case TGSI_OPCODE_ENDIF:
1625 /* pop CondMask */
1626 ASSERT(mach->CondStackTop > 0);
1627 mach->CondMask = mach->CondStack[--mach->CondStackTop];
1628 UPDATE_EXEC_MASK(mach);
1629 break;
1630
1631 case TGSI_OPCODE_END:
1632 /* halt execution */
1633 *pc = -1;
1634 break;
1635
1636 case TGSI_OPCODE_REP:
1637 ASSERT (0);
1638 break;
1639
1640 case TGSI_OPCODE_ENDREP:
1641 ASSERT (0);
1642 break;
1643
1644 case TGSI_OPCODE_PUSHA:
1645 ASSERT (0);
1646 break;
1647
1648 case TGSI_OPCODE_POPA:
1649 ASSERT (0);
1650 break;
1651
1652 case TGSI_OPCODE_CEIL:
1653 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1654 FETCH( &r[0], 0, chan_index );
1655 r[0].q = micro_ceil(r[0].q);
1656 STORE( &r[0], 0, chan_index );
1657 }
1658 break;
1659
1660 case TGSI_OPCODE_I2F:
1661 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1662 FETCH( &r[0], 0, chan_index );
1663 r[0].q = si_csflt(r[0].q, 0);
1664 STORE( &r[0], 0, chan_index );
1665 }
1666 break;
1667
1668 case TGSI_OPCODE_NOT:
1669 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1670 FETCH( &r[0], 0, chan_index );
1671 r[0].q = si_xorbi(r[0].q, 0xff);
1672 STORE( &r[0], 0, chan_index );
1673 }
1674 break;
1675
1676 case TGSI_OPCODE_TRUNC:
1677 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1678 FETCH( &r[0], 0, chan_index );
1679 r[0].q = micro_trunc(r[0].q);
1680 STORE( &r[0], 0, chan_index );
1681 }
1682 break;
1683
1684 case TGSI_OPCODE_SHL:
1685 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1686 FETCH( &r[0], 0, chan_index );
1687 FETCH( &r[1], 1, chan_index );
1688
1689 r[0].q = si_shl(r[0].q, r[1].q);
1690
1691 STORE( &r[0], 0, chan_index );
1692 }
1693 break;
1694
1695 case TGSI_OPCODE_SHR:
1696 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1697 FETCH( &r[0], 0, chan_index );
1698 FETCH( &r[1], 1, chan_index );
1699 r[0].q = micro_ishr(r[0].q, r[1].q);
1700 STORE( &r[0], 0, chan_index );
1701 }
1702 break;
1703
1704 case TGSI_OPCODE_AND:
1705 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1706 FETCH( &r[0], 0, chan_index );
1707 FETCH( &r[1], 1, chan_index );
1708 r[0].q = si_and(r[0].q, r[1].q);
1709 STORE( &r[0], 0, chan_index );
1710 }
1711 break;
1712
1713 case TGSI_OPCODE_OR:
1714 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1715 FETCH( &r[0], 0, chan_index );
1716 FETCH( &r[1], 1, chan_index );
1717 r[0].q = si_or(r[0].q, r[1].q);
1718 STORE( &r[0], 0, chan_index );
1719 }
1720 break;
1721
1722 case TGSI_OPCODE_MOD:
1723 ASSERT (0);
1724 break;
1725
1726 case TGSI_OPCODE_XOR:
1727 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
1728 FETCH( &r[0], 0, chan_index );
1729 FETCH( &r[1], 1, chan_index );
1730 r[0].q = si_xor(r[0].q, r[1].q);
1731 STORE( &r[0], 0, chan_index );
1732 }
1733 break;
1734
1735 case TGSI_OPCODE_SAD:
1736 ASSERT (0);
1737 break;
1738
1739 case TGSI_OPCODE_TXF:
1740 ASSERT (0);
1741 break;
1742
1743 case TGSI_OPCODE_TXQ:
1744 ASSERT (0);
1745 break;
1746
1747 case TGSI_OPCODE_EMIT:
1748 mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0] += 16;
1749 mach->Primitives[mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]]++;
1750 break;
1751
1752 case TGSI_OPCODE_ENDPRIM:
1753 mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]++;
1754 mach->Primitives[mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]] = 0;
1755 break;
1756
1757 case TGSI_OPCODE_BGNFOR:
1758 /* fall-through (for now) */
1759 case TGSI_OPCODE_BGNLOOP:
1760 /* push LoopMask and ContMasks */
1761 ASSERT(mach->LoopStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
1762 mach->LoopStack[mach->LoopStackTop++] = mach->LoopMask;
1763 ASSERT(mach->ContStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
1764 mach->ContStack[mach->ContStackTop++] = mach->ContMask;
1765 break;
1766
1767 case TGSI_OPCODE_ENDFOR:
1768 /* fall-through (for now at least) */
1769 case TGSI_OPCODE_ENDLOOP:
1770 /* Restore ContMask, but don't pop */
1771 ASSERT(mach->ContStackTop > 0);
1772 mach->ContMask = mach->ContStack[mach->ContStackTop - 1];
1773 if (mach->LoopMask) {
1774 /* repeat loop: jump to instruction just past BGNLOOP */
1775 *pc = inst->InstructionExtLabel.Label + 1;
1776 }
1777 else {
1778 /* exit loop: pop LoopMask */
1779 ASSERT(mach->LoopStackTop > 0);
1780 mach->LoopMask = mach->LoopStack[--mach->LoopStackTop];
1781 /* pop ContMask */
1782 ASSERT(mach->ContStackTop > 0);
1783 mach->ContMask = mach->ContStack[--mach->ContStackTop];
1784 }
1785 UPDATE_EXEC_MASK(mach);
1786 break;
1787
1788 case TGSI_OPCODE_BRK:
1789 /* turn off loop channels for each enabled exec channel */
1790 mach->LoopMask &= ~mach->ExecMask;
1791 /* Todo: if mach->LoopMask == 0, jump to end of loop */
1792 UPDATE_EXEC_MASK(mach);
1793 break;
1794
1795 case TGSI_OPCODE_CONT:
1796 /* turn off cont channels for each enabled exec channel */
1797 mach->ContMask &= ~mach->ExecMask;
1798 /* Todo: if mach->LoopMask == 0, jump to end of loop */
1799 UPDATE_EXEC_MASK(mach);
1800 break;
1801
1802 case TGSI_OPCODE_BGNSUB:
1803 /* no-op */
1804 break;
1805
1806 case TGSI_OPCODE_ENDSUB:
1807 /* no-op */
1808 break;
1809
1810 case TGSI_OPCODE_NOP:
1811 break;
1812
1813 default:
1814 ASSERT( 0 );
1815 }
1816 }
1817
1818
1819 /**
1820 * Run TGSI interpreter.
1821 * \return bitmask of "alive" quad components
1822 */
1823 uint
1824 spu_exec_machine_run( struct spu_exec_machine *mach )
1825 {
1826 uint i;
1827 int pc = 0;
1828
1829 mach->CondMask = 0xf;
1830 mach->LoopMask = 0xf;
1831 mach->ContMask = 0xf;
1832 mach->FuncMask = 0xf;
1833 mach->ExecMask = 0xf;
1834
1835 mach->CondStackTop = 0; /* temporarily subvert this ASSERTion */
1836 ASSERT(mach->CondStackTop == 0);
1837 ASSERT(mach->LoopStackTop == 0);
1838 ASSERT(mach->ContStackTop == 0);
1839 ASSERT(mach->CallStackTop == 0);
1840
1841 mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] = 0;
1842 mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0] = 0;
1843
1844 if( mach->Processor == TGSI_PROCESSOR_GEOMETRY ) {
1845 mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0] = 0;
1846 mach->Primitives[0] = 0;
1847 }
1848
1849
1850 /* execute declarations (interpolants) */
1851 if( mach->Processor == TGSI_PROCESSOR_FRAGMENT ) {
1852 for (i = 0; i < mach->NumDeclarations; i++) {
1853 union {
1854 struct tgsi_full_declaration decl;
1855 qword buffer[ROUNDUP16(sizeof(struct tgsi_full_declaration)) / 16];
1856 } d ALIGN16_ATTRIB;
1857 unsigned ea = (unsigned) (mach->Declarations + pc);
1858
1859 spu_dcache_fetch_unaligned(d.buffer, ea, sizeof(d.decl));
1860
1861 exec_declaration( mach, &d.decl );
1862 }
1863 }
1864
1865 /* execute instructions, until pc is set to -1 */
1866 while (pc != -1) {
1867 union {
1868 struct tgsi_full_instruction inst;
1869 qword buffer[ROUNDUP16(sizeof(struct tgsi_full_instruction)) / 16];
1870 } i ALIGN16_ATTRIB;
1871 unsigned ea = (unsigned) (mach->Instructions + pc);
1872
1873 spu_dcache_fetch_unaligned(i.buffer, ea, sizeof(i.inst));
1874 exec_instruction( mach, & i.inst, &pc );
1875 }
1876
1877 #if 0
1878 /* we scale from floats in [0,1] to Zbuffer ints in sp_quad_depth_test.c */
1879 if (mach->Processor == TGSI_PROCESSOR_FRAGMENT) {
1880 /*
1881 * Scale back depth component.
1882 */
1883 for (i = 0; i < 4; i++)
1884 mach->Outputs[0].xyzw[2].f[i] *= ctx->DrawBuffer->_DepthMaxF;
1885 }
1886 #endif
1887
1888 return ~mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0];
1889 }
1890
1891