tgsi: Cleanup exec_tex().
[mesa.git] src/gallium/auxiliary/tgsi/tgsi_exec.c
1 /**************************************************************************
2 *
3 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 * Copyright 2009-2010 VMware, Inc. All rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29 /**
30 * TGSI interpreter/executor.
31 *
32 * Flow control information:
33 *
34 * Since we operate on 'quads' (4 pixels or 4 vertices in parallel)
35 * flow control statements (IF/ELSE/ENDIF, LOOP/ENDLOOP) require special
36 * care because a condition may be true for some quad components but false
37 * for others.
38 *
39 * We basically execute all statements (even if they're in the part of
40 * an IF/ELSE clause that's "not taken") and use a special mask to
41 * control writing to destination registers. This is the ExecMask.
42 * See store_dest().
43 *
44 * The ExecMask is computed from three other masks (CondMask, LoopMask and
45 * ContMask) which are controlled by the flow control instructions (namely:
46 * IF/ELSE/ENDIF, LOOP/ENDLOOP and CONT).
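 *
 * For example, if an IF condition holds for only the first and third
 * components of a quad, CondMask becomes 0x5; ANDed with the other masks
 * it produces an ExecMask that lets store_dest() write just those two
 * channels.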
47 *
48 *
49 * Authors:
50 * Michal Krol
51 * Brian Paul
52 */
53
54 #include "pipe/p_compiler.h"
55 #include "pipe/p_state.h"
56 #include "pipe/p_shader_tokens.h"
57 #include "tgsi/tgsi_dump.h"
58 #include "tgsi/tgsi_parse.h"
59 #include "tgsi/tgsi_util.h"
60 #include "tgsi_exec.h"
61 #include "util/u_memory.h"
62 #include "util/u_math.h"
63
64
65 #define FAST_MATH 1
66
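/* Positions of the four fragments within a 2x2 quad */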
67 #define TILE_TOP_LEFT 0
68 #define TILE_TOP_RIGHT 1
69 #define TILE_BOTTOM_LEFT 2
70 #define TILE_BOTTOM_RIGHT 3
71
72 static void
73 micro_abs(union tgsi_exec_channel *dst,
74 const union tgsi_exec_channel *src)
75 {
76 dst->f[0] = fabsf(src->f[0]);
77 dst->f[1] = fabsf(src->f[1]);
78 dst->f[2] = fabsf(src->f[2]);
79 dst->f[3] = fabsf(src->f[3]);
80 }
81
82 static void
83 micro_arl(union tgsi_exec_channel *dst,
84 const union tgsi_exec_channel *src)
85 {
86 dst->i[0] = (int)floorf(src->f[0]);
87 dst->i[1] = (int)floorf(src->f[1]);
88 dst->i[2] = (int)floorf(src->f[2]);
89 dst->i[3] = (int)floorf(src->f[3]);
90 }
91
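/* Like micro_arl() above, but rounds to the nearest integer instead of
 * flooring (used for the ARR opcode).
 */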
92 static void
93 micro_arr(union tgsi_exec_channel *dst,
94 const union tgsi_exec_channel *src)
95 {
96 dst->i[0] = (int)floorf(src->f[0] + 0.5f);
97 dst->i[1] = (int)floorf(src->f[1] + 0.5f);
98 dst->i[2] = (int)floorf(src->f[2] + 0.5f);
99 dst->i[3] = (int)floorf(src->f[3] + 0.5f);
100 }
101
102 static void
103 micro_ceil(union tgsi_exec_channel *dst,
104 const union tgsi_exec_channel *src)
105 {
106 dst->f[0] = ceilf(src->f[0]);
107 dst->f[1] = ceilf(src->f[1]);
108 dst->f[2] = ceilf(src->f[2]);
109 dst->f[3] = ceilf(src->f[3]);
110 }
111
112 static void
113 micro_cos(union tgsi_exec_channel *dst,
114 const union tgsi_exec_channel *src)
115 {
116 dst->f[0] = cosf(src->f[0]);
117 dst->f[1] = cosf(src->f[1]);
118 dst->f[2] = cosf(src->f[2]);
119 dst->f[3] = cosf(src->f[3]);
120 }
121
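/* Screen-space derivative approximations: ddx/ddy take differences across
 * the 2x2 quad (see the TILE_* indices above) and broadcast the single
 * result to all four channels.
 */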
122 static void
123 micro_ddx(union tgsi_exec_channel *dst,
124 const union tgsi_exec_channel *src)
125 {
126 dst->f[0] =
127 dst->f[1] =
128 dst->f[2] =
129 dst->f[3] = src->f[TILE_BOTTOM_RIGHT] - src->f[TILE_BOTTOM_LEFT];
130 }
131
132 static void
133 micro_ddy(union tgsi_exec_channel *dst,
134 const union tgsi_exec_channel *src)
135 {
136 dst->f[0] =
137 dst->f[1] =
138 dst->f[2] =
139 dst->f[3] = src->f[TILE_BOTTOM_LEFT] - src->f[TILE_TOP_LEFT];
140 }
141
142 static void
143 micro_exp2(union tgsi_exec_channel *dst,
144 const union tgsi_exec_channel *src)
145 {
146 #if FAST_MATH
147 dst->f[0] = util_fast_exp2(src->f[0]);
148 dst->f[1] = util_fast_exp2(src->f[1]);
149 dst->f[2] = util_fast_exp2(src->f[2]);
150 dst->f[3] = util_fast_exp2(src->f[3]);
151 #else
152 #if DEBUG
153 /* Inf is okay for this instruction, so clamp it to silence assertions. */
154 uint i;
155 union tgsi_exec_channel clamped;
156
157 for (i = 0; i < 4; i++) {
158 if (src->f[i] > 127.99999f) {
159 clamped.f[i] = 127.99999f;
160 } else if (src->f[i] < -126.99999f) {
161 clamped.f[i] = -126.99999f;
162 } else {
163 clamped.f[i] = src->f[i];
164 }
165 }
166 src = &clamped;
167 #endif /* DEBUG */
168
169 dst->f[0] = powf(2.0f, src->f[0]);
170 dst->f[1] = powf(2.0f, src->f[1]);
171 dst->f[2] = powf(2.0f, src->f[2]);
172 dst->f[3] = powf(2.0f, src->f[3]);
173 #endif /* FAST_MATH */
174 }
175
176 static void
177 micro_flr(union tgsi_exec_channel *dst,
178 const union tgsi_exec_channel *src)
179 {
180 dst->f[0] = floorf(src->f[0]);
181 dst->f[1] = floorf(src->f[1]);
182 dst->f[2] = floorf(src->f[2]);
183 dst->f[3] = floorf(src->f[3]);
184 }
185
186 static void
187 micro_frc(union tgsi_exec_channel *dst,
188 const union tgsi_exec_channel *src)
189 {
190 dst->f[0] = src->f[0] - floorf(src->f[0]);
191 dst->f[1] = src->f[1] - floorf(src->f[1]);
192 dst->f[2] = src->f[2] - floorf(src->f[2]);
193 dst->f[3] = src->f[3] - floorf(src->f[3]);
194 }
195
196 static void
197 micro_iabs(union tgsi_exec_channel *dst,
198 const union tgsi_exec_channel *src)
199 {
200 dst->i[0] = src->i[0] >= 0 ? src->i[0] : -src->i[0];
201 dst->i[1] = src->i[1] >= 0 ? src->i[1] : -src->i[1];
202 dst->i[2] = src->i[2] >= 0 ? src->i[2] : -src->i[2];
203 dst->i[3] = src->i[3] >= 0 ? src->i[3] : -src->i[3];
204 }
205
206 static void
207 micro_ineg(union tgsi_exec_channel *dst,
208 const union tgsi_exec_channel *src)
209 {
210 dst->i[0] = -src->i[0];
211 dst->i[1] = -src->i[1];
212 dst->i[2] = -src->i[2];
213 dst->i[3] = -src->i[3];
214 }
215
216 static void
217 micro_lg2(union tgsi_exec_channel *dst,
218 const union tgsi_exec_channel *src)
219 {
220 #if FAST_MATH
221 dst->f[0] = util_fast_log2(src->f[0]);
222 dst->f[1] = util_fast_log2(src->f[1]);
223 dst->f[2] = util_fast_log2(src->f[2]);
224 dst->f[3] = util_fast_log2(src->f[3]);
225 #else
226 dst->f[0] = logf(src->f[0]) * 1.442695f;
227 dst->f[1] = logf(src->f[1]) * 1.442695f;
228 dst->f[2] = logf(src->f[2]) * 1.442695f;
229 dst->f[3] = logf(src->f[3]) * 1.442695f;
230 #endif
231 }
232
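/* Linear interpolation: dst = src0*src1 + (1-src0)*src2, written here as
 * src0*(src1-src2) + src2 to save a multiply per channel.
 */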
233 static void
234 micro_lrp(union tgsi_exec_channel *dst,
235 const union tgsi_exec_channel *src)
236 {
237 dst->f[0] = src[0].f[0] * (src[1].f[0] - src[2].f[0]) + src[2].f[0];
238 dst->f[1] = src[0].f[1] * (src[1].f[1] - src[2].f[1]) + src[2].f[1];
239 dst->f[2] = src[0].f[2] * (src[1].f[2] - src[2].f[2]) + src[2].f[2];
240 dst->f[3] = src[0].f[3] * (src[1].f[3] - src[2].f[3]) + src[2].f[3];
241 }
242
243 static void
244 micro_mad(union tgsi_exec_channel *dst,
245 const union tgsi_exec_channel *src)
246 {
247 dst->f[0] = src[0].f[0] * src[1].f[0] + src[2].f[0];
248 dst->f[1] = src[0].f[1] * src[1].f[1] + src[2].f[1];
249 dst->f[2] = src[0].f[2] * src[1].f[2] + src[2].f[2];
250 dst->f[3] = src[0].f[3] * src[1].f[3] + src[2].f[3];
251 }
252
253 static void
254 micro_mov(union tgsi_exec_channel *dst,
255 const union tgsi_exec_channel *src)
256 {
257 dst->u[0] = src->u[0];
258 dst->u[1] = src->u[1];
259 dst->u[2] = src->u[2];
260 dst->u[3] = src->u[3];
261 }
262
263 static void
264 micro_rcp(union tgsi_exec_channel *dst,
265 const union tgsi_exec_channel *src)
266 {
267 dst->f[0] = 1.0f / src->f[0];
268 dst->f[1] = 1.0f / src->f[1];
269 dst->f[2] = 1.0f / src->f[2];
270 dst->f[3] = 1.0f / src->f[3];
271 }
272
273 static void
274 micro_rnd(union tgsi_exec_channel *dst,
275 const union tgsi_exec_channel *src)
276 {
277 dst->f[0] = floorf(src->f[0] + 0.5f);
278 dst->f[1] = floorf(src->f[1] + 0.5f);
279 dst->f[2] = floorf(src->f[2] + 0.5f);
280 dst->f[3] = floorf(src->f[3] + 0.5f);
281 }
282
283 static void
284 micro_rsq(union tgsi_exec_channel *dst,
285 const union tgsi_exec_channel *src)
286 {
287 dst->f[0] = 1.0f / sqrtf(fabsf(src->f[0]));
288 dst->f[1] = 1.0f / sqrtf(fabsf(src->f[1]));
289 dst->f[2] = 1.0f / sqrtf(fabsf(src->f[2]));
290 dst->f[3] = 1.0f / sqrtf(fabsf(src->f[3]));
291 }
292
293 static void
294 micro_seq(union tgsi_exec_channel *dst,
295 const union tgsi_exec_channel *src)
296 {
297 dst->f[0] = src[0].f[0] == src[1].f[0] ? 1.0f : 0.0f;
298 dst->f[1] = src[0].f[1] == src[1].f[1] ? 1.0f : 0.0f;
299 dst->f[2] = src[0].f[2] == src[1].f[2] ? 1.0f : 0.0f;
300 dst->f[3] = src[0].f[3] == src[1].f[3] ? 1.0f : 0.0f;
301 }
302
303 static void
304 micro_sge(union tgsi_exec_channel *dst,
305 const union tgsi_exec_channel *src)
306 {
307 dst->f[0] = src[0].f[0] >= src[1].f[0] ? 1.0f : 0.0f;
308 dst->f[1] = src[0].f[1] >= src[1].f[1] ? 1.0f : 0.0f;
309 dst->f[2] = src[0].f[2] >= src[1].f[2] ? 1.0f : 0.0f;
310 dst->f[3] = src[0].f[3] >= src[1].f[3] ? 1.0f : 0.0f;
311 }
312
313 static void
314 micro_sgn(union tgsi_exec_channel *dst,
315 const union tgsi_exec_channel *src)
316 {
317 dst->f[0] = src->f[0] < 0.0f ? -1.0f : src->f[0] > 0.0f ? 1.0f : 0.0f;
318 dst->f[1] = src->f[1] < 0.0f ? -1.0f : src->f[1] > 0.0f ? 1.0f : 0.0f;
319 dst->f[2] = src->f[2] < 0.0f ? -1.0f : src->f[2] > 0.0f ? 1.0f : 0.0f;
320 dst->f[3] = src->f[3] < 0.0f ? -1.0f : src->f[3] > 0.0f ? 1.0f : 0.0f;
321 }
322
323 static void
324 micro_sgt(union tgsi_exec_channel *dst,
325 const union tgsi_exec_channel *src)
326 {
327 dst->f[0] = src[0].f[0] > src[1].f[0] ? 1.0f : 0.0f;
328 dst->f[1] = src[0].f[1] > src[1].f[1] ? 1.0f : 0.0f;
329 dst->f[2] = src[0].f[2] > src[1].f[2] ? 1.0f : 0.0f;
330 dst->f[3] = src[0].f[3] > src[1].f[3] ? 1.0f : 0.0f;
331 }
332
333 static void
334 micro_sin(union tgsi_exec_channel *dst,
335 const union tgsi_exec_channel *src)
336 {
337 dst->f[0] = sinf(src->f[0]);
338 dst->f[1] = sinf(src->f[1]);
339 dst->f[2] = sinf(src->f[2]);
340 dst->f[3] = sinf(src->f[3]);
341 }
342
343 static void
344 micro_sle(union tgsi_exec_channel *dst,
345 const union tgsi_exec_channel *src)
346 {
347 dst->f[0] = src[0].f[0] <= src[1].f[0] ? 1.0f : 0.0f;
348 dst->f[1] = src[0].f[1] <= src[1].f[1] ? 1.0f : 0.0f;
349 dst->f[2] = src[0].f[2] <= src[1].f[2] ? 1.0f : 0.0f;
350 dst->f[3] = src[0].f[3] <= src[1].f[3] ? 1.0f : 0.0f;
351 }
352
353 static void
354 micro_slt(union tgsi_exec_channel *dst,
355 const union tgsi_exec_channel *src)
356 {
357 dst->f[0] = src[0].f[0] < src[1].f[0] ? 1.0f : 0.0f;
358 dst->f[1] = src[0].f[1] < src[1].f[1] ? 1.0f : 0.0f;
359 dst->f[2] = src[0].f[2] < src[1].f[2] ? 1.0f : 0.0f;
360 dst->f[3] = src[0].f[3] < src[1].f[3] ? 1.0f : 0.0f;
361 }
362
363 static void
364 micro_sne(union tgsi_exec_channel *dst,
365 const union tgsi_exec_channel *src)
366 {
367 dst->f[0] = src[0].f[0] != src[1].f[0] ? 1.0f : 0.0f;
368 dst->f[1] = src[0].f[1] != src[1].f[1] ? 1.0f : 0.0f;
369 dst->f[2] = src[0].f[2] != src[1].f[2] ? 1.0f : 0.0f;
370 dst->f[3] = src[0].f[3] != src[1].f[3] ? 1.0f : 0.0f;
371 }
372
373 static void
374 micro_trunc(union tgsi_exec_channel *dst,
375 const union tgsi_exec_channel *src)
376 {
377 dst->f[0] = (float)(int)src->f[0];
378 dst->f[1] = (float)(int)src->f[1];
379 dst->f[2] = (float)(int)src->f[2];
380 dst->f[3] = (float)(int)src->f[3];
381 }
382
383
384 #define CHAN_X 0
385 #define CHAN_Y 1
386 #define CHAN_Z 2
387 #define CHAN_W 3
388
389 enum tgsi_exec_datatype {
390 TGSI_EXEC_DATA_FLOAT,
391 TGSI_EXEC_DATA_INT,
392 TGSI_EXEC_DATA_UINT
393 };
394
395 /*
396 * Shorthand locations of various utility registers (_I = Index, _C = Channel)
397 */
398 #define TEMP_0_I TGSI_EXEC_TEMP_00000000_I
399 #define TEMP_0_C TGSI_EXEC_TEMP_00000000_C
400 #define TEMP_7F_I TGSI_EXEC_TEMP_7FFFFFFF_I
401 #define TEMP_7F_C TGSI_EXEC_TEMP_7FFFFFFF_C
402 #define TEMP_80_I TGSI_EXEC_TEMP_80000000_I
403 #define TEMP_80_C TGSI_EXEC_TEMP_80000000_C
404 #define TEMP_FF_I TGSI_EXEC_TEMP_FFFFFFFF_I
405 #define TEMP_FF_C TGSI_EXEC_TEMP_FFFFFFFF_C
406 #define TEMP_1_I TGSI_EXEC_TEMP_ONE_I
407 #define TEMP_1_C TGSI_EXEC_TEMP_ONE_C
408 #define TEMP_2_I TGSI_EXEC_TEMP_TWO_I
409 #define TEMP_2_C TGSI_EXEC_TEMP_TWO_C
410 #define TEMP_128_I TGSI_EXEC_TEMP_128_I
411 #define TEMP_128_C TGSI_EXEC_TEMP_128_C
412 #define TEMP_M128_I TGSI_EXEC_TEMP_MINUS_128_I
413 #define TEMP_M128_C TGSI_EXEC_TEMP_MINUS_128_C
414 #define TEMP_KILMASK_I TGSI_EXEC_TEMP_KILMASK_I
415 #define TEMP_KILMASK_C TGSI_EXEC_TEMP_KILMASK_C
416 #define TEMP_OUTPUT_I TGSI_EXEC_TEMP_OUTPUT_I
417 #define TEMP_OUTPUT_C TGSI_EXEC_TEMP_OUTPUT_C
418 #define TEMP_PRIMITIVE_I TGSI_EXEC_TEMP_PRIMITIVE_I
419 #define TEMP_PRIMITIVE_C TGSI_EXEC_TEMP_PRIMITIVE_C
420 #define TEMP_CC_I TGSI_EXEC_TEMP_CC_I
421 #define TEMP_CC_C TGSI_EXEC_TEMP_CC_C
422 #define TEMP_3_I TGSI_EXEC_TEMP_THREE_I
423 #define TEMP_3_C TGSI_EXEC_TEMP_THREE_C
424 #define TEMP_HALF_I TGSI_EXEC_TEMP_HALF_I
425 #define TEMP_HALF_C TGSI_EXEC_TEMP_HALF_C
426 #define TEMP_R0 TGSI_EXEC_TEMP_R0
427 #define TEMP_P0 TGSI_EXEC_TEMP_P0
428
429 #define IS_CHANNEL_ENABLED(INST, CHAN)\
430 ((INST).Dst[0].Register.WriteMask & (1 << (CHAN)))
431
432 #define IS_CHANNEL_ENABLED2(INST, CHAN)\
433 ((INST).Dst[1].Register.WriteMask & (1 << (CHAN)))
434
435 #define FOR_EACH_ENABLED_CHANNEL(INST, CHAN)\
436 for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)\
437 if (IS_CHANNEL_ENABLED( INST, CHAN ))
438
439 #define FOR_EACH_ENABLED_CHANNEL2(INST, CHAN)\
440 for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)\
441 if (IS_CHANNEL_ENABLED2( INST, CHAN ))
442
443
444 /** The execution mask is the AND of the cond, loop, cont, switch and function-call masks */
445 #define UPDATE_EXEC_MASK(MACH) \
446 MACH->ExecMask = MACH->CondMask & MACH->LoopMask & MACH->ContMask & MACH->Switch.mask & MACH->FuncMask
447
448
449 static const union tgsi_exec_channel ZeroVec =
450 { { 0.0, 0.0, 0.0, 0.0 } };
451
452
453 #define CHECK_INF_OR_NAN(chan) do {\
454 assert(!util_is_inf_or_nan((chan)->f[0]));\
455 assert(!util_is_inf_or_nan((chan)->f[1]));\
456 assert(!util_is_inf_or_nan((chan)->f[2]));\
457 assert(!util_is_inf_or_nan((chan)->f[3]));\
458 } while (0)
459
460
461 #ifdef DEBUG
462 static void
463 print_chan(const char *msg, const union tgsi_exec_channel *chan)
464 {
465 debug_printf("%s = {%f, %f, %f, %f}\n",
466 msg, chan->f[0], chan->f[1], chan->f[2], chan->f[3]);
467 }
468 #endif
469
470
471 #ifdef DEBUG
472 static void
473 print_temp(const struct tgsi_exec_machine *mach, uint index)
474 {
475 const struct tgsi_exec_vector *tmp = &mach->Temps[index];
476 int i;
477 debug_printf("Temp[%u] =\n", index);
478 for (i = 0; i < 4; i++) {
479 debug_printf(" %c: { %f, %f, %f, %f }\n",
480 "XYZW"[i],
481 tmp->xyzw[i].f[0],
482 tmp->xyzw[i].f[1],
483 tmp->xyzw[i].f[2],
484 tmp->xyzw[i].f[3]);
485 }
486 }
487 #endif
488
489
490 /**
491 * Check if there's a potential src/dst register data dependency when
492 * using SOA execution.
493 * Example:
494 * MOV T, T.yxwz;
495 * This would expand into:
496 * MOV t0, t1;
497 * MOV t1, t0;
498 * MOV t2, t3;
499 * MOV t3, t2;
500 * The second instruction reads t0 after it has already been overwritten, so it produces the wrong result if executed as-is.
501 */
502 boolean
503 tgsi_check_soa_dependencies(const struct tgsi_full_instruction *inst)
504 {
505 uint i, chan;
506
507 uint writemask = inst->Dst[0].Register.WriteMask;
508 if (writemask == TGSI_WRITEMASK_X ||
509 writemask == TGSI_WRITEMASK_Y ||
510 writemask == TGSI_WRITEMASK_Z ||
511 writemask == TGSI_WRITEMASK_W ||
512 writemask == TGSI_WRITEMASK_NONE) {
513 /* no chance of data dependency */
514 return FALSE;
515 }
516
517 /* loop over src regs */
518 for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
519 if ((inst->Src[i].Register.File ==
520 inst->Dst[0].Register.File) &&
521 (inst->Src[i].Register.Index ==
522 inst->Dst[0].Register.Index)) {
523 /* loop over dest channels */
524 uint channelsWritten = 0x0;
525 FOR_EACH_ENABLED_CHANNEL(*inst, chan) {
526 /* check if we're reading a channel that's been written */
527 uint swizzle = tgsi_util_get_full_src_register_swizzle(&inst->Src[i], chan);
528 if (channelsWritten & (1 << swizzle)) {
529 return TRUE;
530 }
531
532 channelsWritten |= (1 << chan);
533 }
534 }
535 }
536 return FALSE;
537 }
538
539
540 /**
541 * Initialize machine state by expanding tokens to full instructions,
542 * allocating temporary storage, setting up constants, etc.
543 * After this, we can call tgsi_exec_machine_run() many times.
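 *
 * A rough usage sketch (using the entry points defined in this file):
 *
 *   mach = tgsi_exec_machine_create();
 *   tgsi_exec_machine_bind_shader(mach, tokens, num_samplers, samplers);
 *   ... set up mach->Consts / mach->Inputs / mach->InterpCoefs ...
 *   tgsi_exec_machine_run(mach);      (repeat per block of inputs)
 *   tgsi_exec_machine_destroy(mach);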
544 */
545 void
546 tgsi_exec_machine_bind_shader(
547 struct tgsi_exec_machine *mach,
548 const struct tgsi_token *tokens,
549 uint numSamplers,
550 struct tgsi_sampler **samplers)
551 {
552 uint k;
553 struct tgsi_parse_context parse;
554 struct tgsi_exec_labels *labels = &mach->Labels;
555 struct tgsi_full_instruction *instructions;
556 struct tgsi_full_declaration *declarations;
557 uint maxInstructions = 10, numInstructions = 0;
558 uint maxDeclarations = 10, numDeclarations = 0;
559 uint instno = 0;
560
561 #if 0
562 tgsi_dump(tokens, 0);
563 #endif
564
565 util_init_math();
566
567 mach->Tokens = tokens;
568 mach->Samplers = samplers;
569
570 k = tgsi_parse_init (&parse, mach->Tokens);
571 if (k != TGSI_PARSE_OK) {
572 debug_printf( "Problem parsing!\n" );
573 return;
574 }
575
576 mach->Processor = parse.FullHeader.Processor.Processor;
577 mach->ImmLimit = 0;
578 labels->count = 0;
579
580 declarations = (struct tgsi_full_declaration *)
581 MALLOC( maxDeclarations * sizeof(struct tgsi_full_declaration) );
582
583 if (!declarations) {
584 return;
585 }
586
587 instructions = (struct tgsi_full_instruction *)
588 MALLOC( maxInstructions * sizeof(struct tgsi_full_instruction) );
589
590 if (!instructions) {
591 FREE( declarations );
592 return;
593 }
594
595 while( !tgsi_parse_end_of_tokens( &parse ) ) {
596 uint pointer = parse.Position;
597 uint i;
598
599 tgsi_parse_token( &parse );
600 switch( parse.FullToken.Token.Type ) {
601 case TGSI_TOKEN_TYPE_DECLARATION:
602 /* save expanded declaration */
603 if (numDeclarations == maxDeclarations) {
604 declarations = REALLOC(declarations,
605 maxDeclarations
606 * sizeof(struct tgsi_full_declaration),
607 (maxDeclarations + 10)
608 * sizeof(struct tgsi_full_declaration));
609 maxDeclarations += 10;
610 }
611 if (parse.FullToken.FullDeclaration.Declaration.File == TGSI_FILE_OUTPUT) {
612 unsigned reg;
613 for (reg = parse.FullToken.FullDeclaration.Range.First;
614 reg <= parse.FullToken.FullDeclaration.Range.Last;
615 ++reg) {
616 ++mach->NumOutputs;
617 }
618 }
619 memcpy(declarations + numDeclarations,
620 &parse.FullToken.FullDeclaration,
621 sizeof(declarations[0]));
622 numDeclarations++;
623 break;
624
625 case TGSI_TOKEN_TYPE_IMMEDIATE:
626 {
627 uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
628 assert( size <= 4 );
629 assert( mach->ImmLimit + 1 <= TGSI_EXEC_NUM_IMMEDIATES );
630
631 for( i = 0; i < size; i++ ) {
632 mach->Imms[mach->ImmLimit][i] =
633 parse.FullToken.FullImmediate.u[i].Float;
634 }
635 mach->ImmLimit += 1;
636 }
637 break;
638
639 case TGSI_TOKEN_TYPE_INSTRUCTION:
640 assert( labels->count < MAX_LABELS );
641
642 labels->labels[labels->count][0] = instno;
643 labels->labels[labels->count][1] = pointer;
644 labels->count++;
645
646 /* save expanded instruction */
647 if (numInstructions == maxInstructions) {
648 instructions = REALLOC(instructions,
649 maxInstructions
650 * sizeof(struct tgsi_full_instruction),
651 (maxInstructions + 10)
652 * sizeof(struct tgsi_full_instruction));
653 maxInstructions += 10;
654 }
655
656 memcpy(instructions + numInstructions,
657 &parse.FullToken.FullInstruction,
658 sizeof(instructions[0]));
659
660 numInstructions++;
661 break;
662
663 case TGSI_TOKEN_TYPE_PROPERTY:
664 break;
665
666 default:
667 assert( 0 );
668 }
669 }
670 tgsi_parse_free (&parse);
671
672 if (mach->Declarations) {
673 FREE( mach->Declarations );
674 }
675 mach->Declarations = declarations;
676 mach->NumDeclarations = numDeclarations;
677
678 if (mach->Instructions) {
679 FREE( mach->Instructions );
680 }
681 mach->Instructions = instructions;
682 mach->NumInstructions = numInstructions;
683 }
684
685
686 struct tgsi_exec_machine *
687 tgsi_exec_machine_create( void )
688 {
689 struct tgsi_exec_machine *mach;
690 uint i;
691
692 mach = align_malloc( sizeof *mach, 16 );
693 if (!mach)
694 goto fail;
695
696 memset(mach, 0, sizeof(*mach));
697
698 mach->Addrs = &mach->Temps[TGSI_EXEC_TEMP_ADDR];
699 mach->MaxGeometryShaderOutputs = TGSI_MAX_TOTAL_VERTICES;
700 mach->Predicates = &mach->Temps[TGSI_EXEC_TEMP_P0];
701
702 /* Setup constants. */
703 for( i = 0; i < 4; i++ ) {
704 mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].u[i] = 0x00000000;
705 mach->Temps[TEMP_7F_I].xyzw[TEMP_7F_C].u[i] = 0x7FFFFFFF;
706 mach->Temps[TEMP_80_I].xyzw[TEMP_80_C].u[i] = 0x80000000;
707 mach->Temps[TEMP_FF_I].xyzw[TEMP_FF_C].u[i] = 0xFFFFFFFF;
708 mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].f[i] = 1.0f;
709 mach->Temps[TEMP_2_I].xyzw[TEMP_2_C].f[i] = 2.0f;
710 mach->Temps[TEMP_128_I].xyzw[TEMP_128_C].f[i] = 128.0f;
711 mach->Temps[TEMP_M128_I].xyzw[TEMP_M128_C].f[i] = -128.0f;
712 mach->Temps[TEMP_3_I].xyzw[TEMP_3_C].f[i] = 3.0f;
713 mach->Temps[TEMP_HALF_I].xyzw[TEMP_HALF_C].f[i] = 0.5f;
714 }
715
716 #ifdef DEBUG
717 /* silence warnings */
718 (void) print_chan;
719 (void) print_temp;
720 #endif
721
722 return mach;
723
724 fail:
725 align_free(mach);
726 return NULL;
727 }
728
729
730 void
731 tgsi_exec_machine_destroy(struct tgsi_exec_machine *mach)
732 {
733 if (mach) {
734 FREE(mach->Instructions);
735 FREE(mach->Declarations);
736 }
737
738 align_free(mach);
739 }
740
741 static void
742 micro_add(
743 union tgsi_exec_channel *dst,
744 const union tgsi_exec_channel *src0,
745 const union tgsi_exec_channel *src1 )
746 {
747 dst->f[0] = src0->f[0] + src1->f[0];
748 dst->f[1] = src0->f[1] + src1->f[1];
749 dst->f[2] = src0->f[2] + src1->f[2];
750 dst->f[3] = src0->f[3] + src1->f[3];
751 }
752
753 static void
754 micro_div(
755 union tgsi_exec_channel *dst,
756 const union tgsi_exec_channel *src0,
757 const union tgsi_exec_channel *src1 )
758 {
759 if (src1->f[0] != 0) {
760 dst->f[0] = src0->f[0] / src1->f[0];
761 }
762 if (src1->f[1] != 0) {
763 dst->f[1] = src0->f[1] / src1->f[1];
764 }
765 if (src1->f[2] != 0) {
766 dst->f[2] = src0->f[2] / src1->f[2];
767 }
768 if (src1->f[3] != 0) {
769 dst->f[3] = src0->f[3] / src1->f[3];
770 }
771 }
772
773 static void
774 micro_float_clamp(union tgsi_exec_channel *dst,
775 const union tgsi_exec_channel *src)
776 {
777 uint i;
778
779 for (i = 0; i < 4; i++) {
780 if (src->f[i] > 0.0f) {
781 if (src->f[i] > 1.884467e+019f)
782 dst->f[i] = 1.884467e+019f;
783 else if (src->f[i] < 5.42101e-020f)
784 dst->f[i] = 5.42101e-020f;
785 else
786 dst->f[i] = src->f[i];
787 }
788 else {
789 if (src->f[i] < -1.884467e+019f)
790 dst->f[i] = -1.884467e+019f;
791 else if (src->f[i] > -5.42101e-020f)
792 dst->f[i] = -5.42101e-020f;
793 else
794 dst->f[i] = src->f[i];
795 }
796 }
797 }
798
799 static void
800 micro_lt(
801 union tgsi_exec_channel *dst,
802 const union tgsi_exec_channel *src0,
803 const union tgsi_exec_channel *src1,
804 const union tgsi_exec_channel *src2,
805 const union tgsi_exec_channel *src3 )
806 {
807 dst->f[0] = src0->f[0] < src1->f[0] ? src2->f[0] : src3->f[0];
808 dst->f[1] = src0->f[1] < src1->f[1] ? src2->f[1] : src3->f[1];
809 dst->f[2] = src0->f[2] < src1->f[2] ? src2->f[2] : src3->f[2];
810 dst->f[3] = src0->f[3] < src1->f[3] ? src2->f[3] : src3->f[3];
811 }
812
813 static void
814 micro_max(
815 union tgsi_exec_channel *dst,
816 const union tgsi_exec_channel *src0,
817 const union tgsi_exec_channel *src1 )
818 {
819 dst->f[0] = src0->f[0] > src1->f[0] ? src0->f[0] : src1->f[0];
820 dst->f[1] = src0->f[1] > src1->f[1] ? src0->f[1] : src1->f[1];
821 dst->f[2] = src0->f[2] > src1->f[2] ? src0->f[2] : src1->f[2];
822 dst->f[3] = src0->f[3] > src1->f[3] ? src0->f[3] : src1->f[3];
823 }
824
825 static void
826 micro_min(
827 union tgsi_exec_channel *dst,
828 const union tgsi_exec_channel *src0,
829 const union tgsi_exec_channel *src1 )
830 {
831 dst->f[0] = src0->f[0] < src1->f[0] ? src0->f[0] : src1->f[0];
832 dst->f[1] = src0->f[1] < src1->f[1] ? src0->f[1] : src1->f[1];
833 dst->f[2] = src0->f[2] < src1->f[2] ? src0->f[2] : src1->f[2];
834 dst->f[3] = src0->f[3] < src1->f[3] ? src0->f[3] : src1->f[3];
835 }
836
837 static void
838 micro_mul(
839 union tgsi_exec_channel *dst,
840 const union tgsi_exec_channel *src0,
841 const union tgsi_exec_channel *src1 )
842 {
843 dst->f[0] = src0->f[0] * src1->f[0];
844 dst->f[1] = src0->f[1] * src1->f[1];
845 dst->f[2] = src0->f[2] * src1->f[2];
846 dst->f[3] = src0->f[3] * src1->f[3];
847 }
848
849 #if 0
850 static void
851 micro_imul64(
852 union tgsi_exec_channel *dst0,
853 union tgsi_exec_channel *dst1,
854 const union tgsi_exec_channel *src0,
855 const union tgsi_exec_channel *src1 )
856 {
857 dst1->i[0] = src0->i[0] * src1->i[0];
858 dst1->i[1] = src0->i[1] * src1->i[1];
859 dst1->i[2] = src0->i[2] * src1->i[2];
860 dst1->i[3] = src0->i[3] * src1->i[3];
861 dst0->i[0] = 0;
862 dst0->i[1] = 0;
863 dst0->i[2] = 0;
864 dst0->i[3] = 0;
865 }
866 #endif
867
868 #if 0
869 static void
870 micro_umul64(
871 union tgsi_exec_channel *dst0,
872 union tgsi_exec_channel *dst1,
873 const union tgsi_exec_channel *src0,
874 const union tgsi_exec_channel *src1 )
875 {
876 dst1->u[0] = src0->u[0] * src1->u[0];
877 dst1->u[1] = src0->u[1] * src1->u[1];
878 dst1->u[2] = src0->u[2] * src1->u[2];
879 dst1->u[3] = src0->u[3] * src1->u[3];
880 dst0->u[0] = 0;
881 dst0->u[1] = 0;
882 dst0->u[2] = 0;
883 dst0->u[3] = 0;
884 }
885 #endif
886
887
888 #if 0
889 static void
890 micro_movc(
891 union tgsi_exec_channel *dst,
892 const union tgsi_exec_channel *src0,
893 const union tgsi_exec_channel *src1,
894 const union tgsi_exec_channel *src2 )
895 {
896 dst->u[0] = src0->u[0] ? src1->u[0] : src2->u[0];
897 dst->u[1] = src0->u[1] ? src1->u[1] : src2->u[1];
898 dst->u[2] = src0->u[2] ? src1->u[2] : src2->u[2];
899 dst->u[3] = src0->u[3] ? src1->u[3] : src2->u[3];
900 }
901 #endif
902
903 static void
904 micro_neg(
905 union tgsi_exec_channel *dst,
906 const union tgsi_exec_channel *src )
907 {
908 dst->f[0] = -src->f[0];
909 dst->f[1] = -src->f[1];
910 dst->f[2] = -src->f[2];
911 dst->f[3] = -src->f[3];
912 }
913
914 static void
915 micro_pow(
916 union tgsi_exec_channel *dst,
917 const union tgsi_exec_channel *src0,
918 const union tgsi_exec_channel *src1 )
919 {
920 #if FAST_MATH
921 dst->f[0] = util_fast_pow( src0->f[0], src1->f[0] );
922 dst->f[1] = util_fast_pow( src0->f[1], src1->f[1] );
923 dst->f[2] = util_fast_pow( src0->f[2], src1->f[2] );
924 dst->f[3] = util_fast_pow( src0->f[3], src1->f[3] );
925 #else
926 dst->f[0] = powf( src0->f[0], src1->f[0] );
927 dst->f[1] = powf( src0->f[1], src1->f[1] );
928 dst->f[2] = powf( src0->f[2], src1->f[2] );
929 dst->f[3] = powf( src0->f[3], src1->f[3] );
930 #endif
931 }
932
933 static void
934 micro_sqrt( union tgsi_exec_channel *dst,
935 const union tgsi_exec_channel *src )
936 {
937 dst->f[0] = sqrtf( src->f[0] );
938 dst->f[1] = sqrtf( src->f[1] );
939 dst->f[2] = sqrtf( src->f[2] );
940 dst->f[3] = sqrtf( src->f[3] );
941 }
942
943 static void
944 micro_sub(
945 union tgsi_exec_channel *dst,
946 const union tgsi_exec_channel *src0,
947 const union tgsi_exec_channel *src1 )
948 {
949 dst->f[0] = src0->f[0] - src1->f[0];
950 dst->f[1] = src0->f[1] - src1->f[1];
951 dst->f[2] = src0->f[2] - src1->f[2];
952 dst->f[3] = src0->f[3] - src1->f[3];
953 }
954
955 static void
956 fetch_src_file_channel(
957 const struct tgsi_exec_machine *mach,
958 const uint file,
959 const uint swizzle,
960 const union tgsi_exec_channel *index,
961 union tgsi_exec_channel *chan )
962 {
963 switch( swizzle ) {
964 case TGSI_SWIZZLE_X:
965 case TGSI_SWIZZLE_Y:
966 case TGSI_SWIZZLE_Z:
967 case TGSI_SWIZZLE_W:
968 switch( file ) {
969 case TGSI_FILE_CONSTANT:
970 assert(mach->Consts);
971 if (index->i[0] < 0)
972 chan->f[0] = 0.0f;
973 else
974 chan->f[0] = mach->Consts[index->i[0]][swizzle];
975 if (index->i[1] < 0)
976 chan->f[1] = 0.0f;
977 else
978 chan->f[1] = mach->Consts[index->i[1]][swizzle];
979 if (index->i[2] < 0)
980 chan->f[2] = 0.0f;
981 else
982 chan->f[2] = mach->Consts[index->i[2]][swizzle];
983 if (index->i[3] < 0)
984 chan->f[3] = 0.0f;
985 else
986 chan->f[3] = mach->Consts[index->i[3]][swizzle];
987 break;
988
989 case TGSI_FILE_INPUT:
990 case TGSI_FILE_SYSTEM_VALUE:
991 chan->u[0] = mach->Inputs[index->i[0]].xyzw[swizzle].u[0];
992 chan->u[1] = mach->Inputs[index->i[1]].xyzw[swizzle].u[1];
993 chan->u[2] = mach->Inputs[index->i[2]].xyzw[swizzle].u[2];
994 chan->u[3] = mach->Inputs[index->i[3]].xyzw[swizzle].u[3];
995 break;
996
997 case TGSI_FILE_TEMPORARY:
998 assert(index->i[0] < TGSI_EXEC_NUM_TEMPS);
999 chan->u[0] = mach->Temps[index->i[0]].xyzw[swizzle].u[0];
1000 chan->u[1] = mach->Temps[index->i[1]].xyzw[swizzle].u[1];
1001 chan->u[2] = mach->Temps[index->i[2]].xyzw[swizzle].u[2];
1002 chan->u[3] = mach->Temps[index->i[3]].xyzw[swizzle].u[3];
1003 break;
1004
1005 case TGSI_FILE_IMMEDIATE:
1006 assert( index->i[0] < (int) mach->ImmLimit );
1007 chan->f[0] = mach->Imms[index->i[0]][swizzle];
1008 assert( index->i[1] < (int) mach->ImmLimit );
1009 chan->f[1] = mach->Imms[index->i[1]][swizzle];
1010 assert( index->i[2] < (int) mach->ImmLimit );
1011 chan->f[2] = mach->Imms[index->i[2]][swizzle];
1012 assert( index->i[3] < (int) mach->ImmLimit );
1013 chan->f[3] = mach->Imms[index->i[3]][swizzle];
1014 break;
1015
1016 case TGSI_FILE_ADDRESS:
1017 chan->u[0] = mach->Addrs[index->i[0]].xyzw[swizzle].u[0];
1018 chan->u[1] = mach->Addrs[index->i[1]].xyzw[swizzle].u[1];
1019 chan->u[2] = mach->Addrs[index->i[2]].xyzw[swizzle].u[2];
1020 chan->u[3] = mach->Addrs[index->i[3]].xyzw[swizzle].u[3];
1021 break;
1022
1023 case TGSI_FILE_PREDICATE:
1024 assert(index->i[0] < TGSI_EXEC_NUM_PREDS);
1025 assert(index->i[1] < TGSI_EXEC_NUM_PREDS);
1026 assert(index->i[2] < TGSI_EXEC_NUM_PREDS);
1027 assert(index->i[3] < TGSI_EXEC_NUM_PREDS);
1028 chan->u[0] = mach->Predicates[0].xyzw[swizzle].u[0];
1029 chan->u[1] = mach->Predicates[0].xyzw[swizzle].u[1];
1030 chan->u[2] = mach->Predicates[0].xyzw[swizzle].u[2];
1031 chan->u[3] = mach->Predicates[0].xyzw[swizzle].u[3];
1032 break;
1033
1034 case TGSI_FILE_OUTPUT:
1035 /* vertex/fragment output vars can be read too */
1036 chan->u[0] = mach->Outputs[index->i[0]].xyzw[swizzle].u[0];
1037 chan->u[1] = mach->Outputs[index->i[1]].xyzw[swizzle].u[1];
1038 chan->u[2] = mach->Outputs[index->i[2]].xyzw[swizzle].u[2];
1039 chan->u[3] = mach->Outputs[index->i[3]].xyzw[swizzle].u[3];
1040 break;
1041
1042 default:
1043 assert( 0 );
1044 }
1045 break;
1046
1047 default:
1048 assert( 0 );
1049 }
1050 }
1051
1052 static void
1053 fetch_source(const struct tgsi_exec_machine *mach,
1054 union tgsi_exec_channel *chan,
1055 const struct tgsi_full_src_register *reg,
1056 const uint chan_index,
1057 enum tgsi_exec_datatype src_datatype)
1058 {
1059 union tgsi_exec_channel index;
1060 uint swizzle;
1061
1062 /* We start with a direct index into a register file.
1063 *
1064 * file[1],
1065 * where:
1066 * file = Register.File
1067 * [1] = Register.Index
1068 */
1069 index.i[0] =
1070 index.i[1] =
1071 index.i[2] =
1072 index.i[3] = reg->Register.Index;
1073
1074 /* There is an extra source register that indirectly subscripts
1075 * a register file. The direct index now becomes an offset
1076 * that is being added to the indirect register.
1077 *
1078 * file[ind[2].x+1],
1079 * where:
1080 * ind = Indirect.File
1081 * [2] = Indirect.Index
1082 * .x = Indirect.SwizzleX
1083 */
1084 if (reg->Register.Indirect) {
1085 union tgsi_exec_channel index2;
1086 union tgsi_exec_channel indir_index;
1087 const uint execmask = mach->ExecMask;
1088 uint i;
1089
1090 /* which address register (always zero now) */
1091 index2.i[0] =
1092 index2.i[1] =
1093 index2.i[2] =
1094 index2.i[3] = reg->Indirect.Index;
1095
1096 /* get current value of address register[swizzle] */
1097 swizzle = tgsi_util_get_src_register_swizzle( &reg->Indirect, CHAN_X );
1098 fetch_src_file_channel(
1099 mach,
1100 reg->Indirect.File,
1101 swizzle,
1102 &index2,
1103 &indir_index );
1104
1105 /* add value of address register to the offset */
1106 index.i[0] += indir_index.i[0];
1107 index.i[1] += indir_index.i[1];
1108 index.i[2] += indir_index.i[2];
1109 index.i[3] += indir_index.i[3];
1110
1111 /* for disabled execution channels, zero-out the index to
1112 * avoid using a potential garbage value.
1113 */
1114 for (i = 0; i < QUAD_SIZE; i++) {
1115 if ((execmask & (1 << i)) == 0)
1116 index.i[i] = 0;
1117 }
1118 }
1119
1120 /* There is an extra source register that is a second
1121 * subscript to a register file. Effectively it means that
1122 * the register file is actually a 2D array of registers.
1123 *
1124 * file[1][3] == file[1*sizeof(file[1])+3],
1125 * where:
1126 * [3] = Dimension.Index
1127 */
1128 if (reg->Register.Dimension) {
1129 /* The size of the first-order array depends on the register file type.
1130 * We need to multiply the index to the first array to get an effective,
1131 * "flat" index that points to the beginning of the second-order array.
1132 */
1133 switch (reg->Register.File) {
1134 case TGSI_FILE_INPUT:
1135 case TGSI_FILE_SYSTEM_VALUE:
1136 index.i[0] *= TGSI_EXEC_MAX_INPUT_ATTRIBS;
1137 index.i[1] *= TGSI_EXEC_MAX_INPUT_ATTRIBS;
1138 index.i[2] *= TGSI_EXEC_MAX_INPUT_ATTRIBS;
1139 index.i[3] *= TGSI_EXEC_MAX_INPUT_ATTRIBS;
1140 break;
1141 case TGSI_FILE_CONSTANT:
1142 index.i[0] *= TGSI_EXEC_MAX_CONST_BUFFER;
1143 index.i[1] *= TGSI_EXEC_MAX_CONST_BUFFER;
1144 index.i[2] *= TGSI_EXEC_MAX_CONST_BUFFER;
1145 index.i[3] *= TGSI_EXEC_MAX_CONST_BUFFER;
1146 break;
1147 default:
1148 assert( 0 );
1149 }
1150
1151 index.i[0] += reg->Dimension.Index;
1152 index.i[1] += reg->Dimension.Index;
1153 index.i[2] += reg->Dimension.Index;
1154 index.i[3] += reg->Dimension.Index;
1155
1156 /* Again, the second subscript index can be addressed indirectly
1157 * identically to the first one.
1158 * Nothing stops us from indirectly addressing the indirect register,
1159 * but there is no need for that, so we won't exercise it.
1160 *
1161 * file[1][ind[4].y+3],
1162 * where:
1163 * ind = DimIndirect.File
1164 * [4] = DimIndirect.Index
1165 * .y = DimIndirect.SwizzleX
1166 */
1167 if (reg->Dimension.Indirect) {
1168 union tgsi_exec_channel index2;
1169 union tgsi_exec_channel indir_index;
1170 const uint execmask = mach->ExecMask;
1171 uint i;
1172
1173 index2.i[0] =
1174 index2.i[1] =
1175 index2.i[2] =
1176 index2.i[3] = reg->DimIndirect.Index;
1177
1178 swizzle = tgsi_util_get_src_register_swizzle( &reg->DimIndirect, CHAN_X );
1179 fetch_src_file_channel(
1180 mach,
1181 reg->DimIndirect.File,
1182 swizzle,
1183 &index2,
1184 &indir_index );
1185
1186 index.i[0] += indir_index.i[0];
1187 index.i[1] += indir_index.i[1];
1188 index.i[2] += indir_index.i[2];
1189 index.i[3] += indir_index.i[3];
1190
1191 /* for disabled execution channels, zero-out the index to
1192 * avoid using a potential garbage value.
1193 */
1194 for (i = 0; i < QUAD_SIZE; i++) {
1195 if ((execmask & (1 << i)) == 0)
1196 index.i[i] = 0;
1197 }
1198 }
1199
1200 /* If by any chance there was a need for a 3D array of register
1201 * files, we would have to check whether Dimension is followed
1202 * by a dimension register and continue the saga.
1203 */
1204 }
1205
1206 swizzle = tgsi_util_get_full_src_register_swizzle( reg, chan_index );
1207 fetch_src_file_channel(
1208 mach,
1209 reg->Register.File,
1210 swizzle,
1211 &index,
1212 chan );
1213
1214 if (reg->Register.Absolute) {
1215 if (src_datatype == TGSI_EXEC_DATA_FLOAT) {
1216 micro_abs(chan, chan);
1217 } else {
1218 micro_iabs(chan, chan);
1219 }
1220 }
1221
1222 if (reg->Register.Negate) {
1223 if (src_datatype == TGSI_EXEC_DATA_FLOAT) {
1224 micro_neg(chan, chan);
1225 } else {
1226 micro_ineg(chan, chan);
1227 }
1228 }
1229 }
1230
1231 static void
1232 store_dest(struct tgsi_exec_machine *mach,
1233 const union tgsi_exec_channel *chan,
1234 const struct tgsi_full_dst_register *reg,
1235 const struct tgsi_full_instruction *inst,
1236 uint chan_index,
1237 enum tgsi_exec_datatype dst_datatype)
1238 {
1239 uint i;
1240 union tgsi_exec_channel null;
1241 union tgsi_exec_channel *dst;
1242 uint execmask = mach->ExecMask;
1243 int offset = 0; /* indirection offset */
1244 int index;
1245
1246 if (dst_datatype == TGSI_EXEC_DATA_FLOAT) {
1247 CHECK_INF_OR_NAN(chan);
1248 }
1249
1250 /* There is an extra source register that indirectly subscripts
1251 * a register file. The direct index now becomes an offset
1252 * that is being added to the indirect register.
1253 *
1254 * file[ind[2].x+1],
1255 * where:
1256 * ind = Indirect.File
1257 * [2] = Indirect.Index
1258 * .x = Indirect.SwizzleX
1259 */
1260 if (reg->Register.Indirect) {
1261 union tgsi_exec_channel index;
1262 union tgsi_exec_channel indir_index;
1263 uint swizzle;
1264
1265 /* which address register (always zero for now) */
1266 index.i[0] =
1267 index.i[1] =
1268 index.i[2] =
1269 index.i[3] = reg->Indirect.Index;
1270
1271 /* get current value of address register[swizzle] */
1272 swizzle = tgsi_util_get_src_register_swizzle( &reg->Indirect, CHAN_X );
1273
1274 /* fetch values from the address/indirection register */
1275 fetch_src_file_channel(
1276 mach,
1277 reg->Indirect.File,
1278 swizzle,
1279 &index,
1280 &indir_index );
1281
1282 /* save indirection offset */
1283 offset = indir_index.i[0];
1284 }
1285
1286 switch (reg->Register.File) {
1287 case TGSI_FILE_NULL:
1288 dst = &null;
1289 break;
1290
1291 case TGSI_FILE_OUTPUT:
1292 index = mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0]
1293 + reg->Register.Index;
1294 dst = &mach->Outputs[offset + index].xyzw[chan_index];
1295 #if 0
1296 if (TGSI_PROCESSOR_GEOMETRY == mach->Processor) {
1297 fprintf(stderr, "STORING OUT[%d] mask(%d), = (", offset + index, execmask);
1298 for (i = 0; i < QUAD_SIZE; i++)
1299 if (execmask & (1 << i))
1300 fprintf(stderr, "%f, ", chan->f[i]);
1301 fprintf(stderr, ")\n");
1302 }
1303 #endif
1304 break;
1305
1306 case TGSI_FILE_TEMPORARY:
1307 index = reg->Register.Index;
1308 assert( index < TGSI_EXEC_NUM_TEMPS );
1309 dst = &mach->Temps[offset + index].xyzw[chan_index];
1310 break;
1311
1312 case TGSI_FILE_ADDRESS:
1313 index = reg->Register.Index;
1314 dst = &mach->Addrs[index].xyzw[chan_index];
1315 break;
1316
1317 case TGSI_FILE_LOOP:
1318 assert(reg->Register.Index == 0);
1319 assert(mach->LoopCounterStackTop > 0);
1320 assert(chan_index == CHAN_X);
1321 dst = &mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[chan_index];
1322 break;
1323
1324 case TGSI_FILE_PREDICATE:
1325 index = reg->Register.Index;
1326 assert(index < TGSI_EXEC_NUM_PREDS);
1327 dst = &mach->Predicates[index].xyzw[chan_index];
1328 break;
1329
1330 default:
1331 assert( 0 );
1332 return;
1333 }
1334
1335 if (inst->Instruction.Predicate) {
1336 uint swizzle;
1337 union tgsi_exec_channel *pred;
1338
1339 switch (chan_index) {
1340 case CHAN_X:
1341 swizzle = inst->Predicate.SwizzleX;
1342 break;
1343 case CHAN_Y:
1344 swizzle = inst->Predicate.SwizzleY;
1345 break;
1346 case CHAN_Z:
1347 swizzle = inst->Predicate.SwizzleZ;
1348 break;
1349 case CHAN_W:
1350 swizzle = inst->Predicate.SwizzleW;
1351 break;
1352 default:
1353 assert(0);
1354 return;
1355 }
1356
1357 assert(inst->Predicate.Index == 0);
1358
1359 pred = &mach->Predicates[inst->Predicate.Index].xyzw[swizzle];
1360
1361 if (inst->Predicate.Negate) {
1362 for (i = 0; i < QUAD_SIZE; i++) {
1363 if (pred->u[i]) {
1364 execmask &= ~(1 << i);
1365 }
1366 }
1367 } else {
1368 for (i = 0; i < QUAD_SIZE; i++) {
1369 if (!pred->u[i]) {
1370 execmask &= ~(1 << i);
1371 }
1372 }
1373 }
1374 }
1375
1376 switch (inst->Instruction.Saturate) {
1377 case TGSI_SAT_NONE:
1378 for (i = 0; i < QUAD_SIZE; i++)
1379 if (execmask & (1 << i))
1380 dst->i[i] = chan->i[i];
1381 break;
1382
1383 case TGSI_SAT_ZERO_ONE:
1384 for (i = 0; i < QUAD_SIZE; i++)
1385 if (execmask & (1 << i)) {
1386 if (chan->f[i] < 0.0f)
1387 dst->f[i] = 0.0f;
1388 else if (chan->f[i] > 1.0f)
1389 dst->f[i] = 1.0f;
1390 else
1391 dst->i[i] = chan->i[i];
1392 }
1393 break;
1394
1395 case TGSI_SAT_MINUS_PLUS_ONE:
1396 for (i = 0; i < QUAD_SIZE; i++)
1397 if (execmask & (1 << i)) {
1398 if (chan->f[i] < -1.0f)
1399 dst->f[i] = -1.0f;
1400 else if (chan->f[i] > 1.0f)
1401 dst->f[i] = 1.0f;
1402 else
1403 dst->i[i] = chan->i[i];
1404 }
1405 break;
1406
1407 default:
1408 assert( 0 );
1409 }
1410 }
1411
1412 #define FETCH(VAL,INDEX,CHAN)\
1413 fetch_source(mach, VAL, &inst->Src[INDEX], CHAN, TGSI_EXEC_DATA_FLOAT)
1414
1415 #define STORE(VAL,INDEX,CHAN)\
1416 store_dest(mach, VAL, &inst->Dst[INDEX], inst, CHAN, TGSI_EXEC_DATA_FLOAT)
1417
1418
1419 /**
1420 * Execute ARB-style KIL which is predicated by a src register.
1421 * Kill fragment if any of the four values is less than zero.
1422 */
1423 static void
1424 exec_kil(struct tgsi_exec_machine *mach,
1425 const struct tgsi_full_instruction *inst)
1426 {
1427 uint uniquemask;
1428 uint chan_index;
1429 uint kilmask = 0; /* bit 0 = pixel 0, bit 1 = pixel 1, etc */
1430 union tgsi_exec_channel r[1];
1431
1432 /* This mask stores component bits that were already tested. */
1433 uniquemask = 0;
1434
1435 for (chan_index = 0; chan_index < 4; chan_index++)
1436 {
1437 uint swizzle;
1438 uint i;
1439
1440 /* unswizzle channel */
1441 swizzle = tgsi_util_get_full_src_register_swizzle (
1442 &inst->Src[0],
1443 chan_index);
1444
1445 /* check if the component has not been already tested */
1446 if (uniquemask & (1 << swizzle))
1447 continue;
1448 uniquemask |= 1 << swizzle;
1449
1450 FETCH(&r[0], 0, chan_index);
1451 for (i = 0; i < 4; i++)
1452 if (r[0].f[i] < 0.0f)
1453 kilmask |= 1 << i;
1454 }
1455
1456 mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] |= kilmask;
1457 }
1458
1459 /**
1460 * Execute NVIDIA-style KIL which is predicated by a condition code.
1461 * Kill fragment if the condition code is TRUE.
1462 */
1463 static void
1464 exec_kilp(struct tgsi_exec_machine *mach,
1465 const struct tgsi_full_instruction *inst)
1466 {
1467 uint kilmask; /* bit 0 = pixel 0, bit 1 = pixel 1, etc */
1468
1469 /* "unconditional" kil */
1470 kilmask = mach->ExecMask;
1471 mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] |= kilmask;
1472 }
1473
1474 static void
1475 emit_vertex(struct tgsi_exec_machine *mach)
1476 {
1477 /* FIXME: check for exec mask correctly
1478 unsigned i;
1479 for (i = 0; i < QUAD_SIZE; ++i) {
1480 if ((mach->ExecMask & (1 << i)))
1481 */
1482 if (mach->ExecMask) {
1483 mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0] += mach->NumOutputs;
1484 mach->Primitives[mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]]++;
1485 }
1486 }
1487
1488 static void
1489 emit_primitive(struct tgsi_exec_machine *mach)
1490 {
1491 unsigned *prim_count = &mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0];
1492 /* FIXME: check for exec mask correctly
1493 unsigned i;
1494 for (i = 0; i < QUAD_SIZE; ++i) {
1495 if ((mach->ExecMask & (1 << i)))
1496 */
1497 if (mach->ExecMask) {
1498 ++(*prim_count);
1499 debug_assert((*prim_count * mach->NumOutputs) < mach->MaxGeometryShaderOutputs);
1500 mach->Primitives[*prim_count] = 0;
1501 }
1502 }
1503
1504 /*
1505 * Fetch four texture samples using STR texture coordinates.
1506 */
1507 static void
1508 fetch_texel( struct tgsi_sampler *sampler,
1509 const union tgsi_exec_channel *s,
1510 const union tgsi_exec_channel *t,
1511 const union tgsi_exec_channel *p,
1512 float lodbias, /* XXX should be float[4] */
1513 union tgsi_exec_channel *r,
1514 union tgsi_exec_channel *g,
1515 union tgsi_exec_channel *b,
1516 union tgsi_exec_channel *a )
1517 {
1518 uint j;
1519 float rgba[NUM_CHANNELS][QUAD_SIZE];
1520
1521 sampler->get_samples(sampler, s->f, t->f, p->f, lodbias, rgba);
1522
1523 for (j = 0; j < 4; j++) {
1524 r->f[j] = rgba[0][j];
1525 g->f[j] = rgba[1][j];
1526 b->f[j] = rgba[2][j];
1527 a->f[j] = rgba[3][j];
1528 }
1529 }
1530
1531
1532 #define TEX_MODIFIER_NONE 0
1533 #define TEX_MODIFIER_PROJECTED 1
1534 #define TEX_MODIFIER_LOD_BIAS 2
1535 #define TEX_MODIFIER_EXPLICIT_LOD 3
1536
1537
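/*
 * Execute a TEX-family instruction. With TEX_MODIFIER_NONE only the
 * coordinates are fetched; with TEX_MODIFIER_PROJECTED they are divided
 * by the W component; with the LOD modifiers W is passed to fetch_texel()
 * as the lodBias value.
 */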
1538 static void
1539 exec_tex(struct tgsi_exec_machine *mach,
1540 const struct tgsi_full_instruction *inst,
1541 uint modifier)
1542 {
1543 const uint unit = inst->Src[1].Register.Index;
1544 union tgsi_exec_channel r[4];
1545 uint chan_index;
1546 float lodBias = 0.0f;
1547
1548 switch (inst->Texture.Texture) {
1549 case TGSI_TEXTURE_1D:
1550 case TGSI_TEXTURE_SHADOW1D:
1551 FETCH(&r[0], 0, CHAN_X);
1552
1553 if (modifier != TEX_MODIFIER_NONE) {
1554 FETCH(&r[1], 0, CHAN_W);
1555 if (modifier == TEX_MODIFIER_PROJECTED) {
1556 micro_div(&r[0], &r[0], &r[1]);
1557 } else {
1558 lodBias = r[1].f[0];
1559 }
1560 }
1561
1562 fetch_texel(mach->Samplers[unit],
1563 &r[0], &ZeroVec, &ZeroVec, lodBias, /* S, T, P, BIAS */
1564 &r[0], &r[1], &r[2], &r[3]); /* R, G, B, A */
1565 break;
1566
1567 case TGSI_TEXTURE_2D:
1568 case TGSI_TEXTURE_RECT:
1569 case TGSI_TEXTURE_SHADOW2D:
1570 case TGSI_TEXTURE_SHADOWRECT:
1571 FETCH(&r[0], 0, CHAN_X);
1572 FETCH(&r[1], 0, CHAN_Y);
1573 FETCH(&r[2], 0, CHAN_Z);
1574
1575 if (modifier != TEX_MODIFIER_NONE) {
1576 FETCH(&r[3], 0, CHAN_W);
1577 if (modifier == TEX_MODIFIER_PROJECTED) {
1578 micro_div(&r[0], &r[0], &r[3]);
1579 micro_div(&r[1], &r[1], &r[3]);
1580 micro_div(&r[2], &r[2], &r[3]);
1581 } else {
1582 lodBias = r[3].f[0];
1583 }
1584 }
1585
1586 fetch_texel(mach->Samplers[unit],
1587 &r[0], &r[1], &r[2], lodBias, /* inputs */
1588 &r[0], &r[1], &r[2], &r[3]); /* outputs */
1589 break;
1590
1591 case TGSI_TEXTURE_3D:
1592 case TGSI_TEXTURE_CUBE:
1593 FETCH(&r[0], 0, CHAN_X);
1594 FETCH(&r[1], 0, CHAN_Y);
1595 FETCH(&r[2], 0, CHAN_Z);
1596
1597 if (modifier != TEX_MODIFIER_NONE) {
1598 FETCH(&r[3], 0, CHAN_W);
1599 if (modifier == TEX_MODIFIER_PROJECTED) {
1600 micro_div(&r[0], &r[0], &r[3]);
1601 micro_div(&r[1], &r[1], &r[3]);
1602 micro_div(&r[2], &r[2], &r[3]);
1603 } else {
1604 lodBias = r[3].f[0];
1605 }
1606 }
1607
1608 fetch_texel(mach->Samplers[unit],
1609 &r[0], &r[1], &r[2], lodBias,
1610 &r[0], &r[1], &r[2], &r[3]);
1611 break;
1612
1613 default:
1614 assert(0);
1615 }
1616
1617 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
1618 STORE(&r[chan_index], 0, chan_index);
1619 }
1620 }
1621
1622 static void
1623 exec_txd(struct tgsi_exec_machine *mach,
1624 const struct tgsi_full_instruction *inst)
1625 {
1626 const uint unit = inst->Src[3].Register.Index;
1627 union tgsi_exec_channel r[4];
1628 uint chan_index;
1629
1630 /*
1631 * XXX: This is fake TXD -- the derivatives are not taken into account, yet.
1632 */
1633
1634 switch (inst->Texture.Texture) {
1635 case TGSI_TEXTURE_1D:
1636 case TGSI_TEXTURE_SHADOW1D:
1637
1638 FETCH(&r[0], 0, CHAN_X);
1639
1640 fetch_texel(mach->Samplers[unit],
1641 &r[0], &ZeroVec, &ZeroVec, 0.0f, /* S, T, P, BIAS */
1642 &r[0], &r[1], &r[2], &r[3]); /* R, G, B, A */
1643 break;
1644
1645 case TGSI_TEXTURE_2D:
1646 case TGSI_TEXTURE_RECT:
1647 case TGSI_TEXTURE_SHADOW2D:
1648 case TGSI_TEXTURE_SHADOWRECT:
1649
1650 FETCH(&r[0], 0, CHAN_X);
1651 FETCH(&r[1], 0, CHAN_Y);
1652 FETCH(&r[2], 0, CHAN_Z);
1653
1654 fetch_texel(mach->Samplers[unit],
1655 &r[0], &r[1], &r[2], 0.0f, /* inputs */
1656 &r[0], &r[1], &r[2], &r[3]); /* outputs */
1657 break;
1658
1659 case TGSI_TEXTURE_3D:
1660 case TGSI_TEXTURE_CUBE:
1661
1662 FETCH(&r[0], 0, CHAN_X);
1663 FETCH(&r[1], 0, CHAN_Y);
1664 FETCH(&r[2], 0, CHAN_Z);
1665
1666 fetch_texel(mach->Samplers[unit],
1667 &r[0], &r[1], &r[2], 0.0f,
1668 &r[0], &r[1], &r[2], &r[3]);
1669 break;
1670
1671 default:
1672 assert(0);
1673 }
1674
1675 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
1676 STORE(&r[chan_index], 0, chan_index);
1677 }
1678 }
1679
1680
1681 /**
1682 * Evaluate a constant-valued coefficient at the position of the
1683 * current quad.
1684 */
1685 static void
1686 eval_constant_coef(
1687 struct tgsi_exec_machine *mach,
1688 unsigned attrib,
1689 unsigned chan )
1690 {
1691 unsigned i;
1692
1693 for( i = 0; i < QUAD_SIZE; i++ ) {
1694 mach->Inputs[attrib].xyzw[chan].f[i] = mach->InterpCoefs[attrib].a0[chan];
1695 }
1696 }
1697
1698 /**
1699 * Evaluate a linear-valued coefficient at the position of the
1700 * current quad.
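 * The quad fragments sit at (x,y), (x+1,y), (x,y+1) and (x+1,y+1), so the
 * four channel values differ from a0 by 0, dadx, dady and dadx+dady.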
1701 */
1702 static void
1703 eval_linear_coef(
1704 struct tgsi_exec_machine *mach,
1705 unsigned attrib,
1706 unsigned chan )
1707 {
1708 const float x = mach->QuadPos.xyzw[0].f[0];
1709 const float y = mach->QuadPos.xyzw[1].f[0];
1710 const float dadx = mach->InterpCoefs[attrib].dadx[chan];
1711 const float dady = mach->InterpCoefs[attrib].dady[chan];
1712 const float a0 = mach->InterpCoefs[attrib].a0[chan] + dadx * x + dady * y;
1713 mach->Inputs[attrib].xyzw[chan].f[0] = a0;
1714 mach->Inputs[attrib].xyzw[chan].f[1] = a0 + dadx;
1715 mach->Inputs[attrib].xyzw[chan].f[2] = a0 + dady;
1716 mach->Inputs[attrib].xyzw[chan].f[3] = a0 + dadx + dady;
1717 }
1718
1719 /**
1720 * Evaluate a perspective-valued coefficient at the position of the
1721 * current quad.
1722 */
1723 static void
1724 eval_perspective_coef(
1725 struct tgsi_exec_machine *mach,
1726 unsigned attrib,
1727 unsigned chan )
1728 {
1729 const float x = mach->QuadPos.xyzw[0].f[0];
1730 const float y = mach->QuadPos.xyzw[1].f[0];
1731 const float dadx = mach->InterpCoefs[attrib].dadx[chan];
1732 const float dady = mach->InterpCoefs[attrib].dady[chan];
1733 const float a0 = mach->InterpCoefs[attrib].a0[chan] + dadx * x + dady * y;
1734 const float *w = mach->QuadPos.xyzw[3].f;
1735 /* divide by W here */
1736 mach->Inputs[attrib].xyzw[chan].f[0] = a0 / w[0];
1737 mach->Inputs[attrib].xyzw[chan].f[1] = (a0 + dadx) / w[1];
1738 mach->Inputs[attrib].xyzw[chan].f[2] = (a0 + dady) / w[2];
1739 mach->Inputs[attrib].xyzw[chan].f[3] = (a0 + dadx + dady) / w[3];
1740 }
1741
1742
1743 typedef void (* eval_coef_func)(
1744 struct tgsi_exec_machine *mach,
1745 unsigned attrib,
1746 unsigned chan );
1747
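/*
 * Execute a declaration token. For fragment shaders this is where input
 * attributes are interpolated into mach->Inputs for the current quad,
 * using constant, linear or perspective evaluation as declared.
 */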
1748 static void
1749 exec_declaration(struct tgsi_exec_machine *mach,
1750 const struct tgsi_full_declaration *decl)
1751 {
1752 if (mach->Processor == TGSI_PROCESSOR_FRAGMENT) {
1753 if (decl->Declaration.File == TGSI_FILE_INPUT ||
1754 decl->Declaration.File == TGSI_FILE_SYSTEM_VALUE) {
1755 uint first, last, mask;
1756
1757 first = decl->Range.First;
1758 last = decl->Range.Last;
1759 mask = decl->Declaration.UsageMask;
1760
1761 if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
1762 assert(decl->Semantic.Index == 0);
1763 assert(first == last);
1764 assert(mask == TGSI_WRITEMASK_XYZW);
1765
1766 mach->Inputs[first] = mach->QuadPos;
1767 } else if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
1768 uint i;
1769
1770 assert(decl->Semantic.Index == 0);
1771 assert(first == last);
1772
1773 for (i = 0; i < QUAD_SIZE; i++) {
1774 mach->Inputs[first].xyzw[0].f[i] = mach->Face;
1775 }
1776 } else {
1777 eval_coef_func eval;
1778 uint i, j;
1779
1780 switch (decl->Declaration.Interpolate) {
1781 case TGSI_INTERPOLATE_CONSTANT:
1782 eval = eval_constant_coef;
1783 break;
1784
1785 case TGSI_INTERPOLATE_LINEAR:
1786 eval = eval_linear_coef;
1787 break;
1788
1789 case TGSI_INTERPOLATE_PERSPECTIVE:
1790 eval = eval_perspective_coef;
1791 break;
1792
1793 default:
1794 assert(0);
1795 return;
1796 }
1797
1798 for (j = 0; j < NUM_CHANNELS; j++) {
1799 if (mask & (1 << j)) {
1800 for (i = first; i <= last; i++) {
1801 eval(mach, i, j);
1802 }
1803 }
1804 }
1805 }
1806 }
1807 }
1808 }
1809
1810 typedef void (* micro_op)(union tgsi_exec_channel *dst,
1811 const union tgsi_exec_channel *src);
1812
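/*
 * Scalar unary ops read only the X channel of the source and broadcast the
 * result to every enabled destination channel; the exec_vector_* helpers
 * below operate per enabled channel instead.
 */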
1813 static void
1814 exec_scalar_unary(struct tgsi_exec_machine *mach,
1815 const struct tgsi_full_instruction *inst,
1816 micro_op op,
1817 enum tgsi_exec_datatype dst_datatype,
1818 enum tgsi_exec_datatype src_datatype)
1819 {
1820 unsigned int chan;
1821 union tgsi_exec_channel src;
1822 union tgsi_exec_channel dst;
1823
1824 fetch_source(mach, &src, &inst->Src[0], CHAN_X, src_datatype);
1825 op(&dst, &src);
1826 for (chan = 0; chan < NUM_CHANNELS; chan++) {
1827 if (inst->Dst[0].Register.WriteMask & (1 << chan)) {
1828 store_dest(mach, &dst, &inst->Dst[0], inst, chan, dst_datatype);
1829 }
1830 }
1831 }
1832
1833 static void
1834 exec_vector_unary(struct tgsi_exec_machine *mach,
1835 const struct tgsi_full_instruction *inst,
1836 micro_op op,
1837 enum tgsi_exec_datatype dst_datatype,
1838 enum tgsi_exec_datatype src_datatype)
1839 {
1840 unsigned int chan;
1841 struct tgsi_exec_vector dst;
1842
1843 for (chan = 0; chan < NUM_CHANNELS; chan++) {
1844 if (inst->Dst[0].Register.WriteMask & (1 << chan)) {
1845 union tgsi_exec_channel src;
1846
1847 fetch_source(mach, &src, &inst->Src[0], chan, src_datatype);
1848 op(&dst.xyzw[chan], &src);
1849 }
1850 }
1851 for (chan = 0; chan < NUM_CHANNELS; chan++) {
1852 if (inst->Dst[0].Register.WriteMask & (1 << chan)) {
1853 store_dest(mach, &dst.xyzw[chan], &inst->Dst[0], inst, chan, dst_datatype);
1854 }
1855 }
1856 }
1857
1858 static void
1859 exec_vector_binary(struct tgsi_exec_machine *mach,
1860 const struct tgsi_full_instruction *inst,
1861 micro_op op,
1862 enum tgsi_exec_datatype dst_datatype,
1863 enum tgsi_exec_datatype src_datatype)
1864 {
1865 unsigned int chan;
1866 struct tgsi_exec_vector dst;
1867
1868 for (chan = 0; chan < NUM_CHANNELS; chan++) {
1869 if (inst->Dst[0].Register.WriteMask & (1 << chan)) {
1870 union tgsi_exec_channel src[2];
1871
1872 fetch_source(mach, &src[0], &inst->Src[0], chan, src_datatype);
1873 fetch_source(mach, &src[1], &inst->Src[1], chan, src_datatype);
1874 op(&dst.xyzw[chan], src);
1875 }
1876 }
1877 for (chan = 0; chan < NUM_CHANNELS; chan++) {
1878 if (inst->Dst[0].Register.WriteMask & (1 << chan)) {
1879 store_dest(mach, &dst.xyzw[chan], &inst->Dst[0], inst, chan, dst_datatype);
1880 }
1881 }
1882 }
1883
1884 static void
1885 exec_vector_trinary(struct tgsi_exec_machine *mach,
1886 const struct tgsi_full_instruction *inst,
1887 micro_op op,
1888 enum tgsi_exec_datatype dst_datatype,
1889 enum tgsi_exec_datatype src_datatype)
1890 {
1891 unsigned int chan;
1892 struct tgsi_exec_vector dst;
1893
1894 for (chan = 0; chan < NUM_CHANNELS; chan++) {
1895 if (inst->Dst[0].Register.WriteMask & (1 << chan)) {
1896 union tgsi_exec_channel src[3];
1897
1898 fetch_source(mach, &src[0], &inst->Src[0], chan, src_datatype);
1899 fetch_source(mach, &src[1], &inst->Src[1], chan, src_datatype);
1900 fetch_source(mach, &src[2], &inst->Src[2], chan, src_datatype);
1901 op(&dst.xyzw[chan], src);
1902 }
1903 }
1904 for (chan = 0; chan < NUM_CHANNELS; chan++) {
1905 if (inst->Dst[0].Register.WriteMask & (1 << chan)) {
1906 store_dest(mach, &dst.xyzw[chan], &inst->Dst[0], inst, chan, dst_datatype);
1907 }
1908 }
1909 }
1910
1911 static void
1912 exec_break(struct tgsi_exec_machine *mach)
1913 {
1914 if (mach->BreakType == TGSI_EXEC_BREAK_INSIDE_LOOP) {
1915 /* turn off loop channels for each enabled exec channel */
1916 mach->LoopMask &= ~mach->ExecMask;
1917 /* Todo: if mach->LoopMask == 0, jump to end of loop */
1918 UPDATE_EXEC_MASK(mach);
1919 } else {
1920 assert(mach->BreakType == TGSI_EXEC_BREAK_INSIDE_SWITCH);
1921
1922 mach->Switch.mask = 0x0;
1923
1924 UPDATE_EXEC_MASK(mach);
1925 }
1926 }
1927
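/*
 * SWITCH/CASE/DEFAULT/ENDSWITCH operate per quad channel: SWITCH saves the
 * enclosing switch state and clears Switch.mask, each CASE enables the
 * channels whose selector matches (limited to the enclosing mask), DEFAULT
 * enables the channels no CASE matched, and ENDSWITCH restores the saved
 * state.
 */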
1928 static void
1929 exec_switch(struct tgsi_exec_machine *mach,
1930 const struct tgsi_full_instruction *inst)
1931 {
1932 assert(mach->SwitchStackTop < TGSI_EXEC_MAX_SWITCH_NESTING);
1933 assert(mach->BreakStackTop < TGSI_EXEC_MAX_BREAK_STACK);
1934
1935 mach->SwitchStack[mach->SwitchStackTop++] = mach->Switch;
1936 fetch_source(mach, &mach->Switch.selector, &inst->Src[0], CHAN_X, TGSI_EXEC_DATA_UINT);
1937 mach->Switch.mask = 0x0;
1938 mach->Switch.defaultMask = 0x0;
1939
1940 mach->BreakStack[mach->BreakStackTop++] = mach->BreakType;
1941 mach->BreakType = TGSI_EXEC_BREAK_INSIDE_SWITCH;
1942
1943 UPDATE_EXEC_MASK(mach);
1944 }
1945
1946 static void
1947 exec_case(struct tgsi_exec_machine *mach,
1948 const struct tgsi_full_instruction *inst)
1949 {
1950 uint prevMask = mach->SwitchStack[mach->SwitchStackTop - 1].mask;
1951 union tgsi_exec_channel src;
1952 uint mask = 0;
1953
1954 fetch_source(mach, &src, &inst->Src[0], CHAN_X, TGSI_EXEC_DATA_UINT);
1955
1956 if (mach->Switch.selector.u[0] == src.u[0]) {
1957 mask |= 0x1;
1958 }
1959 if (mach->Switch.selector.u[1] == src.u[1]) {
1960 mask |= 0x2;
1961 }
1962 if (mach->Switch.selector.u[2] == src.u[2]) {
1963 mask |= 0x4;
1964 }
1965 if (mach->Switch.selector.u[3] == src.u[3]) {
1966 mask |= 0x8;
1967 }
1968
1969 mach->Switch.defaultMask |= mask;
1970
1971 mach->Switch.mask |= mask & prevMask;
1972
1973 UPDATE_EXEC_MASK(mach);
1974 }
1975
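/* DEFAULT: enable the channels that no preceding CASE matched (defaultMask
 * accumulates every CASE hit), limited to the channels that were live when
 * the SWITCH was entered.
 */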
1976 static void
1977 exec_default(struct tgsi_exec_machine *mach)
1978 {
1979 uint prevMask = mach->SwitchStack[mach->SwitchStackTop - 1].mask;
1980
1981 mach->Switch.mask |= ~mach->Switch.defaultMask & prevMask;
1982
1983 UPDATE_EXEC_MASK(mach);
1984 }
1985
1986 static void
1987 exec_endswitch(struct tgsi_exec_machine *mach)
1988 {
1989 mach->Switch = mach->SwitchStack[--mach->SwitchStackTop];
1990 mach->BreakType = mach->BreakStack[--mach->BreakStackTop];
1991
1992 UPDATE_EXEC_MASK(mach);
1993 }
1994
1995 static void
1996 micro_i2f(union tgsi_exec_channel *dst,
1997 const union tgsi_exec_channel *src)
1998 {
1999 dst->f[0] = (float)src->i[0];
2000 dst->f[1] = (float)src->i[1];
2001 dst->f[2] = (float)src->i[2];
2002 dst->f[3] = (float)src->i[3];
2003 }
2004
2005 static void
2006 micro_not(union tgsi_exec_channel *dst,
2007 const union tgsi_exec_channel *src)
2008 {
2009 dst->u[0] = ~src->u[0];
2010 dst->u[1] = ~src->u[1];
2011 dst->u[2] = ~src->u[2];
2012 dst->u[3] = ~src->u[3];
2013 }
2014
2015 static void
2016 micro_shl(union tgsi_exec_channel *dst,
2017 const union tgsi_exec_channel *src)
2018 {
2019 dst->u[0] = src[0].u[0] << src[1].u[0];
2020 dst->u[1] = src[0].u[1] << src[1].u[1];
2021 dst->u[2] = src[0].u[2] << src[1].u[2];
2022 dst->u[3] = src[0].u[3] << src[1].u[3];
2023 }
2024
2025 static void
2026 micro_and(union tgsi_exec_channel *dst,
2027 const union tgsi_exec_channel *src)
2028 {
2029 dst->u[0] = src[0].u[0] & src[1].u[0];
2030 dst->u[1] = src[0].u[1] & src[1].u[1];
2031 dst->u[2] = src[0].u[2] & src[1].u[2];
2032 dst->u[3] = src[0].u[3] & src[1].u[3];
2033 }
2034
2035 static void
2036 micro_or(union tgsi_exec_channel *dst,
2037 const union tgsi_exec_channel *src)
2038 {
2039 dst->u[0] = src[0].u[0] | src[1].u[0];
2040 dst->u[1] = src[0].u[1] | src[1].u[1];
2041 dst->u[2] = src[0].u[2] | src[1].u[2];
2042 dst->u[3] = src[0].u[3] | src[1].u[3];
2043 }
2044
2045 static void
2046 micro_xor(union tgsi_exec_channel *dst,
2047 const union tgsi_exec_channel *src)
2048 {
2049 dst->u[0] = src[0].u[0] ^ src[1].u[0];
2050 dst->u[1] = src[0].u[1] ^ src[1].u[1];
2051 dst->u[2] = src[0].u[2] ^ src[1].u[2];
2052 dst->u[3] = src[0].u[3] ^ src[1].u[3];
2053 }
2054
2055 static void
2056 micro_f2i(union tgsi_exec_channel *dst,
2057 const union tgsi_exec_channel *src)
2058 {
2059 dst->i[0] = (int)src->f[0];
2060 dst->i[1] = (int)src->f[1];
2061 dst->i[2] = (int)src->f[2];
2062 dst->i[3] = (int)src->f[3];
2063 }
2064
2065 static void
2066 micro_idiv(union tgsi_exec_channel *dst,
2067 const union tgsi_exec_channel *src)
2068 {
2069 dst->i[0] = src[0].i[0] / src[1].i[0];
2070 dst->i[1] = src[0].i[1] / src[1].i[1];
2071 dst->i[2] = src[0].i[2] / src[1].i[2];
2072 dst->i[3] = src[0].i[3] / src[1].i[3];
2073 }
2074
2075 static void
2076 micro_imax(union tgsi_exec_channel *dst,
2077 const union tgsi_exec_channel *src)
2078 {
2079 dst->i[0] = src[0].i[0] > src[1].i[0] ? src[0].i[0] : src[1].i[0];
2080 dst->i[1] = src[0].i[1] > src[1].i[1] ? src[0].i[1] : src[1].i[1];
2081 dst->i[2] = src[0].i[2] > src[1].i[2] ? src[0].i[2] : src[1].i[2];
2082 dst->i[3] = src[0].i[3] > src[1].i[3] ? src[0].i[3] : src[1].i[3];
2083 }
2084
2085 static void
2086 micro_imin(union tgsi_exec_channel *dst,
2087 const union tgsi_exec_channel *src)
2088 {
2089 dst->i[0] = src[0].i[0] < src[1].i[0] ? src[0].i[0] : src[1].i[0];
2090 dst->i[1] = src[0].i[1] < src[1].i[1] ? src[0].i[1] : src[1].i[1];
2091 dst->i[2] = src[0].i[2] < src[1].i[2] ? src[0].i[2] : src[1].i[2];
2092 dst->i[3] = src[0].i[3] < src[1].i[3] ? src[0].i[3] : src[1].i[3];
2093 }
2094
2095 static void
2096 micro_isge(union tgsi_exec_channel *dst,
2097 const union tgsi_exec_channel *src)
2098 {
2099 dst->i[0] = src[0].i[0] >= src[1].i[0] ? -1 : 0;
2100 dst->i[1] = src[0].i[1] >= src[1].i[1] ? -1 : 0;
2101 dst->i[2] = src[0].i[2] >= src[1].i[2] ? -1 : 0;
2102 dst->i[3] = src[0].i[3] >= src[1].i[3] ? -1 : 0;
2103 }
2104
2105 static void
2106 micro_ishr(union tgsi_exec_channel *dst,
2107 const union tgsi_exec_channel *src)
2108 {
2109 dst->i[0] = src[0].i[0] >> src[1].i[0];
2110 dst->i[1] = src[0].i[1] >> src[1].i[1];
2111 dst->i[2] = src[0].i[2] >> src[1].i[2];
2112 dst->i[3] = src[0].i[3] >> src[1].i[3];
2113 }
2114
2115 static void
2116 micro_islt(union tgsi_exec_channel *dst,
2117 const union tgsi_exec_channel *src)
2118 {
2119 dst->i[0] = src[0].i[0] < src[1].i[0] ? -1 : 0;
2120 dst->i[1] = src[0].i[1] < src[1].i[1] ? -1 : 0;
2121 dst->i[2] = src[0].i[2] < src[1].i[2] ? -1 : 0;
2122 dst->i[3] = src[0].i[3] < src[1].i[3] ? -1 : 0;
2123 }
2124
2125 static void
2126 micro_f2u(union tgsi_exec_channel *dst,
2127 const union tgsi_exec_channel *src)
2128 {
2129 dst->u[0] = (uint)src->f[0];
2130 dst->u[1] = (uint)src->f[1];
2131 dst->u[2] = (uint)src->f[2];
2132 dst->u[3] = (uint)src->f[3];
2133 }
2134
2135 static void
2136 micro_u2f(union tgsi_exec_channel *dst,
2137 const union tgsi_exec_channel *src)
2138 {
2139 dst->f[0] = (float)src->u[0];
2140 dst->f[1] = (float)src->u[1];
2141 dst->f[2] = (float)src->u[2];
2142 dst->f[3] = (float)src->u[3];
2143 }
2144
2145 static void
2146 micro_uadd(union tgsi_exec_channel *dst,
2147 const union tgsi_exec_channel *src)
2148 {
2149 dst->u[0] = src[0].u[0] + src[1].u[0];
2150 dst->u[1] = src[0].u[1] + src[1].u[1];
2151 dst->u[2] = src[0].u[2] + src[1].u[2];
2152 dst->u[3] = src[0].u[3] + src[1].u[3];
2153 }
2154
2155 static void
2156 micro_udiv(union tgsi_exec_channel *dst,
2157 const union tgsi_exec_channel *src)
2158 {
2159 dst->u[0] = src[0].u[0] / src[1].u[0];
2160 dst->u[1] = src[0].u[1] / src[1].u[1];
2161 dst->u[2] = src[0].u[2] / src[1].u[2];
2162 dst->u[3] = src[0].u[3] / src[1].u[3];
2163 }
2164
2165 static void
2166 micro_umad(union tgsi_exec_channel *dst,
2167 const union tgsi_exec_channel *src)
2168 {
2169 dst->u[0] = src[0].u[0] * src[1].u[0] + src[2].u[0];
2170 dst->u[1] = src[0].u[1] * src[1].u[1] + src[2].u[1];
2171 dst->u[2] = src[0].u[2] * src[1].u[2] + src[2].u[2];
2172 dst->u[3] = src[0].u[3] * src[1].u[3] + src[2].u[3];
2173 }
2174
2175 static void
2176 micro_umax(union tgsi_exec_channel *dst,
2177 const union tgsi_exec_channel *src)
2178 {
2179 dst->u[0] = src[0].u[0] > src[1].u[0] ? src[0].u[0] : src[1].u[0];
2180 dst->u[1] = src[0].u[1] > src[1].u[1] ? src[0].u[1] : src[1].u[1];
2181 dst->u[2] = src[0].u[2] > src[1].u[2] ? src[0].u[2] : src[1].u[2];
2182 dst->u[3] = src[0].u[3] > src[1].u[3] ? src[0].u[3] : src[1].u[3];
2183 }
2184
2185 static void
2186 micro_umin(union tgsi_exec_channel *dst,
2187 const union tgsi_exec_channel *src)
2188 {
2189 dst->u[0] = src[0].u[0] < src[1].u[0] ? src[0].u[0] : src[1].u[0];
2190 dst->u[1] = src[0].u[1] < src[1].u[1] ? src[0].u[1] : src[1].u[1];
2191 dst->u[2] = src[0].u[2] < src[1].u[2] ? src[0].u[2] : src[1].u[2];
2192 dst->u[3] = src[0].u[3] < src[1].u[3] ? src[0].u[3] : src[1].u[3];
2193 }
2194
2195 static void
2196 micro_umod(union tgsi_exec_channel *dst,
2197 const union tgsi_exec_channel *src)
2198 {
2199 dst->u[0] = src[0].u[0] % src[1].u[0];
2200 dst->u[1] = src[0].u[1] % src[1].u[1];
2201 dst->u[2] = src[0].u[2] % src[1].u[2];
2202 dst->u[3] = src[0].u[3] % src[1].u[3];
2203 }
2204
2205 static void
2206 micro_umul(union tgsi_exec_channel *dst,
2207 const union tgsi_exec_channel *src)
2208 {
2209 dst->u[0] = src[0].u[0] * src[1].u[0];
2210 dst->u[1] = src[0].u[1] * src[1].u[1];
2211 dst->u[2] = src[0].u[2] * src[1].u[2];
2212 dst->u[3] = src[0].u[3] * src[1].u[3];
2213 }
2214
2215 static void
2216 micro_useq(union tgsi_exec_channel *dst,
2217 const union tgsi_exec_channel *src)
2218 {
2219 dst->u[0] = src[0].u[0] == src[1].u[0] ? ~0 : 0;
2220 dst->u[1] = src[0].u[1] == src[1].u[1] ? ~0 : 0;
2221 dst->u[2] = src[0].u[2] == src[1].u[2] ? ~0 : 0;
2222 dst->u[3] = src[0].u[3] == src[1].u[3] ? ~0 : 0;
2223 }
2224
2225 static void
2226 micro_usge(union tgsi_exec_channel *dst,
2227 const union tgsi_exec_channel *src)
2228 {
2229 dst->u[0] = src[0].u[0] >= src[1].u[0] ? ~0 : 0;
2230 dst->u[1] = src[0].u[1] >= src[1].u[1] ? ~0 : 0;
2231 dst->u[2] = src[0].u[2] >= src[1].u[2] ? ~0 : 0;
2232 dst->u[3] = src[0].u[3] >= src[1].u[3] ? ~0 : 0;
2233 }
2234
2235 static void
2236 micro_ushr(union tgsi_exec_channel *dst,
2237 const union tgsi_exec_channel *src)
2238 {
2239 dst->u[0] = src[0].u[0] >> src[1].u[0];
2240 dst->u[1] = src[0].u[1] >> src[1].u[1];
2241 dst->u[2] = src[0].u[2] >> src[1].u[2];
2242 dst->u[3] = src[0].u[3] >> src[1].u[3];
2243 }
2244
2245 static void
2246 micro_uslt(union tgsi_exec_channel *dst,
2247 const union tgsi_exec_channel *src)
2248 {
2249 dst->u[0] = src[0].u[0] < src[1].u[0] ? ~0 : 0;
2250 dst->u[1] = src[0].u[1] < src[1].u[1] ? ~0 : 0;
2251 dst->u[2] = src[0].u[2] < src[1].u[2] ? ~0 : 0;
2252 dst->u[3] = src[0].u[3] < src[1].u[3] ? ~0 : 0;
2253 }
2254
2255 static void
2256 micro_usne(union tgsi_exec_channel *dst,
2257 const union tgsi_exec_channel *src)
2258 {
2259 dst->u[0] = src[0].u[0] != src[1].u[0] ? ~0 : 0;
2260 dst->u[1] = src[0].u[1] != src[1].u[1] ? ~0 : 0;
2261 dst->u[2] = src[0].u[2] != src[1].u[2] ? ~0 : 0;
2262 dst->u[3] = src[0].u[3] != src[1].u[3] ? ~0 : 0;
2263 }
2264
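/* Execute a single TGSI instruction for the current quad.  The program
 * counter is pre-incremented here; flow-control opcodes below may then
 * overwrite *pc (CAL, ENDLOOP/ENDFOR) or set it to -1 to halt execution
 * (END, or RET from main).
 */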
2265 static void
2266 exec_instruction(
2267 struct tgsi_exec_machine *mach,
2268 const struct tgsi_full_instruction *inst,
2269 int *pc )
2270 {
2271 uint chan_index;
2272 union tgsi_exec_channel r[10];
2273 union tgsi_exec_channel d[8];
2274
2275 (*pc)++;
2276
2277 switch (inst->Instruction.Opcode) {
2278 case TGSI_OPCODE_ARL:
2279 exec_vector_unary(mach, inst, micro_arl, TGSI_EXEC_DATA_INT, TGSI_EXEC_DATA_FLOAT);
2280 break;
2281
2282 case TGSI_OPCODE_MOV:
2283 exec_vector_unary(mach, inst, micro_mov, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_FLOAT);
2284 break;
2285
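   /* LIT: dst.x = 1, dst.y = max(src.x, 0), dst.w = 1, and
    * dst.z = (src.x > 0) ? pow(max(src.y, 0), clamp(src.w, -128, 128)) : 0,
    * as implemented by the micro ops below.
    */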
2286 case TGSI_OPCODE_LIT:
2287 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y ) || IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
2288 FETCH( &r[0], 0, CHAN_X );
2289 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
2290 micro_max(&d[CHAN_Y], &r[0], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C]);
2291 }
2292
2293 if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
2294 FETCH( &r[1], 0, CHAN_Y );
2295 micro_max( &r[1], &r[1], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C] );
2296
2297 FETCH( &r[2], 0, CHAN_W );
2298 micro_min( &r[2], &r[2], &mach->Temps[TEMP_128_I].xyzw[TEMP_128_C] );
2299 micro_max( &r[2], &r[2], &mach->Temps[TEMP_M128_I].xyzw[TEMP_M128_C] );
2300 micro_pow( &r[1], &r[1], &r[2] );
2301 micro_lt(&d[CHAN_Z], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], &r[0], &r[1], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C]);
2302 }
2303
2304 if (IS_CHANNEL_ENABLED(*inst, CHAN_Y)) {
2305 STORE(&d[CHAN_Y], 0, CHAN_Y);
2306 }
2307 if (IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
2308 STORE(&d[CHAN_Z], 0, CHAN_Z);
2309 }
2310 }
2311 if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
2312 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_X );
2313 }
2314 if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
2315 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
2316 }
2317 break;
2318
2319 case TGSI_OPCODE_RCP:
2320 exec_scalar_unary(mach, inst, micro_rcp, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2321 break;
2322
2323 case TGSI_OPCODE_RSQ:
2324 exec_scalar_unary(mach, inst, micro_rsq, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2325 break;
2326
2327 case TGSI_OPCODE_EXP:
2328 FETCH( &r[0], 0, CHAN_X );
2329 micro_flr( &r[1], &r[0] ); /* r1 = floor(r0) */
2330 if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
2331 micro_exp2( &r[2], &r[1] ); /* r2 = 2 ^ r1 */
2332 STORE( &r[2], 0, CHAN_X ); /* store r2 */
2333 }
2334 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
2335 micro_sub( &r[2], &r[0], &r[1] ); /* r2 = r0 - r1 */
2336 STORE( &r[2], 0, CHAN_Y ); /* store r2 */
2337 }
2338 if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
2339 micro_exp2( &r[2], &r[0] ); /* r2 = 2 ^ r0 */
2340 STORE( &r[2], 0, CHAN_Z ); /* store r2 */
2341 }
2342 if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
2343 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
2344 }
2345 break;
2346
2347 case TGSI_OPCODE_LOG:
2348 FETCH( &r[0], 0, CHAN_X );
2349 micro_abs( &r[2], &r[0] ); /* r2 = abs(r0) */
2350 micro_lg2( &r[1], &r[2] ); /* r1 = lg2(r2) */
2351 micro_flr( &r[0], &r[1] ); /* r0 = floor(r1) */
2352 if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
2353 STORE( &r[0], 0, CHAN_X );
2354 }
2355 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
2356 micro_exp2( &r[0], &r[0] ); /* r0 = 2 ^ r0 */
2357 micro_div( &r[0], &r[2], &r[0] ); /* r0 = r2 / r0 */
2358 STORE( &r[0], 0, CHAN_Y );
2359 }
2360 if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
2361 STORE( &r[1], 0, CHAN_Z );
2362 }
2363 if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
2364 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
2365 }
2366 break;
2367
2368 case TGSI_OPCODE_MUL:
2369 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2370 FETCH(&r[0], 0, chan_index);
2371 FETCH(&r[1], 1, chan_index);
2372 micro_mul(&d[chan_index], &r[0], &r[1]);
2373 }
2374 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2375 STORE(&d[chan_index], 0, chan_index);
2376 }
2377 break;
2378
2379 case TGSI_OPCODE_ADD:
2380 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
2381 FETCH( &r[0], 0, chan_index );
2382 FETCH( &r[1], 1, chan_index );
2383 micro_add(&d[chan_index], &r[0], &r[1]);
2384 }
2385 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2386 STORE(&d[chan_index], 0, chan_index);
2387 }
2388 break;
2389
2390 case TGSI_OPCODE_DP3:
2391 /* TGSI_OPCODE_DOT3 */
2392 FETCH( &r[0], 0, CHAN_X );
2393 FETCH( &r[1], 1, CHAN_X );
2394 micro_mul( &r[0], &r[0], &r[1] );
2395
2396 FETCH( &r[1], 0, CHAN_Y );
2397 FETCH( &r[2], 1, CHAN_Y );
2398 micro_mul( &r[1], &r[1], &r[2] );
2399 micro_add( &r[0], &r[0], &r[1] );
2400
2401 FETCH( &r[1], 0, CHAN_Z );
2402 FETCH( &r[2], 1, CHAN_Z );
2403 micro_mul( &r[1], &r[1], &r[2] );
2404 micro_add( &r[0], &r[0], &r[1] );
2405
2406 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
2407 STORE( &r[0], 0, chan_index );
2408 }
2409 break;
2410
2411 case TGSI_OPCODE_DP4:
2412 /* TGSI_OPCODE_DOT4 */
2413 FETCH(&r[0], 0, CHAN_X);
2414 FETCH(&r[1], 1, CHAN_X);
2415
2416 micro_mul( &r[0], &r[0], &r[1] );
2417
2418 FETCH(&r[1], 0, CHAN_Y);
2419 FETCH(&r[2], 1, CHAN_Y);
2420
2421 micro_mul( &r[1], &r[1], &r[2] );
2422 micro_add( &r[0], &r[0], &r[1] );
2423
2424 FETCH(&r[1], 0, CHAN_Z);
2425 FETCH(&r[2], 1, CHAN_Z);
2426
2427 micro_mul( &r[1], &r[1], &r[2] );
2428 micro_add( &r[0], &r[0], &r[1] );
2429
2430 FETCH(&r[1], 0, CHAN_W);
2431 FETCH(&r[2], 1, CHAN_W);
2432
2433 micro_mul( &r[1], &r[1], &r[2] );
2434 micro_add( &r[0], &r[0], &r[1] );
2435
2436 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
2437 STORE( &r[0], 0, chan_index );
2438 }
2439 break;
2440
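   /* DST (distance vector): dst = (1, src0.y * src1.y, src0.z, src1.w). */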
2441 case TGSI_OPCODE_DST:
2442 if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
2443 FETCH( &r[0], 0, CHAN_Y );
2444 FETCH( &r[1], 1, CHAN_Y);
2445 micro_mul(&d[CHAN_Y], &r[0], &r[1]);
2446 }
2447 if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
2448 FETCH(&d[CHAN_Z], 0, CHAN_Z);
2449 }
2450 if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
2451 FETCH(&d[CHAN_W], 1, CHAN_W);
2452 }
2453
2454 if (IS_CHANNEL_ENABLED(*inst, CHAN_X)) {
2455 STORE(&mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_X);
2456 }
2457 if (IS_CHANNEL_ENABLED(*inst, CHAN_Y)) {
2458 STORE(&d[CHAN_Y], 0, CHAN_Y);
2459 }
2460 if (IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
2461 STORE(&d[CHAN_Z], 0, CHAN_Z);
2462 }
2463 if (IS_CHANNEL_ENABLED(*inst, CHAN_W)) {
2464 STORE(&d[CHAN_W], 0, CHAN_W);
2465 }
2466 break;
2467
2468 case TGSI_OPCODE_MIN:
2469 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
2470 FETCH(&r[0], 0, chan_index);
2471 FETCH(&r[1], 1, chan_index);
2472
2473 /* XXX use micro_min()?? */
2474 micro_lt(&d[chan_index], &r[0], &r[1], &r[0], &r[1]);
2475 }
2476 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2477 STORE(&d[chan_index], 0, chan_index);
2478 }
2479 break;
2480
2481 case TGSI_OPCODE_MAX:
2482 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
2483 FETCH(&r[0], 0, chan_index);
2484 FETCH(&r[1], 1, chan_index);
2485
2486 /* XXX use micro_max()?? */
2487 micro_lt(&d[chan_index], &r[0], &r[1], &r[1], &r[0] );
2488 }
2489 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2490 STORE(&d[chan_index], 0, chan_index);
2491 }
2492 break;
2493
2494 case TGSI_OPCODE_SLT:
2495 exec_vector_binary(mach, inst, micro_slt, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2496 break;
2497
2498 case TGSI_OPCODE_SGE:
2499 exec_vector_binary(mach, inst, micro_sge, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2500 break;
2501
2502 case TGSI_OPCODE_MAD:
2503 exec_vector_trinary(mach, inst, micro_mad, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2504 break;
2505
2506 case TGSI_OPCODE_SUB:
2507 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
2508 FETCH(&r[0], 0, chan_index);
2509 FETCH(&r[1], 1, chan_index);
2510 micro_sub(&d[chan_index], &r[0], &r[1]);
2511 }
2512 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2513 STORE(&d[chan_index], 0, chan_index);
2514 }
2515 break;
2516
2517 case TGSI_OPCODE_LRP:
2518 exec_vector_trinary(mach, inst, micro_lrp, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2519 break;
2520
2521 case TGSI_OPCODE_CND:
2522 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2523 FETCH(&r[0], 0, chan_index);
2524 FETCH(&r[1], 1, chan_index);
2525 FETCH(&r[2], 2, chan_index);
2526 micro_lt(&d[chan_index], &mach->Temps[TEMP_HALF_I].xyzw[TEMP_HALF_C], &r[2], &r[0], &r[1]);
2527 }
2528 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2529 STORE(&d[chan_index], 0, chan_index);
2530 }
2531 break;
2532
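   /* DP2A: two-component dot product plus a scalar,
    * dst = src0.x * src1.x + src0.y * src1.y + src2.x,
    * replicated to all enabled channels.
    */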
2533 case TGSI_OPCODE_DP2A:
2534 FETCH( &r[0], 0, CHAN_X );
2535 FETCH( &r[1], 1, CHAN_X );
2536 micro_mul( &r[0], &r[0], &r[1] );
2537
2538 FETCH( &r[1], 0, CHAN_Y );
2539 FETCH( &r[2], 1, CHAN_Y );
2540 micro_mul( &r[1], &r[1], &r[2] );
2541 micro_add( &r[0], &r[0], &r[1] );
2542
2543 FETCH( &r[2], 2, CHAN_X );
2544 micro_add( &r[0], &r[0], &r[2] );
2545
2546 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
2547 STORE( &r[0], 0, chan_index );
2548 }
2549 break;
2550
2551 case TGSI_OPCODE_FRC:
2552 exec_vector_unary(mach, inst, micro_frc, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2553 break;
2554
2555 case TGSI_OPCODE_CLAMP:
2556 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2557 FETCH(&r[0], 0, chan_index);
2558 FETCH(&r[1], 1, chan_index);
2559 micro_max(&r[0], &r[0], &r[1]);
2560 FETCH(&r[1], 2, chan_index);
2561 micro_min(&d[chan_index], &r[0], &r[1]);
2562 }
2563 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2564 STORE(&d[chan_index], 0, chan_index);
2565 }
2566 break;
2567
2568 case TGSI_OPCODE_FLR:
2569 exec_vector_unary(mach, inst, micro_flr, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2570 break;
2571
2572 case TGSI_OPCODE_ROUND:
2573 exec_vector_unary(mach, inst, micro_rnd, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2574 break;
2575
2576 case TGSI_OPCODE_EX2:
2577 exec_scalar_unary(mach, inst, micro_exp2, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2578 break;
2579
2580 case TGSI_OPCODE_LG2:
2581 exec_scalar_unary(mach, inst, micro_lg2, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2582 break;
2583
2584 case TGSI_OPCODE_POW:
2585 FETCH(&r[0], 0, CHAN_X);
2586 FETCH(&r[1], 1, CHAN_X);
2587
2588 micro_pow( &r[0], &r[0], &r[1] );
2589
2590 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
2591 STORE( &r[0], 0, chan_index );
2592 }
2593 break;
2594
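   /* XPD: cross product of src0.xyz and src1.xyz written to dst.xyz,
    * with dst.w = 1.
    */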
2595 case TGSI_OPCODE_XPD:
2596 FETCH(&r[0], 0, CHAN_Y);
2597 FETCH(&r[1], 1, CHAN_Z);
2598
2599 micro_mul( &r[2], &r[0], &r[1] );
2600
2601 FETCH(&r[3], 0, CHAN_Z);
2602 FETCH(&r[4], 1, CHAN_Y);
2603
2604 micro_mul( &r[5], &r[3], &r[4] );
2605 micro_sub(&d[CHAN_X], &r[2], &r[5]);
2606
2607 FETCH(&r[2], 1, CHAN_X);
2608
2609 micro_mul( &r[3], &r[3], &r[2] );
2610
2611 FETCH(&r[5], 0, CHAN_X);
2612
2613 micro_mul( &r[1], &r[1], &r[5] );
2614 micro_sub(&d[CHAN_Y], &r[3], &r[1]);
2615
2616 micro_mul( &r[5], &r[5], &r[4] );
2617 micro_mul( &r[0], &r[0], &r[2] );
2618 micro_sub(&d[CHAN_Z], &r[5], &r[0]);
2619
2620 if (IS_CHANNEL_ENABLED(*inst, CHAN_X)) {
2621 STORE(&d[CHAN_X], 0, CHAN_X);
2622 }
2623 if (IS_CHANNEL_ENABLED(*inst, CHAN_Y)) {
2624 STORE(&d[CHAN_Y], 0, CHAN_Y);
2625 }
2626 if (IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
2627 STORE(&d[CHAN_Z], 0, CHAN_Z);
2628 }
2629 if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
2630 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
2631 }
2632 break;
2633
2634 case TGSI_OPCODE_ABS:
2635 exec_vector_unary(mach, inst, micro_abs, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2636 break;
2637
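   /* RCC: reciprocal of src.x, clamped to a finite range by
    * micro_float_clamp(), replicated to all enabled channels.
    */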
2638 case TGSI_OPCODE_RCC:
2639 FETCH(&r[0], 0, CHAN_X);
2640 micro_div(&r[0], &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], &r[0]);
2641 micro_float_clamp(&r[0], &r[0]);
2642 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2643 STORE(&r[0], 0, chan_index);
2644 }
2645 break;
2646
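   /* DPH: homogeneous dot product,
    * dst = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z + src1.w,
    * replicated to all enabled channels.
    */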
2647 case TGSI_OPCODE_DPH:
2648 FETCH(&r[0], 0, CHAN_X);
2649 FETCH(&r[1], 1, CHAN_X);
2650
2651 micro_mul( &r[0], &r[0], &r[1] );
2652
2653 FETCH(&r[1], 0, CHAN_Y);
2654 FETCH(&r[2], 1, CHAN_Y);
2655
2656 micro_mul( &r[1], &r[1], &r[2] );
2657 micro_add( &r[0], &r[0], &r[1] );
2658
2659 FETCH(&r[1], 0, CHAN_Z);
2660 FETCH(&r[2], 1, CHAN_Z);
2661
2662 micro_mul( &r[1], &r[1], &r[2] );
2663 micro_add( &r[0], &r[0], &r[1] );
2664
2665 FETCH(&r[1], 1, CHAN_W);
2666
2667 micro_add( &r[0], &r[0], &r[1] );
2668
2669 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
2670 STORE( &r[0], 0, chan_index );
2671 }
2672 break;
2673
2674 case TGSI_OPCODE_COS:
2675 exec_scalar_unary(mach, inst, micro_cos, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2676 break;
2677
2678 case TGSI_OPCODE_DDX:
2679 exec_vector_unary(mach, inst, micro_ddx, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2680 break;
2681
2682 case TGSI_OPCODE_DDY:
2683 exec_vector_unary(mach, inst, micro_ddy, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2684 break;
2685
2686 case TGSI_OPCODE_KILP:
2687 exec_kilp (mach, inst);
2688 break;
2689
2690 case TGSI_OPCODE_KIL:
2691 exec_kil (mach, inst);
2692 break;
2693
2694 case TGSI_OPCODE_PK2H:
2695 assert (0);
2696 break;
2697
2698 case TGSI_OPCODE_PK2US:
2699 assert (0);
2700 break;
2701
2702 case TGSI_OPCODE_PK4B:
2703 assert (0);
2704 break;
2705
2706 case TGSI_OPCODE_PK4UB:
2707 assert (0);
2708 break;
2709
2710 case TGSI_OPCODE_RFL:
2711 if (IS_CHANNEL_ENABLED(*inst, CHAN_X) ||
2712 IS_CHANNEL_ENABLED(*inst, CHAN_Y) ||
2713 IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
2714 /* r0 = dp3(src0, src0) */
2715 FETCH(&r[2], 0, CHAN_X);
2716 micro_mul(&r[0], &r[2], &r[2]);
2717 FETCH(&r[4], 0, CHAN_Y);
2718 micro_mul(&r[8], &r[4], &r[4]);
2719 micro_add(&r[0], &r[0], &r[8]);
2720 FETCH(&r[6], 0, CHAN_Z);
2721 micro_mul(&r[8], &r[6], &r[6]);
2722 micro_add(&r[0], &r[0], &r[8]);
2723
2724 /* r1 = dp3(src0, src1) */
2725 FETCH(&r[3], 1, CHAN_X);
2726 micro_mul(&r[1], &r[2], &r[3]);
2727 FETCH(&r[5], 1, CHAN_Y);
2728 micro_mul(&r[8], &r[4], &r[5]);
2729 micro_add(&r[1], &r[1], &r[8]);
2730 FETCH(&r[7], 1, CHAN_Z);
2731 micro_mul(&r[8], &r[6], &r[7]);
2732 micro_add(&r[1], &r[1], &r[8]);
2733
2734 /* r1 = 2 * r1 / r0 */
2735 micro_add(&r[1], &r[1], &r[1]);
2736 micro_div(&r[1], &r[1], &r[0]);
2737
2738 if (IS_CHANNEL_ENABLED(*inst, CHAN_X)) {
2739 micro_mul(&r[2], &r[2], &r[1]);
2740 micro_sub(&r[2], &r[2], &r[3]);
2741 STORE(&r[2], 0, CHAN_X);
2742 }
2743 if (IS_CHANNEL_ENABLED(*inst, CHAN_Y)) {
2744 micro_mul(&r[4], &r[4], &r[1]);
2745 micro_sub(&r[4], &r[4], &r[5]);
2746 STORE(&r[4], 0, CHAN_Y);
2747 }
2748 if (IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
2749 micro_mul(&r[6], &r[6], &r[1]);
2750 micro_sub(&r[6], &r[6], &r[7]);
2751 STORE(&r[6], 0, CHAN_Z);
2752 }
2753 }
2754 if (IS_CHANNEL_ENABLED(*inst, CHAN_W)) {
2755 STORE(&mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W);
2756 }
2757 break;
2758
2759 case TGSI_OPCODE_SEQ:
2760 exec_vector_binary(mach, inst, micro_seq, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2761 break;
2762
2763 case TGSI_OPCODE_SFL:
2764 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2765 STORE(&mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], 0, chan_index);
2766 }
2767 break;
2768
2769 case TGSI_OPCODE_SGT:
2770 exec_vector_binary(mach, inst, micro_sgt, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2771 break;
2772
2773 case TGSI_OPCODE_SIN:
2774 exec_scalar_unary(mach, inst, micro_sin, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2775 break;
2776
2777 case TGSI_OPCODE_SLE:
2778 exec_vector_binary(mach, inst, micro_sle, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2779 break;
2780
2781 case TGSI_OPCODE_SNE:
2782 exec_vector_binary(mach, inst, micro_sne, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2783 break;
2784
2785 case TGSI_OPCODE_STR:
2786 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2787 STORE(&mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, chan_index);
2788 }
2789 break;
2790
2791 case TGSI_OPCODE_TEX:
2792 /* simple texture lookup */
2793 /* src[0] = texcoord */
2794 /* src[1] = sampler unit */
2795 exec_tex(mach, inst, TEX_MODIFIER_NONE);
2796 break;
2797
2798 case TGSI_OPCODE_TXB:
2799 /* Texture lookup with lod bias */
2800 /* src[0] = texcoord (src[0].w = LOD bias) */
2801 /* src[1] = sampler unit */
2802 exec_tex(mach, inst, TEX_MODIFIER_LOD_BIAS);
2803 break;
2804
2805 case TGSI_OPCODE_TXD:
2806       /* Texture lookup with explicit partial derivatives */
2807 /* src[0] = texcoord */
2808 /* src[1] = d[strq]/dx */
2809 /* src[2] = d[strq]/dy */
2810 /* src[3] = sampler unit */
2811 exec_txd(mach, inst);
2812 break;
2813
2814 case TGSI_OPCODE_TXL:
2815       /* Texture lookup with explicit LOD */
2816 /* src[0] = texcoord (src[0].w = LOD) */
2817 /* src[1] = sampler unit */
2818 exec_tex(mach, inst, TEX_MODIFIER_EXPLICIT_LOD);
2819 break;
2820
2821 case TGSI_OPCODE_TXP:
2822 /* Texture lookup with projection */
2823 /* src[0] = texcoord (src[0].w = projection) */
2824 /* src[1] = sampler unit */
2825 exec_tex(mach, inst, TEX_MODIFIER_PROJECTED);
2826 break;
2827
2828 case TGSI_OPCODE_UP2H:
2829 assert (0);
2830 break;
2831
2832 case TGSI_OPCODE_UP2US:
2833 assert (0);
2834 break;
2835
2836 case TGSI_OPCODE_UP4B:
2837 assert (0);
2838 break;
2839
2840 case TGSI_OPCODE_UP4UB:
2841 assert (0);
2842 break;
2843
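   /* X2D: 2D coordinate transform.  With src1.xy as the input point and
    * src2 as a 2x2 matrix:
    *   dst.x = src0.x + src1.x * src2.x + src1.y * src2.y
    *   dst.y = src0.y + src1.x * src2.z + src1.y * src2.w
    * dst.z and dst.w replicate dst.x and dst.y respectively.
    */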
2844 case TGSI_OPCODE_X2D:
2845 FETCH(&r[0], 1, CHAN_X);
2846 FETCH(&r[1], 1, CHAN_Y);
2847 if (IS_CHANNEL_ENABLED(*inst, CHAN_X) ||
2848 IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
2849 FETCH(&r[2], 2, CHAN_X);
2850 micro_mul(&r[2], &r[2], &r[0]);
2851 FETCH(&r[3], 2, CHAN_Y);
2852 micro_mul(&r[3], &r[3], &r[1]);
2853 micro_add(&r[2], &r[2], &r[3]);
2854 FETCH(&r[3], 0, CHAN_X);
2855 micro_add(&d[CHAN_X], &r[2], &r[3]);
2856
2857 }
2858 if (IS_CHANNEL_ENABLED(*inst, CHAN_Y) ||
2859 IS_CHANNEL_ENABLED(*inst, CHAN_W)) {
2860 FETCH(&r[2], 2, CHAN_Z);
2861 micro_mul(&r[2], &r[2], &r[0]);
2862 FETCH(&r[3], 2, CHAN_W);
2863 micro_mul(&r[3], &r[3], &r[1]);
2864 micro_add(&r[2], &r[2], &r[3]);
2865 FETCH(&r[3], 0, CHAN_Y);
2866 micro_add(&d[CHAN_Y], &r[2], &r[3]);
2867
2868 }
2869 if (IS_CHANNEL_ENABLED(*inst, CHAN_X)) {
2870 STORE(&d[CHAN_X], 0, CHAN_X);
2871 }
2872 if (IS_CHANNEL_ENABLED(*inst, CHAN_Y)) {
2873 STORE(&d[CHAN_Y], 0, CHAN_Y);
2874 }
2875 if (IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
2876 STORE(&d[CHAN_X], 0, CHAN_Z);
2877 }
2878 if (IS_CHANNEL_ENABLED(*inst, CHAN_W)) {
2879 STORE(&d[CHAN_Y], 0, CHAN_W);
2880 }
2881 break;
2882
2883 case TGSI_OPCODE_ARA:
2884 assert (0);
2885 break;
2886
2887 case TGSI_OPCODE_ARR:
2888 exec_vector_unary(mach, inst, micro_arr, TGSI_EXEC_DATA_INT, TGSI_EXEC_DATA_FLOAT);
2889 break;
2890
2891 case TGSI_OPCODE_BRA:
2892 assert (0);
2893 break;
2894
2895 case TGSI_OPCODE_CAL:
2896 /* skip the call if no execution channels are enabled */
2897 if (mach->ExecMask) {
2898 /* do the call */
2899
2900 /* First, record the depths of the execution stacks.
2901 * This is important for deeply nested/looped return statements.
2902 * We have to unwind the stacks by the correct amount. For a
2903 * real code generator, we could determine the number of entries
2904 * to pop off each stack with simple static analysis and avoid
2905 * implementing this data structure at run time.
2906 */
2907 mach->CallStack[mach->CallStackTop].CondStackTop = mach->CondStackTop;
2908 mach->CallStack[mach->CallStackTop].LoopStackTop = mach->LoopStackTop;
2909 mach->CallStack[mach->CallStackTop].ContStackTop = mach->ContStackTop;
2910 mach->CallStack[mach->CallStackTop].SwitchStackTop = mach->SwitchStackTop;
2911 mach->CallStack[mach->CallStackTop].BreakStackTop = mach->BreakStackTop;
2912 /* note that PC was already incremented above */
2913 mach->CallStack[mach->CallStackTop].ReturnAddr = *pc;
2914
2915 mach->CallStackTop++;
2916
2917          /* Second, push the Cond, Loop, Cont, Switch, Break and Func stacks */
2918 assert(mach->CondStackTop < TGSI_EXEC_MAX_COND_NESTING);
2919 assert(mach->LoopStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
2920 assert(mach->ContStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
2921 assert(mach->SwitchStackTop < TGSI_EXEC_MAX_SWITCH_NESTING);
2922 assert(mach->BreakStackTop < TGSI_EXEC_MAX_BREAK_STACK);
2923 assert(mach->FuncStackTop < TGSI_EXEC_MAX_CALL_NESTING);
2924
2925 mach->CondStack[mach->CondStackTop++] = mach->CondMask;
2926 mach->LoopStack[mach->LoopStackTop++] = mach->LoopMask;
2927 mach->ContStack[mach->ContStackTop++] = mach->ContMask;
2928 mach->SwitchStack[mach->SwitchStackTop++] = mach->Switch;
2929 mach->BreakStack[mach->BreakStackTop++] = mach->BreakType;
2930 mach->FuncStack[mach->FuncStackTop++] = mach->FuncMask;
2931
2932 /* Finally, jump to the subroutine */
2933 *pc = inst->Label.Label;
2934 }
2935 break;
2936
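   /* RET: a conditional return.  Returning channels are removed from
    * FuncMask; the call frame is only popped (restoring the saved masks and
    * the return address) once no channels remain active in the function.
    * A return from main() halts execution instead.
    */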
2937 case TGSI_OPCODE_RET:
2938 mach->FuncMask &= ~mach->ExecMask;
2939 UPDATE_EXEC_MASK(mach);
2940
2941 if (mach->FuncMask == 0x0) {
2942          /* really return now (otherwise, keep executing) */
2943
2944 if (mach->CallStackTop == 0) {
2945 /* returning from main() */
2946 *pc = -1;
2947 return;
2948 }
2949
2950 assert(mach->CallStackTop > 0);
2951 mach->CallStackTop--;
2952
2953 mach->CondStackTop = mach->CallStack[mach->CallStackTop].CondStackTop;
2954 mach->CondMask = mach->CondStack[mach->CondStackTop];
2955
2956 mach->LoopStackTop = mach->CallStack[mach->CallStackTop].LoopStackTop;
2957 mach->LoopMask = mach->LoopStack[mach->LoopStackTop];
2958
2959 mach->ContStackTop = mach->CallStack[mach->CallStackTop].ContStackTop;
2960 mach->ContMask = mach->ContStack[mach->ContStackTop];
2961
2962 mach->SwitchStackTop = mach->CallStack[mach->CallStackTop].SwitchStackTop;
2963 mach->Switch = mach->SwitchStack[mach->SwitchStackTop];
2964
2965 mach->BreakStackTop = mach->CallStack[mach->CallStackTop].BreakStackTop;
2966 mach->BreakType = mach->BreakStack[mach->BreakStackTop];
2967
2968 assert(mach->FuncStackTop > 0);
2969 mach->FuncMask = mach->FuncStack[--mach->FuncStackTop];
2970
2971 *pc = mach->CallStack[mach->CallStackTop].ReturnAddr;
2972
2973 UPDATE_EXEC_MASK(mach);
2974 }
2975 break;
2976
2977 case TGSI_OPCODE_SSG:
2978 exec_vector_unary(mach, inst, micro_sgn, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
2979 break;
2980
2981 case TGSI_OPCODE_CMP:
2982 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
2983 FETCH(&r[0], 0, chan_index);
2984 FETCH(&r[1], 1, chan_index);
2985 FETCH(&r[2], 2, chan_index);
2986 micro_lt(&d[chan_index], &r[0], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], &r[1], &r[2]);
2987 }
2988 FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
2989 STORE(&d[chan_index], 0, chan_index);
2990 }
2991 break;
2992
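   /* SCS: dst = (cos(src.x), sin(src.x), 0, 1). */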
2993 case TGSI_OPCODE_SCS:
2994 if( IS_CHANNEL_ENABLED( *inst, CHAN_X ) || IS_CHANNEL_ENABLED( *inst, CHAN_Y ) ) {
2995 FETCH( &r[0], 0, CHAN_X );
2996 if (IS_CHANNEL_ENABLED(*inst, CHAN_X)) {
2997 micro_cos(&r[1], &r[0]);
2998 STORE(&r[1], 0, CHAN_X);
2999 }
3000 if (IS_CHANNEL_ENABLED(*inst, CHAN_Y)) {
3001 micro_sin(&r[1], &r[0]);
3002 STORE(&r[1], 0, CHAN_Y);
3003 }
3004 }
3005 if( IS_CHANNEL_ENABLED( *inst, CHAN_Z ) ) {
3006 STORE( &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], 0, CHAN_Z );
3007 }
3008 if( IS_CHANNEL_ENABLED( *inst, CHAN_W ) ) {
3009 STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
3010 }
3011 break;
3012
3013 case TGSI_OPCODE_NRM:
3014 /* 3-component vector normalize */
3015 if(IS_CHANNEL_ENABLED(*inst, CHAN_X) ||
3016 IS_CHANNEL_ENABLED(*inst, CHAN_Y) ||
3017 IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
3018 /* r3 = sqrt(dp3(src0, src0)) */
3019 FETCH(&r[0], 0, CHAN_X);
3020 micro_mul(&r[3], &r[0], &r[0]);
3021 FETCH(&r[1], 0, CHAN_Y);
3022 micro_mul(&r[4], &r[1], &r[1]);
3023 micro_add(&r[3], &r[3], &r[4]);
3024 FETCH(&r[2], 0, CHAN_Z);
3025 micro_mul(&r[4], &r[2], &r[2]);
3026 micro_add(&r[3], &r[3], &r[4]);
3027 micro_sqrt(&r[3], &r[3]);
3028
3029 if (IS_CHANNEL_ENABLED(*inst, CHAN_X)) {
3030 micro_div(&r[0], &r[0], &r[3]);
3031 STORE(&r[0], 0, CHAN_X);
3032 }
3033 if (IS_CHANNEL_ENABLED(*inst, CHAN_Y)) {
3034 micro_div(&r[1], &r[1], &r[3]);
3035 STORE(&r[1], 0, CHAN_Y);
3036 }
3037 if (IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
3038 micro_div(&r[2], &r[2], &r[3]);
3039 STORE(&r[2], 0, CHAN_Z);
3040 }
3041 }
3042 if (IS_CHANNEL_ENABLED(*inst, CHAN_W)) {
3043 STORE(&mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W);
3044 }
3045 break;
3046
3047 case TGSI_OPCODE_NRM4:
3048 /* 4-component vector normalize */
3049 {
3050 union tgsi_exec_channel tmp, dot;
3051
3052 /* tmp = dp4(src0, src0): */
3053 FETCH( &r[0], 0, CHAN_X );
3054 micro_mul( &tmp, &r[0], &r[0] );
3055
3056 FETCH( &r[1], 0, CHAN_Y );
3057 micro_mul( &dot, &r[1], &r[1] );
3058 micro_add( &tmp, &tmp, &dot );
3059
3060 FETCH( &r[2], 0, CHAN_Z );
3061 micro_mul( &dot, &r[2], &r[2] );
3062 micro_add( &tmp, &tmp, &dot );
3063
3064 FETCH( &r[3], 0, CHAN_W );
3065 micro_mul( &dot, &r[3], &r[3] );
3066 micro_add( &tmp, &tmp, &dot );
3067
3068 /* tmp = 1 / sqrt(tmp) */
3069 micro_sqrt( &tmp, &tmp );
3070 micro_div( &tmp, &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], &tmp );
3071
3072 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
3073 /* chan = chan * tmp */
3074 micro_mul( &r[chan_index], &tmp, &r[chan_index] );
3075 STORE( &r[chan_index], 0, chan_index );
3076 }
3077 }
3078 break;
3079
3080 case TGSI_OPCODE_DIV:
3081 assert( 0 );
3082 break;
3083
3084 case TGSI_OPCODE_DP2:
3085 FETCH( &r[0], 0, CHAN_X );
3086 FETCH( &r[1], 1, CHAN_X );
3087 micro_mul( &r[0], &r[0], &r[1] );
3088
3089 FETCH( &r[1], 0, CHAN_Y );
3090 FETCH( &r[2], 1, CHAN_Y );
3091 micro_mul( &r[1], &r[1], &r[2] );
3092 micro_add( &r[0], &r[0], &r[1] );
3093
3094 FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
3095 STORE( &r[0], 0, chan_index );
3096 }
3097 break;
3098
3099 case TGSI_OPCODE_IF:
3100 /* push CondMask */
3101 assert(mach->CondStackTop < TGSI_EXEC_MAX_COND_NESTING);
3102 mach->CondStack[mach->CondStackTop++] = mach->CondMask;
3103 FETCH( &r[0], 0, CHAN_X );
3104 /* update CondMask */
3105 if( ! r[0].u[0] ) {
3106 mach->CondMask &= ~0x1;
3107 }
3108 if( ! r[0].u[1] ) {
3109 mach->CondMask &= ~0x2;
3110 }
3111 if( ! r[0].u[2] ) {
3112 mach->CondMask &= ~0x4;
3113 }
3114 if( ! r[0].u[3] ) {
3115 mach->CondMask &= ~0x8;
3116 }
3117 UPDATE_EXEC_MASK(mach);
3118 /* Todo: If CondMask==0, jump to ELSE */
3119 break;
3120
3121 case TGSI_OPCODE_ELSE:
3122 /* invert CondMask wrt previous mask */
3123 {
3124 uint prevMask;
3125 assert(mach->CondStackTop > 0);
3126 prevMask = mach->CondStack[mach->CondStackTop - 1];
3127 mach->CondMask = ~mach->CondMask & prevMask;
3128 UPDATE_EXEC_MASK(mach);
3129 /* Todo: If CondMask==0, jump to ENDIF */
3130 }
3131 break;
3132
3133 case TGSI_OPCODE_ENDIF:
3134 /* pop CondMask */
3135 assert(mach->CondStackTop > 0);
3136 mach->CondMask = mach->CondStack[--mach->CondStackTop];
3137 UPDATE_EXEC_MASK(mach);
3138 break;
3139
3140 case TGSI_OPCODE_END:
3141 /* halt execution */
3142 *pc = -1;
3143 break;
3144
3145 case TGSI_OPCODE_REP:
3146 assert (0);
3147 break;
3148
3149 case TGSI_OPCODE_ENDREP:
3150 assert (0);
3151 break;
3152
3153 case TGSI_OPCODE_PUSHA:
3154 assert (0);
3155 break;
3156
3157 case TGSI_OPCODE_POPA:
3158 assert (0);
3159 break;
3160
3161 case TGSI_OPCODE_CEIL:
3162 exec_vector_unary(mach, inst, micro_ceil, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
3163 break;
3164
3165 case TGSI_OPCODE_I2F:
3166 exec_vector_unary(mach, inst, micro_i2f, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_INT);
3167 break;
3168
3169 case TGSI_OPCODE_NOT:
3170 exec_vector_unary(mach, inst, micro_not, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3171 break;
3172
3173 case TGSI_OPCODE_TRUNC:
3174 exec_vector_unary(mach, inst, micro_trunc, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_FLOAT);
3175 break;
3176
3177 case TGSI_OPCODE_SHL:
3178 exec_vector_binary(mach, inst, micro_shl, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3179 break;
3180
3181 case TGSI_OPCODE_AND:
3182 exec_vector_binary(mach, inst, micro_and, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3183 break;
3184
3185 case TGSI_OPCODE_OR:
3186 exec_vector_binary(mach, inst, micro_or, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3187 break;
3188
3189 case TGSI_OPCODE_MOD:
3190 assert (0);
3191 break;
3192
3193 case TGSI_OPCODE_XOR:
3194 exec_vector_binary(mach, inst, micro_xor, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3195 break;
3196
3197 case TGSI_OPCODE_SAD:
3198 assert (0);
3199 break;
3200
3201 case TGSI_OPCODE_TXF:
3202 assert (0);
3203 break;
3204
3205 case TGSI_OPCODE_TXQ:
3206 assert (0);
3207 break;
3208
3209 case TGSI_OPCODE_EMIT:
3210 emit_vertex(mach);
3211 break;
3212
3213 case TGSI_OPCODE_ENDPRIM:
3214 emit_primitive(mach);
3215 break;
3216
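   /* BGNFOR: set up a counted loop.  The three src channels are latched on
    * the loop-counter stack as x = current counter value, y = remaining
    * iteration count and z = per-iteration increment; the counter (x) is
    * written to the destination, and channels whose count is already <= 0
    * are masked off.  ENDFOR decrements y, advances x by z and loops back
    * while any channel is still live.
    */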
3217 case TGSI_OPCODE_BGNFOR:
3218 assert(mach->LoopCounterStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
3219 for (chan_index = 0; chan_index < 3; chan_index++) {
3220 FETCH( &mach->LoopCounterStack[mach->LoopCounterStackTop].xyzw[chan_index], 0, chan_index );
3221 }
3222 ++mach->LoopCounterStackTop;
3223 STORE(&mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_X], 0, CHAN_X);
3224 /* update LoopMask */
3225 if (mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y].f[0] <= 0.0f) {
3226 mach->LoopMask &= ~0x1;
3227 }
3228 if (mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y].f[1] <= 0.0f) {
3229 mach->LoopMask &= ~0x2;
3230 }
3231 if (mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y].f[2] <= 0.0f) {
3232 mach->LoopMask &= ~0x4;
3233 }
3234 if (mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y].f[3] <= 0.0f) {
3235 mach->LoopMask &= ~0x8;
3236 }
3237 /* TODO: if mach->LoopMask == 0, jump to end of loop */
3238 UPDATE_EXEC_MASK(mach);
3239 /* fall-through (for now) */
3240 case TGSI_OPCODE_BGNLOOP:
3241 /* push LoopMask and ContMasks */
3242 assert(mach->LoopStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
3243 assert(mach->ContStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
3244 assert(mach->LoopLabelStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
3245 assert(mach->BreakStackTop < TGSI_EXEC_MAX_BREAK_STACK);
3246
3247 mach->LoopStack[mach->LoopStackTop++] = mach->LoopMask;
3248 mach->ContStack[mach->ContStackTop++] = mach->ContMask;
3249 mach->LoopLabelStack[mach->LoopLabelStackTop++] = *pc - 1;
3250 mach->BreakStack[mach->BreakStackTop++] = mach->BreakType;
3251 mach->BreakType = TGSI_EXEC_BREAK_INSIDE_LOOP;
3252 break;
3253
3254 case TGSI_OPCODE_ENDFOR:
3255 assert(mach->LoopCounterStackTop > 0);
3256 micro_sub(&mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y],
3257 &mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y],
3258 &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C]);
3259 /* update LoopMask */
3260 if (mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y].f[0] <= 0.0f) {
3261 mach->LoopMask &= ~0x1;
3262 }
3263 if (mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y].f[1] <= 0.0f) {
3264 mach->LoopMask &= ~0x2;
3265 }
3266 if (mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y].f[2] <= 0.0f) {
3267 mach->LoopMask &= ~0x4;
3268 }
3269 if (mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y].f[3] <= 0.0f) {
3270 mach->LoopMask &= ~0x8;
3271 }
3272 micro_add(&mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_X],
3273 &mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_X],
3274 &mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Z]);
3275 assert(mach->LoopLabelStackTop > 0);
3276 inst = mach->Instructions + mach->LoopLabelStack[mach->LoopLabelStackTop - 1];
3277       STORE(&mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_X], 0, CHAN_X);
3278 /* Restore ContMask, but don't pop */
3279 assert(mach->ContStackTop > 0);
3280 mach->ContMask = mach->ContStack[mach->ContStackTop - 1];
3281 UPDATE_EXEC_MASK(mach);
3282 if (mach->ExecMask) {
3283 /* repeat loop: jump to instruction just past BGNLOOP */
3284 assert(mach->LoopLabelStackTop > 0);
3285 *pc = mach->LoopLabelStack[mach->LoopLabelStackTop - 1] + 1;
3286 }
3287 else {
3288 /* exit loop: pop LoopMask */
3289 assert(mach->LoopStackTop > 0);
3290 mach->LoopMask = mach->LoopStack[--mach->LoopStackTop];
3291 /* pop ContMask */
3292 assert(mach->ContStackTop > 0);
3293 mach->ContMask = mach->ContStack[--mach->ContStackTop];
3294 assert(mach->LoopLabelStackTop > 0);
3295 --mach->LoopLabelStackTop;
3296 assert(mach->LoopCounterStackTop > 0);
3297 --mach->LoopCounterStackTop;
3298
3299 mach->BreakType = mach->BreakStack[--mach->BreakStackTop];
3300 }
3301 UPDATE_EXEC_MASK(mach);
3302 break;
3303
3304 case TGSI_OPCODE_ENDLOOP:
3305 /* Restore ContMask, but don't pop */
3306 assert(mach->ContStackTop > 0);
3307 mach->ContMask = mach->ContStack[mach->ContStackTop - 1];
3308 UPDATE_EXEC_MASK(mach);
3309 if (mach->ExecMask) {
3310 /* repeat loop: jump to instruction just past BGNLOOP */
3311 assert(mach->LoopLabelStackTop > 0);
3312 *pc = mach->LoopLabelStack[mach->LoopLabelStackTop - 1] + 1;
3313 }
3314 else {
3315 /* exit loop: pop LoopMask */
3316 assert(mach->LoopStackTop > 0);
3317 mach->LoopMask = mach->LoopStack[--mach->LoopStackTop];
3318 /* pop ContMask */
3319 assert(mach->ContStackTop > 0);
3320 mach->ContMask = mach->ContStack[--mach->ContStackTop];
3321 assert(mach->LoopLabelStackTop > 0);
3322 --mach->LoopLabelStackTop;
3323
3324 mach->BreakType = mach->BreakStack[--mach->BreakStackTop];
3325 }
3326 UPDATE_EXEC_MASK(mach);
3327 break;
3328
3329 case TGSI_OPCODE_BRK:
3330 exec_break(mach);
3331 break;
3332
3333 case TGSI_OPCODE_CONT:
3334 /* turn off cont channels for each enabled exec channel */
3335 mach->ContMask &= ~mach->ExecMask;
3336 /* Todo: if mach->LoopMask == 0, jump to end of loop */
3337 UPDATE_EXEC_MASK(mach);
3338 break;
3339
3340 case TGSI_OPCODE_BGNSUB:
3341 /* no-op */
3342 break;
3343
3344 case TGSI_OPCODE_ENDSUB:
3345 /*
3346 * XXX: This really should be a no-op. We should never reach this opcode.
3347 */
3348
3349 assert(mach->CallStackTop > 0);
3350 mach->CallStackTop--;
3351
3352 mach->CondStackTop = mach->CallStack[mach->CallStackTop].CondStackTop;
3353 mach->CondMask = mach->CondStack[mach->CondStackTop];
3354
3355 mach->LoopStackTop = mach->CallStack[mach->CallStackTop].LoopStackTop;
3356 mach->LoopMask = mach->LoopStack[mach->LoopStackTop];
3357
3358 mach->ContStackTop = mach->CallStack[mach->CallStackTop].ContStackTop;
3359 mach->ContMask = mach->ContStack[mach->ContStackTop];
3360
3361 mach->SwitchStackTop = mach->CallStack[mach->CallStackTop].SwitchStackTop;
3362 mach->Switch = mach->SwitchStack[mach->SwitchStackTop];
3363
3364 mach->BreakStackTop = mach->CallStack[mach->CallStackTop].BreakStackTop;
3365 mach->BreakType = mach->BreakStack[mach->BreakStackTop];
3366
3367 assert(mach->FuncStackTop > 0);
3368 mach->FuncMask = mach->FuncStack[--mach->FuncStackTop];
3369
3370 *pc = mach->CallStack[mach->CallStackTop].ReturnAddr;
3371
3372 UPDATE_EXEC_MASK(mach);
3373 break;
3374
3375 case TGSI_OPCODE_NOP:
3376 break;
3377
3378 case TGSI_OPCODE_BREAKC:
3379 FETCH(&r[0], 0, CHAN_X);
3380       /* update LoopMask */
3381 if (r[0].u[0] && (mach->ExecMask & 0x1)) {
3382 mach->LoopMask &= ~0x1;
3383 }
3384 if (r[0].u[1] && (mach->ExecMask & 0x2)) {
3385 mach->LoopMask &= ~0x2;
3386 }
3387 if (r[0].u[2] && (mach->ExecMask & 0x4)) {
3388 mach->LoopMask &= ~0x4;
3389 }
3390 if (r[0].u[3] && (mach->ExecMask & 0x8)) {
3391 mach->LoopMask &= ~0x8;
3392 }
3393 /* Todo: if mach->LoopMask == 0, jump to end of loop */
3394 UPDATE_EXEC_MASK(mach);
3395 break;
3396
3397 case TGSI_OPCODE_F2I:
3398 exec_vector_unary(mach, inst, micro_f2i, TGSI_EXEC_DATA_INT, TGSI_EXEC_DATA_FLOAT);
3399 break;
3400
3401 case TGSI_OPCODE_IDIV:
3402 exec_vector_binary(mach, inst, micro_idiv, TGSI_EXEC_DATA_INT, TGSI_EXEC_DATA_INT);
3403 break;
3404
3405 case TGSI_OPCODE_IMAX:
3406 exec_vector_binary(mach, inst, micro_imax, TGSI_EXEC_DATA_INT, TGSI_EXEC_DATA_INT);
3407 break;
3408
3409 case TGSI_OPCODE_IMIN:
3410 exec_vector_binary(mach, inst, micro_imin, TGSI_EXEC_DATA_INT, TGSI_EXEC_DATA_INT);
3411 break;
3412
3413 case TGSI_OPCODE_INEG:
3414 exec_vector_unary(mach, inst, micro_ineg, TGSI_EXEC_DATA_INT, TGSI_EXEC_DATA_INT);
3415 break;
3416
3417 case TGSI_OPCODE_ISGE:
3418 exec_vector_binary(mach, inst, micro_isge, TGSI_EXEC_DATA_INT, TGSI_EXEC_DATA_INT);
3419 break;
3420
3421 case TGSI_OPCODE_ISHR:
3422 exec_vector_binary(mach, inst, micro_ishr, TGSI_EXEC_DATA_INT, TGSI_EXEC_DATA_INT);
3423 break;
3424
3425 case TGSI_OPCODE_ISLT:
3426 exec_vector_binary(mach, inst, micro_islt, TGSI_EXEC_DATA_INT, TGSI_EXEC_DATA_INT);
3427 break;
3428
3429 case TGSI_OPCODE_F2U:
3430 exec_vector_unary(mach, inst, micro_f2u, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_FLOAT);
3431 break;
3432
3433 case TGSI_OPCODE_U2F:
3434 exec_vector_unary(mach, inst, micro_u2f, TGSI_EXEC_DATA_FLOAT, TGSI_EXEC_DATA_UINT);
3435 break;
3436
3437 case TGSI_OPCODE_UADD:
3438 exec_vector_binary(mach, inst, micro_uadd, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3439 break;
3440
3441 case TGSI_OPCODE_UDIV:
3442 exec_vector_binary(mach, inst, micro_udiv, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3443 break;
3444
3445 case TGSI_OPCODE_UMAD:
3446 exec_vector_trinary(mach, inst, micro_umad, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3447 break;
3448
3449 case TGSI_OPCODE_UMAX:
3450 exec_vector_binary(mach, inst, micro_umax, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3451 break;
3452
3453 case TGSI_OPCODE_UMIN:
3454 exec_vector_binary(mach, inst, micro_umin, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3455 break;
3456
3457 case TGSI_OPCODE_UMOD:
3458 exec_vector_binary(mach, inst, micro_umod, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3459 break;
3460
3461 case TGSI_OPCODE_UMUL:
3462 exec_vector_binary(mach, inst, micro_umul, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3463 break;
3464
3465 case TGSI_OPCODE_USEQ:
3466 exec_vector_binary(mach, inst, micro_useq, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3467 break;
3468
3469 case TGSI_OPCODE_USGE:
3470 exec_vector_binary(mach, inst, micro_usge, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3471 break;
3472
3473 case TGSI_OPCODE_USHR:
3474 exec_vector_binary(mach, inst, micro_ushr, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3475 break;
3476
3477 case TGSI_OPCODE_USLT:
3478 exec_vector_binary(mach, inst, micro_uslt, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3479 break;
3480
3481 case TGSI_OPCODE_USNE:
3482 exec_vector_binary(mach, inst, micro_usne, TGSI_EXEC_DATA_UINT, TGSI_EXEC_DATA_UINT);
3483 break;
3484
3485 case TGSI_OPCODE_SWITCH:
3486 exec_switch(mach, inst);
3487 break;
3488
3489 case TGSI_OPCODE_CASE:
3490 exec_case(mach, inst);
3491 break;
3492
3493 case TGSI_OPCODE_DEFAULT:
3494 exec_default(mach);
3495 break;
3496
3497 case TGSI_OPCODE_ENDSWITCH:
3498 exec_endswitch(mach);
3499 break;
3500
3501 default:
3502 assert( 0 );
3503 }
3504 }
3505
3506
3507 #define DEBUG_EXECUTION 0
3508
3509
3510 /**
3511 * Run TGSI interpreter.
3512 * \return bitmask of "alive" quad components
3513 */
3514 uint
3515 tgsi_exec_machine_run( struct tgsi_exec_machine *mach )
3516 {
3517 uint i;
3518 int pc = 0;
3519
3520 mach->CondMask = 0xf;
3521 mach->LoopMask = 0xf;
3522 mach->ContMask = 0xf;
3523 mach->FuncMask = 0xf;
3524 mach->ExecMask = 0xf;
3525
3526 mach->Switch.mask = 0xf;
3527
3528 assert(mach->CondStackTop == 0);
3529 assert(mach->LoopStackTop == 0);
3530 assert(mach->ContStackTop == 0);
3531 assert(mach->SwitchStackTop == 0);
3532 assert(mach->BreakStackTop == 0);
3533 assert(mach->CallStackTop == 0);
3534
3535 mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] = 0;
3536 mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0] = 0;
3537
3538 if( mach->Processor == TGSI_PROCESSOR_GEOMETRY ) {
3539 mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0] = 0;
3540 mach->Primitives[0] = 0;
3541 }
3542
3543 for (i = 0; i < QUAD_SIZE; i++) {
3544 mach->Temps[TEMP_CC_I].xyzw[TEMP_CC_C].u[i] =
3545 (TGSI_EXEC_CC_EQ << TGSI_EXEC_CC_X_SHIFT) |
3546 (TGSI_EXEC_CC_EQ << TGSI_EXEC_CC_Y_SHIFT) |
3547 (TGSI_EXEC_CC_EQ << TGSI_EXEC_CC_Z_SHIFT) |
3548 (TGSI_EXEC_CC_EQ << TGSI_EXEC_CC_W_SHIFT);
3549 }
3550
3551 /* execute declarations (interpolants) */
3552 for (i = 0; i < mach->NumDeclarations; i++) {
3553 exec_declaration( mach, mach->Declarations+i );
3554 }
3555
3556 {
3557 #if DEBUG_EXECUTION
3558 struct tgsi_exec_vector temps[TGSI_EXEC_NUM_TEMPS + TGSI_EXEC_NUM_TEMP_EXTRAS];
3559 struct tgsi_exec_vector outputs[PIPE_MAX_ATTRIBS];
3560 uint inst = 1;
3561
3562 memcpy(temps, mach->Temps, sizeof(temps));
3563 memcpy(outputs, mach->Outputs, sizeof(outputs));
3564 #endif
3565
3566 /* execute instructions, until pc is set to -1 */
3567 while (pc != -1) {
3568
3569 #if DEBUG_EXECUTION
3570 uint i;
3571
3572 tgsi_dump_instruction(&mach->Instructions[pc], inst++);
3573 #endif
3574
3575 assert(pc < (int) mach->NumInstructions);
3576 exec_instruction(mach, mach->Instructions + pc, &pc);
3577
3578 #if DEBUG_EXECUTION
3579 for (i = 0; i < TGSI_EXEC_NUM_TEMPS + TGSI_EXEC_NUM_TEMP_EXTRAS; i++) {
3580 if (memcmp(&temps[i], &mach->Temps[i], sizeof(temps[i]))) {
3581 uint j;
3582
3583 memcpy(&temps[i], &mach->Temps[i], sizeof(temps[i]));
3584 debug_printf("TEMP[%2u] = ", i);
3585 for (j = 0; j < 4; j++) {
3586 if (j > 0) {
3587 debug_printf(" ");
3588 }
3589 debug_printf("(%6f %u, %6f %u, %6f %u, %6f %u)\n",
3590 temps[i].xyzw[0].f[j], temps[i].xyzw[0].u[j],
3591 temps[i].xyzw[1].f[j], temps[i].xyzw[1].u[j],
3592 temps[i].xyzw[2].f[j], temps[i].xyzw[2].u[j],
3593 temps[i].xyzw[3].f[j], temps[i].xyzw[3].u[j]);
3594 }
3595 }
3596 }
3597 for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
3598 if (memcmp(&outputs[i], &mach->Outputs[i], sizeof(outputs[i]))) {
3599 uint j;
3600
3601 memcpy(&outputs[i], &mach->Outputs[i], sizeof(outputs[i]));
3602 debug_printf("OUT[%2u] = ", i);
3603 for (j = 0; j < 4; j++) {
3604 if (j > 0) {
3605 debug_printf(" ");
3606 }
3607 debug_printf("(%6f %u, %6f %u, %6f %u, %6f %u)\n",
3608 outputs[i].xyzw[0].f[j], outputs[i].xyzw[0].u[j],
3609 outputs[i].xyzw[1].f[j], outputs[i].xyzw[1].u[j],
3610 outputs[i].xyzw[2].f[j], outputs[i].xyzw[2].u[j],
3611 outputs[i].xyzw[3].f[j], outputs[i].xyzw[3].u[j]);
3612 }
3613 }
3614 }
3615 #endif
3616 }
3617 }
3618
3619 #if 0
3620 /* we scale from floats in [0,1] to Zbuffer ints in sp_quad_depth_test.c */
3621 if (mach->Processor == TGSI_PROCESSOR_FRAGMENT) {
3622 /*
3623 * Scale back depth component.
3624 */
3625 for (i = 0; i < 4; i++)
3626 mach->Outputs[0].xyzw[2].f[i] *= ctx->DrawBuffer->_DepthMaxF;
3627 }
3628 #endif
3629
3630 assert(mach->CondStackTop == 0);
3631 assert(mach->LoopStackTop == 0);
3632 assert(mach->ContStackTop == 0);
3633 assert(mach->SwitchStackTop == 0);
3634 assert(mach->BreakStackTop == 0);
3635 assert(mach->CallStackTop == 0);
3636
3637 return ~mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0];
3638 }