svga: add comment about 'extra' constant locations
[mesa.git] / src / gallium / drivers / svga / svga_tgsi_vgpu10.c
1 /**********************************************************
2 * Copyright 1998-2013 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 /**
27 * @file svga_tgsi_vgpu10.c
28 *
29 * TGSI -> VGPU10 shader translation.
30 *
31 * \author Mingcheng Chen
32 * \author Brian Paul
33 */
34
35 #include "pipe/p_compiler.h"
36 #include "pipe/p_shader_tokens.h"
37 #include "pipe/p_defines.h"
38 #include "tgsi/tgsi_build.h"
39 #include "tgsi/tgsi_dump.h"
40 #include "tgsi/tgsi_info.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_scan.h"
43 #include "tgsi/tgsi_two_side.h"
44 #include "tgsi/tgsi_aa_point.h"
45 #include "tgsi/tgsi_util.h"
46 #include "util/u_math.h"
47 #include "util/u_memory.h"
48 #include "util/u_bitmask.h"
49 #include "util/u_debug.h"
50 #include "util/u_pstipple.h"
51
52 #include "svga_context.h"
53 #include "svga_debug.h"
54 #include "svga_link.h"
55 #include "svga_shader.h"
56 #include "svga_tgsi.h"
57
58 #include "VGPU10ShaderTokens.h"
59
60
61 #define INVALID_INDEX 99999
62 #define MAX_INTERNAL_TEMPS 3
63 #define MAX_SYSTEM_VALUES 4
64 #define MAX_IMMEDIATE_COUNT \
65 (VGPU10_MAX_IMMEDIATE_CONSTANT_BUFFER_ELEMENT_COUNT/4)
66 #define MAX_TEMP_ARRAYS 64 /* Enough? */
67
68
69 /**
70  * Clipping is complicated. There are four different cases which we
71 * handle during VS/GS shader translation:
72 */
73 enum clipping_mode
74 {
75 CLIP_NONE, /**< No clipping enabled */
76 CLIP_LEGACY, /**< The shader has no clipping declarations or code but
77 * one or more user-defined clip planes are enabled. We
78 * generate extra code to emit clip distances.
79 */
80 CLIP_DISTANCE, /**< The shader already declares clip distance output
81 * registers and has code to write to them.
82 */
83 CLIP_VERTEX /**< The shader declares a clip vertex output register and
84 * has code that writes to the register. We convert the
85 * clipvertex position into one or more clip distances.
86 */
87 };
88
89
90 struct svga_shader_emitter_v10
91 {
92 /* The token output buffer */
93 unsigned size;
94 char *buf;
95 char *ptr;
96
97 /* Information about the shader and state (does not change) */
98 struct svga_compile_key key;
99 struct tgsi_shader_info info;
100 unsigned unit;
101
102 unsigned inst_start_token;
103 boolean discard_instruction; /**< throw away current instruction? */
104
105 union tgsi_immediate_data immediates[MAX_IMMEDIATE_COUNT][4];
106 unsigned num_immediates; /**< Number of immediates emitted */
107 unsigned common_immediate_pos[8]; /**< literals for common immediates */
108 unsigned num_common_immediates;
109 boolean immediates_emitted;
110
111    unsigned num_outputs;      /**< includes any extra outputs */
112    /** The first extra output is reserved for
113     *  the non-adjusted vertex position, for
114     *  stream output purposes
115 */
116
117 /* Temporary Registers */
118 unsigned num_shader_temps; /**< num of temps used by original shader */
119 unsigned internal_temp_count; /**< currently allocated internal temps */
120 struct {
121 unsigned start, size;
122 } temp_arrays[MAX_TEMP_ARRAYS];
123 unsigned num_temp_arrays;
124
125 /** Map TGSI temp registers to VGPU10 temp array IDs and indexes */
126 struct {
127 unsigned arrayId, index;
128 } temp_map[VGPU10_MAX_TEMPS]; /**< arrayId, element */
129
130 /** Number of constants used by original shader for each constant buffer.
131     * The size should probably always match that of svga_state.constbufs.
132 */
133 unsigned num_shader_consts[SVGA_MAX_CONST_BUFS];
134
135 /* Samplers */
136 unsigned num_samplers;
137 ubyte sampler_target[PIPE_MAX_SAMPLERS]; /**< TGSI_TEXTURE_x */
138 ubyte sampler_return_type[PIPE_MAX_SAMPLERS]; /**< TGSI_RETURN_TYPE_x */
139
140 /* Address regs (really implemented with temps) */
141 unsigned num_address_regs;
142 unsigned address_reg_index[MAX_VGPU10_ADDR_REGS];
143
144 /* Output register usage masks */
145 ubyte output_usage_mask[PIPE_MAX_SHADER_OUTPUTS];
146
147 /* To map TGSI system value index to VGPU shader input indexes */
148 ubyte system_value_indexes[MAX_SYSTEM_VALUES];
149
150 struct {
151 /* vertex position scale/translation */
152 unsigned out_index; /**< the real position output reg */
153 unsigned tmp_index; /**< the fake/temp position output reg */
154 unsigned so_index; /**< the non-adjusted position output reg */
155 unsigned prescale_scale_index, prescale_trans_index;
156 boolean need_prescale;
157 } vposition;
158
159 /* For vertex shaders only */
160 struct {
161 /* viewport constant */
162 unsigned viewport_index;
163
164 /* temp index of adjusted vertex attributes */
165 unsigned adjusted_input[PIPE_MAX_SHADER_INPUTS];
166 } vs;
167
168 /* For fragment shaders only */
169 struct {
170       /* alpha test */
171 unsigned color_out_index[PIPE_MAX_COLOR_BUFS]; /**< the real color output regs */
172 unsigned color_tmp_index; /**< fake/temp color output reg */
173 unsigned alpha_ref_index; /**< immediate constant for alpha ref */
174
175 /* front-face */
176 unsigned face_input_index; /**< real fragment shader face reg (bool) */
177 unsigned face_tmp_index; /**< temp face reg converted to -1 / +1 */
178
179 unsigned pstipple_sampler_unit;
180
181 unsigned fragcoord_input_index; /**< real fragment position input reg */
182 unsigned fragcoord_tmp_index; /**< 1/w modified position temp reg */
183 } fs;
184
185 /* For geometry shaders only */
186 struct {
187       VGPU10_PRIMITIVE prim_type; /**< VGPU10 primitive type */
188 VGPU10_PRIMITIVE_TOPOLOGY prim_topology; /**< VGPU10 primitive topology */
189 unsigned input_size; /**< size of input arrays */
190 unsigned prim_id_index; /**< primitive id register index */
191 unsigned max_out_vertices; /**< maximum number of output vertices */
192 } gs;
193
194 /* For vertex or geometry shaders */
195 enum clipping_mode clip_mode;
196 unsigned clip_dist_out_index; /**< clip distance output register index */
197 unsigned clip_dist_tmp_index; /**< clip distance temporary register */
198 unsigned clip_dist_so_index; /**< clip distance shadow copy */
199
200 /** Index of temporary holding the clipvertex coordinate */
201 unsigned clip_vertex_out_index; /**< clip vertex output register index */
202 unsigned clip_vertex_tmp_index; /**< clip vertex temporary index */
203
204 /* user clip plane constant slot indexes */
205 unsigned clip_plane_const[PIPE_MAX_CLIP_PLANES];
206
207 unsigned num_output_writes;
208 boolean constant_color_output;
209
210 boolean uses_flat_interp;
211
212 /* For all shaders: const reg index for RECT coord scaling */
213 unsigned texcoord_scale_index[PIPE_MAX_SAMPLERS];
214
215 /* For all shaders: const reg index for texture buffer size */
216 unsigned texture_buffer_size_index[PIPE_MAX_SAMPLERS];
217
218 /* VS/GS/FS Linkage info */
219 struct shader_linkage linkage;
220
221 bool register_overflow; /**< Set if we exceed a VGPU10 register limit */
222 };
223
224
225 static boolean
226 emit_post_helpers(struct svga_shader_emitter_v10 *emit);
227
228 static boolean
229 emit_vertex(struct svga_shader_emitter_v10 *emit,
230 const struct tgsi_full_instruction *inst);
231
232 static char err_buf[128];
233
234 static boolean
235 expand(struct svga_shader_emitter_v10 *emit)
236 {
237 char *new_buf;
238 unsigned newsize = emit->size * 2;
239
240 if (emit->buf != err_buf)
241 new_buf = REALLOC(emit->buf, emit->size, newsize);
242 else
243 new_buf = NULL;
244
245 if (!new_buf) {
246 emit->ptr = err_buf;
247 emit->buf = err_buf;
248 emit->size = sizeof(err_buf);
249 return FALSE;
250 }
251
252 emit->size = newsize;
253 emit->ptr = new_buf + (emit->ptr - emit->buf);
254 emit->buf = new_buf;
255 return TRUE;
256 }
257
258 /**
259 * Create and initialize a new svga_shader_emitter_v10 object.
260 */
261 static struct svga_shader_emitter_v10 *
262 alloc_emitter(void)
263 {
264 struct svga_shader_emitter_v10 *emit = CALLOC(1, sizeof(*emit));
265
266 if (!emit)
267 return NULL;
268
269 /* to initialize the output buffer */
270 emit->size = 512;
271 if (!expand(emit)) {
272 FREE(emit);
273 return NULL;
274 }
275 return emit;
276 }
277
278 /**
279 * Free an svga_shader_emitter_v10 object.
280 */
281 static void
282 free_emitter(struct svga_shader_emitter_v10 *emit)
283 {
284 assert(emit);
285 FREE(emit->buf); /* will be NULL if translation succeeded */
286 FREE(emit);
287 }
288
289 static inline boolean
290 reserve(struct svga_shader_emitter_v10 *emit,
291 unsigned nr_dwords)
292 {
293 while (emit->ptr - emit->buf + nr_dwords * sizeof(uint32) >= emit->size) {
294 if (!expand(emit))
295 return FALSE;
296 }
297
298 return TRUE;
299 }
300
301 static boolean
302 emit_dword(struct svga_shader_emitter_v10 *emit, uint32 dword)
303 {
304 if (!reserve(emit, 1))
305 return FALSE;
306
307 *(uint32 *)emit->ptr = dword;
308 emit->ptr += sizeof dword;
309 return TRUE;
310 }
311
312 static boolean
313 emit_dwords(struct svga_shader_emitter_v10 *emit,
314 const uint32 *dwords,
315 unsigned nr)
316 {
317 if (!reserve(emit, nr))
318 return FALSE;
319
320 memcpy(emit->ptr, dwords, nr * sizeof *dwords);
321 emit->ptr += nr * sizeof *dwords;
322 return TRUE;
323 }
324
325 /** Return the number of tokens in the emitter's buffer */
326 static unsigned
327 emit_get_num_tokens(const struct svga_shader_emitter_v10 *emit)
328 {
329 return (emit->ptr - emit->buf) / sizeof(unsigned);
330 }
331
332
333 /**
334 * Check for register overflow. If we overflow we'll set an
335 * error flag. This function can be called for register declarations
336  * or for registers used as src/dst instruction operands.
337  * \param operandType  register type. One of VGPU10_OPERAND_TYPE_x
338  *                     or VGPU10_OPCODE_DCL_x
339 * \param index the register index
340 */
341 static void
342 check_register_index(struct svga_shader_emitter_v10 *emit,
343 unsigned operandType, unsigned index)
344 {
345 bool overflow_before = emit->register_overflow;
346
347 switch (operandType) {
348 case VGPU10_OPERAND_TYPE_TEMP:
349 case VGPU10_OPERAND_TYPE_INDEXABLE_TEMP:
350 case VGPU10_OPCODE_DCL_TEMPS:
351 if (index >= VGPU10_MAX_TEMPS) {
352 emit->register_overflow = TRUE;
353 }
354 break;
355 case VGPU10_OPERAND_TYPE_CONSTANT_BUFFER:
356 case VGPU10_OPCODE_DCL_CONSTANT_BUFFER:
357 if (index >= VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT) {
358 emit->register_overflow = TRUE;
359 }
360 break;
361 case VGPU10_OPERAND_TYPE_INPUT:
362 case VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID:
363 case VGPU10_OPCODE_DCL_INPUT:
364 case VGPU10_OPCODE_DCL_INPUT_SGV:
365 case VGPU10_OPCODE_DCL_INPUT_SIV:
366 case VGPU10_OPCODE_DCL_INPUT_PS:
367 case VGPU10_OPCODE_DCL_INPUT_PS_SGV:
368 case VGPU10_OPCODE_DCL_INPUT_PS_SIV:
369 if ((emit->unit == PIPE_SHADER_VERTEX &&
370 index >= VGPU10_MAX_VS_INPUTS) ||
371 (emit->unit == PIPE_SHADER_GEOMETRY &&
372 index >= VGPU10_MAX_GS_INPUTS) ||
373 (emit->unit == PIPE_SHADER_FRAGMENT &&
374 index >= VGPU10_MAX_FS_INPUTS)) {
375 emit->register_overflow = TRUE;
376 }
377 break;
378 case VGPU10_OPERAND_TYPE_OUTPUT:
379 case VGPU10_OPCODE_DCL_OUTPUT:
380 case VGPU10_OPCODE_DCL_OUTPUT_SGV:
381 case VGPU10_OPCODE_DCL_OUTPUT_SIV:
382 if ((emit->unit == PIPE_SHADER_VERTEX &&
383 index >= VGPU10_MAX_VS_OUTPUTS) ||
384 (emit->unit == PIPE_SHADER_GEOMETRY &&
385 index >= VGPU10_MAX_GS_OUTPUTS) ||
386 (emit->unit == PIPE_SHADER_FRAGMENT &&
387 index >= VGPU10_MAX_FS_OUTPUTS)) {
388 emit->register_overflow = TRUE;
389 }
390 break;
391 case VGPU10_OPERAND_TYPE_SAMPLER:
392 case VGPU10_OPCODE_DCL_SAMPLER:
393 if (index >= VGPU10_MAX_SAMPLERS) {
394 emit->register_overflow = TRUE;
395 }
396 break;
397 case VGPU10_OPERAND_TYPE_RESOURCE:
398 case VGPU10_OPCODE_DCL_RESOURCE:
399 if (index >= VGPU10_MAX_RESOURCES) {
400 emit->register_overflow = TRUE;
401 }
402 break;
403 case VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER:
404 if (index >= MAX_IMMEDIATE_COUNT) {
405 emit->register_overflow = TRUE;
406 }
407 break;
408 default:
409 assert(0);
410 ; /* nothing */
411 }
412
413 if (emit->register_overflow && !overflow_before) {
414 debug_printf("svga: vgpu10 register overflow (reg %u, index %u)\n",
415 operandType, index);
416 }
417 }
418
419
420 /**
421 * Examine misc state to determine the clipping mode.
422 */
423 static void
424 determine_clipping_mode(struct svga_shader_emitter_v10 *emit)
425 {
426 if (emit->info.num_written_clipdistance > 0) {
427 emit->clip_mode = CLIP_DISTANCE;
428 }
429 else if (emit->info.writes_clipvertex) {
430 emit->clip_mode = CLIP_VERTEX;
431 }
432 else if (emit->key.clip_plane_enable) {
433 emit->clip_mode = CLIP_LEGACY;
434 }
435 else {
436 emit->clip_mode = CLIP_NONE;
437 }
438 }
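
/**
 * Illustrative scenarios for the modes above (hypothetical shaders, not
 * taken from a real trace):
 *
 *   - A VS that never writes gl_ClipDistance/gl_ClipVertex while the app
 *     has enabled one or more clip planes:
 *       num_written_clipdistance == 0, writes_clipvertex == 0,
 *       key.clip_plane_enable != 0          -> CLIP_LEGACY
 *
 *   - A VS that writes gl_ClipDistance[0..3]:
 *       num_written_clipdistance == 4       -> CLIP_DISTANCE
 *
 *   - A VS that writes gl_ClipVertex only:
 *       writes_clipvertex != 0              -> CLIP_VERTEX
 */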
439
440
441 /**
442 * For clip distance register declarations and clip distance register
443 * writes we need to mask the declaration usage or instruction writemask
444  * (respectively) against the set of really-enabled clipping planes.
445 *
446 * The piglit test spec/glsl-1.30/execution/clipping/vs-clip-distance-enables
447 * has a VS that writes to all 8 clip distance registers, but the plane enable
448 * flags are a subset of that.
449 *
450 * This function is used to apply the plane enable flags to the register
451 * declaration or instruction writemask.
452 *
453 * \param writemask the declaration usage mask or instruction writemask
454 * \param clip_reg_index which clip plane register is being declared/written.
455  *                       The legal values are 0 and 1 (four clip planes per
456  *                       register, for a total of 8 clip planes)
457 */
458 static unsigned
459 apply_clip_plane_mask(struct svga_shader_emitter_v10 *emit,
460 unsigned writemask, unsigned clip_reg_index)
461 {
462 unsigned shift;
463
464 assert(clip_reg_index < 2);
465
466 /* four clip planes per clip register: */
467 shift = clip_reg_index * 4;
468 writemask &= ((emit->key.clip_plane_enable >> shift) & 0xf);
469
470 return writemask;
471 }
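
/**
 * Worked example (illustrative values): if key.clip_plane_enable is 0x3f
 * (planes 0..5 enabled) and the shader writes CLIPDIST register 1 with
 * writemask TGSI_WRITEMASK_XYZW (0xf):
 *
 *   shift     = 1 * 4 = 4
 *   writemask = 0xf & ((0x3f >> 4) & 0xf) = 0x3
 *
 * so only the .xy components (clip planes 4 and 5) are actually written.
 */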
472
473
474 /**
475 * Translate gallium shader type into VGPU10 type.
476 */
477 static VGPU10_PROGRAM_TYPE
478 translate_shader_type(unsigned type)
479 {
480 switch (type) {
481 case PIPE_SHADER_VERTEX:
482 return VGPU10_VERTEX_SHADER;
483 case PIPE_SHADER_GEOMETRY:
484 return VGPU10_GEOMETRY_SHADER;
485 case PIPE_SHADER_FRAGMENT:
486 return VGPU10_PIXEL_SHADER;
487 default:
488 assert(!"Unexpected shader type");
489 return VGPU10_VERTEX_SHADER;
490 }
491 }
492
493
494 /**
495 * Translate a TGSI_OPCODE_x into a VGPU10_OPCODE_x
496 * Note: we only need to translate the opcodes for "simple" instructions,
497 * as seen below. All other opcodes are handled/translated specially.
498 */
499 static VGPU10_OPCODE_TYPE
500 translate_opcode(unsigned opcode)
501 {
502 switch (opcode) {
503 case TGSI_OPCODE_MOV:
504 return VGPU10_OPCODE_MOV;
505 case TGSI_OPCODE_MUL:
506 return VGPU10_OPCODE_MUL;
507 case TGSI_OPCODE_ADD:
508 return VGPU10_OPCODE_ADD;
509 case TGSI_OPCODE_DP3:
510 return VGPU10_OPCODE_DP3;
511 case TGSI_OPCODE_DP4:
512 return VGPU10_OPCODE_DP4;
513 case TGSI_OPCODE_MIN:
514 return VGPU10_OPCODE_MIN;
515 case TGSI_OPCODE_MAX:
516 return VGPU10_OPCODE_MAX;
517 case TGSI_OPCODE_MAD:
518 return VGPU10_OPCODE_MAD;
519 case TGSI_OPCODE_SQRT:
520 return VGPU10_OPCODE_SQRT;
521 case TGSI_OPCODE_FRC:
522 return VGPU10_OPCODE_FRC;
523 case TGSI_OPCODE_FLR:
524 return VGPU10_OPCODE_ROUND_NI;
525 case TGSI_OPCODE_FSEQ:
526 return VGPU10_OPCODE_EQ;
527 case TGSI_OPCODE_FSGE:
528 return VGPU10_OPCODE_GE;
529 case TGSI_OPCODE_FSNE:
530 return VGPU10_OPCODE_NE;
531 case TGSI_OPCODE_DDX:
532 return VGPU10_OPCODE_DERIV_RTX;
533 case TGSI_OPCODE_DDY:
534 return VGPU10_OPCODE_DERIV_RTY;
535 case TGSI_OPCODE_RET:
536 return VGPU10_OPCODE_RET;
537 case TGSI_OPCODE_DIV:
538 return VGPU10_OPCODE_DIV;
539 case TGSI_OPCODE_IDIV:
540 return VGPU10_OPCODE_IDIV;
541 case TGSI_OPCODE_DP2:
542 return VGPU10_OPCODE_DP2;
543 case TGSI_OPCODE_BRK:
544 return VGPU10_OPCODE_BREAK;
545 case TGSI_OPCODE_IF:
546 return VGPU10_OPCODE_IF;
547 case TGSI_OPCODE_ELSE:
548 return VGPU10_OPCODE_ELSE;
549 case TGSI_OPCODE_ENDIF:
550 return VGPU10_OPCODE_ENDIF;
551 case TGSI_OPCODE_CEIL:
552 return VGPU10_OPCODE_ROUND_PI;
553 case TGSI_OPCODE_I2F:
554 return VGPU10_OPCODE_ITOF;
555 case TGSI_OPCODE_NOT:
556 return VGPU10_OPCODE_NOT;
557 case TGSI_OPCODE_TRUNC:
558 return VGPU10_OPCODE_ROUND_Z;
559 case TGSI_OPCODE_SHL:
560 return VGPU10_OPCODE_ISHL;
561 case TGSI_OPCODE_AND:
562 return VGPU10_OPCODE_AND;
563 case TGSI_OPCODE_OR:
564 return VGPU10_OPCODE_OR;
565 case TGSI_OPCODE_XOR:
566 return VGPU10_OPCODE_XOR;
567 case TGSI_OPCODE_CONT:
568 return VGPU10_OPCODE_CONTINUE;
569 case TGSI_OPCODE_EMIT:
570 return VGPU10_OPCODE_EMIT;
571 case TGSI_OPCODE_ENDPRIM:
572 return VGPU10_OPCODE_CUT;
573 case TGSI_OPCODE_BGNLOOP:
574 return VGPU10_OPCODE_LOOP;
575 case TGSI_OPCODE_ENDLOOP:
576 return VGPU10_OPCODE_ENDLOOP;
577 case TGSI_OPCODE_ENDSUB:
578 return VGPU10_OPCODE_RET;
579 case TGSI_OPCODE_NOP:
580 return VGPU10_OPCODE_NOP;
581 case TGSI_OPCODE_BREAKC:
582 return VGPU10_OPCODE_BREAKC;
583 case TGSI_OPCODE_END:
584 return VGPU10_OPCODE_RET;
585 case TGSI_OPCODE_F2I:
586 return VGPU10_OPCODE_FTOI;
587 case TGSI_OPCODE_IMAX:
588 return VGPU10_OPCODE_IMAX;
589 case TGSI_OPCODE_IMIN:
590 return VGPU10_OPCODE_IMIN;
591 case TGSI_OPCODE_UDIV:
592 case TGSI_OPCODE_UMOD:
593 case TGSI_OPCODE_MOD:
594 return VGPU10_OPCODE_UDIV;
595 case TGSI_OPCODE_IMUL_HI:
596 return VGPU10_OPCODE_IMUL;
597 case TGSI_OPCODE_INEG:
598 return VGPU10_OPCODE_INEG;
599 case TGSI_OPCODE_ISHR:
600 return VGPU10_OPCODE_ISHR;
601 case TGSI_OPCODE_ISGE:
602 return VGPU10_OPCODE_IGE;
603 case TGSI_OPCODE_ISLT:
604 return VGPU10_OPCODE_ILT;
605 case TGSI_OPCODE_F2U:
606 return VGPU10_OPCODE_FTOU;
607 case TGSI_OPCODE_UADD:
608 return VGPU10_OPCODE_IADD;
609 case TGSI_OPCODE_U2F:
610 return VGPU10_OPCODE_UTOF;
611 case TGSI_OPCODE_UCMP:
612 return VGPU10_OPCODE_MOVC;
613 case TGSI_OPCODE_UMAD:
614 return VGPU10_OPCODE_UMAD;
615 case TGSI_OPCODE_UMAX:
616 return VGPU10_OPCODE_UMAX;
617 case TGSI_OPCODE_UMIN:
618 return VGPU10_OPCODE_UMIN;
619 case TGSI_OPCODE_UMUL:
620 case TGSI_OPCODE_UMUL_HI:
621 return VGPU10_OPCODE_UMUL;
622 case TGSI_OPCODE_USEQ:
623 return VGPU10_OPCODE_IEQ;
624 case TGSI_OPCODE_USGE:
625 return VGPU10_OPCODE_UGE;
626 case TGSI_OPCODE_USHR:
627 return VGPU10_OPCODE_USHR;
628 case TGSI_OPCODE_USLT:
629 return VGPU10_OPCODE_ULT;
630 case TGSI_OPCODE_USNE:
631 return VGPU10_OPCODE_INE;
632 case TGSI_OPCODE_SWITCH:
633 return VGPU10_OPCODE_SWITCH;
634 case TGSI_OPCODE_CASE:
635 return VGPU10_OPCODE_CASE;
636 case TGSI_OPCODE_DEFAULT:
637 return VGPU10_OPCODE_DEFAULT;
638 case TGSI_OPCODE_ENDSWITCH:
639 return VGPU10_OPCODE_ENDSWITCH;
640 case TGSI_OPCODE_FSLT:
641 return VGPU10_OPCODE_LT;
642 case TGSI_OPCODE_ROUND:
643 return VGPU10_OPCODE_ROUND_NE;
644 default:
645 assert(!"Unexpected TGSI opcode in translate_opcode()");
646 return VGPU10_OPCODE_NOP;
647 }
648 }
649
650
651 /**
652 * Translate a TGSI register file type into a VGPU10 operand type.
653 * \param array is the TGSI_FILE_TEMPORARY register an array?
654 */
655 static VGPU10_OPERAND_TYPE
656 translate_register_file(enum tgsi_file_type file, boolean array)
657 {
658 switch (file) {
659 case TGSI_FILE_CONSTANT:
660 return VGPU10_OPERAND_TYPE_CONSTANT_BUFFER;
661 case TGSI_FILE_INPUT:
662 return VGPU10_OPERAND_TYPE_INPUT;
663 case TGSI_FILE_OUTPUT:
664 return VGPU10_OPERAND_TYPE_OUTPUT;
665 case TGSI_FILE_TEMPORARY:
666 return array ? VGPU10_OPERAND_TYPE_INDEXABLE_TEMP
667 : VGPU10_OPERAND_TYPE_TEMP;
668 case TGSI_FILE_IMMEDIATE:
669       /* All immediates are 32-bit values at this time, so
670        * VGPU10_OPERAND_TYPE_IMMEDIATE64 is not possible.
671 */
672 return VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER;
673 case TGSI_FILE_SAMPLER:
674 return VGPU10_OPERAND_TYPE_SAMPLER;
675 case TGSI_FILE_SYSTEM_VALUE:
676 return VGPU10_OPERAND_TYPE_INPUT;
677
678 /* XXX TODO more cases to finish */
679
680 default:
681 assert(!"Bad tgsi register file!");
682 return VGPU10_OPERAND_TYPE_NULL;
683 }
684 }
685
686
687 /**
688 * Emit a null dst register
689 */
690 static void
691 emit_null_dst_register(struct svga_shader_emitter_v10 *emit)
692 {
693 VGPU10OperandToken0 operand;
694
695 operand.value = 0;
696 operand.operandType = VGPU10_OPERAND_TYPE_NULL;
697 operand.numComponents = VGPU10_OPERAND_0_COMPONENT;
698
699 emit_dword(emit, operand.value);
700 }
701
702
703 /**
704 * If the given register is a temporary, return the array ID.
705 * Else return zero.
706 */
707 static unsigned
708 get_temp_array_id(const struct svga_shader_emitter_v10 *emit,
709 enum tgsi_file_type file, unsigned index)
710 {
711 if (file == TGSI_FILE_TEMPORARY) {
712 return emit->temp_map[index].arrayId;
713 }
714 else {
715 return 0;
716 }
717 }
718
719
720 /**
721 * If the given register is a temporary, convert the index from a TGSI
722 * TEMPORARY index to a VGPU10 temp index.
723 */
724 static unsigned
725 remap_temp_index(const struct svga_shader_emitter_v10 *emit,
726 enum tgsi_file_type file, unsigned index)
727 {
728 if (file == TGSI_FILE_TEMPORARY) {
729 return emit->temp_map[index].index;
730 }
731 else {
732 return index;
733 }
734 }
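
/**
 * Example (hypothetical mapping): if the TGSI shader declares TEMP[0..9]
 * and TEMP[4..7] were grouped into temp array 1, the temp_map[] entries
 * might look like:
 *
 *   temp_map[3] = { .arrayId = 0, .index = 3 };  // ordinary temp r3
 *   temp_map[5] = { .arrayId = 1, .index = 1 };  // element 1 of array 1
 *
 * so get_temp_array_id(emit, TGSI_FILE_TEMPORARY, 5) returns 1 and
 * remap_temp_index(emit, TGSI_FILE_TEMPORARY, 5) returns 1.
 */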
735
736
737 /**
738 * Setup the operand0 fields related to indexing (1D, 2D, relative, etc).
739 * Note: the operandType field must already be initialized.
740 */
741 static VGPU10OperandToken0
742 setup_operand0_indexing(struct svga_shader_emitter_v10 *emit,
743 VGPU10OperandToken0 operand0,
744 enum tgsi_file_type file,
745 boolean indirect, boolean index2D,
746 unsigned tempArrayID)
747 {
748 unsigned indexDim, index0Rep, index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
749
750 /*
751 * Compute index dimensions
752 */
753 if (operand0.operandType == VGPU10_OPERAND_TYPE_IMMEDIATE32 ||
754 operand0.operandType == VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID) {
755 /* there's no swizzle for in-line immediates */
756 indexDim = VGPU10_OPERAND_INDEX_0D;
757 assert(operand0.selectionMode == 0);
758 }
759 else {
760 if (index2D ||
761 tempArrayID > 0 ||
762 operand0.operandType == VGPU10_OPERAND_TYPE_CONSTANT_BUFFER) {
763 indexDim = VGPU10_OPERAND_INDEX_2D;
764 }
765 else {
766 indexDim = VGPU10_OPERAND_INDEX_1D;
767 }
768 }
769
770 /*
771 * Compute index representations (immediate, relative, etc).
772 */
773 if (tempArrayID > 0) {
774 assert(file == TGSI_FILE_TEMPORARY);
775 /* First index is the array ID, second index is the array element */
776 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
777 if (indirect) {
778 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
779 }
780 else {
781 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
782 }
783 }
784 else if (indirect) {
785 if (file == TGSI_FILE_CONSTANT) {
786 /* index[0] indicates which constant buffer while index[1] indicates
787 * the position in the constant buffer.
788 */
789 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
790 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
791 }
792 else {
793 /* All other register files are 1-dimensional */
794 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
795 }
796 }
797 else {
798 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
799 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
800 }
801
802 operand0.indexDimension = indexDim;
803 operand0.index0Representation = index0Rep;
804 operand0.index1Representation = index1Rep;
805
806 return operand0;
807 }
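
/**
 * Example (illustrative): for a constant-buffer source such as CONST[1][10]
 * the operand type is VGPU10_OPERAND_TYPE_CONSTANT_BUFFER, so this function
 * produces:
 *
 *   operand0.indexDimension       = VGPU10_OPERAND_INDEX_2D;
 *   operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
 *   operand0.index1Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
 *
 * and the caller then emits the buffer slot (1) and the element (10) as two
 * index dwords.  If the element index were ADDR-relative, the second
 * representation would be VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE
 * instead.
 */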
808
809
810 /**
811 * Emit the operand for expressing an address register for indirect indexing.
812 * Note that the address register is really just a temp register.
813 * \param addr_reg_index which address register to use
814 */
815 static void
816 emit_indirect_register(struct svga_shader_emitter_v10 *emit,
817 unsigned addr_reg_index)
818 {
819 unsigned tmp_reg_index;
820 VGPU10OperandToken0 operand0;
821
822 assert(addr_reg_index < MAX_VGPU10_ADDR_REGS);
823
824 tmp_reg_index = emit->address_reg_index[addr_reg_index];
825
826 /* operand0 is a simple temporary register, selecting one component */
827 operand0.value = 0;
828 operand0.operandType = VGPU10_OPERAND_TYPE_TEMP;
829 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
830 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
831 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
832 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
833 operand0.swizzleX = 0;
834 operand0.swizzleY = 1;
835 operand0.swizzleZ = 2;
836 operand0.swizzleW = 3;
837
838 emit_dword(emit, operand0.value);
839 emit_dword(emit, remap_temp_index(emit, TGSI_FILE_TEMPORARY, tmp_reg_index));
840 }
841
842
843 /**
844 * Translate the dst register of a TGSI instruction and emit VGPU10 tokens.
845 * \param emit the emitter context
846 * \param reg the TGSI dst register to translate
847 */
848 static void
849 emit_dst_register(struct svga_shader_emitter_v10 *emit,
850 const struct tgsi_full_dst_register *reg)
851 {
852 enum tgsi_file_type file = reg->Register.File;
853 unsigned index = reg->Register.Index;
854 const enum tgsi_semantic sem_name = emit->info.output_semantic_name[index];
855 const unsigned sem_index = emit->info.output_semantic_index[index];
856 unsigned writemask = reg->Register.WriteMask;
857 const unsigned indirect = reg->Register.Indirect;
858 const unsigned tempArrayId = get_temp_array_id(emit, file, index);
859 const unsigned index2d = reg->Register.Dimension;
860 VGPU10OperandToken0 operand0;
861
862 if (file == TGSI_FILE_OUTPUT) {
863 if (emit->unit == PIPE_SHADER_VERTEX ||
864 emit->unit == PIPE_SHADER_GEOMETRY) {
865 if (index == emit->vposition.out_index &&
866 emit->vposition.tmp_index != INVALID_INDEX) {
867 /* replace OUTPUT[POS] with TEMP[POS]. We need to store the
868 * vertex position result in a temporary so that we can modify
869 * it in the post_helper() code.
870 */
871 file = TGSI_FILE_TEMPORARY;
872 index = emit->vposition.tmp_index;
873 }
874 else if (sem_name == TGSI_SEMANTIC_CLIPDIST &&
875 emit->clip_dist_tmp_index != INVALID_INDEX) {
876 /* replace OUTPUT[CLIPDIST] with TEMP[CLIPDIST].
877 * We store the clip distance in a temporary first, then
878 * we'll copy it to the shadow copy and to CLIPDIST with the
879 * enabled planes mask in emit_clip_distance_instructions().
880 */
881 file = TGSI_FILE_TEMPORARY;
882 index = emit->clip_dist_tmp_index + sem_index;
883 }
884 else if (sem_name == TGSI_SEMANTIC_CLIPVERTEX &&
885 emit->clip_vertex_tmp_index != INVALID_INDEX) {
886 /* replace the CLIPVERTEX output register with a temporary */
887 assert(emit->clip_mode == CLIP_VERTEX);
888 assert(sem_index == 0);
889 file = TGSI_FILE_TEMPORARY;
890 index = emit->clip_vertex_tmp_index;
891 }
892 }
893 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
894 if (sem_name == TGSI_SEMANTIC_POSITION) {
895 /* Fragment depth output register */
896 operand0.value = 0;
897 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_DEPTH;
898 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
899 operand0.numComponents = VGPU10_OPERAND_1_COMPONENT;
900 emit_dword(emit, operand0.value);
901 return;
902 }
903 else if (index == emit->fs.color_out_index[0] &&
904 emit->fs.color_tmp_index != INVALID_INDEX) {
905 /* replace OUTPUT[COLOR] with TEMP[COLOR]. We need to store the
906              * fragment color result in a temporary so that we can read
907              * it in the post_helper() code.
908 */
909 file = TGSI_FILE_TEMPORARY;
910 index = emit->fs.color_tmp_index;
911 }
912 else {
913 /* Typically, for fragment shaders, the output register index
914 * matches the color semantic index. But not when we write to
915 * the fragment depth register. In that case, OUT[0] will be
916 * fragdepth and OUT[1] will be the 0th color output. We need
917 * to use the semantic index for color outputs.
918 */
919 assert(sem_name == TGSI_SEMANTIC_COLOR);
920 index = emit->info.output_semantic_index[index];
921
922 emit->num_output_writes++;
923 }
924 }
925 }
926
927 /* init operand tokens to all zero */
928 operand0.value = 0;
929
930 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
931
932 /* the operand has a writemask */
933 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
934
935 /* Which of the four dest components to write to. Note that we can use a
936 * simple assignment here since TGSI writemasks match VGPU10 writemasks.
937 */
938 STATIC_ASSERT(TGSI_WRITEMASK_X == VGPU10_OPERAND_4_COMPONENT_MASK_X);
939 operand0.mask = writemask;
940
941 /* translate TGSI register file type to VGPU10 operand type */
942 operand0.operandType = translate_register_file(file, tempArrayId > 0);
943
944 check_register_index(emit, operand0.operandType, index);
945
946 operand0 = setup_operand0_indexing(emit, operand0, file, indirect,
947 index2d, tempArrayId);
948
949 /* Emit tokens */
950 emit_dword(emit, operand0.value);
951 if (tempArrayId > 0) {
952 emit_dword(emit, tempArrayId);
953 }
954
955 emit_dword(emit, remap_temp_index(emit, file, index));
956
957 if (indirect) {
958 emit_indirect_register(emit, reg->Indirect.Index);
959 }
960 }
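
/**
 * Example (illustrative): a simple destination such as TEMP[3].xy (not part
 * of a temp array, no indirect addressing, and assuming temp_map leaves the
 * index unchanged) is encoded as two dwords:
 *
 *   operand0.numComponents  = VGPU10_OPERAND_4_COMPONENT;
 *   operand0.selectionMode  = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
 *   operand0.mask           = TGSI_WRITEMASK_XY;   // same bit layout
 *   operand0.operandType    = VGPU10_OPERAND_TYPE_TEMP;
 *   operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
 *
 *   emit_dword(emit, operand0.value);   // operand token 0
 *   emit_dword(emit, 3);                // register index
 */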
961
962
963 /**
964 * Translate a src register of a TGSI instruction and emit VGPU10 tokens.
965 */
966 static void
967 emit_src_register(struct svga_shader_emitter_v10 *emit,
968 const struct tgsi_full_src_register *reg)
969 {
970 enum tgsi_file_type file = reg->Register.File;
971 unsigned index = reg->Register.Index;
972 const unsigned indirect = reg->Register.Indirect;
973 const unsigned tempArrayId = get_temp_array_id(emit, file, index);
974 const unsigned index2d = reg->Register.Dimension;
975 const unsigned swizzleX = reg->Register.SwizzleX;
976 const unsigned swizzleY = reg->Register.SwizzleY;
977 const unsigned swizzleZ = reg->Register.SwizzleZ;
978 const unsigned swizzleW = reg->Register.SwizzleW;
979 const unsigned absolute = reg->Register.Absolute;
980 const unsigned negate = reg->Register.Negate;
981 bool is_prim_id = FALSE;
982
983 VGPU10OperandToken0 operand0;
984 VGPU10OperandToken1 operand1;
985
986 if (emit->unit == PIPE_SHADER_FRAGMENT &&
987 file == TGSI_FILE_INPUT) {
988 if (index == emit->fs.face_input_index) {
989 /* Replace INPUT[FACE] with TEMP[FACE] */
990 file = TGSI_FILE_TEMPORARY;
991 index = emit->fs.face_tmp_index;
992 }
993 else if (index == emit->fs.fragcoord_input_index) {
994 /* Replace INPUT[POSITION] with TEMP[POSITION] */
995 file = TGSI_FILE_TEMPORARY;
996 index = emit->fs.fragcoord_tmp_index;
997 }
998 else {
999          /* We remap fragment shader inputs so that FS input indexes
1000 * match up with VS/GS output indexes.
1001 */
1002 index = emit->linkage.input_map[index];
1003 }
1004 }
1005 else if (emit->unit == PIPE_SHADER_GEOMETRY &&
1006 file == TGSI_FILE_INPUT) {
1007 is_prim_id = (index == emit->gs.prim_id_index);
1008 index = emit->linkage.input_map[index];
1009 }
1010 else if (emit->unit == PIPE_SHADER_VERTEX) {
1011 if (file == TGSI_FILE_INPUT) {
1012 /* if input is adjusted... */
1013 if ((emit->key.vs.adjust_attrib_w_1 |
1014 emit->key.vs.adjust_attrib_itof |
1015 emit->key.vs.adjust_attrib_utof |
1016 emit->key.vs.attrib_is_bgra |
1017 emit->key.vs.attrib_puint_to_snorm |
1018 emit->key.vs.attrib_puint_to_uscaled |
1019 emit->key.vs.attrib_puint_to_sscaled) & (1 << index)) {
1020 file = TGSI_FILE_TEMPORARY;
1021 index = emit->vs.adjusted_input[index];
1022 }
1023 }
1024 else if (file == TGSI_FILE_SYSTEM_VALUE) {
1025 assert(index < ARRAY_SIZE(emit->system_value_indexes));
1026 index = emit->system_value_indexes[index];
1027 }
1028 }
1029
1030 operand0.value = operand1.value = 0;
1031
1032 if (is_prim_id) {
1033 /* NOTE: we should be using VGPU10_OPERAND_1_COMPONENT here, but
1034 * our virtual GPU accepts this as-is.
1035 */
1036 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
1037 operand0.operandType = VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID;
1038 }
1039 else {
1040 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1041 operand0.operandType = translate_register_file(file, tempArrayId > 0);
1042 }
1043
1044 operand0 = setup_operand0_indexing(emit, operand0, file, indirect,
1045 index2d, tempArrayId);
1046
1047 if (operand0.operandType != VGPU10_OPERAND_TYPE_IMMEDIATE32 &&
1048 operand0.operandType != VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID) {
1049 /* there's no swizzle for in-line immediates */
1050 if (swizzleX == swizzleY &&
1051 swizzleX == swizzleZ &&
1052 swizzleX == swizzleW) {
1053 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
1054 }
1055 else {
1056 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
1057 }
1058
1059 operand0.swizzleX = swizzleX;
1060 operand0.swizzleY = swizzleY;
1061 operand0.swizzleZ = swizzleZ;
1062 operand0.swizzleW = swizzleW;
1063
1064 if (absolute || negate) {
1065 operand0.extended = 1;
1066 operand1.extendedOperandType = VGPU10_EXTENDED_OPERAND_MODIFIER;
1067 if (absolute && !negate)
1068 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_ABS;
1069 if (!absolute && negate)
1070 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_NEG;
1071 if (absolute && negate)
1072 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_ABSNEG;
1073 }
1074 }
1075
1076 /* Emit the operand tokens */
1077 emit_dword(emit, operand0.value);
1078 if (operand0.extended)
1079 emit_dword(emit, operand1.value);
1080
1081 if (operand0.operandType == VGPU10_OPERAND_TYPE_IMMEDIATE32) {
1082 /* Emit the four float/int in-line immediate values */
1083 unsigned *c;
1084 assert(index < ARRAY_SIZE(emit->immediates));
1085 assert(file == TGSI_FILE_IMMEDIATE);
1086 assert(swizzleX < 4);
1087 assert(swizzleY < 4);
1088 assert(swizzleZ < 4);
1089 assert(swizzleW < 4);
1090 c = (unsigned *) emit->immediates[index];
1091 emit_dword(emit, c[swizzleX]);
1092 emit_dword(emit, c[swizzleY]);
1093 emit_dword(emit, c[swizzleZ]);
1094 emit_dword(emit, c[swizzleW]);
1095 }
1096 else if (operand0.indexDimension >= VGPU10_OPERAND_INDEX_1D) {
1097 /* Emit the register index(es) */
1098 if (index2d ||
1099 operand0.operandType == VGPU10_OPERAND_TYPE_CONSTANT_BUFFER) {
1100 emit_dword(emit, reg->Dimension.Index);
1101 }
1102
1103 if (tempArrayId > 0) {
1104 emit_dword(emit, tempArrayId);
1105 }
1106
1107 emit_dword(emit, remap_temp_index(emit, file, index));
1108
1109 if (indirect) {
1110 emit_indirect_register(emit, reg->Indirect.Index);
1111 }
1112 }
1113 }
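
/**
 * Example (illustrative): a fully-replicated source such as TEMP[2].yyyy
 * takes the "same swizzle" path in emit_src_register():
 *
 *   operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
 *   operand0.swizzleX = operand0.swizzleY =
 *   operand0.swizzleZ = operand0.swizzleW = TGSI_SWIZZLE_Y;
 *
 * while a mixed swizzle such as TEMP[2].xxyz uses
 * VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE with the four swizzle fields set
 * individually.  For a plain 1-D temp with no negate/absolute modifier the
 * operand is then just two dwords: operand0.value followed by the
 * (remapped) register index.
 */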
1114
1115
1116 /**
1117 * Emit a resource operand (for use with a SAMPLE instruction).
1118 */
1119 static void
1120 emit_resource_register(struct svga_shader_emitter_v10 *emit,
1121 unsigned resource_number)
1122 {
1123 VGPU10OperandToken0 operand0;
1124
1125 check_register_index(emit, VGPU10_OPERAND_TYPE_RESOURCE, resource_number);
1126
1127 /* init */
1128 operand0.value = 0;
1129
1130 operand0.operandType = VGPU10_OPERAND_TYPE_RESOURCE;
1131 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1132 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1133 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
1134 operand0.swizzleX = VGPU10_COMPONENT_X;
1135 operand0.swizzleY = VGPU10_COMPONENT_Y;
1136 operand0.swizzleZ = VGPU10_COMPONENT_Z;
1137 operand0.swizzleW = VGPU10_COMPONENT_W;
1138
1139 emit_dword(emit, operand0.value);
1140 emit_dword(emit, resource_number);
1141 }
1142
1143
1144 /**
1145 * Emit a sampler operand (for use with a SAMPLE instruction).
1146 */
1147 static void
1148 emit_sampler_register(struct svga_shader_emitter_v10 *emit,
1149 unsigned sampler_number)
1150 {
1151 VGPU10OperandToken0 operand0;
1152
1153 check_register_index(emit, VGPU10_OPERAND_TYPE_SAMPLER, sampler_number);
1154
1155 /* init */
1156 operand0.value = 0;
1157
1158 operand0.operandType = VGPU10_OPERAND_TYPE_SAMPLER;
1159 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1160
1161 emit_dword(emit, operand0.value);
1162 emit_dword(emit, sampler_number);
1163 }
1164
1165
1166 /**
1167 * Emit an operand which reads the IS_FRONT_FACING register.
1168 */
1169 static void
1170 emit_face_register(struct svga_shader_emitter_v10 *emit)
1171 {
1172 VGPU10OperandToken0 operand0;
1173 unsigned index = emit->linkage.input_map[emit->fs.face_input_index];
1174
1175 /* init */
1176 operand0.value = 0;
1177
1178 operand0.operandType = VGPU10_OPERAND_TYPE_INPUT;
1179 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1180 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
1181 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1182
1183 operand0.swizzleX = VGPU10_COMPONENT_X;
1184 operand0.swizzleY = VGPU10_COMPONENT_X;
1185 operand0.swizzleZ = VGPU10_COMPONENT_X;
1186 operand0.swizzleW = VGPU10_COMPONENT_X;
1187
1188 emit_dword(emit, operand0.value);
1189 emit_dword(emit, index);
1190 }
1191
1192
1193 /**
1194 * Emit the token for a VGPU10 opcode.
1195 * \param saturate clamp result to [0,1]?
1196 */
1197 static void
1198 emit_opcode(struct svga_shader_emitter_v10 *emit,
1199 unsigned vgpu10_opcode, boolean saturate)
1200 {
1201 VGPU10OpcodeToken0 token0;
1202
1203 token0.value = 0; /* init all fields to zero */
1204 token0.opcodeType = vgpu10_opcode;
1205 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1206 token0.saturate = saturate;
1207
1208 emit_dword(emit, token0.value);
1209 }
1210
1211
1212 /**
1213 * Emit the token for a VGPU10 resinfo instruction.
1214 * \param modifier return type modifier, _uint or _rcpFloat.
1215 * TODO: We may want to remove this parameter if it will
1216 * only ever be used as _uint.
1217 */
1218 static void
1219 emit_opcode_resinfo(struct svga_shader_emitter_v10 *emit,
1220 VGPU10_RESINFO_RETURN_TYPE modifier)
1221 {
1222 VGPU10OpcodeToken0 token0;
1223
1224 token0.value = 0; /* init all fields to zero */
1225 token0.opcodeType = VGPU10_OPCODE_RESINFO;
1226 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1227 token0.resinfoReturnType = modifier;
1228
1229 emit_dword(emit, token0.value);
1230 }
1231
1232
1233 /**
1234 * Emit opcode tokens for a texture sample instruction. Texture instructions
1235 * can be rather complicated (texel offsets, etc) so we have this specialized
1236 * function.
1237 */
1238 static void
1239 emit_sample_opcode(struct svga_shader_emitter_v10 *emit,
1240 unsigned vgpu10_opcode, boolean saturate,
1241 const int offsets[3])
1242 {
1243 VGPU10OpcodeToken0 token0;
1244 VGPU10OpcodeToken1 token1;
1245
1246 token0.value = 0; /* init all fields to zero */
1247 token0.opcodeType = vgpu10_opcode;
1248 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1249 token0.saturate = saturate;
1250
1251 if (offsets[0] || offsets[1] || offsets[2]) {
1252 assert(offsets[0] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1253 assert(offsets[1] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1254 assert(offsets[2] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1255 assert(offsets[0] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1256 assert(offsets[1] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1257 assert(offsets[2] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1258
1259 token0.extended = 1;
1260 token1.value = 0;
1261 token1.opcodeType = VGPU10_EXTENDED_OPCODE_SAMPLE_CONTROLS;
1262 token1.offsetU = offsets[0];
1263 token1.offsetV = offsets[1];
1264 token1.offsetW = offsets[2];
1265 }
1266
1267 emit_dword(emit, token0.value);
1268 if (token0.extended) {
1269 emit_dword(emit, token1.value);
1270 }
1271 }
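
/**
 * Example (illustrative): a sample/fetch with texel offsets (1, -2, 0)
 * produces an extended opcode token pair:
 *
 *   token0.extended   = 1;
 *   token1.opcodeType = VGPU10_EXTENDED_OPCODE_SAMPLE_CONTROLS;
 *   token1.offsetU    = 1;
 *   token1.offsetV    = -2;
 *   token1.offsetW    = 0;
 *
 * The offsets must lie within [VGPU10_MIN_TEXEL_FETCH_OFFSET,
 * VGPU10_MAX_TEXEL_FETCH_OFFSET]; both token0.value and token1.value are
 * emitted.
 */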
1272
1273
1274 /**
1275 * Emit a DISCARD opcode token.
1276 * If nonzero is set, we'll discard the fragment if the X component is not 0.
1277 * Otherwise, we'll discard the fragment if the X component is 0.
1278 */
1279 static void
1280 emit_discard_opcode(struct svga_shader_emitter_v10 *emit, boolean nonzero)
1281 {
1282 VGPU10OpcodeToken0 opcode0;
1283
1284 opcode0.value = 0;
1285 opcode0.opcodeType = VGPU10_OPCODE_DISCARD;
1286 if (nonzero)
1287 opcode0.testBoolean = VGPU10_INSTRUCTION_TEST_NONZERO;
1288
1289 emit_dword(emit, opcode0.value);
1290 }
1291
1292
1293 /**
1294 * We need to call this before we begin emitting a VGPU10 instruction.
1295 */
1296 static void
1297 begin_emit_instruction(struct svga_shader_emitter_v10 *emit)
1298 {
1299 assert(emit->inst_start_token == 0);
1300 /* Save location of the instruction's VGPU10OpcodeToken0 token.
1301 * Note, we can't save a pointer because it would become invalid if
1302 * we have to realloc the output buffer.
1303 */
1304 emit->inst_start_token = emit_get_num_tokens(emit);
1305 }
1306
1307
1308 /**
1309 * We need to call this after we emit the last token of a VGPU10 instruction.
1310 * This function patches in the opcode token's instructionLength field.
1311 */
1312 static void
1313 end_emit_instruction(struct svga_shader_emitter_v10 *emit)
1314 {
1315 VGPU10OpcodeToken0 *tokens = (VGPU10OpcodeToken0 *) emit->buf;
1316 unsigned inst_length;
1317
1318 assert(emit->inst_start_token > 0);
1319
1320 if (emit->discard_instruction) {
1321 /* Back up the emit->ptr to where this instruction started so
1322 * that we discard the current instruction.
1323 */
1324 emit->ptr = (char *) (tokens + emit->inst_start_token);
1325 }
1326 else {
1327 /* Compute instruction length and patch that into the start of
1328 * the instruction.
1329 */
1330 inst_length = emit_get_num_tokens(emit) - emit->inst_start_token;
1331
1332 assert(inst_length > 0);
1333
1334 tokens[emit->inst_start_token].instructionLength = inst_length;
1335 }
1336
1337 emit->inst_start_token = 0; /* reset to zero for error checking */
1338 emit->discard_instruction = FALSE;
1339 }
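
/**
 * Typical usage of the emit helpers when writing one complete VGPU10
 * instruction (a sketch of the pattern used by the translation code below,
 * not a verbatim excerpt):
 *
 *   begin_emit_instruction(emit);
 *   emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);   // saturate = FALSE
 *   emit_dst_register(emit, &dst);
 *   emit_src_register(emit, &src);
 *   end_emit_instruction(emit);   // patches token0.instructionLength
 */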
1340
1341
1342 /**
1343 * Return index for a free temporary register.
1344 */
1345 static unsigned
1346 get_temp_index(struct svga_shader_emitter_v10 *emit)
1347 {
1348 assert(emit->internal_temp_count < MAX_INTERNAL_TEMPS);
1349 return emit->num_shader_temps + emit->internal_temp_count++;
1350 }
1351
1352
1353 /**
1354 * Release the temporaries which were generated by get_temp_index().
1355 */
1356 static void
1357 free_temp_indexes(struct svga_shader_emitter_v10 *emit)
1358 {
1359 emit->internal_temp_count = 0;
1360 }
1361
1362
1363 /**
1364 * Create a tgsi_full_src_register.
1365 */
1366 static struct tgsi_full_src_register
1367 make_src_reg(enum tgsi_file_type file, unsigned index)
1368 {
1369 struct tgsi_full_src_register reg;
1370
1371 memset(&reg, 0, sizeof(reg));
1372 reg.Register.File = file;
1373 reg.Register.Index = index;
1374 reg.Register.SwizzleX = TGSI_SWIZZLE_X;
1375 reg.Register.SwizzleY = TGSI_SWIZZLE_Y;
1376 reg.Register.SwizzleZ = TGSI_SWIZZLE_Z;
1377 reg.Register.SwizzleW = TGSI_SWIZZLE_W;
1378 return reg;
1379 }
1380
1381
1382 /**
1383 * Create a tgsi_full_src_register for a temporary.
1384 */
1385 static struct tgsi_full_src_register
1386 make_src_temp_reg(unsigned index)
1387 {
1388 return make_src_reg(TGSI_FILE_TEMPORARY, index);
1389 }
1390
1391
1392 /**
1393 * Create a tgsi_full_src_register for a constant.
1394 */
1395 static struct tgsi_full_src_register
1396 make_src_const_reg(unsigned index)
1397 {
1398 return make_src_reg(TGSI_FILE_CONSTANT, index);
1399 }
1400
1401
1402 /**
1403 * Create a tgsi_full_src_register for an immediate constant.
1404 */
1405 static struct tgsi_full_src_register
1406 make_src_immediate_reg(unsigned index)
1407 {
1408 return make_src_reg(TGSI_FILE_IMMEDIATE, index);
1409 }
1410
1411
1412 /**
1413 * Create a tgsi_full_dst_register.
1414 */
1415 static struct tgsi_full_dst_register
1416 make_dst_reg(enum tgsi_file_type file, unsigned index)
1417 {
1418 struct tgsi_full_dst_register reg;
1419
1420 memset(&reg, 0, sizeof(reg));
1421 reg.Register.File = file;
1422 reg.Register.Index = index;
1423 reg.Register.WriteMask = TGSI_WRITEMASK_XYZW;
1424 return reg;
1425 }
1426
1427
1428 /**
1429 * Create a tgsi_full_dst_register for a temporary.
1430 */
1431 static struct tgsi_full_dst_register
1432 make_dst_temp_reg(unsigned index)
1433 {
1434 return make_dst_reg(TGSI_FILE_TEMPORARY, index);
1435 }
1436
1437
1438 /**
1439 * Create a tgsi_full_dst_register for an output.
1440 */
1441 static struct tgsi_full_dst_register
1442 make_dst_output_reg(unsigned index)
1443 {
1444 return make_dst_reg(TGSI_FILE_OUTPUT, index);
1445 }
1446
1447
1448 /**
1449 * Create negated tgsi_full_src_register.
1450 */
1451 static struct tgsi_full_src_register
1452 negate_src(const struct tgsi_full_src_register *reg)
1453 {
1454 struct tgsi_full_src_register neg = *reg;
1455 neg.Register.Negate = !reg->Register.Negate;
1456 return neg;
1457 }
1458
1459 /**
1460 * Create absolute value of a tgsi_full_src_register.
1461 */
1462 static struct tgsi_full_src_register
1463 absolute_src(const struct tgsi_full_src_register *reg)
1464 {
1465 struct tgsi_full_src_register absolute = *reg;
1466 absolute.Register.Absolute = 1;
1467 return absolute;
1468 }
1469
1470
1471 /** Return the named swizzle term from the src register */
1472 static inline unsigned
1473 get_swizzle(const struct tgsi_full_src_register *reg, enum tgsi_swizzle term)
1474 {
1475 switch (term) {
1476 case TGSI_SWIZZLE_X:
1477 return reg->Register.SwizzleX;
1478 case TGSI_SWIZZLE_Y:
1479 return reg->Register.SwizzleY;
1480 case TGSI_SWIZZLE_Z:
1481 return reg->Register.SwizzleZ;
1482 case TGSI_SWIZZLE_W:
1483 return reg->Register.SwizzleW;
1484 default:
1485 assert(!"Bad swizzle");
1486 return TGSI_SWIZZLE_X;
1487 }
1488 }
1489
1490
1491 /**
1492 * Create swizzled tgsi_full_src_register.
1493 */
1494 static struct tgsi_full_src_register
1495 swizzle_src(const struct tgsi_full_src_register *reg,
1496 enum tgsi_swizzle swizzleX, enum tgsi_swizzle swizzleY,
1497 enum tgsi_swizzle swizzleZ, enum tgsi_swizzle swizzleW)
1498 {
1499 struct tgsi_full_src_register swizzled = *reg;
1500 /* Note: we swizzle the current swizzle */
1501 swizzled.Register.SwizzleX = get_swizzle(reg, swizzleX);
1502 swizzled.Register.SwizzleY = get_swizzle(reg, swizzleY);
1503 swizzled.Register.SwizzleZ = get_swizzle(reg, swizzleZ);
1504 swizzled.Register.SwizzleW = get_swizzle(reg, swizzleW);
1505 return swizzled;
1506 }
1507
1508
1509 /**
1510 * Create swizzled tgsi_full_src_register where all the swizzle
1511 * terms are the same.
1512 */
1513 static struct tgsi_full_src_register
1514 scalar_src(const struct tgsi_full_src_register *reg, enum tgsi_swizzle swizzle)
1515 {
1516 struct tgsi_full_src_register swizzled = *reg;
1517 /* Note: we swizzle the current swizzle */
1518 swizzled.Register.SwizzleX =
1519 swizzled.Register.SwizzleY =
1520 swizzled.Register.SwizzleZ =
1521 swizzled.Register.SwizzleW = get_swizzle(reg, swizzle);
1522 return swizzled;
1523 }
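
/**
 * Example (illustrative): swizzles compose with the register's current
 * swizzle.  If reg currently has the swizzle .wzyx, then
 *
 *   swizzle_src(&reg, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y,
 *               TGSI_SWIZZLE_X, TGSI_SWIZZLE_X)
 *
 * yields a register swizzled .zzww, and scalar_src(&reg, TGSI_SWIZZLE_W)
 * yields .xxxx.
 */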
1524
1525
1526 /**
1527 * Create new tgsi_full_dst_register with writemask.
1528 * \param mask bitmask of TGSI_WRITEMASK_[XYZW]
1529 */
1530 static struct tgsi_full_dst_register
1531 writemask_dst(const struct tgsi_full_dst_register *reg, unsigned mask)
1532 {
1533 struct tgsi_full_dst_register masked = *reg;
1534 masked.Register.WriteMask = mask;
1535 return masked;
1536 }
1537
1538
1539 /**
1540 * Check if the register's swizzle is XXXX, YYYY, ZZZZ, or WWWW.
1541 */
1542 static boolean
1543 same_swizzle_terms(const struct tgsi_full_src_register *reg)
1544 {
1545 return (reg->Register.SwizzleX == reg->Register.SwizzleY &&
1546 reg->Register.SwizzleY == reg->Register.SwizzleZ &&
1547 reg->Register.SwizzleZ == reg->Register.SwizzleW);
1548 }
1549
1550
1551 /**
1552 * Search the vector for the value 'x' and return its position.
1553 */
1554 static int
1555 find_imm_in_vec4(const union tgsi_immediate_data vec[4],
1556 union tgsi_immediate_data x)
1557 {
1558 unsigned i;
1559 for (i = 0; i < 4; i++) {
1560 if (vec[i].Int == x.Int)
1561 return i;
1562 }
1563 return -1;
1564 }
1565
1566
1567 /**
1568 * Helper used by make_immediate_reg(), make_immediate_reg_4().
1569 */
1570 static int
1571 find_immediate(struct svga_shader_emitter_v10 *emit,
1572 union tgsi_immediate_data x, unsigned startIndex)
1573 {
1574 const unsigned endIndex = emit->num_immediates;
1575 unsigned i;
1576
1577 assert(emit->immediates_emitted);
1578
1579 /* Search immediates for x, y, z, w */
1580 for (i = startIndex; i < endIndex; i++) {
1581 if (x.Int == emit->immediates[i][0].Int ||
1582 x.Int == emit->immediates[i][1].Int ||
1583 x.Int == emit->immediates[i][2].Int ||
1584 x.Int == emit->immediates[i][3].Int) {
1585 return i;
1586 }
1587 }
1588 /* Should never try to use an immediate value that wasn't pre-declared */
1589 assert(!"find_immediate() failed!");
1590 return -1;
1591 }
1592
1593
1594 /**
1595 * Return a tgsi_full_src_register for an immediate/literal
1596 * union tgsi_immediate_data[4] value.
1597 * Note: the values must have been previously declared/allocated in
1598 * emit_pre_helpers(). And, all of x,y,z,w must be located in the same
1599 * vec4 immediate.
1600 */
1601 static struct tgsi_full_src_register
1602 make_immediate_reg_4(struct svga_shader_emitter_v10 *emit,
1603 const union tgsi_immediate_data imm[4])
1604 {
1605 struct tgsi_full_src_register reg;
1606 unsigned i;
1607
1608 for (i = 0; i < emit->num_common_immediates; i++) {
1609 /* search for first component value */
1610 int immpos = find_immediate(emit, imm[0], i);
1611 int x, y, z, w;
1612
1613 assert(immpos >= 0);
1614
1615 /* find remaining components within the immediate vector */
1616 x = find_imm_in_vec4(emit->immediates[immpos], imm[0]);
1617 y = find_imm_in_vec4(emit->immediates[immpos], imm[1]);
1618 z = find_imm_in_vec4(emit->immediates[immpos], imm[2]);
1619 w = find_imm_in_vec4(emit->immediates[immpos], imm[3]);
1620
1621 if (x >=0 && y >= 0 && z >= 0 && w >= 0) {
1622 /* found them all */
1623 memset(&reg, 0, sizeof(reg));
1624 reg.Register.File = TGSI_FILE_IMMEDIATE;
1625 reg.Register.Index = immpos;
1626 reg.Register.SwizzleX = x;
1627 reg.Register.SwizzleY = y;
1628 reg.Register.SwizzleZ = z;
1629 reg.Register.SwizzleW = w;
1630 return reg;
1631 }
1632 /* else, keep searching */
1633 }
1634
1635 assert(!"Failed to find immediate register!");
1636
1637 /* Just return IMM[0].xxxx */
1638 memset(&reg, 0, sizeof(reg));
1639 reg.Register.File = TGSI_FILE_IMMEDIATE;
1640 return reg;
1641 }
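
/**
 * Example (hypothetical immediate pool): suppose the pre-declared common
 * immediates contain
 *
 *   immediates[0] = { 0.0f, 1.0f, 0.5f, -1.0f };
 *
 * Then requesting the vector {1.0, 1.0, 0.0, 0.5} via this function (or the
 * make_immediate_reg_float4() wrapper below) finds all four values in that
 * vec4 and returns a register equivalent to IMM[0].yyxz; no new immediate
 * is allocated at this point.
 */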
1642
1643
1644 /**
1645 * Return a tgsi_full_src_register for an immediate/literal
1646 * union tgsi_immediate_data value of the form {value, value, value, value}.
1647 * \sa make_immediate_reg_4() regarding allowed values.
1648 */
1649 static struct tgsi_full_src_register
1650 make_immediate_reg(struct svga_shader_emitter_v10 *emit,
1651 union tgsi_immediate_data value)
1652 {
1653 struct tgsi_full_src_register reg;
1654 int immpos = find_immediate(emit, value, 0);
1655
1656 assert(immpos >= 0);
1657
1658 memset(&reg, 0, sizeof(reg));
1659 reg.Register.File = TGSI_FILE_IMMEDIATE;
1660 reg.Register.Index = immpos;
1661 reg.Register.SwizzleX =
1662 reg.Register.SwizzleY =
1663 reg.Register.SwizzleZ =
1664 reg.Register.SwizzleW = find_imm_in_vec4(emit->immediates[immpos], value);
1665
1666 return reg;
1667 }
1668
1669
1670 /**
1671 * Return a tgsi_full_src_register for an immediate/literal float[4] value.
1672 * \sa make_immediate_reg_4() regarding allowed values.
1673 */
1674 static struct tgsi_full_src_register
1675 make_immediate_reg_float4(struct svga_shader_emitter_v10 *emit,
1676 float x, float y, float z, float w)
1677 {
1678 union tgsi_immediate_data imm[4];
1679 imm[0].Float = x;
1680 imm[1].Float = y;
1681 imm[2].Float = z;
1682 imm[3].Float = w;
1683 return make_immediate_reg_4(emit, imm);
1684 }
1685
1686
1687 /**
1688 * Return a tgsi_full_src_register for an immediate/literal float value
1689 * of the form {value, value, value, value}.
1690 * \sa make_immediate_reg_4() regarding allowed values.
1691 */
1692 static struct tgsi_full_src_register
1693 make_immediate_reg_float(struct svga_shader_emitter_v10 *emit, float value)
1694 {
1695 union tgsi_immediate_data imm;
1696 imm.Float = value;
1697 return make_immediate_reg(emit, imm);
1698 }
1699
1700
1701 /**
1702 * Return a tgsi_full_src_register for an immediate/literal int[4] vector.
1703 */
1704 static struct tgsi_full_src_register
1705 make_immediate_reg_int4(struct svga_shader_emitter_v10 *emit,
1706 int x, int y, int z, int w)
1707 {
1708 union tgsi_immediate_data imm[4];
1709 imm[0].Int = x;
1710 imm[1].Int = y;
1711 imm[2].Int = z;
1712 imm[3].Int = w;
1713 return make_immediate_reg_4(emit, imm);
1714 }
1715
1716
1717 /**
1718 * Return a tgsi_full_src_register for an immediate/literal int value
1719 * of the form {value, value, value, value}.
1720 * \sa make_immediate_reg_4() regarding allowed values.
1721 */
1722 static struct tgsi_full_src_register
1723 make_immediate_reg_int(struct svga_shader_emitter_v10 *emit, int value)
1724 {
1725 union tgsi_immediate_data imm;
1726 imm.Int = value;
1727 return make_immediate_reg(emit, imm);
1728 }
1729
1730
1731 /**
1732 * Allocate space for a union tgsi_immediate_data[4] immediate.
1733 * \return the index/position of the immediate.
1734 */
1735 static unsigned
1736 alloc_immediate_4(struct svga_shader_emitter_v10 *emit,
1737 const union tgsi_immediate_data imm[4])
1738 {
1739 unsigned n = emit->num_immediates++;
1740 assert(!emit->immediates_emitted);
1741 assert(n < ARRAY_SIZE(emit->immediates));
1742 emit->immediates[n][0] = imm[0];
1743 emit->immediates[n][1] = imm[1];
1744 emit->immediates[n][2] = imm[2];
1745 emit->immediates[n][3] = imm[3];
1746 return n;
1747 }
1748
1749
1750 /**
1751 * Allocate space for a float[4] immediate.
1752 * \return the index/position of the immediate.
1753 */
1754 static unsigned
1755 alloc_immediate_float4(struct svga_shader_emitter_v10 *emit,
1756 float x, float y, float z, float w)
1757 {
1758 union tgsi_immediate_data imm[4];
1759 imm[0].Float = x;
1760 imm[1].Float = y;
1761 imm[2].Float = z;
1762 imm[3].Float = w;
1763 return alloc_immediate_4(emit, imm);
1764 }
1765
1766
1767 /**
1768 * Allocate space for an int[4] immediate.
1769 * \return the index/position of the immediate.
1770 */
1771 static unsigned
1772 alloc_immediate_int4(struct svga_shader_emitter_v10 *emit,
1773 int x, int y, int z, int w)
1774 {
1775 union tgsi_immediate_data imm[4];
1776 imm[0].Int = x;
1777 imm[1].Int = y;
1778 imm[2].Int = z;
1779 imm[3].Int = w;
1780 return alloc_immediate_4(emit, imm);
1781 }
1782
1783
1784 /**
1785 * Allocate a shader input to store a system value.
1786 */
1787 static unsigned
1788 alloc_system_value_index(struct svga_shader_emitter_v10 *emit, unsigned index)
1789 {
1790 const unsigned n = emit->info.file_max[TGSI_FILE_INPUT] + 1 + index;
1791 assert(index < ARRAY_SIZE(emit->system_value_indexes));
1792 emit->system_value_indexes[index] = n;
1793 return n;
1794 }
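
/**
 * Example (illustrative): if the TGSI shader's highest-numbered regular
 * input is IN[3] (info.file_max[TGSI_FILE_INPUT] == 3), then system value
 * index 0 is assigned VGPU10 input register 4 and system value index 1
 * would get register 5.
 */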
1795
1796
1797 /**
1798 * Translate a TGSI immediate value (union tgsi_immediate_data[4]) to VGPU10.
1799 */
1800 static boolean
1801 emit_vgpu10_immediate(struct svga_shader_emitter_v10 *emit,
1802 const struct tgsi_full_immediate *imm)
1803 {
1804 /* We don't actually emit any code here. We just save the
1805 * immediate values and emit them later.
1806 */
1807 alloc_immediate_4(emit, imm->u);
1808 return TRUE;
1809 }
1810
1811
1812 /**
1813 * Emit a VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER block
1814 * containing all the immediate values previously allocated
1815 * with alloc_immediate_4().
1816 */
1817 static boolean
1818 emit_vgpu10_immediates_block(struct svga_shader_emitter_v10 *emit)
1819 {
1820 VGPU10OpcodeToken0 token;
1821
1822 assert(!emit->immediates_emitted);
1823
1824 token.value = 0;
1825 token.opcodeType = VGPU10_OPCODE_CUSTOMDATA;
1826 token.customDataClass = VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER;
1827
1828 /* Note: no begin/end_emit_instruction() calls */
1829 emit_dword(emit, token.value);
1830 emit_dword(emit, 2 + 4 * emit->num_immediates);
1831 emit_dwords(emit, (unsigned *) emit->immediates, 4 * emit->num_immediates);
1832
1833 emit->immediates_emitted = TRUE;
1834
1835 return TRUE;
1836 }
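
/**
 * Example (illustrative): with num_immediates == 3 the block is laid out as
 *
 *   dword 0     : CUSTOMDATA opcode token
 *                 (customDataClass = DCL_IMMEDIATE_CONSTANT_BUFFER)
 *   dword 1     : total block length in dwords = 2 + 4 * 3 = 14
 *   dwords 2..13: the three vec4 immediates, four dwords each
 */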
1837
1838
1839 /**
1840 * Translate a fragment shader's TGSI_INTERPOLATE_x mode to a vgpu10
1841 * interpolation mode.
1842 * \return a VGPU10_INTERPOLATION_x value
1843 */
1844 static unsigned
1845 translate_interpolation(const struct svga_shader_emitter_v10 *emit,
1846 enum tgsi_interpolate_mode interp,
1847 enum tgsi_interpolate_loc interpolate_loc)
1848 {
1849 if (interp == TGSI_INTERPOLATE_COLOR) {
1850 interp = emit->key.fs.flatshade ?
1851 TGSI_INTERPOLATE_CONSTANT : TGSI_INTERPOLATE_PERSPECTIVE;
1852 }
1853
1854 switch (interp) {
1855 case TGSI_INTERPOLATE_CONSTANT:
1856 return VGPU10_INTERPOLATION_CONSTANT;
1857 case TGSI_INTERPOLATE_LINEAR:
1858 return interpolate_loc == TGSI_INTERPOLATE_LOC_CENTROID ?
1859 VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID :
1860 VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE;
1861 case TGSI_INTERPOLATE_PERSPECTIVE:
1862 return interpolate_loc == TGSI_INTERPOLATE_LOC_CENTROID ?
1863 VGPU10_INTERPOLATION_LINEAR_CENTROID :
1864 VGPU10_INTERPOLATION_LINEAR;
1865 default:
1866 assert(!"Unexpected interpolation mode");
1867 return VGPU10_INTERPOLATION_CONSTANT;
1868 }
1869 }
1870
1871
1872 /**
1873 * Translate a TGSI property to VGPU10.
1874 * We don't emit any instructions here; we just gather the primitive
1875 * property info.  The output primitive topology might be changed later;
1876 * the final property instructions are emitted in the pre-helper code.
1877 */
1878 static boolean
1879 emit_vgpu10_property(struct svga_shader_emitter_v10 *emit,
1880 const struct tgsi_full_property *prop)
1881 {
1882 static const VGPU10_PRIMITIVE primType[] = {
1883 VGPU10_PRIMITIVE_POINT, /* PIPE_PRIM_POINTS */
1884 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINES */
1885 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINE_LOOP */
1886 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINE_STRIP */
1887 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLES */
1888 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLE_STRIP */
1889 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLE_FAN */
1890 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_QUADS */
1891 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_QUAD_STRIP */
1892 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_POLYGON */
1893 VGPU10_PRIMITIVE_LINE_ADJ, /* PIPE_PRIM_LINES_ADJACENCY */
1894 VGPU10_PRIMITIVE_LINE_ADJ, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
1895 VGPU10_PRIMITIVE_TRIANGLE_ADJ, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
1896 VGPU10_PRIMITIVE_TRIANGLE_ADJ /* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
1897 };
1898
1899 static const VGPU10_PRIMITIVE_TOPOLOGY primTopology[] = {
1900 VGPU10_PRIMITIVE_TOPOLOGY_POINTLIST, /* PIPE_PRIM_POINTS */
1901 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST, /* PIPE_PRIM_LINES */
1902 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST, /* PIPE_PRIM_LINE_LOOP */
1903 VGPU10_PRIMITIVE_TOPOLOGY_LINESTRIP, /* PIPE_PRIM_LINE_STRIP */
1904 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST, /* PIPE_PRIM_TRIANGLES */
1905 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, /* PIPE_PRIM_TRIANGLE_STRIP */
1906 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, /* PIPE_PRIM_TRIANGLE_FAN */
1907 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_QUADS */
1908 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_QUAD_STRIP */
1909 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_POLYGON */
1910 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ, /* PIPE_PRIM_LINES_ADJACENCY */
1911 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
1912 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
1913 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ /* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
1914 };
1915
1916 static const unsigned inputArraySize[] = {
1917 0, /* VGPU10_PRIMITIVE_UNDEFINED */
1918 1, /* VGPU10_PRIMITIVE_POINT */
1919 2, /* VGPU10_PRIMITIVE_LINE */
1920 3, /* VGPU10_PRIMITIVE_TRIANGLE */
1921 0,
1922 0,
1923 4, /* VGPU10_PRIMITIVE_LINE_ADJ */
1924 6 /* VGPU10_PRIMITIVE_TRIANGLE_ADJ */
1925 };
1926
1927 switch (prop->Property.PropertyName) {
1928 case TGSI_PROPERTY_GS_INPUT_PRIM:
1929 assert(prop->u[0].Data < ARRAY_SIZE(primType));
1930 emit->gs.prim_type = primType[prop->u[0].Data];
1931 assert(emit->gs.prim_type != VGPU10_PRIMITIVE_UNDEFINED);
1932 emit->gs.input_size = inputArraySize[emit->gs.prim_type];
1933 break;
1934
1935 case TGSI_PROPERTY_GS_OUTPUT_PRIM:
1936 assert(prop->u[0].Data < ARRAY_SIZE(primTopology));
1937 emit->gs.prim_topology = primTopology[prop->u[0].Data];
1938 assert(emit->gs.prim_topology != VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED);
1939 break;
1940
1941 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
1942 emit->gs.max_out_vertices = prop->u[0].Data;
1943 break;
1944
1945 default:
1946 break;
1947 }
1948
1949 return TRUE;
1950 }
1951
1952
1953 static void
1954 emit_property_instruction(struct svga_shader_emitter_v10 *emit,
1955 VGPU10OpcodeToken0 opcode0, unsigned nData,
1956 unsigned data)
1957 {
1958 begin_emit_instruction(emit);
1959 emit_dword(emit, opcode0.value);
1960 if (nData)
1961 emit_dword(emit, data);
1962 end_emit_instruction(emit);
1963 }
1964
1965
1966 /**
1967 * Emit property instructions
1968 */
1969 static void
1970 emit_property_instructions(struct svga_shader_emitter_v10 *emit)
1971 {
1972 VGPU10OpcodeToken0 opcode0;
1973
1974 assert(emit->unit == PIPE_SHADER_GEOMETRY);
1975
1976 /* emit input primitive type declaration */
1977 opcode0.value = 0;
1978 opcode0.opcodeType = VGPU10_OPCODE_DCL_GS_INPUT_PRIMITIVE;
1979 opcode0.primitive = emit->gs.prim_type;
1980 emit_property_instruction(emit, opcode0, 0, 0);
1981
1982 /* emit output primitive topology declaration */
1983 opcode0.value = 0;
1984 opcode0.opcodeType = VGPU10_OPCODE_DCL_GS_OUTPUT_PRIMITIVE_TOPOLOGY;
1985 opcode0.primitiveTopology = emit->gs.prim_topology;
1986 emit_property_instruction(emit, opcode0, 0, 0);
1987
1988 /* emit max output vertices */
1989 opcode0.value = 0;
1990 opcode0.opcodeType = VGPU10_OPCODE_DCL_MAX_OUTPUT_VERTEX_COUNT;
1991 emit_property_instruction(emit, opcode0, 1, emit->gs.max_out_vertices);
1992 }
1993
1994
1995 /**
1996 * Emit a vgpu10 declaration "instruction".
1997 * \param index the register index
1998 * \param size array size of the operand. In most cases, it is 1,
1999 * but for geometry shader inputs, the array size varies
2000 * depending on the primitive type.
2001 */
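/*
 * Token layout sketch (illustrative): a 1D-indexed declaration emits
 * opcode0, operand0, register index [, name token]; a 2D-indexed
 * declaration (e.g. a GS input) emits opcode0, operand0, array size
 * (vertices per primitive), register index [, name token].
 */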
2002 static void
2003 emit_decl_instruction(struct svga_shader_emitter_v10 *emit,
2004 VGPU10OpcodeToken0 opcode0,
2005 VGPU10OperandToken0 operand0,
2006 VGPU10NameToken name_token,
2007 unsigned index, unsigned size)
2008 {
2009 assert(opcode0.opcodeType);
2010 assert(operand0.mask);
2011
2012 begin_emit_instruction(emit);
2013 emit_dword(emit, opcode0.value);
2014
2015 emit_dword(emit, operand0.value);
2016
2017 if (operand0.indexDimension == VGPU10_OPERAND_INDEX_1D) {
2018 /* Next token is the index of the register to declare */
2019 emit_dword(emit, index);
2020 }
2021 else if (operand0.indexDimension >= VGPU10_OPERAND_INDEX_2D) {
2022 /* Next token is the size of the register */
2023 emit_dword(emit, size);
2024
2025 /* Followed by the index of the register */
2026 emit_dword(emit, index);
2027 }
2028
2029 if (name_token.value) {
2030 emit_dword(emit, name_token.value);
2031 }
2032
2033 end_emit_instruction(emit);
2034 }
2035
2036
2037 /**
2038 * Emit the declaration for a shader input.
2039 * \param opcodeType opcode type, one of VGPU10_OPCODE_DCL_INPUTx
2040 * \param operandType operand type, one of VGPU10_OPERAND_TYPE_INPUT_x
2041 * \param dim index dimension
2042 * \param index the input register index
2043 * \param size array size of the operand. In most cases, it is 1,
2044 * but for geometry shader inputs, the array size varies
2045 * depending on the primitive type.
2046 * \param name one of VGPU10_NAME_x
2047 * \param numComp number of components
2048 * \param selMode component selection mode
2049 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2050 * \param interpMode interpolation mode
2051 */
2052 static void
2053 emit_input_declaration(struct svga_shader_emitter_v10 *emit,
2054 unsigned opcodeType, unsigned operandType,
2055 unsigned dim, unsigned index, unsigned size,
2056 unsigned name, unsigned numComp,
2057 unsigned selMode, unsigned usageMask,
2058 unsigned interpMode)
2059 {
2060 VGPU10OpcodeToken0 opcode0;
2061 VGPU10OperandToken0 operand0;
2062 VGPU10NameToken name_token;
2063
2064 assert(usageMask <= VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2065 assert(opcodeType == VGPU10_OPCODE_DCL_INPUT ||
2066 opcodeType == VGPU10_OPCODE_DCL_INPUT_SIV ||
2067 opcodeType == VGPU10_OPCODE_DCL_INPUT_PS ||
2068 opcodeType == VGPU10_OPCODE_DCL_INPUT_PS_SGV);
2069 assert(operandType == VGPU10_OPERAND_TYPE_INPUT ||
2070 operandType == VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID);
2071 assert(numComp <= VGPU10_OPERAND_4_COMPONENT);
2072 assert(selMode <= VGPU10_OPERAND_4_COMPONENT_MASK_MODE);
2073 assert(dim <= VGPU10_OPERAND_INDEX_3D);
2074 assert(name == VGPU10_NAME_UNDEFINED ||
2075 name == VGPU10_NAME_POSITION ||
2076 name == VGPU10_NAME_INSTANCE_ID ||
2077 name == VGPU10_NAME_VERTEX_ID ||
2078 name == VGPU10_NAME_PRIMITIVE_ID ||
2079 name == VGPU10_NAME_IS_FRONT_FACE);
2080 assert(interpMode == VGPU10_INTERPOLATION_UNDEFINED ||
2081 interpMode == VGPU10_INTERPOLATION_CONSTANT ||
2082 interpMode == VGPU10_INTERPOLATION_LINEAR ||
2083 interpMode == VGPU10_INTERPOLATION_LINEAR_CENTROID ||
2084 interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE ||
2085 interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID);
2086
2087 check_register_index(emit, opcodeType, index);
2088
2089 opcode0.value = operand0.value = name_token.value = 0;
2090
2091 opcode0.opcodeType = opcodeType;
2092 opcode0.interpolationMode = interpMode;
2093
2094 operand0.operandType = operandType;
2095 operand0.numComponents = numComp;
2096 operand0.selectionMode = selMode;
2097 operand0.mask = usageMask;
2098 operand0.indexDimension = dim;
2099 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2100 if (dim == VGPU10_OPERAND_INDEX_2D)
2101 operand0.index1Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2102
2103 name_token.name = name;
2104
2105 emit_decl_instruction(emit, opcode0, operand0, name_token, index, size);
2106 }
2107
2108
2109 /**
2110 * Emit the declaration for a shader output.
2111 * \param type one of VGPU10_OPCODE_DCL_OUTPUTx
2112 * \param index the output register index
2113 * \param name one of VGPU10_NAME_x
2114 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2115 */
2116 static void
2117 emit_output_declaration(struct svga_shader_emitter_v10 *emit,
2118 unsigned type, unsigned index,
2119 unsigned name, unsigned usageMask)
2120 {
2121 VGPU10OpcodeToken0 opcode0;
2122 VGPU10OperandToken0 operand0;
2123 VGPU10NameToken name_token;
2124
2125 assert(usageMask <= VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2126 assert(type == VGPU10_OPCODE_DCL_OUTPUT ||
2127 type == VGPU10_OPCODE_DCL_OUTPUT_SGV ||
2128 type == VGPU10_OPCODE_DCL_OUTPUT_SIV);
2129 assert(name == VGPU10_NAME_UNDEFINED ||
2130 name == VGPU10_NAME_POSITION ||
2131 name == VGPU10_NAME_PRIMITIVE_ID ||
2132 name == VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX ||
2133 name == VGPU10_NAME_CLIP_DISTANCE);
2134
2135 check_register_index(emit, type, index);
2136
2137 opcode0.value = operand0.value = name_token.value = 0;
2138
2139 opcode0.opcodeType = type;
2140 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT;
2141 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
2142 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
2143 operand0.mask = usageMask;
2144 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
2145 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2146
2147 name_token.name = name;
2148
2149 emit_decl_instruction(emit, opcode0, operand0, name_token, index, 1);
2150 }
2151
2152
2153 /**
2154 * Emit the declaration for the fragment depth output.
2155 */
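/* In D3D10-style disassembly this corresponds roughly to "dcl_output oDepth". */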
2156 static void
2157 emit_fragdepth_output_declaration(struct svga_shader_emitter_v10 *emit)
2158 {
2159 VGPU10OpcodeToken0 opcode0;
2160 VGPU10OperandToken0 operand0;
2161 VGPU10NameToken name_token;
2162
2163 assert(emit->unit == PIPE_SHADER_FRAGMENT);
2164
2165 opcode0.value = operand0.value = name_token.value = 0;
2166
2167 opcode0.opcodeType = VGPU10_OPCODE_DCL_OUTPUT;
2168 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_DEPTH;
2169 operand0.numComponents = VGPU10_OPERAND_1_COMPONENT;
2170 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
2171 operand0.mask = VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
2172
2173 emit_decl_instruction(emit, opcode0, operand0, name_token, 0, 1);
2174 }
2175
2176
2177 /**
2178 * Emit the declaration for a system value input/output.
2179 */
2180 static void
2181 emit_system_value_declaration(struct svga_shader_emitter_v10 *emit,
2182 enum tgsi_semantic semantic_name, unsigned index)
2183 {
2184 switch (semantic_name) {
2185 case TGSI_SEMANTIC_INSTANCEID:
2186 index = alloc_system_value_index(emit, index);
2187 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_SIV,
2188 VGPU10_OPERAND_TYPE_INPUT,
2189 VGPU10_OPERAND_INDEX_1D,
2190 index, 1,
2191 VGPU10_NAME_INSTANCE_ID,
2192 VGPU10_OPERAND_4_COMPONENT,
2193 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2194 VGPU10_OPERAND_4_COMPONENT_MASK_X,
2195 VGPU10_INTERPOLATION_UNDEFINED);
2196 break;
2197 case TGSI_SEMANTIC_VERTEXID:
2198 index = alloc_system_value_index(emit, index);
2199 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_SIV,
2200 VGPU10_OPERAND_TYPE_INPUT,
2201 VGPU10_OPERAND_INDEX_1D,
2202 index, 1,
2203 VGPU10_NAME_VERTEX_ID,
2204 VGPU10_OPERAND_4_COMPONENT,
2205 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2206 VGPU10_OPERAND_4_COMPONENT_MASK_X,
2207 VGPU10_INTERPOLATION_UNDEFINED);
2208 break;
2209 default:
2210 ; /* XXX */
2211 }
2212 }
2213
2214 /**
2215 * Translate a TGSI declaration to VGPU10.
2216 */
2217 static boolean
2218 emit_vgpu10_declaration(struct svga_shader_emitter_v10 *emit,
2219 const struct tgsi_full_declaration *decl)
2220 {
2221 switch (decl->Declaration.File) {
2222 case TGSI_FILE_INPUT:
2223 /* do nothing - see emit_input_declarations() */
2224 return TRUE;
2225
2226 case TGSI_FILE_OUTPUT:
2227 assert(decl->Range.First == decl->Range.Last);
2228 emit->output_usage_mask[decl->Range.First] = decl->Declaration.UsageMask;
2229 return TRUE;
2230
2231 case TGSI_FILE_TEMPORARY:
2232 /* Don't declare the temps here. Just keep track of how many
2233 * and emit the declaration later.
2234 */
2235 if (decl->Declaration.Array) {
2236 /* Indexed temporary array. Save the start index of the array
2237 * and the size of the array.
2238 */
2239 const unsigned arrayID = MIN2(decl->Array.ArrayID, MAX_TEMP_ARRAYS);
2240 unsigned i;
2241
2242 assert(arrayID < ARRAY_SIZE(emit->temp_arrays));
2243
2244 /* Save this array so we can emit the declaration for it later */
2245 emit->temp_arrays[arrayID].start = decl->Range.First;
2246 emit->temp_arrays[arrayID].size =
2247 decl->Range.Last - decl->Range.First + 1;
2248
2249 emit->num_temp_arrays = MAX2(emit->num_temp_arrays, arrayID + 1);
2250 assert(emit->num_temp_arrays <= MAX_TEMP_ARRAYS);
2251 emit->num_temp_arrays = MIN2(emit->num_temp_arrays, MAX_TEMP_ARRAYS);
2252
2253 /* Fill in the temp_map entries for this array */
2254 for (i = decl->Range.First; i <= decl->Range.Last; i++) {
2255 emit->temp_map[i].arrayId = arrayID;
2256 emit->temp_map[i].index = i - decl->Range.First;
2257 }
2258 }
2259
2260 /* for all temps, indexed or not, keep track of highest index */
2261 emit->num_shader_temps = MAX2(emit->num_shader_temps,
2262 decl->Range.Last + 1);
2263 return TRUE;
2264
2265 case TGSI_FILE_CONSTANT:
2266 /* Don't declare constants here. Just keep track and emit later. */
2267 {
2268 unsigned constbuf = 0, num_consts;
2269 if (decl->Declaration.Dimension) {
2270 constbuf = decl->Dim.Index2D;
2271 }
2272 /* If the constbuf index were out of bounds, the shader should have
2273 * failed to link, so we should never get here with a bad index.
2274 * The assertion below catches that case.
2275 */
2276 assert(constbuf < ARRAY_SIZE(emit->num_shader_consts));
2277
2278 num_consts = MAX2(emit->num_shader_consts[constbuf],
2279 decl->Range.Last + 1);
2280
2281 if (num_consts > VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT) {
2282 debug_printf("Warning: constant buffer is declared to size [%u]"
2283 " but [%u] is the limit.\n",
2284 num_consts,
2285 VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT);
2286 }
2287 /* The linker doesn't enforce the max UBO size so we clamp here */
2288 emit->num_shader_consts[constbuf] =
2289 MIN2(num_consts, VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT);
2290 }
2291 return TRUE;
2292
2293 case TGSI_FILE_IMMEDIATE:
2294 assert(!"TGSI_FILE_IMMEDIATE not handled yet!");
2295 return FALSE;
2296
2297 case TGSI_FILE_SYSTEM_VALUE:
2298 emit_system_value_declaration(emit, decl->Semantic.Name,
2299 decl->Range.First);
2300 return TRUE;
2301
2302 case TGSI_FILE_SAMPLER:
2303 /* Don't declare samplers here. Just keep track and emit later. */
2304 emit->num_samplers = MAX2(emit->num_samplers, decl->Range.Last + 1);
2305 return TRUE;
2306
2307 #if 0
2308 case TGSI_FILE_RESOURCE:
2309 /*opcode0.opcodeType = VGPU10_OPCODE_DCL_RESOURCE;*/
2310 /* XXX more, VGPU10_RETURN_TYPE_FLOAT */
2311 assert(!"TGSI_FILE_RESOURCE not handled yet");
2312 return FALSE;
2313 #endif
2314
2315 case TGSI_FILE_ADDRESS:
2316 emit->num_address_regs = MAX2(emit->num_address_regs,
2317 decl->Range.Last + 1);
2318 return TRUE;
2319
2320 case TGSI_FILE_SAMPLER_VIEW:
2321 {
2322 unsigned unit = decl->Range.First;
2323 assert(decl->Range.First == decl->Range.Last);
2324 emit->sampler_target[unit] = decl->SamplerView.Resource;
2325 /* Note: we can ignore YZW return types for now */
2326 emit->sampler_return_type[unit] = decl->SamplerView.ReturnTypeX;
2327 }
2328 return TRUE;
2329
2330 default:
2331 assert(!"Unexpected type of declaration");
2332 return FALSE;
2333 }
2334 }
2335
2336
2337
2338 /**
2339 * Emit all input declarations.
2340 */
2341 static boolean
2342 emit_input_declarations(struct svga_shader_emitter_v10 *emit)
2343 {
2344 unsigned i;
2345
2346 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2347
2348 for (i = 0; i < emit->linkage.num_inputs; i++) {
2349 enum tgsi_semantic semantic_name = emit->info.input_semantic_name[i];
2350 unsigned usage_mask = emit->info.input_usage_mask[i];
2351 unsigned index = emit->linkage.input_map[i];
2352 unsigned type, interpolationMode, name;
2353
2354 if (usage_mask == 0)
2355 continue; /* register is not actually used */
2356
2357 if (semantic_name == TGSI_SEMANTIC_POSITION) {
2358 /* fragment position input */
2359 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2360 interpolationMode = VGPU10_INTERPOLATION_LINEAR;
2361 name = VGPU10_NAME_POSITION;
2362 if (usage_mask & TGSI_WRITEMASK_W) {
2363 /* we need to replace use of 'w' with '1/w' */
2364 emit->fs.fragcoord_input_index = i;
2365 }
2366 }
2367 else if (semantic_name == TGSI_SEMANTIC_FACE) {
2368 /* fragment front-facing input */
2369 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2370 interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
2371 name = VGPU10_NAME_IS_FRONT_FACE;
2372 emit->fs.face_input_index = i;
2373 }
2374 else if (semantic_name == TGSI_SEMANTIC_PRIMID) {
2375 /* primitive ID */
2376 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2377 interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
2378 name = VGPU10_NAME_PRIMITIVE_ID;
2379 }
2380 else {
2381 /* general fragment input */
2382 type = VGPU10_OPCODE_DCL_INPUT_PS;
2383 interpolationMode =
2384 translate_interpolation(emit,
2385 emit->info.input_interpolate[i],
2386 emit->info.input_interpolate_loc[i]);
2387
2388 /* keep track of whether flat interpolation mode is being used */
2389 emit->uses_flat_interp = emit->uses_flat_interp ||
2390 (interpolationMode == VGPU10_INTERPOLATION_CONSTANT);
2391
2392 name = VGPU10_NAME_UNDEFINED;
2393 }
2394
2395 emit_input_declaration(emit, type,
2396 VGPU10_OPERAND_TYPE_INPUT,
2397 VGPU10_OPERAND_INDEX_1D, index, 1,
2398 name,
2399 VGPU10_OPERAND_4_COMPONENT,
2400 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2401 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2402 interpolationMode);
2403 }
2404 }
2405 else if (emit->unit == PIPE_SHADER_GEOMETRY) {
2406
2407 for (i = 0; i < emit->info.num_inputs; i++) {
2408 enum tgsi_semantic semantic_name = emit->info.input_semantic_name[i];
2409 unsigned usage_mask = emit->info.input_usage_mask[i];
2410 unsigned index = emit->linkage.input_map[i];
2411 unsigned opcodeType, operandType;
2412 unsigned numComp, selMode;
2413 unsigned name;
2414 unsigned dim;
2415
2416 if (usage_mask == 0)
2417 continue; /* register is not actually used */
2418
2419 opcodeType = VGPU10_OPCODE_DCL_INPUT;
2420 operandType = VGPU10_OPERAND_TYPE_INPUT;
2421 numComp = VGPU10_OPERAND_4_COMPONENT;
2422 selMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
2423 name = VGPU10_NAME_UNDEFINED;
2424
2425 /* all geometry shader inputs are two dimensional except gl_PrimitiveID */
2426 dim = VGPU10_OPERAND_INDEX_2D;
2427
2428 if (semantic_name == TGSI_SEMANTIC_PRIMID) {
2429 /* Primitive ID */
2430 operandType = VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID;
2431 dim = VGPU10_OPERAND_INDEX_0D;
2432 numComp = VGPU10_OPERAND_0_COMPONENT;
2433 selMode = 0;
2434
2435 /* Also save the register index so we can check for the primitive
2436 * id when emitting a src register.  We need to modify the operand
2437 * type and index dimension when emitting a primitive id src reg.
2438 */
2439 emit->gs.prim_id_index = i;
2440 }
2441 else if (semantic_name == TGSI_SEMANTIC_POSITION) {
2442 /* vertex position input */
2443 opcodeType = VGPU10_OPCODE_DCL_INPUT_SIV;
2444 name = VGPU10_NAME_POSITION;
2445 }
2446
2447 emit_input_declaration(emit, opcodeType, operandType,
2448 dim, index,
2449 emit->gs.input_size,
2450 name,
2451 numComp, selMode,
2452 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2453 VGPU10_INTERPOLATION_UNDEFINED);
2454 }
2455 }
2456 else {
2457 assert(emit->unit == PIPE_SHADER_VERTEX);
2458
2459 for (i = 0; i < emit->info.file_max[TGSI_FILE_INPUT] + 1; i++) {
2460 unsigned usage_mask = emit->info.input_usage_mask[i];
2461 unsigned index = i;
2462
2463 if (usage_mask == 0)
2464 continue; /* register is not actually used */
2465
2466 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT,
2467 VGPU10_OPERAND_TYPE_INPUT,
2468 VGPU10_OPERAND_INDEX_1D, index, 1,
2469 VGPU10_NAME_UNDEFINED,
2470 VGPU10_OPERAND_4_COMPONENT,
2471 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2472 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2473 VGPU10_INTERPOLATION_UNDEFINED);
2474 }
2475 }
2476
2477 return TRUE;
2478 }
2479
2480
2481 /**
2482 * Emit all output declarations.
2483 */
2484 static boolean
2485 emit_output_declarations(struct svga_shader_emitter_v10 *emit)
2486 {
2487 unsigned i;
2488
2489 for (i = 0; i < emit->info.num_outputs; i++) {
2490 /*const unsigned usage_mask = emit->info.output_usage_mask[i];*/
2491 const enum tgsi_semantic semantic_name =
2492 emit->info.output_semantic_name[i];
2493 const unsigned semantic_index = emit->info.output_semantic_index[i];
2494 unsigned index = i;
2495
2496 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2497 if (semantic_name == TGSI_SEMANTIC_COLOR) {
2498 assert(semantic_index < ARRAY_SIZE(emit->fs.color_out_index));
2499
2500 emit->fs.color_out_index[semantic_index] = index;
2501
2502 /* The semantic index is the shader's color output/buffer index */
2503 emit_output_declaration(emit,
2504 VGPU10_OPCODE_DCL_OUTPUT, semantic_index,
2505 VGPU10_NAME_UNDEFINED,
2506 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2507
2508 if (semantic_index == 0) {
2509 if (emit->key.fs.write_color0_to_n_cbufs > 1) {
2510 /* Emit declarations for the additional color outputs
2511 * for broadcasting.
2512 */
2513 unsigned j;
2514 for (j = 1; j < emit->key.fs.write_color0_to_n_cbufs; j++) {
2515 /* Allocate a new output index */
2516 unsigned idx = emit->info.num_outputs + j - 1;
2517 emit->fs.color_out_index[j] = idx;
2518 emit_output_declaration(emit,
2519 VGPU10_OPCODE_DCL_OUTPUT, idx,
2520 VGPU10_NAME_UNDEFINED,
2521 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2522 emit->info.output_semantic_index[idx] = j;
2523 }
2524 }
2525 }
2526 else {
2527 assert(!emit->key.fs.write_color0_to_n_cbufs);
2528 }
2529 }
2530 else if (semantic_name == TGSI_SEMANTIC_POSITION) {
2531 /* Fragment depth output */
2532 emit_fragdepth_output_declaration(emit);
2533 }
2534 else {
2535 assert(!"Bad output semantic name");
2536 }
2537 }
2538 else {
2539 /* VS or GS */
2540 unsigned name, type;
2541 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
2542
2543 switch (semantic_name) {
2544 case TGSI_SEMANTIC_POSITION:
2545 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2546 type = VGPU10_OPCODE_DCL_OUTPUT_SIV;
2547 name = VGPU10_NAME_POSITION;
2548 /* Save the index of the vertex position output register */
2549 emit->vposition.out_index = index;
2550 break;
2551 case TGSI_SEMANTIC_CLIPDIST:
2552 type = VGPU10_OPCODE_DCL_OUTPUT_SIV;
2553 name = VGPU10_NAME_CLIP_DISTANCE;
2554 /* save the starting index of the clip distance output register */
2555 if (semantic_index == 0)
2556 emit->clip_dist_out_index = index;
2557 writemask = emit->output_usage_mask[index];
2558 writemask = apply_clip_plane_mask(emit, writemask, semantic_index);
2559 if (writemask == 0x0) {
2560 continue; /* discard this do-nothing declaration */
2561 }
2562 break;
2563 case TGSI_SEMANTIC_PRIMID:
2564 assert(emit->unit == PIPE_SHADER_GEOMETRY);
2565 type = VGPU10_OPCODE_DCL_OUTPUT_SGV;
2566 name = VGPU10_NAME_PRIMITIVE_ID;
2567 break;
2568 case TGSI_SEMANTIC_LAYER:
2569 assert(emit->unit == PIPE_SHADER_GEOMETRY);
2570 type = VGPU10_OPCODE_DCL_OUTPUT_SGV;
2571 name = VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX;
2572 break;
2573 case TGSI_SEMANTIC_CLIPVERTEX:
2574 type = VGPU10_OPCODE_DCL_OUTPUT;
2575 name = VGPU10_NAME_UNDEFINED;
2576 emit->clip_vertex_out_index = index;
2577 break;
2578 default:
2579 /* generic output */
2580 type = VGPU10_OPCODE_DCL_OUTPUT;
2581 name = VGPU10_NAME_UNDEFINED;
2582 }
2583
2584 emit_output_declaration(emit, type, index, name, writemask);
2585 }
2586 }
2587
2588 if (emit->vposition.so_index != INVALID_INDEX &&
2589 emit->vposition.out_index != INVALID_INDEX) {
2590
2591 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2592
2593 /* Emit the declaration for the non-adjusted vertex position
2594 * for stream output purposes.
2595 */
2596 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2597 emit->vposition.so_index,
2598 VGPU10_NAME_UNDEFINED,
2599 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2600 }
2601
2602 if (emit->clip_dist_so_index != INVALID_INDEX &&
2603 emit->clip_dist_out_index != INVALID_INDEX) {
2604
2605 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2606
2607 /* Emit the declaration for the clip distance shadow copy which
2608 * will be used for stream output and for the clip distance
2609 * varying variable.
2610 */
2611 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2612 emit->clip_dist_so_index,
2613 VGPU10_NAME_UNDEFINED,
2614 emit->output_usage_mask[emit->clip_dist_out_index]);
2615
2616 if (emit->info.num_written_clipdistance > 4) {
2617 /* for the second clip distance register, each handles 4 planes */
2618 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2619 emit->clip_dist_so_index + 1,
2620 VGPU10_NAME_UNDEFINED,
2621 emit->output_usage_mask[emit->clip_dist_out_index+1]);
2622 }
2623 }
2624
2625 return TRUE;
2626 }
2627
2628
2629 /**
2630 * Emit the declaration for the temporary registers.
2631 */
2632 static boolean
2633 emit_temporaries_declaration(struct svga_shader_emitter_v10 *emit)
2634 {
2635 unsigned total_temps, reg, i;
2636
2637 total_temps = emit->num_shader_temps;
2638
2639 /* If there is indirect access to non-indexable temps in the shader,
2640 * convert those temps to indexable temps. This works around a bug
2641 * in the GLSL->TGSI translator exposed in piglit test
2642 * glsl-1.20/execution/fs-const-array-of-struct-of-array.shader_test.
2643 * Internal temps added by the driver remain as non-indexable temps.
2644 */
2645 if ((emit->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) &&
2646 emit->num_temp_arrays == 0) {
2647 unsigned arrayID;
2648
2649 arrayID = 1;
2650 emit->num_temp_arrays = arrayID + 1;
2651 emit->temp_arrays[arrayID].start = 0;
2652 emit->temp_arrays[arrayID].size = total_temps;
2653
2654 /* Fill in the temp_map entries for this temp array */
2655 for (i = 0; i < total_temps; i++) {
2656 emit->temp_map[i].arrayId = arrayID;
2657 emit->temp_map[i].index = i;
2658 }
2659 }
2660
2661 /* Allocate extra temps for specially-implemented instructions,
2662 * such as LIT.
2663 */
2664 total_temps += MAX_INTERNAL_TEMPS;
2665
2666 if (emit->unit == PIPE_SHADER_VERTEX || emit->unit == PIPE_SHADER_GEOMETRY) {
2667 if (emit->vposition.need_prescale || emit->key.vs.undo_viewport ||
2668 emit->key.clip_plane_enable ||
2669 emit->vposition.so_index != INVALID_INDEX) {
2670 emit->vposition.tmp_index = total_temps;
2671 total_temps += 1;
2672 }
2673
2674 if (emit->unit == PIPE_SHADER_VERTEX) {
2675 unsigned attrib_mask = (emit->key.vs.adjust_attrib_w_1 |
2676 emit->key.vs.adjust_attrib_itof |
2677 emit->key.vs.adjust_attrib_utof |
2678 emit->key.vs.attrib_is_bgra |
2679 emit->key.vs.attrib_puint_to_snorm |
2680 emit->key.vs.attrib_puint_to_uscaled |
2681 emit->key.vs.attrib_puint_to_sscaled);
2682 while (attrib_mask) {
2683 unsigned index = u_bit_scan(&attrib_mask);
2684 emit->vs.adjusted_input[index] = total_temps++;
2685 }
2686 }
2687
2688 if (emit->clip_mode == CLIP_DISTANCE) {
2689 /* We need to write the clip distance to a temporary register
2690 * first.  Then it will be copied to the shadow copy used for
2691 * the clip distance varying variable and for stream output.
2692 * It will also be copied to the actual CLIPDIST register
2693 * according to the enabled clip planes.
2694 */
2695 emit->clip_dist_tmp_index = total_temps++;
2696 if (emit->info.num_written_clipdistance > 4)
2697 total_temps++; /* second clip register */
2698 }
2699 else if (emit->clip_mode == CLIP_VERTEX) {
2700 /* We need to convert the TGSI CLIPVERTEX output to one or more
2701 * clip distances. Allocate a temp reg for the clipvertex here.
2702 */
2703 assert(emit->info.writes_clipvertex > 0);
2704 emit->clip_vertex_tmp_index = total_temps;
2705 total_temps++;
2706 }
2707 }
2708 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
2709 if (emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS ||
2710 emit->key.fs.white_fragments ||
2711 emit->key.fs.write_color0_to_n_cbufs > 1) {
2712 /* Allocate a temp to hold the output color */
2713 emit->fs.color_tmp_index = total_temps;
2714 total_temps += 1;
2715 }
2716
2717 if (emit->fs.face_input_index != INVALID_INDEX) {
2718 /* Allocate a temp for the +/-1 face register */
2719 emit->fs.face_tmp_index = total_temps;
2720 total_temps += 1;
2721 }
2722
2723 if (emit->fs.fragcoord_input_index != INVALID_INDEX) {
2724 /* Allocate a temp for modified fragment position register */
2725 emit->fs.fragcoord_tmp_index = total_temps;
2726 total_temps += 1;
2727 }
2728 }
2729
2730 for (i = 0; i < emit->num_address_regs; i++) {
2731 emit->address_reg_index[i] = total_temps++;
2732 }
2733
2734 /* Initialize the temp_map array which maps TGSI temp indexes to VGPU10
2735 * temp indexes. Basically, we compact all the non-array temp register
2736 * indexes into a consecutive series.
2737 *
2738 * Before, we may have some TGSI declarations like:
2739 * DCL TEMP[0..1], LOCAL
2740 * DCL TEMP[2..4], ARRAY(1), LOCAL
2741 * DCL TEMP[5..7], ARRAY(2), LOCAL
2742 * plus, some extra temps, like TEMP[8], TEMP[9] for misc things
2743 *
2744 * After, we'll have a map like this:
2745 * temp_map[0] = { array 0, index 0 }
2746 * temp_map[1] = { array 0, index 1 }
2747 * temp_map[2] = { array 1, index 0 }
2748 * temp_map[3] = { array 1, index 1 }
2749 * temp_map[4] = { array 1, index 2 }
2750 * temp_map[5] = { array 2, index 0 }
2751 * temp_map[6] = { array 2, index 1 }
2752 * temp_map[7] = { array 2, index 2 }
2753 * temp_map[8] = { array 0, index 2 }
2754 * temp_map[9] = { array 0, index 3 }
2755 *
2756 * We'll declare two arrays of 3 elements, plus a set of four non-indexed
2757 * temps numbered 0..3
2758 *
2759 * Any time we emit a temporary register index, we'll have to use the
2760 * temp_map[] table to convert the TGSI index to the VGPU10 index.
2761 *
2762 * Finally, we recompute the total_temps value here.
2763 */
2764 reg = 0;
2765 for (i = 0; i < total_temps; i++) {
2766 if (emit->temp_map[i].arrayId == 0) {
2767 emit->temp_map[i].index = reg++;
2768 }
2769 }
2770
2771 if (0) {
2772 debug_printf("total_temps %u\n", total_temps);
2773 for (i = 0; i < total_temps; i++) {
2774 debug_printf("temp %u -> array %u index %u\n",
2775 i, emit->temp_map[i].arrayId, emit->temp_map[i].index);
2776 }
2777 }
2778
2779 total_temps = reg;
2780
2781 /* Emit declaration of ordinary temp registers */
2782 if (total_temps > 0) {
2783 VGPU10OpcodeToken0 opcode0;
2784
2785 opcode0.value = 0;
2786 opcode0.opcodeType = VGPU10_OPCODE_DCL_TEMPS;
2787
2788 begin_emit_instruction(emit);
2789 emit_dword(emit, opcode0.value);
2790 emit_dword(emit, total_temps);
2791 end_emit_instruction(emit);
2792 }
2793
2794 /* Emit declarations for indexable temp arrays. Skip 0th entry since
2795 * it's unused.
2796 */
2797 for (i = 1; i < emit->num_temp_arrays; i++) {
2798 unsigned num_temps = emit->temp_arrays[i].size;
2799
2800 if (num_temps > 0) {
2801 VGPU10OpcodeToken0 opcode0;
2802
2803 opcode0.value = 0;
2804 opcode0.opcodeType = VGPU10_OPCODE_DCL_INDEXABLE_TEMP;
2805
2806 begin_emit_instruction(emit);
2807 emit_dword(emit, opcode0.value);
2808 emit_dword(emit, i); /* which array */
2809 emit_dword(emit, num_temps);
2810 emit_dword(emit, 4); /* num components */
2811 end_emit_instruction(emit);
2812
2813 total_temps += num_temps;
2814 }
2815 }
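/* The declarations emitted above roughly correspond to something like
 * "dcl_indexableTemp x1[3], 4" in D3D10-style disassembly (array 1,
 * three registers, four components each).
 */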
2816
2817 /* Check that the grand total of all regular and indexed temps is
2818 * under the limit.
2819 */
2820 check_register_index(emit, VGPU10_OPCODE_DCL_TEMPS, total_temps - 1);
2821
2822 return TRUE;
2823 }
2824
2825
2826 static boolean
2827 emit_constant_declaration(struct svga_shader_emitter_v10 *emit)
2828 {
2829 VGPU10OpcodeToken0 opcode0;
2830 VGPU10OperandToken0 operand0;
2831 unsigned total_consts, i;
2832
2833 opcode0.value = 0;
2834 opcode0.opcodeType = VGPU10_OPCODE_DCL_CONSTANT_BUFFER;
2835 opcode0.accessPattern = VGPU10_CB_IMMEDIATE_INDEXED;
2836 /* XXX or, access pattern = VGPU10_CB_DYNAMIC_INDEXED */
2837
2838 operand0.value = 0;
2839 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
2840 operand0.indexDimension = VGPU10_OPERAND_INDEX_2D;
2841 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2842 operand0.index1Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2843 operand0.operandType = VGPU10_OPERAND_TYPE_CONSTANT_BUFFER;
2844 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
2845 operand0.swizzleX = 0;
2846 operand0.swizzleY = 1;
2847 operand0.swizzleZ = 2;
2848 operand0.swizzleW = 3;
2849
2850 /**
2851 * Emit declaration for constant buffer [0]. We also allocate
2852 * room for the extra constants here.
2853 */
2854 total_consts = emit->num_shader_consts[0];
2855
2856 /* Now, allocate constant slots for the "extra" constants.
2857 * Note: it's critical that these extra constant locations
2858 * exactly match what's emitted by the "extra" constants code
2859 * in svga_state_constants.c
2860 */
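/* Illustrative layout (hypothetical VS with 10 declared constants,
 * prescaling enabled and two user clip planes): constant buffer 0
 * would hold slots [0..9] for the shader constants, [10..11] for the
 * prescale scale/translation, and [12..13] for the clip plane
 * coefficients, followed by any texcoord scale factors and texture
 * buffer sizes allocated below.
 */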
2861
2862 /* Vertex position scale/translation */
2863 if (emit->vposition.need_prescale) {
2864 emit->vposition.prescale_scale_index = total_consts++;
2865 emit->vposition.prescale_trans_index = total_consts++;
2866 }
2867
2868 if (emit->unit == PIPE_SHADER_VERTEX) {
2869 if (emit->key.vs.undo_viewport) {
2870 emit->vs.viewport_index = total_consts++;
2871 }
2872 }
2873
2874 /* user-defined clip planes */
2875 if (emit->key.clip_plane_enable) {
2876 unsigned n = util_bitcount(emit->key.clip_plane_enable);
2877 assert(emit->unit == PIPE_SHADER_VERTEX ||
2878 emit->unit == PIPE_SHADER_GEOMETRY);
2879 for (i = 0; i < n; i++) {
2880 emit->clip_plane_const[i] = total_consts++;
2881 }
2882 }
2883
2884 /* Texcoord scale factors for RECT textures */
2885 {
2886 for (i = 0; i < emit->num_samplers; i++) {
2887 if (emit->key.tex[i].unnormalized) {
2888 emit->texcoord_scale_index[i] = total_consts++;
2889 }
2890 }
2891 }
2892
2893 /* Texture buffer sizes */
2894 for (i = 0; i < emit->num_samplers; i++) {
2895 if (emit->sampler_target[i] == TGSI_TEXTURE_BUFFER) {
2896 emit->texture_buffer_size_index[i] = total_consts++;
2897 }
2898 }
2899
2900 if (total_consts > 0) {
2901 begin_emit_instruction(emit);
2902 emit_dword(emit, opcode0.value);
2903 emit_dword(emit, operand0.value);
2904 emit_dword(emit, 0); /* which const buffer slot */
2905 emit_dword(emit, total_consts);
2906 end_emit_instruction(emit);
2907 }
2908
2909 /* Declare remaining constant buffers (UBOs) */
2910 for (i = 1; i < ARRAY_SIZE(emit->num_shader_consts); i++) {
2911 if (emit->num_shader_consts[i] > 0) {
2912 begin_emit_instruction(emit);
2913 emit_dword(emit, opcode0.value);
2914 emit_dword(emit, operand0.value);
2915 emit_dword(emit, i); /* which const buffer slot */
2916 emit_dword(emit, emit->num_shader_consts[i]);
2917 end_emit_instruction(emit);
2918 }
2919 }
2920
2921 return TRUE;
2922 }
2923
2924
2925 /**
2926 * Emit declarations for samplers.
2927 */
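/*
 * Each iteration below emits roughly "dcl_sampler sN, mode_default"
 * in D3D10-style disassembly.
 */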
2928 static boolean
2929 emit_sampler_declarations(struct svga_shader_emitter_v10 *emit)
2930 {
2931 unsigned i;
2932
2933 for (i = 0; i < emit->num_samplers; i++) {
2934 VGPU10OpcodeToken0 opcode0;
2935 VGPU10OperandToken0 operand0;
2936
2937 opcode0.value = 0;
2938 opcode0.opcodeType = VGPU10_OPCODE_DCL_SAMPLER;
2939 opcode0.samplerMode = VGPU10_SAMPLER_MODE_DEFAULT;
2940
2941 operand0.value = 0;
2942 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
2943 operand0.operandType = VGPU10_OPERAND_TYPE_SAMPLER;
2944 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
2945 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2946
2947 begin_emit_instruction(emit);
2948 emit_dword(emit, opcode0.value);
2949 emit_dword(emit, operand0.value);
2950 emit_dword(emit, i);
2951 end_emit_instruction(emit);
2952 }
2953
2954 return TRUE;
2955 }
2956
2957
2958 /**
2959 * Translate TGSI_TEXTURE_x to VGPU10_RESOURCE_DIMENSION_x.
2960 */
2961 static unsigned
2962 tgsi_texture_to_resource_dimension(enum tgsi_texture_type target,
2963 boolean is_array)
2964 {
2965 switch (target) {
2966 case TGSI_TEXTURE_BUFFER:
2967 return VGPU10_RESOURCE_DIMENSION_BUFFER;
2968 case TGSI_TEXTURE_1D:
2969 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D;
2970 case TGSI_TEXTURE_2D:
2971 case TGSI_TEXTURE_RECT:
2972 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
2973 case TGSI_TEXTURE_3D:
2974 return VGPU10_RESOURCE_DIMENSION_TEXTURE3D;
2975 case TGSI_TEXTURE_CUBE:
2976 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBE;
2977 case TGSI_TEXTURE_SHADOW1D:
2978 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D;
2979 case TGSI_TEXTURE_SHADOW2D:
2980 case TGSI_TEXTURE_SHADOWRECT:
2981 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
2982 case TGSI_TEXTURE_1D_ARRAY:
2983 case TGSI_TEXTURE_SHADOW1D_ARRAY:
2984 return is_array ? VGPU10_RESOURCE_DIMENSION_TEXTURE1DARRAY
2985 : VGPU10_RESOURCE_DIMENSION_TEXTURE1D;
2986 case TGSI_TEXTURE_2D_ARRAY:
2987 case TGSI_TEXTURE_SHADOW2D_ARRAY:
2988 return is_array ? VGPU10_RESOURCE_DIMENSION_TEXTURE2DARRAY
2989 : VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
2990 case TGSI_TEXTURE_SHADOWCUBE:
2991 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBE;
2992 case TGSI_TEXTURE_2D_MSAA:
2993 return VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS;
2994 case TGSI_TEXTURE_2D_ARRAY_MSAA:
2995 return is_array ? VGPU10_RESOURCE_DIMENSION_TEXTURE2DMSARRAY
2996 : VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS;
2997 case TGSI_TEXTURE_CUBE_ARRAY:
2998 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBEARRAY;
2999 default:
3000 assert(!"Unexpected resource type");
3001 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
3002 }
3003 }
3004
3005
3006 /**
3007 * Given a tgsi_return_type, return true iff it is an integer type.
3008 */
3009 static boolean
3010 is_integer_type(enum tgsi_return_type type)
3011 {
3012 switch (type) {
3013 case TGSI_RETURN_TYPE_SINT:
3014 case TGSI_RETURN_TYPE_UINT:
3015 return TRUE;
3016 case TGSI_RETURN_TYPE_FLOAT:
3017 case TGSI_RETURN_TYPE_UNORM:
3018 case TGSI_RETURN_TYPE_SNORM:
3019 return FALSE;
3020 case TGSI_RETURN_TYPE_COUNT:
3021 default:
3022 assert(!"is_integer_type: Unknown tgsi_return_type");
3023 return FALSE;
3024 }
3025 }
3026
3027
3028 /**
3029 * Emit declarations for resources.
3030 * XXX When we're sure that all TGSI shaders will be generated with
3031 * sampler view declarations (Ex: DCL SVIEW[n], 2D, UINT) we may
3032 * rework this code.
3033 */
3034 static boolean
3035 emit_resource_declarations(struct svga_shader_emitter_v10 *emit)
3036 {
3037 unsigned i;
3038
3039 /* Emit resource decl for each sampler */
3040 for (i = 0; i < emit->num_samplers; i++) {
3041 VGPU10OpcodeToken0 opcode0;
3042 VGPU10OperandToken0 operand0;
3043 VGPU10ResourceReturnTypeToken return_type;
3044 VGPU10_RESOURCE_RETURN_TYPE rt;
3045
3046 opcode0.value = 0;
3047 opcode0.opcodeType = VGPU10_OPCODE_DCL_RESOURCE;
3048 opcode0.resourceDimension =
3049 tgsi_texture_to_resource_dimension(emit->sampler_target[i],
3050 emit->key.tex[i].is_array);
3051 operand0.value = 0;
3052 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
3053 operand0.operandType = VGPU10_OPERAND_TYPE_RESOURCE;
3054 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
3055 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
3056
3057 #if 1
3058 /* convert TGSI_RETURN_TYPE_x to VGPU10_RETURN_TYPE_x */
3059 STATIC_ASSERT(VGPU10_RETURN_TYPE_UNORM == TGSI_RETURN_TYPE_UNORM + 1);
3060 STATIC_ASSERT(VGPU10_RETURN_TYPE_SNORM == TGSI_RETURN_TYPE_SNORM + 1);
3061 STATIC_ASSERT(VGPU10_RETURN_TYPE_SINT == TGSI_RETURN_TYPE_SINT + 1);
3062 STATIC_ASSERT(VGPU10_RETURN_TYPE_UINT == TGSI_RETURN_TYPE_UINT + 1);
3063 STATIC_ASSERT(VGPU10_RETURN_TYPE_FLOAT == TGSI_RETURN_TYPE_FLOAT + 1);
3064 assert(emit->sampler_return_type[i] <= TGSI_RETURN_TYPE_FLOAT);
3065 rt = emit->sampler_return_type[i] + 1;
3066 #else
3067 switch (emit->sampler_return_type[i]) {
3068 case TGSI_RETURN_TYPE_UNORM: rt = VGPU10_RETURN_TYPE_UNORM; break;
3069 case TGSI_RETURN_TYPE_SNORM: rt = VGPU10_RETURN_TYPE_SNORM; break;
3070 case TGSI_RETURN_TYPE_SINT: rt = VGPU10_RETURN_TYPE_SINT; break;
3071 case TGSI_RETURN_TYPE_UINT: rt = VGPU10_RETURN_TYPE_UINT; break;
3072 case TGSI_RETURN_TYPE_FLOAT: rt = VGPU10_RETURN_TYPE_FLOAT; break;
3073 case TGSI_RETURN_TYPE_COUNT:
3074 default:
3075 rt = VGPU10_RETURN_TYPE_FLOAT;
3076 assert(!"emit_resource_declarations: Unknown tgsi_return_type");
3077 }
3078 #endif
3079
3080 return_type.value = 0;
3081 return_type.component0 = rt;
3082 return_type.component1 = rt;
3083 return_type.component2 = rt;
3084 return_type.component3 = rt;
3085
3086 begin_emit_instruction(emit);
3087 emit_dword(emit, opcode0.value);
3088 emit_dword(emit, operand0.value);
3089 emit_dword(emit, i);
3090 emit_dword(emit, return_type.value);
3091 end_emit_instruction(emit);
3092 }
3093
3094 return TRUE;
3095 }
3096
3097 static void
3098 emit_instruction_op1(struct svga_shader_emitter_v10 *emit,
3099 unsigned opcode,
3100 const struct tgsi_full_dst_register *dst,
3101 const struct tgsi_full_src_register *src,
3102 boolean saturate)
3103 {
3104 begin_emit_instruction(emit);
3105 emit_opcode(emit, opcode, saturate);
3106 emit_dst_register(emit, dst);
3107 emit_src_register(emit, src);
3108 end_emit_instruction(emit);
3109 }
3110
3111 static void
3112 emit_instruction_op2(struct svga_shader_emitter_v10 *emit,
3113 unsigned opcode,
3114 const struct tgsi_full_dst_register *dst,
3115 const struct tgsi_full_src_register *src1,
3116 const struct tgsi_full_src_register *src2,
3117 boolean saturate)
3118 {
3119 begin_emit_instruction(emit);
3120 emit_opcode(emit, opcode, saturate);
3121 emit_dst_register(emit, dst);
3122 emit_src_register(emit, src1);
3123 emit_src_register(emit, src2);
3124 end_emit_instruction(emit);
3125 }
3126
3127 static void
3128 emit_instruction_op3(struct svga_shader_emitter_v10 *emit,
3129 unsigned opcode,
3130 const struct tgsi_full_dst_register *dst,
3131 const struct tgsi_full_src_register *src1,
3132 const struct tgsi_full_src_register *src2,
3133 const struct tgsi_full_src_register *src3,
3134 boolean saturate)
3135 {
3136 begin_emit_instruction(emit);
3137 emit_opcode(emit, opcode, saturate);
3138 emit_dst_register(emit, dst);
3139 emit_src_register(emit, src1);
3140 emit_src_register(emit, src2);
3141 emit_src_register(emit, src3);
3142 end_emit_instruction(emit);
3143 }
3144
3145 /**
3146 * Emit the actual clip distance instructions used for clipping:
3147 * copy the clip distances from the temporary registers to the
3148 * CLIPDIST registers, masked by the enabled clip planes.
3149 * Also copy the clip distances from the temporaries to the clip
3150 * distance shadow copy register, which the next shader reads as input.
3151 */
3152 static void
3153 emit_clip_distance_instructions(struct svga_shader_emitter_v10 *emit)
3154 {
3155 struct tgsi_full_src_register tmp_clip_dist_src;
3156 struct tgsi_full_dst_register clip_dist_dst;
3157
3158 unsigned i;
3159 unsigned clip_plane_enable = emit->key.clip_plane_enable;
3160 unsigned clip_dist_tmp_index = emit->clip_dist_tmp_index;
3161 int num_written_clipdist = emit->info.num_written_clipdistance;
3162
3163 assert(emit->clip_dist_out_index != INVALID_INDEX);
3164 assert(emit->clip_dist_tmp_index != INVALID_INDEX);
3165
3166 /**
3167 * Temporarily reset the temporary clip dist register index so
3168 * that the copy to the real clip dist register will not
3169 * be redirected to the temporary register again.
3170 */
3171 emit->clip_dist_tmp_index = INVALID_INDEX;
3172
3173 for (i = 0; i < 2 && num_written_clipdist > 0; i++, num_written_clipdist-=4) {
3174
3175 tmp_clip_dist_src = make_src_temp_reg(clip_dist_tmp_index + i);
3176
3177 /**
3178 * Copy to the shadow copy for use by the varying variable and by
3179 * stream output.  All clip distances are written regardless of
3180 * the enabled clipping planes.
3181 */
3182 clip_dist_dst = make_dst_reg(TGSI_FILE_OUTPUT,
3183 emit->clip_dist_so_index + i);
3184
3185 /* MOV clip_dist_so, tmp_clip_dist */
3186 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &clip_dist_dst,
3187 &tmp_clip_dist_src, FALSE);
3188
3189 /**
3190 * Copy the clip distances for the enabled clipping planes
3191 * to the CLIPDIST registers for clipping.
3192 */
3193 if (clip_plane_enable & 0xf) {
3194 clip_dist_dst = make_dst_reg(TGSI_FILE_OUTPUT,
3195 emit->clip_dist_out_index + i);
3196 clip_dist_dst = writemask_dst(&clip_dist_dst, clip_plane_enable & 0xf);
3197
3198 /* MOV CLIPDIST, tmp_clip_dist */
3199 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &clip_dist_dst,
3200 &tmp_clip_dist_src, FALSE);
3201 }
3202 /* four clip planes per clip register */
3203 clip_plane_enable >>= 4;
3204 }
3205 /**
3206 * set the temporary clip dist register index back to the
3207 * temporary index for the next vertex
3208 */
3209 emit->clip_dist_tmp_index = clip_dist_tmp_index;
3210 }
3211
3212 /* Declare clip distance output registers for user-defined clip planes
3213 * or the TGSI_CLIPVERTEX output.
3214 */
3215 static void
3216 emit_clip_distance_declarations(struct svga_shader_emitter_v10 *emit)
3217 {
3218 unsigned num_clip_planes = util_bitcount(emit->key.clip_plane_enable);
3219 unsigned index = emit->num_outputs;
3220 unsigned plane_mask;
3221
3222 assert(emit->unit == PIPE_SHADER_VERTEX ||
3223 emit->unit == PIPE_SHADER_GEOMETRY);
3224 assert(num_clip_planes <= 8);
3225
3226 if (emit->clip_mode != CLIP_LEGACY &&
3227 emit->clip_mode != CLIP_VERTEX) {
3228 return;
3229 }
3230
3231 if (num_clip_planes == 0)
3232 return;
3233
3234 /* Declare one or two clip output registers. The number of components
3235 * in the mask reflects the number of clip planes. For example, if 5
3236 * clip planes are needed, we'll declare outputs similar to:
3237 * dcl_output_siv o2.xyzw, clip_distance
3238 * dcl_output_siv o3.x, clip_distance
3239 */
3240 emit->clip_dist_out_index = index; /* save the starting clip dist reg index */
3241
3242 plane_mask = (1 << num_clip_planes) - 1;
3243 if (plane_mask & 0xf) {
3244 unsigned cmask = plane_mask & VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
3245 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT_SIV, index,
3246 VGPU10_NAME_CLIP_DISTANCE, cmask);
3247 emit->num_outputs++;
3248 }
3249 if (plane_mask & 0xf0) {
3250 unsigned cmask = (plane_mask >> 4) & VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
3251 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT_SIV, index + 1,
3252 VGPU10_NAME_CLIP_DISTANCE, cmask);
3253 emit->num_outputs++;
3254 }
3255 }
3256
3257
3258 /**
3259 * Emit the instructions for writing to the clip distance registers
3260 * to handle legacy/automatic clip planes.
3261 * For each clip plane, the distance is the dot product of the vertex
3262 * position (found in TEMP[vpos_tmp_index]) and the clip plane coefficients.
3263 * This is not used when the shader has an explicit CLIPVERTEX or CLIPDISTANCE
3264 * output registers already declared.
3265 */
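/*
 * In other words, for each enabled plane i the code below computes
 * dist[i] = dot4(clip_plane_const[i], vpos).
 */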
3266 static void
3267 emit_clip_distance_from_vpos(struct svga_shader_emitter_v10 *emit,
3268 unsigned vpos_tmp_index)
3269 {
3270 unsigned i, num_clip_planes = util_bitcount(emit->key.clip_plane_enable);
3271
3272 assert(emit->clip_mode == CLIP_LEGACY);
3273 assert(num_clip_planes <= 8);
3274
3275 assert(emit->unit == PIPE_SHADER_VERTEX ||
3276 emit->unit == PIPE_SHADER_GEOMETRY);
3277
3278 for (i = 0; i < num_clip_planes; i++) {
3279 struct tgsi_full_dst_register dst;
3280 struct tgsi_full_src_register plane_src, vpos_src;
3281 unsigned reg_index = emit->clip_dist_out_index + i / 4;
3282 unsigned comp = i % 4;
3283 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_X << comp;
3284
3285 /* create dst, src regs */
3286 dst = make_dst_reg(TGSI_FILE_OUTPUT, reg_index);
3287 dst = writemask_dst(&dst, writemask);
3288
3289 plane_src = make_src_const_reg(emit->clip_plane_const[i]);
3290 vpos_src = make_src_temp_reg(vpos_tmp_index);
3291
3292 /* DP4 clip_dist, plane, vpos */
3293 emit_instruction_op2(emit, VGPU10_OPCODE_DP4, &dst,
3294 &plane_src, &vpos_src, FALSE);
3295 }
3296 }
3297
3298
3299 /**
3300 * Emit the instructions for computing the clip distance results from
3301 * the clip vertex temporary.
3302 * For each clip plane, the distance is the dot product of the clip vertex
3303 * position (found in a temp reg) and the clip plane coefficients.
3304 */
3305 static void
3306 emit_clip_vertex_instructions(struct svga_shader_emitter_v10 *emit)
3307 {
3308 const unsigned num_clip = util_bitcount(emit->key.clip_plane_enable);
3309 unsigned i;
3310 struct tgsi_full_dst_register dst;
3311 struct tgsi_full_src_register clipvert_src;
3312 const unsigned clip_vertex_tmp = emit->clip_vertex_tmp_index;
3313
3314 assert(emit->unit == PIPE_SHADER_VERTEX ||
3315 emit->unit == PIPE_SHADER_GEOMETRY);
3316
3317 assert(emit->clip_mode == CLIP_VERTEX);
3318
3319 clipvert_src = make_src_temp_reg(clip_vertex_tmp);
3320
3321 for (i = 0; i < num_clip; i++) {
3322 struct tgsi_full_src_register plane_src;
3323 unsigned reg_index = emit->clip_dist_out_index + i / 4;
3324 unsigned comp = i % 4;
3325 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_X << comp;
3326
3327 /* create dst, src regs */
3328 dst = make_dst_reg(TGSI_FILE_OUTPUT, reg_index);
3329 dst = writemask_dst(&dst, writemask);
3330
3331 plane_src = make_src_const_reg(emit->clip_plane_const[i]);
3332
3333 /* DP4 clip_dist, plane, vpos */
3334 emit_instruction_op2(emit, VGPU10_OPCODE_DP4, &dst,
3335 &plane_src, &clipvert_src, FALSE);
3336 }
3337
3338 /* copy temporary clip vertex register to the clip vertex register */
3339
3340 assert(emit->clip_vertex_out_index != INVALID_INDEX);
3341
3342 /**
3343 * Temporarily reset the temporary clip vertex register index so
3344 * that the copy to the clip vertex register will not attempt
3345 * to write to the temporary register again.
3346 */
3347 emit->clip_vertex_tmp_index = INVALID_INDEX;
3348
3349 /* MOV clip_vertex, clip_vertex_tmp */
3350 dst = make_dst_reg(TGSI_FILE_OUTPUT, emit->clip_vertex_out_index);
3351 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
3352 &dst, &clipvert_src, FALSE);
3353
3354 /**
3355 * set the temporary clip vertex register index back to the
3356 * temporary index for the next vertex
3357 */
3358 emit->clip_vertex_tmp_index = clip_vertex_tmp;
3359 }
3360
3361 /**
3362 * Emit code to convert RGBA to BGRA
3363 */
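/* Effectively a single MOV with a swizzled source: dst = src.zyxw */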
3364 static void
3365 emit_swap_r_b(struct svga_shader_emitter_v10 *emit,
3366 const struct tgsi_full_dst_register *dst,
3367 const struct tgsi_full_src_register *src)
3368 {
3369 struct tgsi_full_src_register bgra_src =
3370 swizzle_src(src, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X, TGSI_SWIZZLE_W);
3371
3372 begin_emit_instruction(emit);
3373 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
3374 emit_dst_register(emit, dst);
3375 emit_src_register(emit, &bgra_src);
3376 end_emit_instruction(emit);
3377 }
3378
3379
3380 /** Convert from 10_10_10_2 normalized to 10_10_10_2_snorm */
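/*
 * Rough numeric check (illustrative): a 10-bit channel holding 768 reads
 * as unorm 768/1023 ~= 0.7507; then val ~= 1.5015 and bias = -2.0 (since
 * src >= 0.5), giving dst ~= -0.4985, approximately the -0.5 that a
 * signed interpretation of the same bits would yield.
 */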
3381 static void
3382 emit_puint_to_snorm(struct svga_shader_emitter_v10 *emit,
3383 const struct tgsi_full_dst_register *dst,
3384 const struct tgsi_full_src_register *src)
3385 {
3386 struct tgsi_full_src_register half = make_immediate_reg_float(emit, 0.5f);
3387 struct tgsi_full_src_register two =
3388 make_immediate_reg_float4(emit, 2.0f, 2.0f, 2.0f, 3.0f);
3389 struct tgsi_full_src_register neg_two =
3390 make_immediate_reg_float4(emit, -2.0f, -2.0f, -2.0f, -1.66666f);
3391
3392 unsigned val_tmp = get_temp_index(emit);
3393 struct tgsi_full_dst_register val_dst = make_dst_temp_reg(val_tmp);
3394 struct tgsi_full_src_register val_src = make_src_temp_reg(val_tmp);
3395
3396 unsigned bias_tmp = get_temp_index(emit);
3397 struct tgsi_full_dst_register bias_dst = make_dst_temp_reg(bias_tmp);
3398 struct tgsi_full_src_register bias_src = make_src_temp_reg(bias_tmp);
3399
3400 /* val = src * 2.0 */
3401 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &val_dst,
3402 src, &two, FALSE);
3403
3404 /* bias = src >= 0.5 */
3405 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &bias_dst,
3406 src, &half, FALSE);
3407
3408 /* bias = bias & -2.0 */
3409 emit_instruction_op2(emit, VGPU10_OPCODE_AND, &bias_dst,
3410 &bias_src, &neg_two, FALSE);
3411
3412 /* dst = val + bias */
3413 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, dst,
3414 &val_src, &bias_src, FALSE);
3415
3416 free_temp_indexes(emit);
3417 }
3418
3419
3420 /** Convert from 10_10_10_2_unorm to 10_10_10_2_uscaled */
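/*
 * Quick check (illustrative): a 10-bit channel value v stored as unorm
 * v/1023 maps back to (float)v after the multiply below; the 2-bit W
 * channel is scaled by 3 instead.
 */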
3421 static void
3422 emit_puint_to_uscaled(struct svga_shader_emitter_v10 *emit,
3423 const struct tgsi_full_dst_register *dst,
3424 const struct tgsi_full_src_register *src)
3425 {
3426 struct tgsi_full_src_register scale =
3427 make_immediate_reg_float4(emit, 1023.0f, 1023.0f, 1023.0f, 3.0f);
3428
3429 /* dst = src * scale */
3430 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, dst, src, &scale, FALSE);
3431 }
3432
3433
3434 /** Convert from R32_UINT to 10_10_10_2_sscaled */
3435 static void
3436 emit_puint_to_sscaled(struct svga_shader_emitter_v10 *emit,
3437 const struct tgsi_full_dst_register *dst,
3438 const struct tgsi_full_src_register *src)
3439 {
3440 struct tgsi_full_src_register lshift =
3441 make_immediate_reg_int4(emit, 22, 12, 2, 0);
3442 struct tgsi_full_src_register rshift =
3443 make_immediate_reg_int4(emit, 22, 22, 22, 30);
3444
3445 struct tgsi_full_src_register src_xxxx = scalar_src(src, TGSI_SWIZZLE_X);
3446
3447 unsigned tmp = get_temp_index(emit);
3448 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3449 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3450
3451 /*
3452 * r = (pixel << 22) >> 22; # signed int in [-512, 511]
3453 * g = (pixel << 12) >> 22; # signed int in [-512, 511]
3454 * b = (pixel << 2) >> 22; # signed int in [-512, 511]
3455 * a = (pixel << 0) >> 30; # signed int in [-2, 1]
3456 * dst = i_to_f(r,g,b,a); # convert to float
3457 */
3458 emit_instruction_op2(emit, VGPU10_OPCODE_ISHL, &tmp_dst,
3459 &src_xxxx, &lshift, FALSE);
3460 emit_instruction_op2(emit, VGPU10_OPCODE_ISHR, &tmp_dst,
3461 &tmp_src, &rshift, FALSE);
3462 emit_instruction_op1(emit, VGPU10_OPCODE_ITOF, dst, &tmp_src, FALSE);
3463
3464 free_temp_indexes(emit);
3465 }
3466
3467
3468 /**
3469 * Emit code for TGSI_OPCODE_ARL or TGSI_OPCODE_UARL instruction.
3470 */
3471 static boolean
3472 emit_arl_uarl(struct svga_shader_emitter_v10 *emit,
3473 const struct tgsi_full_instruction *inst)
3474 {
3475 unsigned index = inst->Dst[0].Register.Index;
3476 struct tgsi_full_dst_register dst;
3477 unsigned opcode;
3478
3479 assert(index < MAX_VGPU10_ADDR_REGS);
3480 dst = make_dst_temp_reg(emit->address_reg_index[index]);
3481
3482 /* ARL dst, s0
3483 * Translates into:
3484 * FTOI address_tmp, s0
3485 *
3486 * UARL dst, s0
3487 * Translates into:
3488 * MOV address_tmp, s0
3489 */
3490 if (inst->Instruction.Opcode == TGSI_OPCODE_ARL)
3491 opcode = VGPU10_OPCODE_FTOI;
3492 else
3493 opcode = VGPU10_OPCODE_MOV;
3494
3495 emit_instruction_op1(emit, opcode, &dst, &inst->Src[0], FALSE);
3496
3497 return TRUE;
3498 }
3499
3500
3501 /**
3502 * Emit code for TGSI_OPCODE_CAL instruction.
3503 */
3504 static boolean
3505 emit_cal(struct svga_shader_emitter_v10 *emit,
3506 const struct tgsi_full_instruction *inst)
3507 {
3508 unsigned label = inst->Label.Label;
3509 VGPU10OperandToken0 operand;
3510 operand.value = 0;
3511 operand.operandType = VGPU10_OPERAND_TYPE_LABEL;
3512
3513 begin_emit_instruction(emit);
3514 emit_dword(emit, operand.value);
3515 emit_dword(emit, label);
3516 end_emit_instruction(emit);
3517
3518 return TRUE;
3519 }
3520
3521
3522 /**
3523 * Emit code for TGSI_OPCODE_IABS instruction.
3524 */
3525 static boolean
3526 emit_iabs(struct svga_shader_emitter_v10 *emit,
3527 const struct tgsi_full_instruction *inst)
3528 {
3529 /* dst.x = (src0.x < 0) ? -src0.x : src0.x
3530 * dst.y = (src0.y < 0) ? -src0.y : src0.y
3531 * dst.z = (src0.z < 0) ? -src0.z : src0.z
3532 * dst.w = (src0.w < 0) ? -src0.w : src0.w
3533 *
3534 * Translates into
3535 * IMAX dst, src, neg(src)
3536 */
3537 struct tgsi_full_src_register neg_src = negate_src(&inst->Src[0]);
3538 emit_instruction_op2(emit, VGPU10_OPCODE_IMAX, &inst->Dst[0],
3539 &inst->Src[0], &neg_src, FALSE);
3540
3541 return TRUE;
3542 }
3543
3544
3545 /**
3546 * Emit code for TGSI_OPCODE_CMP instruction.
3547 */
3548 static boolean
3549 emit_cmp(struct svga_shader_emitter_v10 *emit,
3550 const struct tgsi_full_instruction *inst)
3551 {
3552 /* dst.x = (src0.x < 0) ? src1.x : src2.x
3553 * dst.y = (src0.y < 0) ? src1.y : src2.y
3554 * dst.z = (src0.z < 0) ? src1.z : src2.z
3555 * dst.w = (src0.w < 0) ? src1.w : src2.w
3556 *
3557 * Translates into
3558 * LT tmp, src0, 0.0
3559 * MOVC dst, tmp, src1, src2
3560 */
3561 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
3562 unsigned tmp = get_temp_index(emit);
3563 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3564 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3565
3566 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst,
3567 &inst->Src[0], &zero, FALSE);
3568 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0],
3569 &tmp_src, &inst->Src[1], &inst->Src[2],
3570 inst->Instruction.Saturate);
3571
3572 free_temp_indexes(emit);
3573
3574 return TRUE;
3575 }
3576
3577
3578 /**
3579 * Emit code for TGSI_OPCODE_DP2A instruction.
3580 */
3581 static boolean
3582 emit_dp2a(struct svga_shader_emitter_v10 *emit,
3583 const struct tgsi_full_instruction *inst)
3584 {
3585 /* dst.x = src0.x * src1.x + src0.y * src1.y + src2.x
3586 * dst.y = src0.x * src1.x + src0.y * src1.y + src2.x
3587 * dst.z = src0.x * src1.x + src0.y * src1.y + src2.x
3588 * dst.w = src0.x * src1.x + src0.y * src1.y + src2.x
3589 * Translates into
3590 * MAD tmp.x, s0.y, s1.y, s2.x
3591 * MAD tmp.x, s0.x, s1.x, tmp.x
3592 * MOV dst.xyzw, tmp.xxxx
3593 */
3594 unsigned tmp = get_temp_index(emit);
3595 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3596 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3597
3598 struct tgsi_full_src_register tmp_src_xxxx =
3599 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
3600 struct tgsi_full_dst_register tmp_dst_x =
3601 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
3602
3603 struct tgsi_full_src_register src0_xxxx =
3604 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
3605 struct tgsi_full_src_register src0_yyyy =
3606 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
3607 struct tgsi_full_src_register src1_xxxx =
3608 scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
3609 struct tgsi_full_src_register src1_yyyy =
3610 scalar_src(&inst->Src[1], TGSI_SWIZZLE_Y);
3611 struct tgsi_full_src_register src2_xxxx =
3612 scalar_src(&inst->Src[2], TGSI_SWIZZLE_X);
3613
3614 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &tmp_dst_x, &src0_yyyy,
3615 &src1_yyyy, &src2_xxxx, FALSE);
3616 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &tmp_dst_x, &src0_xxxx,
3617 &src1_xxxx, &tmp_src_xxxx, FALSE);
3618 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
3619 &tmp_src_xxxx, inst->Instruction.Saturate);
3620
3621 free_temp_indexes(emit);
3622
3623 return TRUE;
3624 }
3625
3626
3627 /**
3628 * Emit code for TGSI_OPCODE_DPH instruction.
3629 */
3630 static boolean
3631 emit_dph(struct svga_shader_emitter_v10 *emit,
3632 const struct tgsi_full_instruction *inst)
3633 {
3634 /*
3635 * DP3 tmp, s0, s1
3636 * ADD dst, tmp, s1.wwww
3637 */
3638
3639 struct tgsi_full_src_register s1_wwww =
3640 swizzle_src(&inst->Src[1], TGSI_SWIZZLE_W, TGSI_SWIZZLE_W,
3641 TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);
3642
3643 unsigned tmp = get_temp_index(emit);
3644 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3645 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3646
3647 /* DP3 tmp, s0, s1 */
3648 emit_instruction_op2(emit, VGPU10_OPCODE_DP3, &tmp_dst, &inst->Src[0],
3649 &inst->Src[1], FALSE);
3650
3651 /* ADD dst, tmp, s1.wwww */
3652 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &inst->Dst[0], &tmp_src,
3653 &s1_wwww, inst->Instruction.Saturate);
3654
3655 free_temp_indexes(emit);
3656
3657 return TRUE;
3658 }
3659
3660
3661 /**
3662 * Emit code for TGSI_OPCODE_DST instruction.
3663 */
3664 static boolean
3665 emit_dst(struct svga_shader_emitter_v10 *emit,
3666 const struct tgsi_full_instruction *inst)
3667 {
3668 /*
3669 * dst.x = 1
3670 * dst.y = src0.y * src1.y
3671 * dst.z = src0.z
3672 * dst.w = src1.w
3673 */
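/* DST is traditionally used for distance attenuation: with
 * src0 = (*, d*d, d*d, *) and src1 = (*, 1/d, *, 1/d) the result
 * is (1, d, d*d, 1/d).
 */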
3674
3675 struct tgsi_full_src_register s0_yyyy =
3676 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
3677 struct tgsi_full_src_register s0_zzzz =
3678 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Z);
3679 struct tgsi_full_src_register s1_yyyy =
3680 scalar_src(&inst->Src[1], TGSI_SWIZZLE_Y);
3681 struct tgsi_full_src_register s1_wwww =
3682 scalar_src(&inst->Src[1], TGSI_SWIZZLE_W);
3683
3684 /*
3685 * If dst and either src0 or src1 are the same we need
3686 * to create a temporary for it and insert an extra move.
3687 */
3688 unsigned tmp_move = get_temp_index(emit);
3689 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
3690 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
3691
3692 /* MOV dst.x, 1.0 */
3693 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
3694 struct tgsi_full_dst_register dst_x =
3695 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
3696 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
3697
3698 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &one, FALSE);
3699 }
3700
3701 /* MUL dst.y, s0.y, s1.y */
3702 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
3703 struct tgsi_full_dst_register dst_y =
3704 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
3705
3706 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &dst_y, &s0_yyyy,
3707 &s1_yyyy, inst->Instruction.Saturate);
3708 }
3709
3710 /* MOV dst.z, s0.z */
3711 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
3712 struct tgsi_full_dst_register dst_z =
3713 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
3714
3715 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_z, &s0_zzzz,
3716 inst->Instruction.Saturate);
3717 }
3718
3719 /* MOV dst.w, s1.w */
3720 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
3721 struct tgsi_full_dst_register dst_w =
3722 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
3723
3724 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &s1_wwww,
3725 inst->Instruction.Saturate);
3726 }
3727
3728 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
3729 FALSE);
3730 free_temp_indexes(emit);
3731
3732 return TRUE;
3733 }
3734
3735
3736
3737 /**
3738 * Emit code for TGSI_OPCODE_ENDPRIM (GS only)
3739 */
3740 static boolean
3741 emit_endprim(struct svga_shader_emitter_v10 *emit,
3742 const struct tgsi_full_instruction *inst)
3743 {
3744 assert(emit->unit == PIPE_SHADER_GEOMETRY);
3745
3746 /* We can't use emit_simple() because the TGSI instruction has one
3747 * operand (vertex stream number) which we must ignore for VGPU10.
3748 */
3749 begin_emit_instruction(emit);
3750 emit_opcode(emit, VGPU10_OPCODE_CUT, FALSE);
3751 end_emit_instruction(emit);
3752 return TRUE;
3753 }
3754
3755
3756 /**
3757 * Emit code for TGSI_OPCODE_EX2 (2^x) instruction.
3758 */
3759 static boolean
3760 emit_ex2(struct svga_shader_emitter_v10 *emit,
3761 const struct tgsi_full_instruction *inst)
3762 {
3763 /* Note that TGSI_OPCODE_EX2 computes only one value from src.x
3764 * while VGPU10 computes four values.
3765 *
3766 * dst = EX2(src):
3767 * dst.xyzw = 2.0 ^ src.x
3768 */
3769
3770 struct tgsi_full_src_register src_xxxx =
3771 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
3772 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
3773
3774 /* EXP dst, s0.xxxx */
3775 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &inst->Dst[0], &src_xxxx,
3776 inst->Instruction.Saturate);
3777
3778 return TRUE;
3779 }
3780
3781
3782 /**
3783 * Emit code for TGSI_OPCODE_EXP instruction.
3784 */
3785 static boolean
3786 emit_exp(struct svga_shader_emitter_v10 *emit,
3787 const struct tgsi_full_instruction *inst)
3788 {
3789 /*
3790 * dst.x = 2 ^ floor(s0.x)
3791 * dst.y = s0.x - floor(s0.x)
3792 * dst.z = 2 ^ s0.x
3793 * dst.w = 1.0
3794 */
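/* Worked example (illustrative): for s0.x = 2.5 the result is
 * (2^2, 0.5, 2^2.5, 1.0) = (4.0, 0.5, ~5.657, 1.0).
 */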
3795
3796 struct tgsi_full_src_register src_xxxx =
3797 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
3798 unsigned tmp = get_temp_index(emit);
3799 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3800 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3801
3802 /*
3803 * If dst and src are the same we need to create
3804 * a temporary for it and insert an extra move.
3805 */
3806 unsigned tmp_move = get_temp_index(emit);
3807 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
3808 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
3809
3810 /* only use X component of temp reg */
3811 tmp_dst = writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
3812 tmp_src = scalar_src(&tmp_src, TGSI_SWIZZLE_X);
3813
3814 /* ROUND_NI tmp.x, s0.x */
3815 emit_instruction_op1(emit, VGPU10_OPCODE_ROUND_NI, &tmp_dst,
3816 &src_xxxx, FALSE); /* round to -infinity */
3817
3818 /* EXP dst.x, tmp.x */
3819 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
3820 struct tgsi_full_dst_register dst_x =
3821 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
3822
3823 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &dst_x, &tmp_src,
3824 inst->Instruction.Saturate);
3825 }
3826
3827 /* ADD dst.y, s0.x, -tmp */
3828 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
3829 struct tgsi_full_dst_register dst_y =
3830 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
3831 struct tgsi_full_src_register neg_tmp_src = negate_src(&tmp_src);
3832
3833 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &dst_y, &src_xxxx,
3834 &neg_tmp_src, inst->Instruction.Saturate);
3835 }
3836
3837 /* EXP dst.z, s0.x */
3838 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
3839 struct tgsi_full_dst_register dst_z =
3840 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
3841
3842 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &dst_z, &src_xxxx,
3843 inst->Instruction.Saturate);
3844 }
3845
3846 /* MOV dst.w, 1.0 */
3847 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
3848 struct tgsi_full_dst_register dst_w =
3849 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
3850 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
3851
3852 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one,
3853 FALSE);
3854 }
3855
3856 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
3857 FALSE);
3858
3859 free_temp_indexes(emit);
3860
3861 return TRUE;
3862 }
3863
3864
3865 /**
3866 * Emit code for TGSI_OPCODE_IF instruction.
3867 */
3868 static boolean
3869 emit_if(struct svga_shader_emitter_v10 *emit,
3870 const struct tgsi_full_instruction *inst)
3871 {
3872 VGPU10OpcodeToken0 opcode0;
3873
3874 /* The src register should be a scalar */
3875 assert(inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleY &&
3876 inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleZ &&
3877 inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleW);
3878
3879 /* The only special thing here is that we need to set the
3880 * VGPU10_INSTRUCTION_TEST_NONZERO flag since we want to test if
3881 * src.x is non-zero.
3882 */
3883 opcode0.value = 0;
3884 opcode0.opcodeType = VGPU10_OPCODE_IF;
3885 opcode0.testBoolean = VGPU10_INSTRUCTION_TEST_NONZERO;
3886
3887 begin_emit_instruction(emit);
3888 emit_dword(emit, opcode0.value);
3889 emit_src_register(emit, &inst->Src[0]);
3890 end_emit_instruction(emit);
3891
3892 return TRUE;
3893 }
3894
3895
3896 /**
3897 * Emit code for TGSI_OPCODE_KILL_IF instruction (kill fragment if any of
3898 * the register components are negative).
3899 */
3900 static boolean
3901 emit_kill_if(struct svga_shader_emitter_v10 *emit,
3902 const struct tgsi_full_instruction *inst)
3903 {
3904 unsigned tmp = get_temp_index(emit);
3905 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3906 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3907
3908 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
3909
3910 struct tgsi_full_dst_register tmp_dst_x =
3911 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
3912 struct tgsi_full_src_register tmp_src_xxxx =
3913 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
3914
3915 /* tmp = src[0] < 0.0 */
3916 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[0],
3917 &zero, FALSE);
3918
3919 if (!same_swizzle_terms(&inst->Src[0])) {
3920 /* If the swizzle is not XXXX, YYYY, ZZZZ or WWWW we need to
3921 * logically OR the swizzle terms. Most uses of KILL_IF only
3922 * test one channel so it's good to avoid these extra steps.
3923 */
3924 struct tgsi_full_src_register tmp_src_yyyy =
3925 scalar_src(&tmp_src, TGSI_SWIZZLE_Y);
3926 struct tgsi_full_src_register tmp_src_zzzz =
3927 scalar_src(&tmp_src, TGSI_SWIZZLE_Z);
3928 struct tgsi_full_src_register tmp_src_wwww =
3929 scalar_src(&tmp_src, TGSI_SWIZZLE_W);
3930
3931 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
3932 &tmp_src_yyyy, FALSE);
3933 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
3934 &tmp_src_zzzz, FALSE);
3935 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
3936 &tmp_src_wwww, FALSE);
3937 }
3938
3939 begin_emit_instruction(emit);
3940 emit_discard_opcode(emit, TRUE); /* discard if tmp.x is non-zero */
3941 emit_src_register(emit, &tmp_src_xxxx);
3942 end_emit_instruction(emit);
3943
3944 free_temp_indexes(emit);
3945
3946 return TRUE;
3947 }
3948
3949
3950 /**
3951 * Emit code for TGSI_OPCODE_KILL instruction (unconditional discard).
3952 */
3953 static boolean
3954 emit_kill(struct svga_shader_emitter_v10 *emit,
3955 const struct tgsi_full_instruction *inst)
3956 {
3957 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
3958
3959 /* DISCARD if 0.0 is zero, i.e. always discard */
3960 begin_emit_instruction(emit);
3961 emit_discard_opcode(emit, FALSE);
3962 emit_src_register(emit, &zero);
3963 end_emit_instruction(emit);
3964
3965 return TRUE;
3966 }
3967
3968
3969 /**
3970 * Emit code for TGSI_OPCODE_LG2 instruction.
3971 */
3972 static boolean
3973 emit_lg2(struct svga_shader_emitter_v10 *emit,
3974 const struct tgsi_full_instruction *inst)
3975 {
3976 /* Note that TGSI_OPCODE_LG2 computes only one value from src.x
3977 * while VGPU10 computes four values.
3978 *
3979 * dst = LG2(src):
3980 * dst.xyzw = log2(src.x)
3981 */
3982
3983 struct tgsi_full_src_register src_xxxx =
3984 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
3985 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
3986
3987 /* LOG dst, s0.xxxx */
3988 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &inst->Dst[0], &src_xxxx,
3989 inst->Instruction.Saturate);
3990
3991 return TRUE;
3992 }
3993
3994
3995 /**
3996 * Emit code for TGSI_OPCODE_LIT instruction.
3997 */
3998 static boolean
3999 emit_lit(struct svga_shader_emitter_v10 *emit,
4000 const struct tgsi_full_instruction *inst)
4001 {
4002 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4003
4004 /*
4005 * If dst and src are the same we need to create
4006 * a temporary for it and insert an extra move.
4007 */
4008 unsigned tmp_move = get_temp_index(emit);
4009 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
4010 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
4011
4012 /*
4013 * dst.x = 1
4014 * dst.y = max(src.x, 0)
4015 * dst.z = (src.x > 0) ? max(src.y, 0)^{clamp(src.w, -128, 128)} : 0
4016 * dst.w = 1
4017 */
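/* Worked example (illustrative): for src = (0.5, 0.25, -, 2.0) the result
 * is (1.0, 0.5, 0.0625, 1.0), since max(0.25, 0)^2 = 0.0625 and src.x > 0.
 */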
4018
4019 /* MOV dst.x, 1.0 */
4020 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
4021 struct tgsi_full_dst_register dst_x =
4022 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
4023 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &one, FALSE);
4024 }
4025
4026 /* MOV dst.w, 1.0 */
4027 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
4028 struct tgsi_full_dst_register dst_w =
4029 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
4030 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one, FALSE);
4031 }
4032
4033 /* MAX dst.y, src.x, 0.0 */
4034 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
4035 struct tgsi_full_dst_register dst_y =
4036 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
4037 struct tgsi_full_src_register zero =
4038 make_immediate_reg_float(emit, 0.0f);
4039 struct tgsi_full_src_register src_xxxx =
4040 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4041 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4042
4043 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &dst_y, &src_xxxx,
4044 &zero, inst->Instruction.Saturate);
4045 }
4046
4047 /*
4048 * tmp1 = clamp(src.w, -128, 128);
4049 * MAX tmp1, src.w, -128
4050 * MIN tmp1, tmp1, 128
4051 *
4052 * tmp2 = max(src.y, 0);
4053 * MAX tmp2, src.y, 0
4054 *
4055 * tmp1 = pow(tmp2, tmp1);
4056 * LOG tmp2, tmp2
4057 * MUL tmp1, tmp2, tmp1
4058 * EXP tmp1, tmp1
4059 *
4060 * tmp1 = (src.w == 0) ? 1 : tmp1;
4061 * EQ tmp2, 0, src.w
4062 * MOVC tmp1, tmp2, 1.0, tmp1
4063 *
4064 * dst.z = (0 < src.x) ? tmp1 : 0;
4065 * LT tmp2, 0, src.x
4066 * MOVC dst.z, tmp2, tmp1, 0.0
4067 */
4068 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
4069 struct tgsi_full_dst_register dst_z =
4070 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
4071
4072 unsigned tmp1 = get_temp_index(emit);
4073 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4074 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4075 unsigned tmp2 = get_temp_index(emit);
4076 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4077 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4078
4079 struct tgsi_full_src_register src_xxxx =
4080 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
4081 struct tgsi_full_src_register src_yyyy =
4082 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
4083 struct tgsi_full_src_register src_wwww =
4084 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
4085
4086 struct tgsi_full_src_register zero =
4087 make_immediate_reg_float(emit, 0.0f);
4088 struct tgsi_full_src_register lowerbound =
4089 make_immediate_reg_float(emit, -128.0f);
4090 struct tgsi_full_src_register upperbound =
4091 make_immediate_reg_float(emit, 128.0f);
4092
4093 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &tmp1_dst, &src_wwww,
4094 &lowerbound, FALSE);
4095 emit_instruction_op2(emit, VGPU10_OPCODE_MIN, &tmp1_dst, &tmp1_src,
4096 &upperbound, FALSE);
4097 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &tmp2_dst, &src_yyyy,
4098 &zero, FALSE);
4099
4100 /* POW tmp1, tmp2, tmp1 */
4101 /* LOG tmp2, tmp2 */
4102 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp2_dst, &tmp2_src,
4103 FALSE);
4104
4105 /* MUL tmp1, tmp2, tmp1 */
4106 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp1_dst, &tmp2_src,
4107 &tmp1_src, FALSE);
4108
4109 /* EXP tmp1, tmp1 */
4110 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &tmp1_dst, &tmp1_src,
4111 FALSE);
4112
4113 /* EQ tmp2, 0, src.w */
4114 emit_instruction_op2(emit, VGPU10_OPCODE_EQ, &tmp2_dst, &zero,
4115 &src_wwww, FALSE);
4116 /* MOVC tmp1, tmp2, 1.0, tmp1 */
4117 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &tmp1_dst,
4118 &tmp2_src, &one, &tmp1_src, FALSE);
4119
4120 /* LT tmp2, 0, src.x */
4121 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp2_dst, &zero,
4122 &src_xxxx, FALSE);
4123 /* MOVC dst.z, tmp2, tmp1, 0.0 */
4124 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &dst_z,
4125 &tmp2_src, &tmp1_src, &zero, FALSE);
4126 }
4127
4128 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
4129 FALSE);
4130 free_temp_indexes(emit);
4131
4132 return TRUE;
4133 }
4134
4135
4136 /**
4137 * Emit code for TGSI_OPCODE_LOG instruction.
4138 */
4139 static boolean
4140 emit_log(struct svga_shader_emitter_v10 *emit,
4141 const struct tgsi_full_instruction *inst)
4142 {
4143 /*
4144 * dst.x = floor(lg2(abs(s0.x)))
4145 * dst.y = abs(s0.x) / (2 ^ floor(lg2(abs(s0.x))))
4146 * dst.z = lg2(abs(s0.x))
4147 * dst.w = 1.0
4148 */
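/* Worked example (illustrative): for s0.x = -12.0, abs(s0.x) = 12 and
 * lg2(12) ~= 3.585, so the result is (3.0, 1.5, ~3.585, 1.0)
 * (since 12 / 2^3 = 1.5).
 */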
4149
4150 struct tgsi_full_src_register src_xxxx =
4151 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
4152 unsigned tmp = get_temp_index(emit);
4153 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4154 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4155 struct tgsi_full_src_register abs_src_xxxx = absolute_src(&src_xxxx);
4156
4157 /* only use X component of temp reg */
4158 tmp_dst = writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4159 tmp_src = scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4160
4161 /* LOG tmp.x, abs(s0.x) */
4162 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XYZ) {
4163 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp_dst,
4164 &abs_src_xxxx, FALSE);
4165 }
4166
4167 /* MOV dst.z, tmp.x */
4168 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
4169 struct tgsi_full_dst_register dst_z =
4170 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_Z);
4171
4172 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_z,
4173 &tmp_src, inst->Instruction.Saturate);
4174 }
4175
4176 /* FLR tmp.x, tmp.x */
4177 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY) {
4178 emit_instruction_op1(emit, VGPU10_OPCODE_ROUND_NI, &tmp_dst,
4179 &tmp_src, FALSE);
4180 }
4181
4182 /* MOV dst.x, tmp.x */
4183 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
4184 struct tgsi_full_dst_register dst_x =
4185 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_X);
4186
4187 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &tmp_src,
4188 inst->Instruction.Saturate);
4189 }
4190
4191 /* EXP tmp.x, tmp.x */
4192 /* DIV dst.y, abs(s0.x), tmp.x */
4193 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
4194 struct tgsi_full_dst_register dst_y =
4195 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_Y);
4196
4197 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &tmp_dst, &tmp_src,
4198 FALSE);
4199 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &dst_y, &abs_src_xxxx,
4200 &tmp_src, inst->Instruction.Saturate);
4201 }
4202
4203 /* MOV dst.w, 1.0 */
4204 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
4205 struct tgsi_full_dst_register dst_w =
4206 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_W);
4207 struct tgsi_full_src_register one =
4208 make_immediate_reg_float(emit, 1.0f);
4209
4210 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one, FALSE);
4211 }
4212
4213 free_temp_indexes(emit);
4214
4215 return TRUE;
4216 }
4217
4218
4219 /**
4220 * Emit code for TGSI_OPCODE_LRP instruction.
4221 */
4222 static boolean
4223 emit_lrp(struct svga_shader_emitter_v10 *emit,
4224 const struct tgsi_full_instruction *inst)
4225 {
4226 /* dst = LRP(s0, s1, s2):
4227 * dst = s0 * (s1 - s2) + s2
4228 * Translates into:
4229 * ADD tmp, s1, -s2; tmp = s1 - s2
4230 * MAD dst, s0, tmp, s2; dst = s0 * tmp + s2
4231 */
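/* Sanity check: s0 = 0 yields s2, s0 = 1 yields s1, and s0 = 0.5 yields
 * the midpoint (s1 + s2) / 2, matching the usual lerp definition.
 */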
4232 unsigned tmp = get_temp_index(emit);
4233 struct tgsi_full_src_register src_tmp = make_src_temp_reg(tmp);
4234 struct tgsi_full_dst_register dst_tmp = make_dst_temp_reg(tmp);
4235 struct tgsi_full_src_register neg_src2 = negate_src(&inst->Src[2]);
4236
4237 /* ADD tmp, s1, -s2 */
4238 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &dst_tmp,
4239 &inst->Src[1], &neg_src2, FALSE);
4240
4241 /* MAD dst, s0, tmp, s2 */
4242 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &inst->Dst[0],
4243 &inst->Src[0], &src_tmp, &inst->Src[2],
4244 inst->Instruction.Saturate);
4245
4246 free_temp_indexes(emit);
4247
4248 return TRUE;
4249 }
4250
4251
4252 /**
4253 * Emit code for TGSI_OPCODE_POW instruction.
4254 */
4255 static boolean
4256 emit_pow(struct svga_shader_emitter_v10 *emit,
4257 const struct tgsi_full_instruction *inst)
4258 {
4259 /* Note that TGSI_OPCODE_POW computes only one value from src0.x and
4260 * src1.x while VGPU10 computes four values.
4261 *
4262 * dst = POW(src0, src1):
4263 * dst.xyzw = src0.x ^ src1.x
4264 */
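/* The translation below uses the identity x^y = 2^(y * log2(x)), e.g.
 * pow(2, 10) = 2^(10 * 1) = 1024.  As with the GL POW function, the
 * result is undefined for negative bases since LOG is applied to src0.x.
 */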
4265 unsigned tmp = get_temp_index(emit);
4266 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4267 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4268 struct tgsi_full_src_register src0_xxxx =
4269 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4270 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4271 struct tgsi_full_src_register src1_xxxx =
4272 swizzle_src(&inst->Src[1], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4273 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4274
4275 /* LOG tmp, s0.xxxx */
4276 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp_dst, &src0_xxxx,
4277 FALSE);
4278
4279 /* MUL tmp, tmp, s1.xxxx */
4280 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst, &tmp_src,
4281 &src1_xxxx, FALSE);
4282
4283 /* EXP dst, tmp */
4284 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &inst->Dst[0],
4285 &tmp_src, inst->Instruction.Saturate);
4286
4287 /* free tmp */
4288 free_temp_indexes(emit);
4289
4290 return TRUE;
4291 }
4292
4293
4294 /**
4295 * Emit code for TGSI_OPCODE_RCP (reciprocal) instruction.
4296 */
4297 static boolean
4298 emit_rcp(struct svga_shader_emitter_v10 *emit,
4299 const struct tgsi_full_instruction *inst)
4300 {
4301 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4302
4303 unsigned tmp = get_temp_index(emit);
4304 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4305 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4306
4307 struct tgsi_full_dst_register tmp_dst_x =
4308 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4309 struct tgsi_full_src_register tmp_src_xxxx =
4310 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4311
4312 /* DIV tmp.x, 1.0, s0 */
4313 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &tmp_dst_x, &one,
4314 &inst->Src[0], FALSE);
4315
4316 /* MOV dst, tmp.xxxx */
4317 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4318 &tmp_src_xxxx, inst->Instruction.Saturate);
4319
4320 free_temp_indexes(emit);
4321
4322 return TRUE;
4323 }
4324
4325
4326 /**
4327 * Emit code for TGSI_OPCODE_RSQ instruction.
4328 */
4329 static boolean
4330 emit_rsq(struct svga_shader_emitter_v10 *emit,
4331 const struct tgsi_full_instruction *inst)
4332 {
4333 /* dst = RSQ(src):
4334 * dst.xyzw = 1 / sqrt(src.x)
4335 * Translates into:
4336 * RSQ tmp, src.x
4337 * MOV dst, tmp.xxxx
4338 */
4339
4340 unsigned tmp = get_temp_index(emit);
4341 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4342 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4343
4344 struct tgsi_full_dst_register tmp_dst_x =
4345 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4346 struct tgsi_full_src_register tmp_src_xxxx =
4347 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4348
4349 /* RSQ tmp, src.x */
4350 emit_instruction_op1(emit, VGPU10_OPCODE_RSQ, &tmp_dst_x,
4351 &inst->Src[0], FALSE);
4352
4353 /* MOV dst, tmp.xxxx */
4354 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4355 &tmp_src_xxxx, inst->Instruction.Saturate);
4356
4357 /* free tmp */
4358 free_temp_indexes(emit);
4359
4360 return TRUE;
4361 }
4362
4363
4364 /**
4365 * Emit code for TGSI_OPCODE_SCS instruction.
4366 */
4367 static boolean
4368 emit_scs(struct svga_shader_emitter_v10 *emit,
4369 const struct tgsi_full_instruction *inst)
4370 {
4371 /* dst.x = cos(src.x)
4372 * dst.y = sin(src.x)
4373 * dst.z = 0.0
4374 * dst.w = 1.0
4375 */
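/* VGPU10 SINCOS writes sine to its first destination and cosine to its
 * second, so dst.y is emitted as the first destination and dst.x as the
 * second in the sequence below.
 */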
4376 struct tgsi_full_dst_register dst_x =
4377 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_X);
4378 struct tgsi_full_dst_register dst_y =
4379 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_Y);
4380 struct tgsi_full_dst_register dst_zw =
4381 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_ZW);
4382
4383 struct tgsi_full_src_register zero_one =
4384 make_immediate_reg_float4(emit, 0.0f, 0.0f, 0.0f, 1.0f);
4385
4386 begin_emit_instruction(emit);
4387 emit_opcode(emit, VGPU10_OPCODE_SINCOS, inst->Instruction.Saturate);
4388 emit_dst_register(emit, &dst_y);
4389 emit_dst_register(emit, &dst_x);
4390 emit_src_register(emit, &inst->Src[0]);
4391 end_emit_instruction(emit);
4392
4393 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
4394 &dst_zw, &zero_one, inst->Instruction.Saturate);
4395
4396 return TRUE;
4397 }
4398
4399
4400 /**
4401 * Emit code for TGSI_OPCODE_SEQ (Set Equal) instruction.
4402 */
4403 static boolean
4404 emit_seq(struct svga_shader_emitter_v10 *emit,
4405 const struct tgsi_full_instruction *inst)
4406 {
4407 /* dst = SEQ(s0, s1):
4408 * dst = s0 == s1 ? 1.0 : 0.0 (per component)
4409 * Translates into:
4410 * EQ tmp, s0, s1; tmp = s0 == s1 ? 0xffffffff : 0 (per comp)
4411 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4412 */
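/* The same compare-then-select pattern (comparison into a 0/~0 mask,
 * then MOVC between 1.0 and 0.0) is reused by SGE, SGT, SLE, SLT and
 * SNE below; only the comparison opcode and operand order change.
 */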
4413 unsigned tmp = get_temp_index(emit);
4414 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4415 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4416 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4417 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4418
4419 /* EQ tmp, s0, s1 */
4420 emit_instruction_op2(emit, VGPU10_OPCODE_EQ, &tmp_dst, &inst->Src[0],
4421 &inst->Src[1], FALSE);
4422
4423 /* MOVC dst, tmp, one, zero */
4424 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4425 &one, &zero, FALSE);
4426
4427 free_temp_indexes(emit);
4428
4429 return TRUE;
4430 }
4431
4432
4433 /**
4434 * Emit code for TGSI_OPCODE_SGE (Set Greater than or Equal) instruction.
4435 */
4436 static boolean
4437 emit_sge(struct svga_shader_emitter_v10 *emit,
4438 const struct tgsi_full_instruction *inst)
4439 {
4440 /* dst = SGE(s0, s1):
4441 * dst = s0 >= s1 ? 1.0 : 0.0 (per component)
4442 * Translates into:
4443 * GE tmp, s0, s1; tmp = s0 >= s1 ? 0xffffffff : 0 (per comp)
4444 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4445 */
4446 unsigned tmp = get_temp_index(emit);
4447 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4448 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4449 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4450 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4451
4452 /* GE tmp, s0, s1 */
4453 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &tmp_dst, &inst->Src[0],
4454 &inst->Src[1], FALSE);
4455
4456 /* MOVC dst, tmp, one, zero */
4457 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4458 &one, &zero, FALSE);
4459
4460 free_temp_indexes(emit);
4461
4462 return TRUE;
4463 }
4464
4465
4466 /**
4467 * Emit code for TGSI_OPCODE_SGT (Set Greater than) instruction.
4468 */
4469 static boolean
4470 emit_sgt(struct svga_shader_emitter_v10 *emit,
4471 const struct tgsi_full_instruction *inst)
4472 {
4473 /* dst = SGT(s0, s1):
4474 * dst = s0 > s1 ? 1.0 : 0.0 (per component)
4475 * Translates into:
4476 * LT tmp, s1, s0; tmp = s1 < s0 ? 0xffffffff : 0 (per comp)
4477 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4478 */
4479 unsigned tmp = get_temp_index(emit);
4480 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4481 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4482 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4483 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4484
4485 /* LT tmp, s1, s0 */
4486 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[1],
4487 &inst->Src[0], FALSE);
4488
4489 /* MOVC dst, tmp, one, zero */
4490 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4491 &one, &zero, FALSE);
4492
4493 free_temp_indexes(emit);
4494
4495 return TRUE;
4496 }
4497
4498
4499 /**
4500 * Emit code for TGSI_OPCODE_SIN and TGSI_OPCODE_COS instructions.
4501 */
4502 static boolean
4503 emit_sincos(struct svga_shader_emitter_v10 *emit,
4504 const struct tgsi_full_instruction *inst)
4505 {
4506 unsigned tmp = get_temp_index(emit);
4507 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4508 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4509
4510 struct tgsi_full_src_register tmp_src_xxxx =
4511 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4512 struct tgsi_full_dst_register tmp_dst_x =
4513 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4514
4515 begin_emit_instruction(emit);
4516 emit_opcode(emit, VGPU10_OPCODE_SINCOS, FALSE);
4517
4518 if (inst->Instruction.Opcode == TGSI_OPCODE_SIN)
4519 {
4520 emit_dst_register(emit, &tmp_dst_x); /* first destination register */
4521 emit_null_dst_register(emit); /* second destination register */
4522 }
4523 else {
4524 emit_null_dst_register(emit);
4525 emit_dst_register(emit, &tmp_dst_x);
4526 }
4527
4528 emit_src_register(emit, &inst->Src[0]);
4529 end_emit_instruction(emit);
4530
4531 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4532 &tmp_src_xxxx, inst->Instruction.Saturate);
4533
4534 free_temp_indexes(emit);
4535
4536 return TRUE;
4537 }
4538
4539
4540 /**
4541 * Emit code for TGSI_OPCODE_SLE (Set Less than or Equal) instruction.
4542 */
4543 static boolean
4544 emit_sle(struct svga_shader_emitter_v10 *emit,
4545 const struct tgsi_full_instruction *inst)
4546 {
4547 /* dst = SLE(s0, s1):
4548 * dst = s0 <= s1 ? 1.0 : 0.0 (per component)
4549 * Translates into:
4550 * GE tmp, s1, s0; tmp = s1 >= s0 ? 0xffffffff : 0 (per comp)
4551 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4552 */
4553 unsigned tmp = get_temp_index(emit);
4554 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4555 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4556 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4557 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4558
4559 /* GE tmp, s1, s0 */
4560 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &tmp_dst, &inst->Src[1],
4561 &inst->Src[0], FALSE);
4562
4563 /* MOVC dst, tmp, one, zero */
4564 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4565 &one, &zero, FALSE);
4566
4567 free_temp_indexes(emit);
4568
4569 return TRUE;
4570 }
4571
4572
4573 /**
4574 * Emit code for TGSI_OPCODE_SLT (Set Less than) instruction.
4575 */
4576 static boolean
4577 emit_slt(struct svga_shader_emitter_v10 *emit,
4578 const struct tgsi_full_instruction *inst)
4579 {
4580 /* dst = SLT(s0, s1):
4581 * dst = s0 < s1 ? 1.0 : 0.0 (per component)
4582 * Translates into:
4583 * LT tmp, s0, s1; tmp = s0 < s1 ? 0xffffffff : 0 (per comp)
4584 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4585 */
4586 unsigned tmp = get_temp_index(emit);
4587 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4588 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4589 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4590 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4591
4592 /* LT tmp, s0, s1 */
4593 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[0],
4594 &inst->Src[1], FALSE);
4595
4596 /* MOVC dst, tmp, one, zero */
4597 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4598 &one, &zero, FALSE);
4599
4600 free_temp_indexes(emit);
4601
4602 return TRUE;
4603 }
4604
4605
4606 /**
4607 * Emit code for TGSI_OPCODE_SNE (Set Not Equal) instruction.
4608 */
4609 static boolean
4610 emit_sne(struct svga_shader_emitter_v10 *emit,
4611 const struct tgsi_full_instruction *inst)
4612 {
4613 /* dst = SNE(s0, s1):
4614 * dst = s0 != s1 ? 1.0 : 0.0 (per component)
4615 * Translates into:
4616 * NE tmp, s0, s1; tmp = s0 != s1 ? 0xffffffff : 0 (per comp)
4617 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4618 */
4619 unsigned tmp = get_temp_index(emit);
4620 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4621 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4622 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4623 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4624
4625 /* NE tmp, s0, s1 */
4626 emit_instruction_op2(emit, VGPU10_OPCODE_NE, &tmp_dst, &inst->Src[0],
4627 &inst->Src[1], FALSE);
4628
4629 /* MOVC dst, tmp, one, zero */
4630 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4631 &one, &zero, FALSE);
4632
4633 free_temp_indexes(emit);
4634
4635 return TRUE;
4636 }
4637
4638
4639 /**
4640 * Emit code for TGSI_OPCODE_SSG (Set Sign) instruction.
4641 */
4642 static boolean
4643 emit_ssg(struct svga_shader_emitter_v10 *emit,
4644 const struct tgsi_full_instruction *inst)
4645 {
4646 /* dst.x = (src.x > 0.0) ? 1.0 : (src.x < 0.0) ? -1.0 : 0.0
4647 * dst.y = (src.y > 0.0) ? 1.0 : (src.y < 0.0) ? -1.0 : 0.0
4648 * dst.z = (src.z > 0.0) ? 1.0 : (src.z < 0.0) ? -1.0 : 0.0
4649 * dst.w = (src.w > 0.0) ? 1.0 : (src.w < 0.0) ? -1.0 : 0.0
4650 * Translates into:
4651 * LT tmp1, src, zero; tmp1 = src < zero ? 0xffffffff : 0 (per comp)
4652 * MOVC tmp2, tmp1, -1.0, 0.0; tmp2 = tmp1 ? -1.0 : 0.0 (per component)
4653 * LT tmp1, zero, src; tmp1 = zero < src ? 0xffffffff : 0 (per comp)
4654 * MOVC dst, tmp1, 1.0, tmp2; dst = tmp1 ? 1.0 : tmp2 (per component)
4655 */
4656 struct tgsi_full_src_register zero =
4657 make_immediate_reg_float(emit, 0.0f);
4658 struct tgsi_full_src_register one =
4659 make_immediate_reg_float(emit, 1.0f);
4660 struct tgsi_full_src_register neg_one =
4661 make_immediate_reg_float(emit, -1.0f);
4662
4663 unsigned tmp1 = get_temp_index(emit);
4664 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4665 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4666
4667 unsigned tmp2 = get_temp_index(emit);
4668 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4669 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4670
4671 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp1_dst, &inst->Src[0],
4672 &zero, FALSE);
4673 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &tmp2_dst, &tmp1_src,
4674 &neg_one, &zero, FALSE);
4675 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp1_dst, &zero,
4676 &inst->Src[0], FALSE);
4677 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp1_src,
4678 &one, &tmp2_src, FALSE);
4679
4680 free_temp_indexes(emit);
4681
4682 return TRUE;
4683 }
4684
4685
4686 /**
4687 * Emit code for TGSI_OPCODE_ISSG (Integer Set Sign) instruction.
4688 */
4689 static boolean
4690 emit_issg(struct svga_shader_emitter_v10 *emit,
4691 const struct tgsi_full_instruction *inst)
4692 {
4693 /* dst.x = (src.x > 0) ? 1 : (src.x < 0) ? -1 : 0
4694 * dst.y = (src.y > 0) ? 1 : (src.y < 0) ? -1 : 0
4695 * dst.z = (src.z > 0) ? 1 : (src.z < 0) ? -1 : 0
4696 * dst.w = (src.w > 0) ? 1 : (src.w < 0) ? -1 : 0
4697 * Translates into:
4698 * ILT tmp1, src, 0 tmp1 = src < 0 ? -1 : 0 (per component)
4699 * ILT tmp2, 0, src tmp2 = 0 < src ? -1 : 0 (per component)
4700 * IADD dst, tmp1, neg(tmp2) dst = tmp1 - tmp2 (per component)
4701 */
4702 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4703
4704 unsigned tmp1 = get_temp_index(emit);
4705 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4706 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4707
4708 unsigned tmp2 = get_temp_index(emit);
4709 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4710 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4711
4712 struct tgsi_full_src_register neg_tmp2 = negate_src(&tmp2_src);
4713
4714 emit_instruction_op2(emit, VGPU10_OPCODE_ILT, &tmp1_dst,
4715 &inst->Src[0], &zero, FALSE);
4716 emit_instruction_op2(emit, VGPU10_OPCODE_ILT, &tmp2_dst,
4717 &zero, &inst->Src[0], FALSE);
4718 emit_instruction_op2(emit, VGPU10_OPCODE_IADD, &inst->Dst[0],
4719 &tmp1_src, &neg_tmp2, FALSE);
4720
4721 free_temp_indexes(emit);
4722
4723 return TRUE;
4724 }
4725
4726
4727 /**
4728 * Emit a comparison instruction. The dest register will get
4729 * 0 or ~0 values depending on the outcome of comparing src0 to src1.
4730 */
4731 static void
4732 emit_comparison(struct svga_shader_emitter_v10 *emit,
4733 SVGA3dCmpFunc func,
4734 const struct tgsi_full_dst_register *dst,
4735 const struct tgsi_full_src_register *src0,
4736 const struct tgsi_full_src_register *src1)
4737 {
4738 struct tgsi_full_src_register immediate;
4739 VGPU10OpcodeToken0 opcode0;
4740 boolean swapSrc = FALSE;
4741
4742 /* Sanity checks for svga vs. gallium enums */
4743 STATIC_ASSERT(SVGA3D_CMP_LESS == (PIPE_FUNC_LESS + 1));
4744 STATIC_ASSERT(SVGA3D_CMP_GREATEREQUAL == (PIPE_FUNC_GEQUAL + 1));
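/* VGPU10 has no LE/GT float comparison opcodes, so LESSEQUAL and GREATER
 * are handled below by swapping the source operands of GE and LT
 * respectively (a <= b  <==>  b >= a).
 */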
4745
4746 opcode0.value = 0;
4747
4748 switch (func) {
4749 case SVGA3D_CMP_NEVER:
4750 immediate = make_immediate_reg_int(emit, 0);
4751 /* MOV dst, {0} */
4752 begin_emit_instruction(emit);
4753 emit_dword(emit, VGPU10_OPCODE_MOV);
4754 emit_dst_register(emit, dst);
4755 emit_src_register(emit, &immediate);
4756 end_emit_instruction(emit);
4757 return;
4758 case SVGA3D_CMP_ALWAYS:
4759 immediate = make_immediate_reg_int(emit, -1);
4760 /* MOV dst, {-1} */
4761 begin_emit_instruction(emit);
4762 emit_dword(emit, VGPU10_OPCODE_MOV);
4763 emit_dst_register(emit, dst);
4764 emit_src_register(emit, &immediate);
4765 end_emit_instruction(emit);
4766 return;
4767 case SVGA3D_CMP_LESS:
4768 opcode0.opcodeType = VGPU10_OPCODE_LT;
4769 break;
4770 case SVGA3D_CMP_EQUAL:
4771 opcode0.opcodeType = VGPU10_OPCODE_EQ;
4772 break;
4773 case SVGA3D_CMP_LESSEQUAL:
4774 opcode0.opcodeType = VGPU10_OPCODE_GE;
4775 swapSrc = TRUE;
4776 break;
4777 case SVGA3D_CMP_GREATER:
4778 opcode0.opcodeType = VGPU10_OPCODE_LT;
4779 swapSrc = TRUE;
4780 break;
4781 case SVGA3D_CMP_NOTEQUAL:
4782 opcode0.opcodeType = VGPU10_OPCODE_NE;
4783 break;
4784 case SVGA3D_CMP_GREATEREQUAL:
4785 opcode0.opcodeType = VGPU10_OPCODE_GE;
4786 break;
4787 default:
4788 assert(!"Unexpected comparison mode");
4789 opcode0.opcodeType = VGPU10_OPCODE_EQ;
4790 }
4791
4792 begin_emit_instruction(emit);
4793 emit_dword(emit, opcode0.value);
4794 emit_dst_register(emit, dst);
4795 if (swapSrc) {
4796 emit_src_register(emit, src1);
4797 emit_src_register(emit, src0);
4798 }
4799 else {
4800 emit_src_register(emit, src0);
4801 emit_src_register(emit, src1);
4802 }
4803 end_emit_instruction(emit);
4804 }
4805
4806
4807 /**
4808 * Get texel/address offsets for a texture instruction.
4809 */
4810 static void
4811 get_texel_offsets(const struct svga_shader_emitter_v10 *emit,
4812 const struct tgsi_full_instruction *inst, int offsets[3])
4813 {
4814 if (inst->Texture.NumOffsets == 1) {
4815 /* According to the OpenGL Shading Language spec the offsets are only
4816 * fetched from a previously-declared immediate/literal.
4817 */
4818 const struct tgsi_texture_offset *off = inst->TexOffsets;
4819 const unsigned index = off[0].Index;
4820 const unsigned swizzleX = off[0].SwizzleX;
4821 const unsigned swizzleY = off[0].SwizzleY;
4822 const unsigned swizzleZ = off[0].SwizzleZ;
4823 const union tgsi_immediate_data *imm = emit->immediates[index];
4824
4825 assert(inst->TexOffsets[0].File == TGSI_FILE_IMMEDIATE);
4826
4827 offsets[0] = imm[swizzleX].Int;
4828 offsets[1] = imm[swizzleY].Int;
4829 offsets[2] = imm[swizzleZ].Int;
4830 }
4831 else {
4832 offsets[0] = offsets[1] = offsets[2] = 0;
4833 }
4834 }
4835
4836
4837 /**
4838 * Set up the coordinate register for texture sampling.
4839 * When we're sampling from a RECT texture we have to scale the
4840 * unnormalized coordinate to a normalized coordinate.
4841 * We do that by multiplying the coordinate by an "extra" constant.
4842 * An alternative would be to use the RESINFO instruction to query the
4843 * texture's size.
4844 */
4845 static struct tgsi_full_src_register
4846 setup_texcoord(struct svga_shader_emitter_v10 *emit,
4847 unsigned unit,
4848 const struct tgsi_full_src_register *coord)
4849 {
4850 if (emit->key.tex[unit].unnormalized) {
4851 unsigned scale_index = emit->texcoord_scale_index[unit];
4852 unsigned tmp = get_temp_index(emit);
4853 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4854 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4855 struct tgsi_full_src_register scale_src = make_src_const_reg(scale_index);
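/* Note: texcoord_scale_index[unit] is assumed to refer to one of the
 * 'extra' constants appended after the regular shader constants,
 * presumably holding per-texture scale factors (e.g. 1/width, 1/height)
 * that normalize the RECT coordinates.
 */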
4856
4857 /* MUL tmp, coord, const[] */
4858 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst,
4859 coord, &scale_src, FALSE);
4860 return tmp_src;
4861 }
4862 else {
4863 /* use texcoord as-is */
4864 return *coord;
4865 }
4866 }
4867
4868
4869 /**
4870 * For SAMPLE_C instructions, emit the extra src register which indicates
4871 * the reference/comparison value.
4872 */
4873 static void
4874 emit_tex_compare_refcoord(struct svga_shader_emitter_v10 *emit,
4875 enum tgsi_texture_type target,
4876 const struct tgsi_full_src_register *coord)
4877 {
4878 struct tgsi_full_src_register coord_src_ref;
4879 unsigned component;
4880
4881 assert(tgsi_is_shadow_target(target));
4882
4883 assert(target != TGSI_TEXTURE_SHADOWCUBE_ARRAY); /* XXX not implemented */
4884 if (target == TGSI_TEXTURE_SHADOW2D_ARRAY ||
4885 target == TGSI_TEXTURE_SHADOWCUBE)
4886 component = TGSI_SWIZZLE_W;
4887 else
4888 component = TGSI_SWIZZLE_Z;
4889
4890 coord_src_ref = scalar_src(coord, component);
4891
4892 emit_src_register(emit, &coord_src_ref);
4893 }
4894
4895
4896 /**
4897 * Info for implementing texture swizzles.
4898 * The begin_tex_swizzle(), get_tex_swizzle_dst() and end_tex_swizzle()
4899 * functions use this to encapsulate the extra steps needed to perform
4900 * a texture swizzle, or shadow/depth comparisons.
4901 * The shadow/depth comparison is only done here for the cases where
4902 * there's no VGPU10 opcode (like texture bias lookup w/ shadow compare).
4903 */
4904 struct tex_swizzle_info
4905 {
4906 boolean swizzled;
4907 boolean shadow_compare;
4908 unsigned unit;
4909 enum tgsi_texture_type texture_target; /**< TGSI_TEXTURE_x */
4910 struct tgsi_full_src_register tmp_src;
4911 struct tgsi_full_dst_register tmp_dst;
4912 const struct tgsi_full_dst_register *inst_dst;
4913 const struct tgsi_full_src_register *coord_src;
4914 };
4915
4916
4917 /**
4918 * Do setup for handling texture swizzles or shadow compares.
4919 * \param unit the texture unit
4920 * \param inst the TGSI texture instruction
4921 * \param shadow_compare do shadow/depth comparison?
4922 * \param swz returns the swizzle info
4923 */
4924 static void
4925 begin_tex_swizzle(struct svga_shader_emitter_v10 *emit,
4926 unsigned unit,
4927 const struct tgsi_full_instruction *inst,
4928 boolean shadow_compare,
4929 struct tex_swizzle_info *swz)
4930 {
4931 swz->swizzled = (emit->key.tex[unit].swizzle_r != TGSI_SWIZZLE_X ||
4932 emit->key.tex[unit].swizzle_g != TGSI_SWIZZLE_Y ||
4933 emit->key.tex[unit].swizzle_b != TGSI_SWIZZLE_Z ||
4934 emit->key.tex[unit].swizzle_a != TGSI_SWIZZLE_W);
4935
4936 swz->shadow_compare = shadow_compare;
4937 swz->texture_target = inst->Texture.Texture;
4938
4939 if (swz->swizzled || shadow_compare) {
4940 /* Allocate temp register for the result of the SAMPLE instruction
4941 * and the source of the MOV/compare/swizzle instructions.
4942 */
4943 unsigned tmp = get_temp_index(emit);
4944 swz->tmp_src = make_src_temp_reg(tmp);
4945 swz->tmp_dst = make_dst_temp_reg(tmp);
4946
4947 swz->unit = unit;
4948 }
4949 swz->inst_dst = &inst->Dst[0];
4950 swz->coord_src = &inst->Src[0];
4951 }
4952
4953
4954 /**
4955 * Returns the register to put the SAMPLE instruction results into.
4956 * This will either be the original instruction dst reg (if no swizzle
4957 * and no shadow comparison) or a temporary reg otherwise.
4958 */
4959 static const struct tgsi_full_dst_register *
4960 get_tex_swizzle_dst(const struct tex_swizzle_info *swz)
4961 {
4962 return (swz->swizzled || swz->shadow_compare)
4963 ? &swz->tmp_dst : swz->inst_dst;
4964 }
4965
4966
4967 /**
4968 * This emits the MOV instruction that actually implements a texture swizzle
4969 * and/or shadow comparison.
4970 */
4971 static void
4972 end_tex_swizzle(struct svga_shader_emitter_v10 *emit,
4973 const struct tex_swizzle_info *swz)
4974 {
4975 if (swz->shadow_compare) {
4976 /* Emit extra instructions to compare the fetched texel value against
4977 * a texture coordinate component. The result of the comparison
4978 * is 0.0 or 1.0.
4979 */
4980 struct tgsi_full_src_register coord_src;
4981 struct tgsi_full_src_register texel_src =
4982 scalar_src(&swz->tmp_src, TGSI_SWIZZLE_X);
4983 struct tgsi_full_src_register one =
4984 make_immediate_reg_float(emit, 1.0f);
4985 /* convert gallium comparison func to SVGA comparison func */
4986 SVGA3dCmpFunc compare_func = emit->key.tex[swz->unit].compare_func + 1;
4987
4988 assert(emit->unit == PIPE_SHADER_FRAGMENT);
4989
4990 switch (swz->texture_target) {
4991 case TGSI_TEXTURE_SHADOW2D:
4992 case TGSI_TEXTURE_SHADOWRECT:
4993 case TGSI_TEXTURE_SHADOW1D_ARRAY:
4994 coord_src = scalar_src(swz->coord_src, TGSI_SWIZZLE_Z);
4995 break;
4996 case TGSI_TEXTURE_SHADOW1D:
4997 coord_src = scalar_src(swz->coord_src, TGSI_SWIZZLE_Y);
4998 break;
4999 case TGSI_TEXTURE_SHADOWCUBE:
5000 case TGSI_TEXTURE_SHADOW2D_ARRAY:
5001 coord_src = scalar_src(swz->coord_src, TGSI_SWIZZLE_W);
5002 break;
5003 default:
5004 assert(!"Unexpected texture target in end_tex_swizzle()");
5005 coord_src = scalar_src(swz->coord_src, TGSI_SWIZZLE_Z);
5006 }
5007
5008 /* COMPARE tmp, coord, texel */
5009 /* XXX it would seem that the texel and coord arguments should
5010 * be transposed here, but piglit tests indicate otherwise.
5011 */
5012 emit_comparison(emit, compare_func,
5013 &swz->tmp_dst, &texel_src, &coord_src);
5014
5015 /* AND dest, tmp, {1.0} */
5016 begin_emit_instruction(emit);
5017 emit_opcode(emit, VGPU10_OPCODE_AND, FALSE);
5018 if (swz->swizzled) {
5019 emit_dst_register(emit, &swz->tmp_dst);
5020 }
5021 else {
5022 emit_dst_register(emit, swz->inst_dst);
5023 }
5024 emit_src_register(emit, &swz->tmp_src);
5025 emit_src_register(emit, &one);
5026 end_emit_instruction(emit);
5027 }
5028
5029 if (swz->swizzled) {
5030 unsigned swz_r = emit->key.tex[swz->unit].swizzle_r;
5031 unsigned swz_g = emit->key.tex[swz->unit].swizzle_g;
5032 unsigned swz_b = emit->key.tex[swz->unit].swizzle_b;
5033 unsigned swz_a = emit->key.tex[swz->unit].swizzle_a;
5034 unsigned writemask_0 = 0, writemask_1 = 0;
5035 boolean int_tex = is_integer_type(emit->sampler_return_type[swz->unit]);
5036
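/* Illustrative example: for a swizzle of (Z, Y, X, ONE) the MOV below
 * reads tmp.zyxw (the ONE term is temporarily treated as W), then
 * writemask_1 becomes the W bit and dst.w is overwritten with 1.
 */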
5037 /* Swizzle w/out zero/one terms */
5038 struct tgsi_full_src_register src_swizzled =
5039 swizzle_src(&swz->tmp_src,
5040 swz_r < PIPE_SWIZZLE_0 ? swz_r : PIPE_SWIZZLE_X,
5041 swz_g < PIPE_SWIZZLE_0 ? swz_g : PIPE_SWIZZLE_Y,
5042 swz_b < PIPE_SWIZZLE_0 ? swz_b : PIPE_SWIZZLE_Z,
5043 swz_a < PIPE_SWIZZLE_0 ? swz_a : PIPE_SWIZZLE_W);
5044
5045 /* MOV dst, color(tmp).<swizzle> */
5046 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
5047 swz->inst_dst, &src_swizzled, FALSE);
5048
5049 /* handle swizzle zero terms */
5050 writemask_0 = (((swz_r == PIPE_SWIZZLE_0) << 0) |
5051 ((swz_g == PIPE_SWIZZLE_0) << 1) |
5052 ((swz_b == PIPE_SWIZZLE_0) << 2) |
5053 ((swz_a == PIPE_SWIZZLE_0) << 3));
5054 writemask_0 &= swz->inst_dst->Register.WriteMask;
5055
5056 if (writemask_0) {
5057 struct tgsi_full_src_register zero = int_tex ?
5058 make_immediate_reg_int(emit, 0) :
5059 make_immediate_reg_float(emit, 0.0f);
5060 struct tgsi_full_dst_register dst =
5061 writemask_dst(swz->inst_dst, writemask_0);
5062
5063 /* MOV dst.writemask_0, {0,0,0,0} */
5064 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
5065 &dst, &zero, FALSE);
5066 }
5067
5068 /* handle swizzle one terms */
5069 writemask_1 = (((swz_r == PIPE_SWIZZLE_1) << 0) |
5070 ((swz_g == PIPE_SWIZZLE_1) << 1) |
5071 ((swz_b == PIPE_SWIZZLE_1) << 2) |
5072 ((swz_a == PIPE_SWIZZLE_1) << 3));
5073 writemask_1 &= swz->inst_dst->Register.WriteMask;
5074
5075 if (writemask_1) {
5076 struct tgsi_full_src_register one = int_tex ?
5077 make_immediate_reg_int(emit, 1) :
5078 make_immediate_reg_float(emit, 1.0f);
5079 struct tgsi_full_dst_register dst =
5080 writemask_dst(swz->inst_dst, writemask_1);
5081
5082 /* MOV dst.writemask_1, {1,1,1,1} */
5083 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst, &one, FALSE);
5084 }
5085 }
5086 }
5087
5088
5089 /**
5090 * Emit code for TGSI_OPCODE_SAMPLE instruction.
5091 */
5092 static boolean
5093 emit_sample(struct svga_shader_emitter_v10 *emit,
5094 const struct tgsi_full_instruction *inst)
5095 {
5096 const unsigned resource_unit = inst->Src[1].Register.Index;
5097 const unsigned sampler_unit = inst->Src[2].Register.Index;
5098 struct tgsi_full_src_register coord;
5099 int offsets[3];
5100 struct tex_swizzle_info swz_info;
5101
5102 begin_tex_swizzle(emit, sampler_unit, inst, FALSE, &swz_info);
5103
5104 get_texel_offsets(emit, inst, offsets);
5105
5106 coord = setup_texcoord(emit, resource_unit, &inst->Src[0]);
5107
5108 /* SAMPLE dst, coord(s0), resource, sampler */
5109 begin_emit_instruction(emit);
5110
5111 /* NOTE: for non-fragment shaders, we should use VGPU10_OPCODE_SAMPLE_L
5112 * with LOD=0. But our virtual GPU accepts this as-is.
5113 */
5114 emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE,
5115 inst->Instruction.Saturate, offsets);
5116 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5117 emit_src_register(emit, &coord);
5118 emit_resource_register(emit, resource_unit);
5119 emit_sampler_register(emit, sampler_unit);
5120 end_emit_instruction(emit);
5121
5122 end_tex_swizzle(emit, &swz_info);
5123
5124 free_temp_indexes(emit);
5125
5126 return TRUE;
5127 }
5128
5129
5130 /**
5131 * Check if a texture instruction is valid.
5132 * An example of an invalid texture instruction is doing shadow comparison
5133 * with an integer-valued texture.
5134 * If we detect an invalid texture instruction, we replace it with:
5135 * MOV dst, {1,1,1,1};
5136 * \return TRUE if valid, FALSE if invalid.
5137 */
5138 static boolean
5139 is_valid_tex_instruction(struct svga_shader_emitter_v10 *emit,
5140 const struct tgsi_full_instruction *inst)
5141 {
5142 const unsigned unit = inst->Src[1].Register.Index;
5143 const unsigned target = inst->Texture.Texture;
5144 boolean valid = TRUE;
5145
5146 if (tgsi_is_shadow_target(target) &&
5147 is_integer_type(emit->sampler_return_type[unit])) {
5148 debug_printf("Invalid SAMPLE_C with an integer texture!\n");
5149 valid = FALSE;
5150 }
5151 /* XXX might check for other conditions in the future here */
5152
5153 if (!valid) {
5154 /* emit a MOV dst, {1,1,1,1} instruction. */
5155 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
5156 begin_emit_instruction(emit);
5157 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
5158 emit_dst_register(emit, &inst->Dst[0]);
5159 emit_src_register(emit, &one);
5160 end_emit_instruction(emit);
5161 }
5162
5163 return valid;
5164 }
5165
5166
5167 /**
5168 * Emit code for TGSI_OPCODE_TEX (simple texture lookup)
5169 */
5170 static boolean
5171 emit_tex(struct svga_shader_emitter_v10 *emit,
5172 const struct tgsi_full_instruction *inst)
5173 {
5174 const uint unit = inst->Src[1].Register.Index;
5175 unsigned target = inst->Texture.Texture;
5176 unsigned opcode;
5177 struct tgsi_full_src_register coord;
5178 int offsets[3];
5179 struct tex_swizzle_info swz_info;
5180
5181 /* check that the sampler returns a float */
5182 if (!is_valid_tex_instruction(emit, inst))
5183 return TRUE;
5184
5185 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5186
5187 get_texel_offsets(emit, inst, offsets);
5188
5189 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5190
5191 /* SAMPLE dst, coord(s0), resource, sampler */
5192 begin_emit_instruction(emit);
5193
5194 if (tgsi_is_shadow_target(target))
5195 opcode = VGPU10_OPCODE_SAMPLE_C;
5196 else
5197 opcode = VGPU10_OPCODE_SAMPLE;
5198
5199 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5200 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5201 emit_src_register(emit, &coord);
5202 emit_resource_register(emit, unit);
5203 emit_sampler_register(emit, unit);
5204 if (opcode == VGPU10_OPCODE_SAMPLE_C) {
5205 emit_tex_compare_refcoord(emit, target, &coord);
5206 }
5207 end_emit_instruction(emit);
5208
5209 end_tex_swizzle(emit, &swz_info);
5210
5211 free_temp_indexes(emit);
5212
5213 return TRUE;
5214 }
5215
5216
5217 /**
5218 * Emit code for TGSI_OPCODE_TXP (projective texture)
5219 */
5220 static boolean
5221 emit_txp(struct svga_shader_emitter_v10 *emit,
5222 const struct tgsi_full_instruction *inst)
5223 {
5224 const uint unit = inst->Src[1].Register.Index;
5225 unsigned target = inst->Texture.Texture;
5226 unsigned opcode;
5227 int offsets[3];
5228 unsigned tmp = get_temp_index(emit);
5229 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
5230 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
5231 struct tgsi_full_src_register src0_wwww =
5232 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5233 struct tgsi_full_src_register coord;
5234 struct tex_swizzle_info swz_info;
5235
5236 /* check that the sampler returns a float */
5237 if (!is_valid_tex_instruction(emit, inst))
5238 return TRUE;
5239
5240 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5241
5242 get_texel_offsets(emit, inst, offsets);
5243
5244 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5245
5246 /* DIV tmp, coord, coord.wwww */
5247 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &tmp_dst,
5248 &coord, &src0_wwww, FALSE);
5249
5250 /* SAMPLE dst, coord(tmp), resource, sampler */
5251 begin_emit_instruction(emit);
5252
5253 if (tgsi_is_shadow_target(target))
5254 /* NOTE: for non-fragment shaders, we should use
5255 * VGPU10_OPCODE_SAMPLE_C_LZ, but our virtual GPU accepts SAMPLE_C as-is.
5256 */
5257 opcode = VGPU10_OPCODE_SAMPLE_C;
5258 else
5259 opcode = VGPU10_OPCODE_SAMPLE;
5260
5261 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5262 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5263 emit_src_register(emit, &tmp_src); /* projected coord */
5264 emit_resource_register(emit, unit);
5265 emit_sampler_register(emit, unit);
5266 if (opcode == VGPU10_OPCODE_SAMPLE_C) {
5267 emit_tex_compare_refcoord(emit, target, &tmp_src);
5268 }
5269 end_emit_instruction(emit);
5270
5271 end_tex_swizzle(emit, &swz_info);
5272
5273 free_temp_indexes(emit);
5274
5275 return TRUE;
5276 }
5277
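/* For reference, a projective lookup such as
 *    TXP TEMP[0], IN[1], SAMP[0], 2D
 * is expected to expand to roughly
 *    DIV    tmp, coord, coord.wwww
 *    SAMPLE r0, tmp, t0, s0
 * i.e. the divide-by-W is done explicitly before sampling (SAMPLE_C is
 * used instead for shadow targets).  Register names are illustrative only.
 */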
5278
5279 /*
5280 * Emit code for TGSI_OPCODE_XPD instruction.
5281 */
5282 static boolean
5283 emit_xpd(struct svga_shader_emitter_v10 *emit,
5284 const struct tgsi_full_instruction *inst)
5285 {
5286 /* dst.x = src0.y * src1.z - src1.y * src0.z
5287 * dst.y = src0.z * src1.x - src1.z * src0.x
5288 * dst.z = src0.x * src1.y - src1.x * src0.y
5289 * dst.w = 1
5290 */
5291 struct tgsi_full_src_register s0_xxxx =
5292 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
5293 struct tgsi_full_src_register s0_yyyy =
5294 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
5295 struct tgsi_full_src_register s0_zzzz =
5296 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Z);
5297
5298 struct tgsi_full_src_register s1_xxxx =
5299 scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
5300 struct tgsi_full_src_register s1_yyyy =
5301 scalar_src(&inst->Src[1], TGSI_SWIZZLE_Y);
5302 struct tgsi_full_src_register s1_zzzz =
5303 scalar_src(&inst->Src[1], TGSI_SWIZZLE_Z);
5304
5305 unsigned tmp1 = get_temp_index(emit);
5306 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
5307 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
5308
5309 unsigned tmp2 = get_temp_index(emit);
5310 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
5311 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
5312 struct tgsi_full_src_register neg_tmp2_src = negate_src(&tmp2_src);
5313
5314 unsigned tmp3 = get_temp_index(emit);
5315 struct tgsi_full_src_register tmp3_src = make_src_temp_reg(tmp3);
5316 struct tgsi_full_dst_register tmp3_dst = make_dst_temp_reg(tmp3);
5317 struct tgsi_full_dst_register tmp3_dst_x =
5318 writemask_dst(&tmp3_dst, TGSI_WRITEMASK_X);
5319 struct tgsi_full_dst_register tmp3_dst_y =
5320 writemask_dst(&tmp3_dst, TGSI_WRITEMASK_Y);
5321 struct tgsi_full_dst_register tmp3_dst_z =
5322 writemask_dst(&tmp3_dst, TGSI_WRITEMASK_Z);
5323 struct tgsi_full_dst_register tmp3_dst_w =
5324 writemask_dst(&tmp3_dst, TGSI_WRITEMASK_W);
5325
5326 /* Note: we put all the intermediate computations into tmp3 in case
5327 * the XPD dest register is the same as one of the src regs (in which
5328 * case we could clobber a src reg before we're done with it).
5329 *
5330 * Note: we could get by with just one temp register instead of three
5331 * since we're doing scalar operations and there's enough room in one
5332 * temp for everything.
5333 */
5334
5335 /* MUL tmp1, src0.y, src1.z */
5336 /* MUL tmp2, src1.y, src0.z */
5337 /* ADD tmp3.x, tmp1, -tmp2 */
5338 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
5339 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp1_dst,
5340 &s0_yyyy, &s1_zzzz, FALSE);
5341 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp2_dst,
5342 &s1_yyyy, &s0_zzzz, FALSE);
5343 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp3_dst_x,
5344 &tmp1_src, &neg_tmp2_src, FALSE);
5345 }
5346
5347 /* MUL tmp1, src0.z, src1.x */
5348 /* MUL tmp2, src1.z, src0.x */
5349 /* ADD tmp3.y, tmp1, -tmp2 */
5350 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
5351 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp1_dst, &s0_zzzz,
5352 &s1_xxxx, FALSE);
5353 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp2_dst, &s1_zzzz,
5354 &s0_xxxx, FALSE);
5355 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp3_dst_y,
5356 &tmp1_src, &neg_tmp2_src, FALSE);
5357 }
5358
5359 /* MUL tmp1, src0.x, src1.y */
5360 /* MUL tmp2, src1.x, src0.y */
5361 /* ADD tmp3.z, tmp1, -tmp2 */
5362 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
5363 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp1_dst, &s0_xxxx,
5364 &s1_yyyy, FALSE);
5365 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp2_dst, &s1_xxxx,
5366 &s0_yyyy, FALSE);
5367 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp3_dst_z,
5368 &tmp1_src, &neg_tmp2_src, FALSE);
5369 }
5370
5371 /* MOV tmp3.w, 1.0 */
5372 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
5373 struct tgsi_full_src_register one =
5374 make_immediate_reg_float(emit, 1.0f);
5375
5376 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &tmp3_dst_w, &one, FALSE);
5377 }
5378
5379 /* MOV dst, tmp3 */
5380 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &tmp3_src,
5381 inst->Instruction.Saturate);
5382
5383
5384 free_temp_indexes(emit);
5385
5386 return TRUE;
5387 }
5388
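/* Quick sanity check of the expansion above with concrete values:
 * for src0 = (1,0,0) and src1 = (0,1,0),
 *    dst.x = 0*0 - 1*0 = 0
 *    dst.y = 0*0 - 0*1 = 0
 *    dst.z = 1*1 - 0*0 = 1
 *    dst.w = 1
 * which is the expected cross product (0,0,1) with W forced to 1.
 */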
5389
5390 /**
5391 * Emit code for TGSI_OPCODE_TXD (explicit derivatives)
5392 */
5393 static boolean
5394 emit_txd(struct svga_shader_emitter_v10 *emit,
5395 const struct tgsi_full_instruction *inst)
5396 {
5397 const uint unit = inst->Src[3].Register.Index;
5398 unsigned target = inst->Texture.Texture;
5399 int offsets[3];
5400 struct tgsi_full_src_register coord;
5401 struct tex_swizzle_info swz_info;
5402
5403 begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
5404 &swz_info);
5405
5406 get_texel_offsets(emit, inst, offsets);
5407
5408 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5409
5410 /* SAMPLE_D dst, coord(s0), resource, sampler, Xderiv(s1), Yderiv(s2) */
5411 begin_emit_instruction(emit);
5412 emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE_D,
5413 inst->Instruction.Saturate, offsets);
5414 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5415 emit_src_register(emit, &coord);
5416 emit_resource_register(emit, unit);
5417 emit_sampler_register(emit, unit);
5418 emit_src_register(emit, &inst->Src[1]); /* Xderiv */
5419 emit_src_register(emit, &inst->Src[2]); /* Yderiv */
5420 end_emit_instruction(emit);
5421
5422 end_tex_swizzle(emit, &swz_info);
5423
5424 free_temp_indexes(emit);
5425
5426 return TRUE;
5427 }
5428
5429
5430 /**
5431 * Emit code for TGSI_OPCODE_TXF (texel fetch)
5432 */
5433 static boolean
5434 emit_txf(struct svga_shader_emitter_v10 *emit,
5435 const struct tgsi_full_instruction *inst)
5436 {
5437 const uint unit = inst->Src[1].Register.Index;
5438 const boolean msaa = tgsi_is_msaa_target(inst->Texture.Texture);
5439 int offsets[3];
5440 struct tex_swizzle_info swz_info;
5441
5442 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5443
5444 get_texel_offsets(emit, inst, offsets);
5445
5446 if (msaa) {
5447 /* Fetch one sample from an MSAA texture */
5448 struct tgsi_full_src_register sampleIndex =
5449 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5450 /* LD_MS dst, coord(s0), resource, sampleIndex */
5451 begin_emit_instruction(emit);
5452 emit_sample_opcode(emit, VGPU10_OPCODE_LD_MS,
5453 inst->Instruction.Saturate, offsets);
5454 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5455 emit_src_register(emit, &inst->Src[0]);
5456 emit_resource_register(emit, unit);
5457 emit_src_register(emit, &sampleIndex);
5458 end_emit_instruction(emit);
5459 }
5460 else {
5461 /* Fetch one texel specified by integer coordinate */
5462 /* LD dst, coord(s0), resource */
5463 begin_emit_instruction(emit);
5464 emit_sample_opcode(emit, VGPU10_OPCODE_LD,
5465 inst->Instruction.Saturate, offsets);
5466 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5467 emit_src_register(emit, &inst->Src[0]);
5468 emit_resource_register(emit, unit);
5469 end_emit_instruction(emit);
5470 }
5471
5472 end_tex_swizzle(emit, &swz_info);
5473
5474 free_temp_indexes(emit);
5475
5476 return TRUE;
5477 }
5478
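/* For reference, a texel fetch from a non-MSAA 2D texture such as
 *    TXF TEMP[0], TEMP[1], SAMP[0], 2D
 * is expected to expand to roughly
 *    LD r0, r1, t0
 * while the MSAA path uses LD_MS with the sample index (coord.w) as a
 * trailing operand.  Note that no sampler register is emitted for
 * fetches.  Register names are illustrative only.
 */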
5479
5480 /**
5481 * Emit code for TGSI_OPCODE_TXL (explicit LOD) or TGSI_OPCODE_TXB (LOD bias)
5482 * or TGSI_OPCODE_TXB2 (for cube shadow maps).
5483 */
5484 static boolean
5485 emit_txl_txb(struct svga_shader_emitter_v10 *emit,
5486 const struct tgsi_full_instruction *inst)
5487 {
5488 unsigned target = inst->Texture.Texture;
5489 unsigned opcode, unit;
5490 int offsets[3];
5491 struct tgsi_full_src_register coord, lod_bias;
5492 struct tex_swizzle_info swz_info;
5493
5494 assert(inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
5495 inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
5496 inst->Instruction.Opcode == TGSI_OPCODE_TXB2);
5497
5498 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2) {
5499 lod_bias = scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
5500 unit = inst->Src[2].Register.Index;
5501 }
5502 else {
5503 lod_bias = scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5504 unit = inst->Src[1].Register.Index;
5505 }
5506
5507 begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
5508 &swz_info);
5509
5510 get_texel_offsets(emit, inst, offsets);
5511
5512 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5513
5514 /* SAMPLE_L/B dst, coord(s0), resource, sampler, lod_or_bias */
5515 begin_emit_instruction(emit);
5516 if (inst->Instruction.Opcode == TGSI_OPCODE_TXL) {
5517 opcode = VGPU10_OPCODE_SAMPLE_L;
5518 }
5519 else {
5520 opcode = VGPU10_OPCODE_SAMPLE_B;
5521 }
5522 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5523 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5524 emit_src_register(emit, &coord);
5525 emit_resource_register(emit, unit);
5526 emit_sampler_register(emit, unit);
5527 emit_src_register(emit, &lod_bias);
5528 end_emit_instruction(emit);
5529
5530 end_tex_swizzle(emit, &swz_info);
5531
5532 free_temp_indexes(emit);
5533
5534 return TRUE;
5535 }
5536
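/* For reference, an explicit-LOD lookup such as
 *    TXL TEMP[0], IN[0], SAMP[0], 2D
 * is expected to expand to roughly
 *    SAMPLE_L r0, v0, t0, s0, v0.wwww
 * with the LOD taken from coord.w, while TXB/TXB2 produce SAMPLE_B with
 * the bias as the trailing operand.  Register names are illustrative.
 */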
5537
5538 /**
5539 * Emit code for TGSI_OPCODE_TXQ (texture query) instruction.
5540 */
5541 static boolean
5542 emit_txq(struct svga_shader_emitter_v10 *emit,
5543 const struct tgsi_full_instruction *inst)
5544 {
5545 const uint unit = inst->Src[1].Register.Index;
5546
5547 if (emit->sampler_target[unit] == TGSI_TEXTURE_BUFFER) {
5548 /* RESINFO does not support querying texture buffers, so we instead
5549 * store texture buffer sizes in shader constants and copy from those
5550 * constants to implement TXQ:
5551 * MOV dst, const[texture_buffer_size_index[unit]]
5552 */
5553 struct tgsi_full_src_register size_src =
5554 make_src_const_reg(emit->texture_buffer_size_index[unit]);
5555 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &size_src,
5556 FALSE);
5557 } else {
5558 /* RESINFO dst, srcMipLevel, resource */
5559 begin_emit_instruction(emit);
5560 emit_opcode_resinfo(emit, VGPU10_RESINFO_RETURN_UINT);
5561 emit_dst_register(emit, &inst->Dst[0]);
5562 emit_src_register(emit, &inst->Src[0]);
5563 emit_resource_register(emit, unit);
5564 end_emit_instruction(emit);
5565 }
5566
5567 free_temp_indexes(emit);
5568
5569 return TRUE;
5570 }
5571
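/* For reference, a size query on a regular 2D texture such as
 *    TXQ TEMP[0], TEMP[1], SAMP[0], 2D
 * is expected to become a single RESINFO (UINT return) instruction,
 * while for a buffer texture it degenerates to one MOV from the shader
 * constant that the driver fills in with the buffer size.  The exact
 * encoding comes from emit_opcode_resinfo(); this is only a sketch.
 */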
5572
5573 /**
5574 * Emit a simple instruction (like ADD, MUL, MIN, etc).
5575 */
5576 static boolean
5577 emit_simple(struct svga_shader_emitter_v10 *emit,
5578 const struct tgsi_full_instruction *inst)
5579 {
5580 const unsigned opcode = inst->Instruction.Opcode;
5581 const struct tgsi_opcode_info *op = tgsi_get_opcode_info(opcode);
5582 unsigned i;
5583
5584 begin_emit_instruction(emit);
5585 emit_opcode(emit, translate_opcode(inst->Instruction.Opcode),
5586 inst->Instruction.Saturate);
5587 for (i = 0; i < op->num_dst; i++) {
5588 emit_dst_register(emit, &inst->Dst[i]);
5589 }
5590 for (i = 0; i < op->num_src; i++) {
5591 emit_src_register(emit, &inst->Src[i]);
5592 }
5593 end_emit_instruction(emit);
5594
5595 return TRUE;
5596 }
5597
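/* For example, a TGSI instruction like
 *    ADD TEMP[0], TEMP[1], TEMP[2]
 * goes through this path and should come out as the corresponding
 *    ADD r0, r1, r2
 * VGPU10 instruction: same opcode (via translate_opcode()) and the same
 * operand count (from tgsi_get_opcode_info()).  Illustrative sketch only.
 */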
5598
5599 /**
5600 * We special-case the MOV instruction only so that we can detect
5601 * constant color writes in the fragment shader.
5602 */
5603 static boolean
5604 emit_mov(struct svga_shader_emitter_v10 *emit,
5605 const struct tgsi_full_instruction *inst)
5606 {
5607 const struct tgsi_full_src_register *src = &inst->Src[0];
5608 const struct tgsi_full_dst_register *dst = &inst->Dst[0];
5609
5610 if (emit->unit == PIPE_SHADER_FRAGMENT &&
5611 dst->Register.File == TGSI_FILE_OUTPUT &&
5612 dst->Register.Index == 0 &&
5613 src->Register.File == TGSI_FILE_CONSTANT &&
5614 !src->Register.Indirect) {
5615 emit->constant_color_output = TRUE;
5616 }
5617
5618 return emit_simple(emit, inst);
5619 }
5620
5621
5622 /**
5623 * Emit a simple VGPU10 instruction which writes to multiple dest registers,
5624 * where TGSI only uses one dest register.
5625 */
5626 static boolean
5627 emit_simple_1dst(struct svga_shader_emitter_v10 *emit,
5628 const struct tgsi_full_instruction *inst,
5629 unsigned dst_count,
5630 unsigned dst_index)
5631 {
5632 const unsigned opcode = inst->Instruction.Opcode;
5633 const struct tgsi_opcode_info *op = tgsi_get_opcode_info(opcode);
5634 unsigned i;
5635
5636 begin_emit_instruction(emit);
5637 emit_opcode(emit, translate_opcode(inst->Instruction.Opcode),
5638 inst->Instruction.Saturate);
5639
5640 for (i = 0; i < dst_count; i++) {
5641 if (i == dst_index) {
5642 emit_dst_register(emit, &inst->Dst[0]);
5643 } else {
5644 emit_null_dst_register(emit);
5645 }
5646 }
5647
5648 for (i = 0; i < op->num_src; i++) {
5649 emit_src_register(emit, &inst->Src[i]);
5650 }
5651 end_emit_instruction(emit);
5652
5653 return TRUE;
5654 }
5655
5656
5657 /**
5658 * Translate a single TGSI instruction to VGPU10.
5659 */
5660 static boolean
5661 emit_vgpu10_instruction(struct svga_shader_emitter_v10 *emit,
5662 unsigned inst_number,
5663 const struct tgsi_full_instruction *inst)
5664 {
5665 const unsigned opcode = inst->Instruction.Opcode;
5666
5667 switch (opcode) {
5668 case TGSI_OPCODE_ADD:
5669 case TGSI_OPCODE_AND:
5670 case TGSI_OPCODE_BGNLOOP:
5671 case TGSI_OPCODE_BRK:
5672 case TGSI_OPCODE_CEIL:
5673 case TGSI_OPCODE_CONT:
5674 case TGSI_OPCODE_DDX:
5675 case TGSI_OPCODE_DDY:
5676 case TGSI_OPCODE_DIV:
5677 case TGSI_OPCODE_DP2:
5678 case TGSI_OPCODE_DP3:
5679 case TGSI_OPCODE_DP4:
5680 case TGSI_OPCODE_ELSE:
5681 case TGSI_OPCODE_ENDIF:
5682 case TGSI_OPCODE_ENDLOOP:
5683 case TGSI_OPCODE_ENDSUB:
5684 case TGSI_OPCODE_F2I:
5685 case TGSI_OPCODE_F2U:
5686 case TGSI_OPCODE_FLR:
5687 case TGSI_OPCODE_FRC:
5688 case TGSI_OPCODE_FSEQ:
5689 case TGSI_OPCODE_FSGE:
5690 case TGSI_OPCODE_FSLT:
5691 case TGSI_OPCODE_FSNE:
5692 case TGSI_OPCODE_I2F:
5693 case TGSI_OPCODE_IMAX:
5694 case TGSI_OPCODE_IMIN:
5695 case TGSI_OPCODE_INEG:
5696 case TGSI_OPCODE_ISGE:
5697 case TGSI_OPCODE_ISHR:
5698 case TGSI_OPCODE_ISLT:
5699 case TGSI_OPCODE_MAD:
5700 case TGSI_OPCODE_MAX:
5701 case TGSI_OPCODE_MIN:
5702 case TGSI_OPCODE_MUL:
5703 case TGSI_OPCODE_NOP:
5704 case TGSI_OPCODE_NOT:
5705 case TGSI_OPCODE_OR:
5706 case TGSI_OPCODE_RET:
5707 case TGSI_OPCODE_UADD:
5708 case TGSI_OPCODE_USEQ:
5709 case TGSI_OPCODE_USGE:
5710 case TGSI_OPCODE_USLT:
5711 case TGSI_OPCODE_UMIN:
5712 case TGSI_OPCODE_UMAD:
5713 case TGSI_OPCODE_UMAX:
5714 case TGSI_OPCODE_ROUND:
5715 case TGSI_OPCODE_SQRT:
5716 case TGSI_OPCODE_SHL:
5717 case TGSI_OPCODE_TRUNC:
5718 case TGSI_OPCODE_U2F:
5719 case TGSI_OPCODE_UCMP:
5720 case TGSI_OPCODE_USHR:
5721 case TGSI_OPCODE_USNE:
5722 case TGSI_OPCODE_XOR:
5723 /* simple instructions */
5724 return emit_simple(emit, inst);
5725
5726 case TGSI_OPCODE_MOV:
5727 return emit_mov(emit, inst);
5728 case TGSI_OPCODE_EMIT:
5729 return emit_vertex(emit, inst);
5730 case TGSI_OPCODE_ENDPRIM:
5731 return emit_endprim(emit, inst);
5732 case TGSI_OPCODE_IABS:
5733 return emit_iabs(emit, inst);
5734 case TGSI_OPCODE_ARL:
5735 /* fall-through */
5736 case TGSI_OPCODE_UARL:
5737 return emit_arl_uarl(emit, inst);
5738 case TGSI_OPCODE_BGNSUB:
5739 /* no-op */
5740 return TRUE;
5741 case TGSI_OPCODE_CAL:
5742 return emit_cal(emit, inst);
5743 case TGSI_OPCODE_CMP:
5744 return emit_cmp(emit, inst);
5745 case TGSI_OPCODE_COS:
5746 return emit_sincos(emit, inst);
5747 case TGSI_OPCODE_DP2A:
5748 return emit_dp2a(emit, inst);
5749 case TGSI_OPCODE_DPH:
5750 return emit_dph(emit, inst);
5751 case TGSI_OPCODE_DST:
5752 return emit_dst(emit, inst);
5753 case TGSI_OPCODE_EX2:
5754 return emit_ex2(emit, inst);
5755 case TGSI_OPCODE_EXP:
5756 return emit_exp(emit, inst);
5757 case TGSI_OPCODE_IF:
5758 return emit_if(emit, inst);
5759 case TGSI_OPCODE_KILL:
5760 return emit_kill(emit, inst);
5761 case TGSI_OPCODE_KILL_IF:
5762 return emit_kill_if(emit, inst);
5763 case TGSI_OPCODE_LG2:
5764 return emit_lg2(emit, inst);
5765 case TGSI_OPCODE_LIT:
5766 return emit_lit(emit, inst);
5767 case TGSI_OPCODE_LOG:
5768 return emit_log(emit, inst);
5769 case TGSI_OPCODE_LRP:
5770 return emit_lrp(emit, inst);
5771 case TGSI_OPCODE_POW:
5772 return emit_pow(emit, inst);
5773 case TGSI_OPCODE_RCP:
5774 return emit_rcp(emit, inst);
5775 case TGSI_OPCODE_RSQ:
5776 return emit_rsq(emit, inst);
5777 case TGSI_OPCODE_SAMPLE:
5778 return emit_sample(emit, inst);
5779 case TGSI_OPCODE_SCS:
5780 return emit_scs(emit, inst);
5781 case TGSI_OPCODE_SEQ:
5782 return emit_seq(emit, inst);
5783 case TGSI_OPCODE_SGE:
5784 return emit_sge(emit, inst);
5785 case TGSI_OPCODE_SGT:
5786 return emit_sgt(emit, inst);
5787 case TGSI_OPCODE_SIN:
5788 return emit_sincos(emit, inst);
5789 case TGSI_OPCODE_SLE:
5790 return emit_sle(emit, inst);
5791 case TGSI_OPCODE_SLT:
5792 return emit_slt(emit, inst);
5793 case TGSI_OPCODE_SNE:
5794 return emit_sne(emit, inst);
5795 case TGSI_OPCODE_SSG:
5796 return emit_ssg(emit, inst);
5797 case TGSI_OPCODE_ISSG:
5798 return emit_issg(emit, inst);
5799 case TGSI_OPCODE_TEX:
5800 return emit_tex(emit, inst);
5801 case TGSI_OPCODE_TXP:
5802 return emit_txp(emit, inst);
5803 case TGSI_OPCODE_TXB:
5804 case TGSI_OPCODE_TXB2:
5805 case TGSI_OPCODE_TXL:
5806 return emit_txl_txb(emit, inst);
5807 case TGSI_OPCODE_TXD:
5808 return emit_txd(emit, inst);
5809 case TGSI_OPCODE_TXF:
5810 return emit_txf(emit, inst);
5811 case TGSI_OPCODE_TXQ:
5812 return emit_txq(emit, inst);
5813 case TGSI_OPCODE_UIF:
5814 return emit_if(emit, inst);
5815 case TGSI_OPCODE_XPD:
5816 return emit_xpd(emit, inst);
5817 case TGSI_OPCODE_UMUL_HI:
5818 case TGSI_OPCODE_IMUL_HI:
5819 case TGSI_OPCODE_UDIV:
5820 case TGSI_OPCODE_IDIV:
5821 /* These cases use only the FIRST of two destination registers */
5822 return emit_simple_1dst(emit, inst, 2, 0);
5823 case TGSI_OPCODE_UMUL:
5824 case TGSI_OPCODE_UMOD:
5825 case TGSI_OPCODE_MOD:
5826 /* These cases use only the SECOND of two destination registers */
5827 return emit_simple_1dst(emit, inst, 2, 1);
5828 case TGSI_OPCODE_END:
5829 if (!emit_post_helpers(emit))
5830 return FALSE;
5831 return emit_simple(emit, inst);
5832
5833 default:
5834 debug_printf("Unimplemented tgsi instruction %s\n",
5835 tgsi_get_opcode_name(opcode));
5836 return FALSE;
5837 }
5838
5839 return TRUE;
5840 }
5841
5842
5843 /**
5844 * Emit the extra instructions to adjust the vertex position.
5845 * There are two possible adjustments:
5846 * 1. Converting from Gallium to VGPU10 coordinate space by applying the
5847 * "prescale" and "pretranslate" values.
5848 * 2. Undoing the viewport transformation when we use the swtnl/draw path.
5849 * \param vs_pos_tmp_index which temporary register contains the vertex pos.
5850 */
5851 static void
5852 emit_vpos_instructions(struct svga_shader_emitter_v10 *emit,
5853 unsigned vs_pos_tmp_index)
5854 {
5855 struct tgsi_full_src_register tmp_pos_src;
5856 struct tgsi_full_dst_register pos_dst;
5857
5858 /* Don't bother to emit any extra vertex instructions if vertex position is
5859 * not written out
5860 */
5861 if (emit->vposition.out_index == INVALID_INDEX)
5862 return;
5863
5864 tmp_pos_src = make_src_temp_reg(vs_pos_tmp_index);
5865 pos_dst = make_dst_output_reg(emit->vposition.out_index);
5866
5867 /* If non-adjusted vertex position register index
5868 * is valid, copy the vertex position from the temporary
5869 * vertex position register before it is modified by the
5870 * prescale computation.
5871 */
5872 if (emit->vposition.so_index != INVALID_INDEX) {
5873 struct tgsi_full_dst_register pos_so_dst =
5874 make_dst_output_reg(emit->vposition.so_index);
5875
5876 /* MOV pos_so, tmp_pos */
5877 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &pos_so_dst,
5878 &tmp_pos_src, FALSE);
5879 }
5880
5881 if (emit->vposition.need_prescale) {
5882 /* This code adjusts the vertex position to match the VGPU10 convention.
5883 * If p is the position computed by the shader (usually by applying the
5884 * modelview and projection matrices), the new position q is computed by:
5885 *
5886 * q.x = p.w * trans.x + p.x * scale.x
5887 * q.y = p.w * trans.y + p.y * scale.y
5888 * q.z = p.w * trans.z + p.z * scale.z
5889 * q.w = p.w * trans.w + p.w
5890 */
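/* Worked example (scale/trans values made up purely for illustration):
 * with scale = (0.5, -0.5, 0.5, 0), trans = (0.5, 0.5, 0.5, 0) and
 * p = (0, 0, 0, 1), the two instructions below give
 *    q = (1*0.5 + 0*0.5, 1*0.5 + 0*(-0.5), 1*0.5 + 0*0.5, 1*0 + 1)
 *      = (0.5, 0.5, 0.5, 1)
 */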
5891 struct tgsi_full_src_register tmp_pos_src_w =
5892 scalar_src(&tmp_pos_src, TGSI_SWIZZLE_W);
5893 struct tgsi_full_dst_register tmp_pos_dst =
5894 make_dst_temp_reg(vs_pos_tmp_index);
5895 struct tgsi_full_dst_register tmp_pos_dst_xyz =
5896 writemask_dst(&tmp_pos_dst, TGSI_WRITEMASK_XYZ);
5897
5898 struct tgsi_full_src_register prescale_scale =
5899 make_src_const_reg(emit->vposition.prescale_scale_index);
5900 struct tgsi_full_src_register prescale_trans =
5901 make_src_const_reg(emit->vposition.prescale_trans_index);
5902
5903 /* MUL tmp_pos.xyz, tmp_pos, prescale.scale */
5904 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_pos_dst_xyz,
5905 &tmp_pos_src, &prescale_scale, FALSE);
5906
5907 /* MAD pos, tmp_pos.wwww, prescale.trans, tmp_pos */
5908 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &pos_dst, &tmp_pos_src_w,
5909 &prescale_trans, &tmp_pos_src, FALSE);
5910 }
5911 else if (emit->key.vs.undo_viewport) {
5912 /* This code computes the final vertex position from the temporary
5913 * vertex position by undoing the viewport transformation and the
5914 * divide-by-W operation (we convert window coords back to clip coords).
5915 * This is needed when we use the 'draw' module for fallbacks.
5916 * If p is the temp pos in window coords, then the NDC coord q is:
5917 * q.x = (p.x - vp.x_trans) / vp.x_scale * p.w
5918 * q.y = (p.y - vp.y_trans) / vp.y_scale * p.w
5919 * q.z = p.z * p.w
5920 * q.w = p.w
5921 * CONST[vs_viewport_index] contains:
5922 * { 1/vp.x_scale, 1/vp.y_scale, -vp.x_trans, -vp.y_trans }
5923 */
5924 struct tgsi_full_dst_register tmp_pos_dst =
5925 make_dst_temp_reg(vs_pos_tmp_index);
5926 struct tgsi_full_dst_register tmp_pos_dst_xy =
5927 writemask_dst(&tmp_pos_dst, TGSI_WRITEMASK_XY);
5928 struct tgsi_full_src_register tmp_pos_src_wwww =
5929 scalar_src(&tmp_pos_src, TGSI_SWIZZLE_W);
5930
5931 struct tgsi_full_dst_register pos_dst_xyz =
5932 writemask_dst(&pos_dst, TGSI_WRITEMASK_XYZ);
5933 struct tgsi_full_dst_register pos_dst_w =
5934 writemask_dst(&pos_dst, TGSI_WRITEMASK_W);
5935
5936 struct tgsi_full_src_register vp_xyzw =
5937 make_src_const_reg(emit->vs.viewport_index);
5938 struct tgsi_full_src_register vp_zwww =
5939 swizzle_src(&vp_xyzw, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W,
5940 TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);
5941
5942 /* ADD tmp_pos.xy, tmp_pos.xy, viewport.zwww */
5943 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp_pos_dst_xy,
5944 &tmp_pos_src, &vp_zwww, FALSE);
5945
5946 /* MUL tmp_pos.xy, tmp_pos.xyzw, viewport.xyzw */
5947 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_pos_dst_xy,
5948 &tmp_pos_src, &vp_xyzw, FALSE);
5949
5950 /* MUL pos.xyz, tmp_pos.xyz, tmp_pos.www */
5951 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &pos_dst_xyz,
5952 &tmp_pos_src, &tmp_pos_src_wwww, FALSE);
5953
5954 /* MOV pos.w, tmp_pos.w */
5955 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &pos_dst_w,
5956 &tmp_pos_src, FALSE);
5957 }
5958 else if (vs_pos_tmp_index != INVALID_INDEX) {
5959 /* This code is to handle the case where the temporary vertex
5960 * position register is created when the vertex shader has stream
5961 * output and prescale is disabled because rasterization is to be
5962 * discarded.
5963 */
5964 struct tgsi_full_dst_register pos_dst =
5965 make_dst_output_reg(emit->vposition.out_index);
5966
5967 /* MOV pos, tmp_pos */
5968 begin_emit_instruction(emit);
5969 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
5970 emit_dst_register(emit, &pos_dst);
5971 emit_src_register(emit, &tmp_pos_src);
5972 end_emit_instruction(emit);
5973 }
5974 }
5975
5976 static void
5977 emit_clipping_instructions(struct svga_shader_emitter_v10 *emit)
5978 {
5979 if (emit->clip_mode == CLIP_DISTANCE) {
5980 /* Copy from copy distance temporary to CLIPDIST & the shadow copy */
5981 emit_clip_distance_instructions(emit);
5982
5983 } else if (emit->clip_mode == CLIP_VERTEX) {
5984 /* Convert TGSI CLIPVERTEX to CLIPDIST */
5985 emit_clip_vertex_instructions(emit);
5986 }
5987
5988 /**
5989 * Emit vertex position and take care of legacy user-defined clip planes
5990 * only if there is a valid vertex position register index.
5991 * This handles the case where the shader doesn't output a vertex
5992 * position: in that case there is no point in emitting any more
5993 * vertex instructions, so bail out early.
5994 */
5995 if (emit->vposition.out_index == INVALID_INDEX)
5996 return;
5997
5998 /**
5999 * Emit per-vertex clipping instructions for legacy user-defined clip planes.
6000 * NOTE: we must emit the clip distance instructions before the
6001 * emit_vpos_instructions() call since the latter function will change
6002 * the TEMP[vs_pos_tmp_index] value.
6003 */
6004 if (emit->clip_mode == CLIP_LEGACY) {
6005 /* Emit CLIPDIST for legacy user defined clip planes */
6006 emit_clip_distance_from_vpos(emit, emit->vposition.tmp_index);
6007 }
6008 }
6009
6010
6011 /**
6012 * Emit extra per-vertex instructions. This includes clip-coordinate
6013 * space conversion and computing clip distances. This is called for
6014 * each GS emit-vertex instruction and at the end of VS translation.
6015 */
6016 static void
6017 emit_vertex_instructions(struct svga_shader_emitter_v10 *emit)
6018 {
6019 const unsigned vs_pos_tmp_index = emit->vposition.tmp_index;
6020
6021 /* Emit clipping instructions based on clipping mode */
6022 emit_clipping_instructions(emit);
6023
6024 /**
6025 * Reset the temporary vertex position register index
6026 * so that emit_dst_register() will use the real vertex position output
6027 */
6028 emit->vposition.tmp_index = INVALID_INDEX;
6029
6030 /* Emit vertex position instructions */
6031 emit_vpos_instructions(emit, vs_pos_tmp_index);
6032
6033 /* Restore original vposition.tmp_index value for the next GS vertex.
6034 * It doesn't matter for VS.
6035 */
6036 emit->vposition.tmp_index = vs_pos_tmp_index;
6037 }
6038
6039 /**
6040 * Translate the TGSI_OPCODE_EMIT GS instruction.
6041 */
6042 static boolean
6043 emit_vertex(struct svga_shader_emitter_v10 *emit,
6044 const struct tgsi_full_instruction *inst)
6045 {
6046 boolean ret = TRUE;
6047
6048 assert(emit->unit == PIPE_SHADER_GEOMETRY);
6049
6050 emit_vertex_instructions(emit);
6051
6052 /* We can't use emit_simple() because the TGSI instruction has one
6053 * operand (vertex stream number) which we must ignore for VGPU10.
6054 */
6055 begin_emit_instruction(emit);
6056 emit_opcode(emit, VGPU10_OPCODE_EMIT, FALSE);
6057 end_emit_instruction(emit);
6058
6059 return ret;
6060 }
6061
6062
6063 /**
6064 * Emit the extra code to convert from VGPU10's boolean front-face
6065 * register to TGSI's signed front-face register.
6066 *
6067 * TODO: Make temporary front-face register a scalar.
6068 */
6069 static void
6070 emit_frontface_instructions(struct svga_shader_emitter_v10 *emit)
6071 {
6072 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6073
6074 if (emit->fs.face_input_index != INVALID_INDEX) {
6075 /* convert vgpu10 boolean face register to gallium +/-1 value */
6076 struct tgsi_full_dst_register tmp_dst =
6077 make_dst_temp_reg(emit->fs.face_tmp_index);
6078 struct tgsi_full_src_register one =
6079 make_immediate_reg_float(emit, 1.0f);
6080 struct tgsi_full_src_register neg_one =
6081 make_immediate_reg_float(emit, -1.0f);
6082
6083 /* MOVC face_tmp, IS_FRONT_FACE.x, 1.0, -1.0 */
6084 begin_emit_instruction(emit);
6085 emit_opcode(emit, VGPU10_OPCODE_MOVC, FALSE);
6086 emit_dst_register(emit, &tmp_dst);
6087 emit_face_register(emit);
6088 emit_src_register(emit, &one);
6089 emit_src_register(emit, &neg_one);
6090 end_emit_instruction(emit);
6091 }
6092 }
6093
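/* In other words, the single MOVC above maps the VGPU10 boolean
 * front-face register onto the +/-1.0 value that TGSI's FACE input
 * expects:
 *    front-facing (nonzero) -> face_tmp = 1.0
 *    back-facing  (zero)    -> face_tmp = -1.0
 */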
6094
6095 /**
6096 * Emit the extra code to convert from VGPU10's fragcoord.w value to 1/w.
6097 */
6098 static void
6099 emit_fragcoord_instructions(struct svga_shader_emitter_v10 *emit)
6100 {
6101 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6102
6103 if (emit->fs.fragcoord_input_index != INVALID_INDEX) {
6104 struct tgsi_full_dst_register tmp_dst =
6105 make_dst_temp_reg(emit->fs.fragcoord_tmp_index);
6106 struct tgsi_full_dst_register tmp_dst_xyz =
6107 writemask_dst(&tmp_dst, TGSI_WRITEMASK_XYZ);
6108 struct tgsi_full_dst_register tmp_dst_w =
6109 writemask_dst(&tmp_dst, TGSI_WRITEMASK_W);
6110 struct tgsi_full_src_register one =
6111 make_immediate_reg_float(emit, 1.0f);
6112 struct tgsi_full_src_register fragcoord =
6113 make_src_reg(TGSI_FILE_INPUT, emit->fs.fragcoord_input_index);
6114
6115 /* save the input index */
6116 unsigned fragcoord_input_index = emit->fs.fragcoord_input_index;
6117 /* set to invalid to prevent substitution in emit_src_register() */
6118 emit->fs.fragcoord_input_index = INVALID_INDEX;
6119
6120 /* MOV fragcoord_tmp.xyz, fragcoord.xyz */
6121 begin_emit_instruction(emit);
6122 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
6123 emit_dst_register(emit, &tmp_dst_xyz);
6124 emit_src_register(emit, &fragcoord);
6125 end_emit_instruction(emit);
6126
6127 /* DIV fragcoord_tmp.w, 1.0, fragcoord.w */
6128 begin_emit_instruction(emit);
6129 emit_opcode(emit, VGPU10_OPCODE_DIV, FALSE);
6130 emit_dst_register(emit, &tmp_dst_w);
6131 emit_src_register(emit, &one);
6132 emit_src_register(emit, &fragcoord);
6133 end_emit_instruction(emit);
6134
6135 /* restore saved value */
6136 emit->fs.fragcoord_input_index = fragcoord_input_index;
6137 }
6138 }
6139
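/* Worked example (input values made up for illustration): for an
 * incoming fragcoord of (320.5, 240.5, 0.25, 5.0) the code above leaves
 * fragcoord_tmp = (320.5, 240.5, 0.25, 0.2); only W is rewritten, since
 * 1/5.0 = 0.2.  Subsequent reads of the fragcoord input are then
 * redirected to this temporary by emit_src_register().
 */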
6140
6141 /**
6142 * Emit extra instructions to adjust VS inputs/attributes. This can
6143 * mean casting a vertex attribute from int to float or setting the
6144 * W component to 1, or both.
6145 */
6146 static void
6147 emit_vertex_attrib_instructions(struct svga_shader_emitter_v10 *emit)
6148 {
6149 const unsigned save_w_1_mask = emit->key.vs.adjust_attrib_w_1;
6150 const unsigned save_itof_mask = emit->key.vs.adjust_attrib_itof;
6151 const unsigned save_utof_mask = emit->key.vs.adjust_attrib_utof;
6152 const unsigned save_is_bgra_mask = emit->key.vs.attrib_is_bgra;
6153 const unsigned save_puint_to_snorm_mask = emit->key.vs.attrib_puint_to_snorm;
6154 const unsigned save_puint_to_uscaled_mask = emit->key.vs.attrib_puint_to_uscaled;
6155 const unsigned save_puint_to_sscaled_mask = emit->key.vs.attrib_puint_to_sscaled;
6156
6157 unsigned adjust_mask = (save_w_1_mask |
6158 save_itof_mask |
6159 save_utof_mask |
6160 save_is_bgra_mask |
6161 save_puint_to_snorm_mask |
6162 save_puint_to_uscaled_mask |
6163 save_puint_to_sscaled_mask);
6164
6165 assert(emit->unit == PIPE_SHADER_VERTEX);
6166
6167 if (adjust_mask) {
6168 struct tgsi_full_src_register one =
6169 make_immediate_reg_float(emit, 1.0f);
6170
6171 struct tgsi_full_src_register one_int =
6172 make_immediate_reg_int(emit, 1);
6173
6174 /* We need to turn off these bitmasks while emitting the
6175 * instructions below, then restore them afterward.
6176 */
6177 emit->key.vs.adjust_attrib_w_1 = 0;
6178 emit->key.vs.adjust_attrib_itof = 0;
6179 emit->key.vs.adjust_attrib_utof = 0;
6180 emit->key.vs.attrib_is_bgra = 0;
6181 emit->key.vs.attrib_puint_to_snorm = 0;
6182 emit->key.vs.attrib_puint_to_uscaled = 0;
6183 emit->key.vs.attrib_puint_to_sscaled = 0;
6184
6185 while (adjust_mask) {
6186 unsigned index = u_bit_scan(&adjust_mask);
6187
6188 /* skip the instruction if this vertex attribute is not being used */
6189 if (emit->info.input_usage_mask[index] == 0)
6190 continue;
6191
6192 unsigned tmp = emit->vs.adjusted_input[index];
6193 struct tgsi_full_src_register input_src =
6194 make_src_reg(TGSI_FILE_INPUT, index);
6195
6196 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
6197 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
6198 struct tgsi_full_dst_register tmp_dst_w =
6199 writemask_dst(&tmp_dst, TGSI_WRITEMASK_W);
6200
6201 /* ITOF/UTOF/MOV tmp, input[index] */
6202 if (save_itof_mask & (1 << index)) {
6203 emit_instruction_op1(emit, VGPU10_OPCODE_ITOF,
6204 &tmp_dst, &input_src, FALSE);
6205 }
6206 else if (save_utof_mask & (1 << index)) {
6207 emit_instruction_op1(emit, VGPU10_OPCODE_UTOF,
6208 &tmp_dst, &input_src, FALSE);
6209 }
6210 else if (save_puint_to_snorm_mask & (1 << index)) {
6211 emit_puint_to_snorm(emit, &tmp_dst, &input_src);
6212 }
6213 else if (save_puint_to_uscaled_mask & (1 << index)) {
6214 emit_puint_to_uscaled(emit, &tmp_dst, &input_src);
6215 }
6216 else if (save_puint_to_sscaled_mask & (1 << index)) {
6217 emit_puint_to_sscaled(emit, &tmp_dst, &input_src);
6218 }
6219 else {
6220 assert((save_w_1_mask | save_is_bgra_mask) & (1 << index));
6221 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6222 &tmp_dst, &input_src, FALSE);
6223 }
6224
6225 if (save_is_bgra_mask & (1 << index)) {
6226 emit_swap_r_b(emit, &tmp_dst, &tmp_src);
6227 }
6228
6229 if (save_w_1_mask & (1 << index)) {
6230 /* MOV tmp.w, 1.0 */
6231 if (emit->key.vs.attrib_is_pure_int & (1 << index)) {
6232 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6233 &tmp_dst_w, &one_int, FALSE);
6234 }
6235 else {
6236 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6237 &tmp_dst_w, &one, FALSE);
6238 }
6239 }
6240 }
6241
6242 emit->key.vs.adjust_attrib_w_1 = save_w_1_mask;
6243 emit->key.vs.adjust_attrib_itof = save_itof_mask;
6244 emit->key.vs.adjust_attrib_utof = save_utof_mask;
6245 emit->key.vs.attrib_is_bgra = save_is_bgra_mask;
6246 emit->key.vs.attrib_puint_to_snorm = save_puint_to_snorm_mask;
6247 emit->key.vs.attrib_puint_to_uscaled = save_puint_to_uscaled_mask;
6248 emit->key.vs.attrib_puint_to_sscaled = save_puint_to_sscaled_mask;
6249 }
6250 }
6251
6252
6253 /**
6254 * Some common values like 0.0, 1.0, 0.5, etc. are frequently needed
6255 * to implement some instructions. We pre-allocate those values here
6256 * in the immediate constant buffer.
6257 */
6258 static void
6259 alloc_common_immediates(struct svga_shader_emitter_v10 *emit)
6260 {
6261 unsigned n = 0;
6262
6263 emit->common_immediate_pos[n++] =
6264 alloc_immediate_float4(emit, 0.0f, 1.0f, 0.5f, -1.0f);
6265
6266 emit->common_immediate_pos[n++] =
6267 alloc_immediate_float4(emit, 128.0f, -128.0f, 2.0f, 3.0f);
6268
6269 emit->common_immediate_pos[n++] =
6270 alloc_immediate_int4(emit, 0, 1, 0, -1);
6271
6272 if (emit->key.vs.attrib_puint_to_snorm) {
6273 emit->common_immediate_pos[n++] =
6274 alloc_immediate_float4(emit, -2.0f, -2.0f, -2.0f, -1.66666f);
6275 }
6276
6277 if (emit->key.vs.attrib_puint_to_uscaled) {
6278 emit->common_immediate_pos[n++] =
6279 alloc_immediate_float4(emit, 1023.0f, 3.0f, 0.0f, 0.0f);
6280 }
6281
6282 if (emit->key.vs.attrib_puint_to_sscaled) {
6283 emit->common_immediate_pos[n++] =
6284 alloc_immediate_int4(emit, 22, 12, 2, 0);
6285
6286 emit->common_immediate_pos[n++] =
6287 alloc_immediate_int4(emit, 22, 30, 0, 0);
6288 }
6289
6290 assert(n <= ARRAY_SIZE(emit->common_immediate_pos));
6291 emit->num_common_immediates = n;
6292 }
6293
6294
6295 /**
6296 * Emit any extra/helper declarations/code that we might need between
6297 * the declaration section and code section.
6298 */
6299 static boolean
6300 emit_pre_helpers(struct svga_shader_emitter_v10 *emit)
6301 {
6302 /* Properties */
6303 if (emit->unit == PIPE_SHADER_GEOMETRY)
6304 emit_property_instructions(emit);
6305
6306 /* Declare inputs */
6307 if (!emit_input_declarations(emit))
6308 return FALSE;
6309
6310 /* Declare outputs */
6311 if (!emit_output_declarations(emit))
6312 return FALSE;
6313
6314 /* Declare temporary registers */
6315 emit_temporaries_declaration(emit);
6316
6317 /* Declare constant registers */
6318 emit_constant_declaration(emit);
6319
6320 /* Declare samplers and resources */
6321 emit_sampler_declarations(emit);
6322 emit_resource_declarations(emit);
6323
6324 /* Declare clip distance output registers */
6325 if (emit->unit == PIPE_SHADER_VERTEX ||
6326 emit->unit == PIPE_SHADER_GEOMETRY) {
6327 emit_clip_distance_declarations(emit);
6328 }
6329
6330 alloc_common_immediates(emit);
6331
6332 if (emit->unit == PIPE_SHADER_FRAGMENT &&
6333 emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS) {
6334 float alpha = emit->key.fs.alpha_ref;
6335 emit->fs.alpha_ref_index =
6336 alloc_immediate_float4(emit, alpha, alpha, alpha, alpha);
6337 }
6338
6339 /* Now, emit the constant block containing all the immediates
6340 * declared by shader, as well as the extra ones seen above.
6341 */
6342 emit_vgpu10_immediates_block(emit);
6343
6344 if (emit->unit == PIPE_SHADER_FRAGMENT) {
6345 emit_frontface_instructions(emit);
6346 emit_fragcoord_instructions(emit);
6347 }
6348 else if (emit->unit == PIPE_SHADER_VERTEX) {
6349 emit_vertex_attrib_instructions(emit);
6350 }
6351
6352 return TRUE;
6353 }
6354
6355
6356 /**
6357 * Emit alpha test code. This compares TEMP[fs_color_tmp_index].w
6358 * against the alpha reference value and discards the fragment if the
6359 * comparison fails.
6360 */
6361 static void
6362 emit_alpha_test_instructions(struct svga_shader_emitter_v10 *emit,
6363 unsigned fs_color_tmp_index)
6364 {
6365 /* compare output color's alpha to alpha ref and kill */
6366 unsigned tmp = get_temp_index(emit);
6367 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
6368 struct tgsi_full_src_register tmp_src_x =
6369 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
6370 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
6371 struct tgsi_full_src_register color_src =
6372 make_src_temp_reg(fs_color_tmp_index);
6373 struct tgsi_full_src_register color_src_w =
6374 scalar_src(&color_src, TGSI_SWIZZLE_W);
6375 struct tgsi_full_src_register ref_src =
6376 make_src_immediate_reg(emit->fs.alpha_ref_index);
6377 struct tgsi_full_dst_register color_dst =
6378 make_dst_output_reg(emit->fs.color_out_index[0]);
6379
6380 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6381
6382 /* dst = src0 'alpha_func' src1 */
6383 emit_comparison(emit, emit->key.fs.alpha_func, &tmp_dst,
6384 &color_src_w, &ref_src);
6385
6386 /* DISCARD if dst.x == 0 */
6387 begin_emit_instruction(emit);
6388 emit_discard_opcode(emit, FALSE); /* discard if src0.x is zero */
6389 emit_src_register(emit, &tmp_src_x);
6390 end_emit_instruction(emit);
6391
6392 /* If we don't need to broadcast the color below or set fragments to
6393 * white, emit final color here.
6394 */
6395 if (emit->key.fs.write_color0_to_n_cbufs <= 1 &&
6396 !emit->key.fs.white_fragments) {
6397 /* MOV output.color, tempcolor */
6398 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst,
6399 &color_src, FALSE); /* XXX saturate? */
6400 }
6401
6402 free_temp_indexes(emit);
6403 }
6404
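/* For example, with alpha_func = SVGA3D_CMP_GREATER and alpha_ref = 0.5
 * the sequence above amounts to (illustrative mnemonics, not real output):
 *    <compare> tmp, color.wwww, ref   ; tmp.x = (color.w > 0.5) ? ~0 : 0
 *    DISCARD   tmp.x (if zero)        ; kill fragments that fail the test
 *    MOV       output.color, color    ; unless broadcast/white is needed
 * The actual comparison opcode is chosen by emit_comparison().
 */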
6405
6406 /**
6407 * When we need to emit white for all fragments (for emulating XOR logicop
6408 * mode), this function copies white into the temporary color output register.
6409 */
6410 static void
6411 emit_set_color_white(struct svga_shader_emitter_v10 *emit,
6412 unsigned fs_color_tmp_index)
6413 {
6414 struct tgsi_full_dst_register color_dst =
6415 make_dst_temp_reg(fs_color_tmp_index);
6416 struct tgsi_full_src_register white =
6417 make_immediate_reg_float(emit, 1.0f);
6418
6419 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst, &white, FALSE);
6420 }
6421
6422
6423 /**
6424 * Emit instructions for writing a single color output to multiple
6425 * color buffers.
6426 * This is used when the TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS property
6427 * is set and the number of render targets is greater than one, or when
6428 * key.fs.white_fragments is true.
6429 * \param fs_color_tmp_index index of the temp register that holds the
6430 * color to broadcast.
6431 */
6432 static void
6433 emit_broadcast_color_instructions(struct svga_shader_emitter_v10 *emit,
6434 unsigned fs_color_tmp_index)
6435 {
6436 const unsigned n = emit->key.fs.write_color0_to_n_cbufs;
6437 unsigned i;
6438 struct tgsi_full_src_register color_src =
6439 make_src_temp_reg(fs_color_tmp_index);
6440
6441 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6442
6443 for (i = 0; i < n; i++) {
6444 unsigned output_reg = emit->fs.color_out_index[i];
6445 struct tgsi_full_dst_register color_dst =
6446 make_dst_output_reg(output_reg);
6447
6448 /* Fill in this semantic here since we'll use it later in
6449 * emit_dst_register().
6450 */
6451 emit->info.output_semantic_name[output_reg] = TGSI_SEMANTIC_COLOR;
6452
6453 /* MOV output.color[i], tempcolor */
6454 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst,
6455 &color_src, FALSE); /* XXX saturate? */
6456 }
6457 }
6458
6459
6460 /**
6461 * Emit extra helper code after the original shader code, but before the
6462 * last END/RET instruction.
6463 * For vertex shaders this means emitting the extra code to apply the
6464 * prescale scale/translation.
6465 */
6466 static boolean
6467 emit_post_helpers(struct svga_shader_emitter_v10 *emit)
6468 {
6469 if (emit->unit == PIPE_SHADER_VERTEX) {
6470 emit_vertex_instructions(emit);
6471 }
6472 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
6473 const unsigned fs_color_tmp_index = emit->fs.color_tmp_index;
6474
6475 /* We no longer want emit_dst_register() to substitute the
6476 * temporary fragment color register for the real color output.
6477 */
6478 emit->fs.color_tmp_index = INVALID_INDEX;
6479
6480 if (emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS) {
6481 emit_alpha_test_instructions(emit, fs_color_tmp_index);
6482 }
6483 if (emit->key.fs.white_fragments) {
6484 emit_set_color_white(emit, fs_color_tmp_index);
6485 }
6486 if (emit->key.fs.write_color0_to_n_cbufs > 1 ||
6487 emit->key.fs.white_fragments) {
6488 emit_broadcast_color_instructions(emit, fs_color_tmp_index);
6489 }
6490 }
6491
6492 return TRUE;
6493 }
6494
6495
6496 /**
6497 * Translate the TGSI tokens into VGPU10 tokens.
6498 */
6499 static boolean
6500 emit_vgpu10_instructions(struct svga_shader_emitter_v10 *emit,
6501 const struct tgsi_token *tokens)
6502 {
6503 struct tgsi_parse_context parse;
6504 boolean ret = TRUE;
6505 boolean pre_helpers_emitted = FALSE;
6506 unsigned inst_number = 0;
6507
6508 tgsi_parse_init(&parse, tokens);
6509
6510 while (!tgsi_parse_end_of_tokens(&parse)) {
6511 tgsi_parse_token(&parse);
6512
6513 switch (parse.FullToken.Token.Type) {
6514 case TGSI_TOKEN_TYPE_IMMEDIATE:
6515 ret = emit_vgpu10_immediate(emit, &parse.FullToken.FullImmediate);
6516 if (!ret)
6517 goto done;
6518 break;
6519
6520 case TGSI_TOKEN_TYPE_DECLARATION:
6521 ret = emit_vgpu10_declaration(emit, &parse.FullToken.FullDeclaration);
6522 if (!ret)
6523 goto done;
6524 break;
6525
6526 case TGSI_TOKEN_TYPE_INSTRUCTION:
6527 if (!pre_helpers_emitted) {
6528 ret = emit_pre_helpers(emit);
6529 if (!ret)
6530 goto done;
6531 pre_helpers_emitted = TRUE;
6532 }
6533 ret = emit_vgpu10_instruction(emit, inst_number++,
6534 &parse.FullToken.FullInstruction);
6535 if (!ret)
6536 goto done;
6537 break;
6538
6539 case TGSI_TOKEN_TYPE_PROPERTY:
6540 ret = emit_vgpu10_property(emit, &parse.FullToken.FullProperty);
6541 if (!ret)
6542 goto done;
6543 break;
6544
6545 default:
6546 break;
6547 }
6548 }
6549
6550 done:
6551 tgsi_parse_free(&parse);
6552 return ret;
6553 }
6554
6555
6556 /**
6557 * Emit the first VGPU10 shader tokens.
6558 */
6559 static boolean
6560 emit_vgpu10_header(struct svga_shader_emitter_v10 *emit)
6561 {
6562 VGPU10ProgramToken ptoken;
6563
6564 /* First token: VGPU10ProgramToken (version info, program type (VS,GS,PS)) */
6565 ptoken.majorVersion = 4;
6566 ptoken.minorVersion = 0;
6567 ptoken.programType = translate_shader_type(emit->unit);
6568 if (!emit_dword(emit, ptoken.value))
6569 return FALSE;
6570
6571 /* Second token: total length of shader, in tokens. We can't fill this
6572 * in until we're all done. Emit zero for now.
6573 */
6574 return emit_dword(emit, 0);
6575 }
6576
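/* For reference, the header written above occupies the first two DWORDs
 * of the token stream:
 *    DWORD 0: version 4.0 plus the program type (VGPU10ProgramToken)
 *    DWORD 1: total token count, patched in later by emit_vgpu10_tail()
 */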
6577
6578 static boolean
6579 emit_vgpu10_tail(struct svga_shader_emitter_v10 *emit)
6580 {
6581 VGPU10ProgramToken *tokens;
6582
6583 /* Replace the second token with total shader length */
6584 tokens = (VGPU10ProgramToken *) emit->buf;
6585 tokens[1].value = emit_get_num_tokens(emit);
6586
6587 return TRUE;
6588 }
6589
6590
6591 /**
6592 * Modify the FS to read the BCOLORs and use the FACE register
6593 * to choose between the front/back colors.
6594 */
6595 static const struct tgsi_token *
6596 transform_fs_twoside(const struct tgsi_token *tokens)
6597 {
6598 if (0) {
6599 debug_printf("Before tgsi_add_two_side ------------------\n");
6600 tgsi_dump(tokens,0);
6601 }
6602 tokens = tgsi_add_two_side(tokens);
6603 if (0) {
6604 debug_printf("After tgsi_add_two_side ------------------\n");
6605 tgsi_dump(tokens, 0);
6606 }
6607 return tokens;
6608 }
6609
6610
6611 /**
6612 * Modify the FS to do polygon stipple.
6613 */
6614 static const struct tgsi_token *
6615 transform_fs_pstipple(struct svga_shader_emitter_v10 *emit,
6616 const struct tgsi_token *tokens)
6617 {
6618 const struct tgsi_token *new_tokens;
6619 unsigned unit;
6620
6621 if (0) {
6622 debug_printf("Before pstipple ------------------\n");
6623 tgsi_dump(tokens,0);
6624 }
6625
6626 new_tokens = util_pstipple_create_fragment_shader(tokens, &unit, 0,
6627 TGSI_FILE_INPUT);
6628
6629 emit->fs.pstipple_sampler_unit = unit;
6630
6631 /* Setup texture state for stipple */
6632 emit->sampler_target[unit] = TGSI_TEXTURE_2D;
6633 emit->key.tex[unit].swizzle_r = TGSI_SWIZZLE_X;
6634 emit->key.tex[unit].swizzle_g = TGSI_SWIZZLE_Y;
6635 emit->key.tex[unit].swizzle_b = TGSI_SWIZZLE_Z;
6636 emit->key.tex[unit].swizzle_a = TGSI_SWIZZLE_W;
6637
6638 if (0) {
6639 debug_printf("After pstipple ------------------\n");
6640 tgsi_dump(new_tokens, 0);
6641 }
6642
6643 return new_tokens;
6644 }
6645
6646 /**
6647 * Modify the FS to support anti-aliased points.
6648 */
6649 static const struct tgsi_token *
6650 transform_fs_aapoint(const struct tgsi_token *tokens,
6651 int aa_coord_index)
6652 {
6653 if (0) {
6654 debug_printf("Before tgsi_add_aa_point ------------------\n");
6655 tgsi_dump(tokens,0);
6656 }
6657 tokens = tgsi_add_aa_point(tokens, aa_coord_index);
6658 if (0) {
6659 debug_printf("After tgsi_add_aa_point ------------------\n");
6660 tgsi_dump(tokens, 0);
6661 }
6662 return tokens;
6663 }
6664
6665 /**
6666 * This is the main entrypoint for the TGSI -> VGPU10 translator.
6667 */
6668 struct svga_shader_variant *
6669 svga_tgsi_vgpu10_translate(struct svga_context *svga,
6670 const struct svga_shader *shader,
6671 const struct svga_compile_key *key,
6672 unsigned unit)
6673 {
6674 struct svga_shader_variant *variant = NULL;
6675 struct svga_shader_emitter_v10 *emit;
6676 const struct tgsi_token *tokens = shader->tokens;
6677 struct svga_vertex_shader *vs = svga->curr.vs;
6678 struct svga_geometry_shader *gs = svga->curr.gs;
6679
6680 assert(unit == PIPE_SHADER_VERTEX ||
6681 unit == PIPE_SHADER_GEOMETRY ||
6682 unit == PIPE_SHADER_FRAGMENT);
6683
6684 /* These two flags cannot be used together */
6685 assert(key->vs.need_prescale + key->vs.undo_viewport <= 1);
6686
6687 SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_TGSIVGPU10TRANSLATE);
6688 /*
6689 * Setup the code emitter
6690 */
6691 emit = alloc_emitter();
6692 if (!emit)
6693 goto done;
6694
6695 emit->unit = unit;
6696 emit->key = *key;
6697
6698 emit->vposition.need_prescale = (emit->key.vs.need_prescale ||
6699 emit->key.gs.need_prescale);
6700 emit->vposition.tmp_index = INVALID_INDEX;
6701 emit->vposition.so_index = INVALID_INDEX;
6702 emit->vposition.out_index = INVALID_INDEX;
6703
6704 emit->fs.color_tmp_index = INVALID_INDEX;
6705 emit->fs.face_input_index = INVALID_INDEX;
6706 emit->fs.fragcoord_input_index = INVALID_INDEX;
6707
6708 emit->gs.prim_id_index = INVALID_INDEX;
6709
6710 emit->clip_dist_out_index = INVALID_INDEX;
6711 emit->clip_dist_tmp_index = INVALID_INDEX;
6712 emit->clip_dist_so_index = INVALID_INDEX;
6713 emit->clip_vertex_out_index = INVALID_INDEX;
6714
6715 if (emit->key.fs.alpha_func == SVGA3D_CMP_INVALID) {
6716 emit->key.fs.alpha_func = SVGA3D_CMP_ALWAYS;
6717 }
6718
6719 if (unit == PIPE_SHADER_FRAGMENT) {
6720 if (key->fs.light_twoside) {
6721 tokens = transform_fs_twoside(tokens);
6722 }
6723 if (key->fs.pstipple) {
6724 const struct tgsi_token *new_tokens =
6725 transform_fs_pstipple(emit, tokens);
6726 if (tokens != shader->tokens) {
6727 /* free the two-sided shader tokens */
6728 tgsi_free_tokens(tokens);
6729 }
6730 tokens = new_tokens;
6731 }
6732 if (key->fs.aa_point) {
6733 tokens = transform_fs_aapoint(tokens, key->fs.aa_point_coord_index);
6734 }
6735 }
6736
6737 if (SVGA_DEBUG & DEBUG_TGSI) {
6738 debug_printf("#####################################\n");
6739 debug_printf("### TGSI Shader %u\n", shader->id);
6740 tgsi_dump(tokens, 0);
6741 }
6742
6743 /**
6744 * Re-scan the shader info if the token string is different from the one
6745 * included in the shader; otherwise, the info is already up-to-date.
6746 */
6747 if (tokens != shader->tokens) {
6748 tgsi_scan_shader(tokens, &emit->info);
6749 } else {
6750 emit->info = shader->info;
6751 }
6752
6753 emit->num_outputs = emit->info.num_outputs;
6754
6755 if (unit == PIPE_SHADER_FRAGMENT) {
6756 /* Compute FS input remapping to match the output from VS/GS */
6757 if (gs) {
6758 svga_link_shaders(&gs->base.info, &emit->info, &emit->linkage);
6759 } else {
6760 assert(vs);
6761 svga_link_shaders(&vs->base.info, &emit->info, &emit->linkage);
6762 }
6763 } else if (unit == PIPE_SHADER_GEOMETRY) {
6764 assert(vs);
6765 svga_link_shaders(&vs->base.info, &emit->info, &emit->linkage);
6766 }
6767
6768 determine_clipping_mode(emit);
6769
6770 if (unit == PIPE_SHADER_GEOMETRY || unit == PIPE_SHADER_VERTEX) {
6771 if (shader->stream_output != NULL || emit->clip_mode == CLIP_DISTANCE) {
6772 /* if there are stream output declarations associated
6773 * with this shader, or the shader writes to ClipDistance,
6774 * then reserve extra registers for the non-adjusted vertex position
6775 * and the ClipDistance shadow copy.
6776 */
6777 emit->vposition.so_index = emit->num_outputs++;
6778
6779 if (emit->clip_mode == CLIP_DISTANCE) {
6780 emit->clip_dist_so_index = emit->num_outputs++;
6781 if (emit->info.num_written_clipdistance > 4)
6782 emit->num_outputs++;
6783 }
6784 }
6785 }
6786
6787 /*
6788 * Do actual shader translation.
6789 */
6790 if (!emit_vgpu10_header(emit)) {
6791 debug_printf("svga: emit VGPU10 header failed\n");
6792 goto cleanup;
6793 }
6794
6795 if (!emit_vgpu10_instructions(emit, tokens)) {
6796 debug_printf("svga: emit VGPU10 instructions failed\n");
6797 goto cleanup;
6798 }
6799
6800 if (!emit_vgpu10_tail(emit)) {
6801 debug_printf("svga: emit VGPU10 tail failed\n");
6802 goto cleanup;
6803 }
6804
6805 if (emit->register_overflow) {
6806 goto cleanup;
6807 }
6808
6809 /*
6810 * Create, initialize the 'variant' object.
6811 */
6812 variant = svga_new_shader_variant(svga);
6813 if (!variant)
6814 goto cleanup;
6815
6816 variant->shader = shader;
6817 variant->nr_tokens = emit_get_num_tokens(emit);
6818 variant->tokens = (const unsigned *)emit->buf;
6819 emit->buf = NULL; /* buffer is no longer owned by emitter context */
6820 memcpy(&variant->key, key, sizeof(*key));
6821 variant->id = UTIL_BITMASK_INVALID_INDEX;
6822
6823 /* The 'extra' constants start right after the constants declared by the
6824 * shader itself, so their starting offset is the shader's constant count.
6825 */
6826 variant->extra_const_start = emit->num_shader_consts[0];
6827 if (key->gs.wide_point) {
6828 /**
6829 * The extra constant added in the transformed shader
6830 * for inverse viewport scale is to be supplied by the driver.
6831 * So the extra constant starting offset needs to be reduced by 1.
6832 */
6833 assert(variant->extra_const_start > 0);
6834 variant->extra_const_start--;
6835 }
6836
6837 variant->pstipple_sampler_unit = emit->fs.pstipple_sampler_unit;
6838
6839 /* If there was exactly one write to a fragment shader output register
6840 * and it came from a constant buffer, we know all fragments will have
6841 * the same color (except for blending).
6842 */
6843 variant->constant_color_output =
6844 emit->constant_color_output && emit->num_output_writes == 1;
6845
6846 /** Keep track in the variant of whether flat interpolation is used
6847 * for any of the varyings.
6848 */
6849 variant->uses_flat_interp = emit->uses_flat_interp;
6850
6851 if (tokens != shader->tokens) {
6852 tgsi_free_tokens(tokens);
6853 }
6854
6855 cleanup:
6856 free_emitter(emit);
6857
6858 done:
6859 SVGA_STATS_TIME_POP(svga_sws(svga));
6860 return variant;
6861 }