svga: fix starting index for system values
[mesa.git] / src / gallium / drivers / svga / svga_tgsi_vgpu10.c
1 /**********************************************************
2 * Copyright 1998-2013 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 /**
27 * @file svga_tgsi_vgpu10.c
28 *
29 * TGSI -> VGPU10 shader translation.
30 *
31 * \author Mingcheng Chen
32 * \author Brian Paul
33 */
34
35 #include "pipe/p_compiler.h"
36 #include "pipe/p_shader_tokens.h"
37 #include "pipe/p_defines.h"
38 #include "tgsi/tgsi_build.h"
39 #include "tgsi/tgsi_dump.h"
40 #include "tgsi/tgsi_info.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_scan.h"
43 #include "tgsi/tgsi_two_side.h"
44 #include "tgsi/tgsi_aa_point.h"
45 #include "tgsi/tgsi_util.h"
46 #include "util/u_math.h"
47 #include "util/u_memory.h"
48 #include "util/u_bitmask.h"
49 #include "util/u_debug.h"
50 #include "util/u_pstipple.h"
51
52 #include "svga_context.h"
53 #include "svga_debug.h"
54 #include "svga_link.h"
55 #include "svga_shader.h"
56 #include "svga_tgsi.h"
57
58 #include "VGPU10ShaderTokens.h"
59
60
61 #define INVALID_INDEX 99999
62 #define MAX_INTERNAL_TEMPS 3
63 #define MAX_SYSTEM_VALUES 4
64 #define MAX_IMMEDIATE_COUNT \
65 (VGPU10_MAX_IMMEDIATE_CONSTANT_BUFFER_ELEMENT_COUNT/4)
66 #define MAX_TEMP_ARRAYS 64 /* Enough? */
67
68
69 /**
70 * Clipping is complicated. There are four different cases which we
71 * handle during VS/GS shader translation:
72 */
73 enum clipping_mode
74 {
75 CLIP_NONE, /**< No clipping enabled */
76 CLIP_LEGACY, /**< The shader has no clipping declarations or code but
77 * one or more user-defined clip planes are enabled. We
78 * generate extra code to emit clip distances.
79 */
80 CLIP_DISTANCE, /**< The shader already declares clip distance output
81 * registers and has code to write to them.
82 */
83 CLIP_VERTEX /**< The shader declares a clip vertex output register and
84 * has code that writes to the register. We convert the
85 * clipvertex position into one or more clip distances.
86 */
87 };
88
89
90 struct svga_shader_emitter_v10
91 {
92 /* The token output buffer */
93 unsigned size;
94 char *buf;
95 char *ptr;
96
97 /* Information about the shader and state (does not change) */
98 struct svga_compile_key key;
99 struct tgsi_shader_info info;
100 unsigned unit;
101 unsigned version; /**< Either 40 or 41 at this time */
102
103 unsigned inst_start_token;
104 boolean discard_instruction; /**< throw away current instruction? */
105
106 union tgsi_immediate_data immediates[MAX_IMMEDIATE_COUNT][4];
107 unsigned num_immediates; /**< Number of immediates emitted */
108 unsigned common_immediate_pos[8]; /**< literals for common immediates */
109 unsigned num_common_immediates;
110 boolean immediates_emitted;
111
112 unsigned num_outputs; /**< includes any extra outputs */
113 /** The first extra output is reserved for
114 * non-adjusted vertex position for
115 * stream output purposes
116 */
117
118 /* Temporary Registers */
119 unsigned num_shader_temps; /**< num of temps used by original shader */
120 unsigned internal_temp_count; /**< currently allocated internal temps */
121 struct {
122 unsigned start, size;
123 } temp_arrays[MAX_TEMP_ARRAYS];
124 unsigned num_temp_arrays;
125
126 /** Map TGSI temp registers to VGPU10 temp array IDs and indexes */
127 struct {
128 unsigned arrayId, index;
129 } temp_map[VGPU10_MAX_TEMPS]; /**< arrayId, element */
130
131 /** Number of constants used by original shader for each constant buffer.
132 * The size should probably always match that of svga_state.constbufs.
133 */
134 unsigned num_shader_consts[SVGA_MAX_CONST_BUFS];
135
136 /* Samplers */
137 unsigned num_samplers;
138 boolean sampler_view[PIPE_MAX_SAMPLERS]; /**< True if sampler view exists */
139 ubyte sampler_target[PIPE_MAX_SAMPLERS]; /**< TGSI_TEXTURE_x */
140 ubyte sampler_return_type[PIPE_MAX_SAMPLERS]; /**< TGSI_RETURN_TYPE_x */
141
142 /* Address regs (really implemented with temps) */
143 unsigned num_address_regs;
144 unsigned address_reg_index[MAX_VGPU10_ADDR_REGS];
145
146 /* Output register usage masks */
147 ubyte output_usage_mask[PIPE_MAX_SHADER_OUTPUTS];
148
149 /* Maps TGSI system value indexes to VGPU10 shader input indexes */
150 ubyte system_value_indexes[MAX_SYSTEM_VALUES];
151
152 struct {
153 /* vertex position scale/translation */
154 unsigned out_index; /**< the real position output reg */
155 unsigned tmp_index; /**< the fake/temp position output reg */
156 unsigned so_index; /**< the non-adjusted position output reg */
157 unsigned prescale_scale_index, prescale_trans_index;
158 boolean need_prescale;
159 } vposition;
160
161 /* For vertex shaders only */
162 struct {
163 /* viewport constant */
164 unsigned viewport_index;
165
166 /* temp index of adjusted vertex attributes */
167 unsigned adjusted_input[PIPE_MAX_SHADER_INPUTS];
168 } vs;
169
170 /* For fragment shaders only */
171 struct {
172 unsigned color_out_index[PIPE_MAX_COLOR_BUFS]; /**< the real color output regs */
173 unsigned num_color_outputs;
174 unsigned color_tmp_index; /**< fake/temp color output reg */
175 unsigned alpha_ref_index; /**< immediate constant for alpha ref */
176
177 /* front-face */
178 unsigned face_input_index; /**< real fragment shader face reg (bool) */
179 unsigned face_tmp_index; /**< temp face reg converted to -1 / +1 */
180
181 unsigned pstipple_sampler_unit;
182
183 unsigned fragcoord_input_index; /**< real fragment position input reg */
184 unsigned fragcoord_tmp_index; /**< 1/w modified position temp reg */
185
186 /** Which texture units are doing shadow comparison in the FS code */
187 unsigned shadow_compare_units;
188
189 unsigned sample_id_sys_index; /**< TGSI index of sample id sys value */
190
191 unsigned sample_pos_sys_index; /**< TGSI index of sample pos sys value */
192 unsigned sample_pos_tmp_index; /**< which temp reg has the sample pos */
193 } fs;
194
195 /* For geometry shaders only */
196 struct {
197 VGPU10_PRIMITIVE prim_type;/**< VGPU10 primitive type */
198 VGPU10_PRIMITIVE_TOPOLOGY prim_topology; /**< VGPU10 primitive topology */
199 unsigned input_size; /**< size of input arrays */
200 unsigned prim_id_index; /**< primitive id register index */
201 unsigned max_out_vertices; /**< maximum number of output vertices */
202 } gs;
203
204 /* For vertex or geometry shaders */
205 enum clipping_mode clip_mode;
206 unsigned clip_dist_out_index; /**< clip distance output register index */
207 unsigned clip_dist_tmp_index; /**< clip distance temporary register */
208 unsigned clip_dist_so_index; /**< clip distance shadow copy */
209
210 /** Index of temporary holding the clipvertex coordinate */
211 unsigned clip_vertex_out_index; /**< clip vertex output register index */
212 unsigned clip_vertex_tmp_index; /**< clip vertex temporary index */
213
214 /* user clip plane constant slot indexes */
215 unsigned clip_plane_const[PIPE_MAX_CLIP_PLANES];
216
217 unsigned num_output_writes;
218 boolean constant_color_output;
219
220 boolean uses_flat_interp;
221
222 /* For all shaders: const reg index for RECT coord scaling */
223 unsigned texcoord_scale_index[PIPE_MAX_SAMPLERS];
224
225 /* For all shaders: const reg index for texture buffer size */
226 unsigned texture_buffer_size_index[PIPE_MAX_SAMPLERS];
227
228 /* VS/GS/FS Linkage info */
229 struct shader_linkage linkage;
230
231 bool register_overflow; /**< Set if we exceed a VGPU10 register limit */
232 };
233
234
235 static boolean
236 emit_post_helpers(struct svga_shader_emitter_v10 *emit);
237
238 static boolean
239 emit_vertex(struct svga_shader_emitter_v10 *emit,
240 const struct tgsi_full_instruction *inst);
241
242 static char err_buf[128];
243
244 static boolean
245 expand(struct svga_shader_emitter_v10 *emit)
246 {
247 char *new_buf;
248 unsigned newsize = emit->size * 2;
249
250 if (emit->buf != err_buf)
251 new_buf = REALLOC(emit->buf, emit->size, newsize);
252 else
253 new_buf = NULL;
254
255 if (!new_buf) {
256 emit->ptr = err_buf;
257 emit->buf = err_buf;
258 emit->size = sizeof(err_buf);
259 return FALSE;
260 }
261
262 emit->size = newsize;
263 emit->ptr = new_buf + (emit->ptr - emit->buf);
264 emit->buf = new_buf;
265 return TRUE;
266 }
267
268 /**
269 * Create and initialize a new svga_shader_emitter_v10 object.
270 */
271 static struct svga_shader_emitter_v10 *
272 alloc_emitter(void)
273 {
274 struct svga_shader_emitter_v10 *emit = CALLOC(1, sizeof(*emit));
275
276 if (!emit)
277 return NULL;
278
279 /* to initialize the output buffer */
280 emit->size = 512;
281 if (!expand(emit)) {
282 FREE(emit);
283 return NULL;
284 }
285 return emit;
286 }
287
288 /**
289 * Free an svga_shader_emitter_v10 object.
290 */
291 static void
292 free_emitter(struct svga_shader_emitter_v10 *emit)
293 {
294 assert(emit);
295 FREE(emit->buf); /* will be NULL if translation succeeded */
296 FREE(emit);
297 }
298
299 static inline boolean
300 reserve(struct svga_shader_emitter_v10 *emit,
301 unsigned nr_dwords)
302 {
303 while (emit->ptr - emit->buf + nr_dwords * sizeof(uint32) >= emit->size) {
304 if (!expand(emit))
305 return FALSE;
306 }
307
308 return TRUE;
309 }
310
311 static boolean
312 emit_dword(struct svga_shader_emitter_v10 *emit, uint32 dword)
313 {
314 if (!reserve(emit, 1))
315 return FALSE;
316
317 *(uint32 *)emit->ptr = dword;
318 emit->ptr += sizeof dword;
319 return TRUE;
320 }
321
322 static boolean
323 emit_dwords(struct svga_shader_emitter_v10 *emit,
324 const uint32 *dwords,
325 unsigned nr)
326 {
327 if (!reserve(emit, nr))
328 return FALSE;
329
330 memcpy(emit->ptr, dwords, nr * sizeof *dwords);
331 emit->ptr += nr * sizeof *dwords;
332 return TRUE;
333 }
334
335 /** Return the number of tokens in the emitter's buffer */
336 static unsigned
337 emit_get_num_tokens(const struct svga_shader_emitter_v10 *emit)
338 {
339 return (emit->ptr - emit->buf) / sizeof(unsigned);
340 }
341
342
343 /**
344 * Check for register overflow. If we overflow we'll set an
345 * error flag. This function can be called for register declarations
346 * or for registers used as src/dst instruction operands.
347 * \param operandType register type. One of VGPU10_OPERAND_TYPE_x
348 *        or VGPU10_OPCODE_DCL_x
349 * \param index the register index
350 */
351 static void
352 check_register_index(struct svga_shader_emitter_v10 *emit,
353 unsigned operandType, unsigned index)
354 {
355 bool overflow_before = emit->register_overflow;
356
357 switch (operandType) {
358 case VGPU10_OPERAND_TYPE_TEMP:
359 case VGPU10_OPERAND_TYPE_INDEXABLE_TEMP:
360 case VGPU10_OPCODE_DCL_TEMPS:
361 if (index >= VGPU10_MAX_TEMPS) {
362 emit->register_overflow = TRUE;
363 }
364 break;
365 case VGPU10_OPERAND_TYPE_CONSTANT_BUFFER:
366 case VGPU10_OPCODE_DCL_CONSTANT_BUFFER:
367 if (index >= VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT) {
368 emit->register_overflow = TRUE;
369 }
370 break;
371 case VGPU10_OPERAND_TYPE_INPUT:
372 case VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID:
373 case VGPU10_OPCODE_DCL_INPUT:
374 case VGPU10_OPCODE_DCL_INPUT_SGV:
375 case VGPU10_OPCODE_DCL_INPUT_SIV:
376 case VGPU10_OPCODE_DCL_INPUT_PS:
377 case VGPU10_OPCODE_DCL_INPUT_PS_SGV:
378 case VGPU10_OPCODE_DCL_INPUT_PS_SIV:
379 if ((emit->unit == PIPE_SHADER_VERTEX &&
380 index >= VGPU10_MAX_VS_INPUTS) ||
381 (emit->unit == PIPE_SHADER_GEOMETRY &&
382 index >= VGPU10_MAX_GS_INPUTS) ||
383 (emit->unit == PIPE_SHADER_FRAGMENT &&
384 index >= VGPU10_MAX_FS_INPUTS)) {
385 emit->register_overflow = TRUE;
386 }
387 break;
388 case VGPU10_OPERAND_TYPE_OUTPUT:
389 case VGPU10_OPCODE_DCL_OUTPUT:
390 case VGPU10_OPCODE_DCL_OUTPUT_SGV:
391 case VGPU10_OPCODE_DCL_OUTPUT_SIV:
392 if ((emit->unit == PIPE_SHADER_VERTEX &&
393 index >= VGPU10_MAX_VS_OUTPUTS) ||
394 (emit->unit == PIPE_SHADER_GEOMETRY &&
395 index >= VGPU10_MAX_GS_OUTPUTS) ||
396 (emit->unit == PIPE_SHADER_FRAGMENT &&
397 index >= VGPU10_MAX_FS_OUTPUTS)) {
398 emit->register_overflow = TRUE;
399 }
400 break;
401 case VGPU10_OPERAND_TYPE_SAMPLER:
402 case VGPU10_OPCODE_DCL_SAMPLER:
403 if (index >= VGPU10_MAX_SAMPLERS) {
404 emit->register_overflow = TRUE;
405 }
406 break;
407 case VGPU10_OPERAND_TYPE_RESOURCE:
408 case VGPU10_OPCODE_DCL_RESOURCE:
409 if (index >= VGPU10_MAX_RESOURCES) {
410 emit->register_overflow = TRUE;
411 }
412 break;
413 case VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER:
414 if (index >= MAX_IMMEDIATE_COUNT) {
415 emit->register_overflow = TRUE;
416 }
417 break;
418 case VGPU10_OPERAND_TYPE_OUTPUT_COVERAGE_MASK:
419 /* nothing */
420 break;
421 default:
422 assert(0);
423 ; /* nothing */
424 }
425
426 if (emit->register_overflow && !overflow_before) {
427 debug_printf("svga: vgpu10 register overflow (reg %u, index %u)\n",
428 operandType, index);
429 }
430 }
431
432
433 /**
434 * Examine misc state to determine the clipping mode.
435 */
436 static void
437 determine_clipping_mode(struct svga_shader_emitter_v10 *emit)
438 {
439 if (emit->info.num_written_clipdistance > 0) {
440 emit->clip_mode = CLIP_DISTANCE;
441 }
442 else if (emit->info.writes_clipvertex) {
443 emit->clip_mode = CLIP_VERTEX;
444 }
445 else if (emit->key.clip_plane_enable) {
446 emit->clip_mode = CLIP_LEGACY;
447 }
448 else {
449 emit->clip_mode = CLIP_NONE;
450 }
451 }
452
453
454 /**
455 * For clip distance register declarations and clip distance register
456 * writes we need to mask the declaration usage or instruction writemask
457 * (respectively) against the set of actually enabled clip planes.
458 *
459 * The piglit test spec/glsl-1.30/execution/clipping/vs-clip-distance-enables
460 * has a VS that writes to all 8 clip distance registers, but the plane enable
461 * flags are a subset of that.
462 *
463 * This function is used to apply the plane enable flags to the register
464 * declaration or instruction writemask.
465 *
466 * \param writemask the declaration usage mask or instruction writemask
467 * \param clip_reg_index which clip plane register is being declared/written.
468 * The legal values are 0 and 1 (four clip planes per
469 * register, for a total of 8 clip planes)
470 */
471 static unsigned
472 apply_clip_plane_mask(struct svga_shader_emitter_v10 *emit,
473 unsigned writemask, unsigned clip_reg_index)
474 {
475 unsigned shift;
476
477 assert(clip_reg_index < 2);
478
479 /* four clip planes per clip register: */
480 shift = clip_reg_index * 4;
481 writemask &= ((emit->key.clip_plane_enable >> shift) & 0xf);
482
483 return writemask;
484 }
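/*
 * A worked example of the masking above (illustrative values, not taken from
 * any particular shader): suppose key.clip_plane_enable == 0x2f, i.e. user
 * clip planes 0-3 and 5 are enabled.  For clip_reg_index 1 the shift is 4,
 * so (0x2f >> 4) & 0xf == 0x2, and a TGSI_WRITEMASK_XYZW declaration/write
 * of the second clip distance register is reduced to just the Y component
 * (clip plane 5).  For clip_reg_index 0 the full XYZW mask survives.
 */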
485
486
487 /**
488 * Translate gallium shader type into VGPU10 type.
489 */
490 static VGPU10_PROGRAM_TYPE
491 translate_shader_type(unsigned type)
492 {
493 switch (type) {
494 case PIPE_SHADER_VERTEX:
495 return VGPU10_VERTEX_SHADER;
496 case PIPE_SHADER_GEOMETRY:
497 return VGPU10_GEOMETRY_SHADER;
498 case PIPE_SHADER_FRAGMENT:
499 return VGPU10_PIXEL_SHADER;
500 default:
501 assert(!"Unexpected shader type");
502 return VGPU10_VERTEX_SHADER;
503 }
504 }
505
506
507 /**
508 * Translate a TGSI_OPCODE_x into a VGPU10_OPCODE_x
509 * Note: we only need to translate the opcodes for "simple" instructions,
510 * as seen below. All other opcodes are handled/translated specially.
511 */
512 static VGPU10_OPCODE_TYPE
513 translate_opcode(enum tgsi_opcode opcode)
514 {
515 switch (opcode) {
516 case TGSI_OPCODE_MOV:
517 return VGPU10_OPCODE_MOV;
518 case TGSI_OPCODE_MUL:
519 return VGPU10_OPCODE_MUL;
520 case TGSI_OPCODE_ADD:
521 return VGPU10_OPCODE_ADD;
522 case TGSI_OPCODE_DP3:
523 return VGPU10_OPCODE_DP3;
524 case TGSI_OPCODE_DP4:
525 return VGPU10_OPCODE_DP4;
526 case TGSI_OPCODE_MIN:
527 return VGPU10_OPCODE_MIN;
528 case TGSI_OPCODE_MAX:
529 return VGPU10_OPCODE_MAX;
530 case TGSI_OPCODE_MAD:
531 return VGPU10_OPCODE_MAD;
532 case TGSI_OPCODE_SQRT:
533 return VGPU10_OPCODE_SQRT;
534 case TGSI_OPCODE_FRC:
535 return VGPU10_OPCODE_FRC;
536 case TGSI_OPCODE_FLR:
537 return VGPU10_OPCODE_ROUND_NI;
538 case TGSI_OPCODE_FSEQ:
539 return VGPU10_OPCODE_EQ;
540 case TGSI_OPCODE_FSGE:
541 return VGPU10_OPCODE_GE;
542 case TGSI_OPCODE_FSNE:
543 return VGPU10_OPCODE_NE;
544 case TGSI_OPCODE_DDX:
545 return VGPU10_OPCODE_DERIV_RTX;
546 case TGSI_OPCODE_DDY:
547 return VGPU10_OPCODE_DERIV_RTY;
548 case TGSI_OPCODE_RET:
549 return VGPU10_OPCODE_RET;
550 case TGSI_OPCODE_DIV:
551 return VGPU10_OPCODE_DIV;
552 case TGSI_OPCODE_IDIV:
553 return VGPU10_OPCODE_IDIV;
554 case TGSI_OPCODE_DP2:
555 return VGPU10_OPCODE_DP2;
556 case TGSI_OPCODE_BRK:
557 return VGPU10_OPCODE_BREAK;
558 case TGSI_OPCODE_IF:
559 return VGPU10_OPCODE_IF;
560 case TGSI_OPCODE_ELSE:
561 return VGPU10_OPCODE_ELSE;
562 case TGSI_OPCODE_ENDIF:
563 return VGPU10_OPCODE_ENDIF;
564 case TGSI_OPCODE_CEIL:
565 return VGPU10_OPCODE_ROUND_PI;
566 case TGSI_OPCODE_I2F:
567 return VGPU10_OPCODE_ITOF;
568 case TGSI_OPCODE_NOT:
569 return VGPU10_OPCODE_NOT;
570 case TGSI_OPCODE_TRUNC:
571 return VGPU10_OPCODE_ROUND_Z;
572 case TGSI_OPCODE_SHL:
573 return VGPU10_OPCODE_ISHL;
574 case TGSI_OPCODE_AND:
575 return VGPU10_OPCODE_AND;
576 case TGSI_OPCODE_OR:
577 return VGPU10_OPCODE_OR;
578 case TGSI_OPCODE_XOR:
579 return VGPU10_OPCODE_XOR;
580 case TGSI_OPCODE_CONT:
581 return VGPU10_OPCODE_CONTINUE;
582 case TGSI_OPCODE_EMIT:
583 return VGPU10_OPCODE_EMIT;
584 case TGSI_OPCODE_ENDPRIM:
585 return VGPU10_OPCODE_CUT;
586 case TGSI_OPCODE_BGNLOOP:
587 return VGPU10_OPCODE_LOOP;
588 case TGSI_OPCODE_ENDLOOP:
589 return VGPU10_OPCODE_ENDLOOP;
590 case TGSI_OPCODE_ENDSUB:
591 return VGPU10_OPCODE_RET;
592 case TGSI_OPCODE_NOP:
593 return VGPU10_OPCODE_NOP;
594 case TGSI_OPCODE_END:
595 return VGPU10_OPCODE_RET;
596 case TGSI_OPCODE_F2I:
597 return VGPU10_OPCODE_FTOI;
598 case TGSI_OPCODE_IMAX:
599 return VGPU10_OPCODE_IMAX;
600 case TGSI_OPCODE_IMIN:
601 return VGPU10_OPCODE_IMIN;
602 case TGSI_OPCODE_UDIV:
603 case TGSI_OPCODE_UMOD:
604 case TGSI_OPCODE_MOD:
605 return VGPU10_OPCODE_UDIV;
606 case TGSI_OPCODE_IMUL_HI:
607 return VGPU10_OPCODE_IMUL;
608 case TGSI_OPCODE_INEG:
609 return VGPU10_OPCODE_INEG;
610 case TGSI_OPCODE_ISHR:
611 return VGPU10_OPCODE_ISHR;
612 case TGSI_OPCODE_ISGE:
613 return VGPU10_OPCODE_IGE;
614 case TGSI_OPCODE_ISLT:
615 return VGPU10_OPCODE_ILT;
616 case TGSI_OPCODE_F2U:
617 return VGPU10_OPCODE_FTOU;
618 case TGSI_OPCODE_UADD:
619 return VGPU10_OPCODE_IADD;
620 case TGSI_OPCODE_U2F:
621 return VGPU10_OPCODE_UTOF;
622 case TGSI_OPCODE_UCMP:
623 return VGPU10_OPCODE_MOVC;
624 case TGSI_OPCODE_UMAD:
625 return VGPU10_OPCODE_UMAD;
626 case TGSI_OPCODE_UMAX:
627 return VGPU10_OPCODE_UMAX;
628 case TGSI_OPCODE_UMIN:
629 return VGPU10_OPCODE_UMIN;
630 case TGSI_OPCODE_UMUL:
631 case TGSI_OPCODE_UMUL_HI:
632 return VGPU10_OPCODE_UMUL;
633 case TGSI_OPCODE_USEQ:
634 return VGPU10_OPCODE_IEQ;
635 case TGSI_OPCODE_USGE:
636 return VGPU10_OPCODE_UGE;
637 case TGSI_OPCODE_USHR:
638 return VGPU10_OPCODE_USHR;
639 case TGSI_OPCODE_USLT:
640 return VGPU10_OPCODE_ULT;
641 case TGSI_OPCODE_USNE:
642 return VGPU10_OPCODE_INE;
643 case TGSI_OPCODE_SWITCH:
644 return VGPU10_OPCODE_SWITCH;
645 case TGSI_OPCODE_CASE:
646 return VGPU10_OPCODE_CASE;
647 case TGSI_OPCODE_DEFAULT:
648 return VGPU10_OPCODE_DEFAULT;
649 case TGSI_OPCODE_ENDSWITCH:
650 return VGPU10_OPCODE_ENDSWITCH;
651 case TGSI_OPCODE_FSLT:
652 return VGPU10_OPCODE_LT;
653 case TGSI_OPCODE_ROUND:
654 return VGPU10_OPCODE_ROUND_NE;
655 case TGSI_OPCODE_SAMPLE_POS:
656 /* Note: we never actually get this opcode because there's no GLSL
657 * function to query multisample resource sample positions. There's
658 * only the TGSI_SEMANTIC_SAMPLEPOS system value which contains the
659 * position of the current sample in the render target.
660 */
661 /* FALL-THROUGH */
662 case TGSI_OPCODE_SAMPLE_INFO:
663 /* NOTE: we never actually get this opcode because the GLSL compiler
664 * implements the gl_NumSamples variable with a simple constant in the
665 * constant buffer.
666 */
667 /* FALL-THROUGH */
668 default:
669 assert(!"Unexpected TGSI opcode in translate_opcode()");
670 return VGPU10_OPCODE_NOP;
671 }
672 }
673
674
675 /**
676 * Translate a TGSI register file type into a VGPU10 operand type.
677 * \param array is the TGSI_FILE_TEMPORARY register an array?
678 */
679 static VGPU10_OPERAND_TYPE
680 translate_register_file(enum tgsi_file_type file, boolean array)
681 {
682 switch (file) {
683 case TGSI_FILE_CONSTANT:
684 return VGPU10_OPERAND_TYPE_CONSTANT_BUFFER;
685 case TGSI_FILE_INPUT:
686 return VGPU10_OPERAND_TYPE_INPUT;
687 case TGSI_FILE_OUTPUT:
688 return VGPU10_OPERAND_TYPE_OUTPUT;
689 case TGSI_FILE_TEMPORARY:
690 return array ? VGPU10_OPERAND_TYPE_INDEXABLE_TEMP
691 : VGPU10_OPERAND_TYPE_TEMP;
692 case TGSI_FILE_IMMEDIATE:
693 /* all immediates are 32-bit values at this time, so
694 * VGPU10_OPERAND_TYPE_IMMEDIATE64 is never needed.
695 */
696 return VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER;
697 case TGSI_FILE_SAMPLER:
698 return VGPU10_OPERAND_TYPE_SAMPLER;
699 case TGSI_FILE_SYSTEM_VALUE:
700 return VGPU10_OPERAND_TYPE_INPUT;
701
702 /* XXX TODO more cases to finish */
703
704 default:
705 assert(!"Bad tgsi register file!");
706 return VGPU10_OPERAND_TYPE_NULL;
707 }
708 }
709
710
711 /**
712 * Emit a null dst register
713 */
714 static void
715 emit_null_dst_register(struct svga_shader_emitter_v10 *emit)
716 {
717 VGPU10OperandToken0 operand;
718
719 operand.value = 0;
720 operand.operandType = VGPU10_OPERAND_TYPE_NULL;
721 operand.numComponents = VGPU10_OPERAND_0_COMPONENT;
722
723 emit_dword(emit, operand.value);
724 }
725
726
727 /**
728 * If the given register is a temporary, return the array ID.
729 * Else return zero.
730 */
731 static unsigned
732 get_temp_array_id(const struct svga_shader_emitter_v10 *emit,
733 enum tgsi_file_type file, unsigned index)
734 {
735 if (file == TGSI_FILE_TEMPORARY) {
736 return emit->temp_map[index].arrayId;
737 }
738 else {
739 return 0;
740 }
741 }
742
743
744 /**
745 * If the given register is a temporary, convert the index from a TGSI
746 * TEMPORARY index to a VGPU10 temp index.
747 */
748 static unsigned
749 remap_temp_index(const struct svga_shader_emitter_v10 *emit,
750 enum tgsi_file_type file, unsigned index)
751 {
752 if (file == TGSI_FILE_TEMPORARY) {
753 return emit->temp_map[index].index;
754 }
755 else {
756 return index;
757 }
758 }
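/*
 * Example of the temp_map lookup (hypothetical mapping, for illustration
 * only): if the shader declared TGSI TEMP[7] as part of an indexable array
 * and translation assigned temp_map[7] = { arrayId = 2, index = 3 }, then
 * get_temp_array_id() returns 2 and remap_temp_index() returns 3, producing
 * an indexable-temp operand (x2[3] in SM4 assembly terms) rather than a
 * plain r7.
 */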
759
760
761 /**
762 * Set up the operand0 fields related to indexing (1D, 2D, relative, etc).
763 * Note: the operandType field must already be initialized.
764 */
765 static VGPU10OperandToken0
766 setup_operand0_indexing(struct svga_shader_emitter_v10 *emit,
767 VGPU10OperandToken0 operand0,
768 enum tgsi_file_type file,
769 boolean indirect, boolean index2D,
770 unsigned tempArrayID)
771 {
772 unsigned indexDim, index0Rep, index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
773
774 /*
775 * Compute index dimensions
776 */
777 if (operand0.operandType == VGPU10_OPERAND_TYPE_IMMEDIATE32 ||
778 operand0.operandType == VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID) {
779 /* there's no swizzle for in-line immediates */
780 indexDim = VGPU10_OPERAND_INDEX_0D;
781 assert(operand0.selectionMode == 0);
782 }
783 else {
784 if (index2D ||
785 tempArrayID > 0 ||
786 operand0.operandType == VGPU10_OPERAND_TYPE_CONSTANT_BUFFER) {
787 indexDim = VGPU10_OPERAND_INDEX_2D;
788 }
789 else {
790 indexDim = VGPU10_OPERAND_INDEX_1D;
791 }
792 }
793
794 /*
795 * Compute index representations (immediate, relative, etc).
796 */
797 if (tempArrayID > 0) {
798 assert(file == TGSI_FILE_TEMPORARY);
799 /* First index is the array ID, second index is the array element */
800 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
801 if (indirect) {
802 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
803 }
804 else {
805 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
806 }
807 }
808 else if (indirect) {
809 if (file == TGSI_FILE_CONSTANT) {
810 /* index[0] indicates which constant buffer while index[1] indicates
811 * the position in the constant buffer.
812 */
813 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
814 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
815 }
816 else {
817 /* All other register files are 1-dimensional */
818 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
819 }
820 }
821 else {
822 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
823 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
824 }
825
826 operand0.indexDimension = indexDim;
827 operand0.index0Representation = index0Rep;
828 operand0.index1Representation = index1Rep;
829
830 return operand0;
831 }
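/*
 * For instance (values chosen only to illustrate the cases above): a plain
 * temporary yields indexDimension = 1D with an IMMEDIATE32 index; an element
 * of temp array 2 addressed through an address register yields
 * indexDimension = 2D with index 0 (the array ID) as IMMEDIATE32 and index 1
 * (the element) as IMMEDIATE32_PLUS_RELATIVE; a constant buffer access is
 * likewise 2D, with index 0 selecting the buffer and index 1 the position
 * within it.
 */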
832
833
834 /**
835 * Emit the operand for expressing an address register for indirect indexing.
836 * Note that the address register is really just a temp register.
837 * \param addr_reg_index which address register to use
838 */
839 static void
840 emit_indirect_register(struct svga_shader_emitter_v10 *emit,
841 unsigned addr_reg_index)
842 {
843 unsigned tmp_reg_index;
844 VGPU10OperandToken0 operand0;
845
846 assert(addr_reg_index < MAX_VGPU10_ADDR_REGS);
847
848 tmp_reg_index = emit->address_reg_index[addr_reg_index];
849
850 /* operand0 is a simple temporary register, selecting one component */
851 operand0.value = 0;
852 operand0.operandType = VGPU10_OPERAND_TYPE_TEMP;
853 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
854 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
855 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
856 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
857 operand0.swizzleX = 0;
858 operand0.swizzleY = 1;
859 operand0.swizzleZ = 2;
860 operand0.swizzleW = 3;
861
862 emit_dword(emit, operand0.value);
863 emit_dword(emit, remap_temp_index(emit, TGSI_FILE_TEMPORARY, tmp_reg_index));
864 }
865
866
867 /**
868 * Translate the dst register of a TGSI instruction and emit VGPU10 tokens.
869 * \param emit the emitter context
870 * \param reg the TGSI dst register to translate
871 */
872 static void
873 emit_dst_register(struct svga_shader_emitter_v10 *emit,
874 const struct tgsi_full_dst_register *reg)
875 {
876 enum tgsi_file_type file = reg->Register.File;
877 unsigned index = reg->Register.Index;
878 const enum tgsi_semantic sem_name = emit->info.output_semantic_name[index];
879 const unsigned sem_index = emit->info.output_semantic_index[index];
880 unsigned writemask = reg->Register.WriteMask;
881 const boolean indirect = reg->Register.Indirect;
882 const unsigned tempArrayId = get_temp_array_id(emit, file, index);
883 const boolean index2d = reg->Register.Dimension;
884 VGPU10OperandToken0 operand0;
885
886 if (file == TGSI_FILE_OUTPUT) {
887 if (emit->unit == PIPE_SHADER_VERTEX ||
888 emit->unit == PIPE_SHADER_GEOMETRY) {
889 if (index == emit->vposition.out_index &&
890 emit->vposition.tmp_index != INVALID_INDEX) {
891 /* replace OUTPUT[POS] with TEMP[POS]. We need to store the
892 * vertex position result in a temporary so that we can modify
893 * it in the post_helper() code.
894 */
895 file = TGSI_FILE_TEMPORARY;
896 index = emit->vposition.tmp_index;
897 }
898 else if (sem_name == TGSI_SEMANTIC_CLIPDIST &&
899 emit->clip_dist_tmp_index != INVALID_INDEX) {
900 /* replace OUTPUT[CLIPDIST] with TEMP[CLIPDIST].
901 * We store the clip distance in a temporary first, then
902 * we'll copy it to the shadow copy and to CLIPDIST with the
903 * enabled planes mask in emit_clip_distance_instructions().
904 */
905 file = TGSI_FILE_TEMPORARY;
906 index = emit->clip_dist_tmp_index + sem_index;
907 }
908 else if (sem_name == TGSI_SEMANTIC_CLIPVERTEX &&
909 emit->clip_vertex_tmp_index != INVALID_INDEX) {
910 /* replace the CLIPVERTEX output register with a temporary */
911 assert(emit->clip_mode == CLIP_VERTEX);
912 assert(sem_index == 0);
913 file = TGSI_FILE_TEMPORARY;
914 index = emit->clip_vertex_tmp_index;
915 }
916 }
917 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
918 if (sem_name == TGSI_SEMANTIC_POSITION) {
919 /* Fragment depth output register */
920 operand0.value = 0;
921 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_DEPTH;
922 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
923 operand0.numComponents = VGPU10_OPERAND_1_COMPONENT;
924 emit_dword(emit, operand0.value);
925 return;
926 }
927 else if (sem_name == TGSI_SEMANTIC_SAMPLEMASK) {
928 /* Fragment sample mask output */
929 operand0.value = 0;
930 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_COVERAGE_MASK;
931 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
932 operand0.numComponents = VGPU10_OPERAND_1_COMPONENT;
933 emit_dword(emit, operand0.value);
934 return;
935 }
936 else if (index == emit->fs.color_out_index[0] &&
937 emit->fs.color_tmp_index != INVALID_INDEX) {
938 /* replace OUTPUT[COLOR] with TEMP[COLOR]. We need to store the
939 * fragment color result in a temporary so that we can read it
940 * in the post_helper() code.
941 */
942 file = TGSI_FILE_TEMPORARY;
943 index = emit->fs.color_tmp_index;
944 }
945 else {
946 /* Typically, for fragment shaders, the output register index
947 * matches the color semantic index. But not when we write to
948 * the fragment depth register. In that case, OUT[0] will be
949 * fragdepth and OUT[1] will be the 0th color output. We need
950 * to use the semantic index for color outputs.
951 */
952 assert(sem_name == TGSI_SEMANTIC_COLOR);
953 index = emit->info.output_semantic_index[index];
954
955 emit->num_output_writes++;
956 }
957 }
958 }
959
960 /* init operand tokens to all zero */
961 operand0.value = 0;
962
963 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
964
965 /* the operand has a writemask */
966 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
967
968 /* Which of the four dest components to write to. Note that we can use a
969 * simple assignment here since TGSI writemasks match VGPU10 writemasks.
970 */
971 STATIC_ASSERT(TGSI_WRITEMASK_X == VGPU10_OPERAND_4_COMPONENT_MASK_X);
972 operand0.mask = writemask;
973
974 /* translate TGSI register file type to VGPU10 operand type */
975 operand0.operandType = translate_register_file(file, tempArrayId > 0);
976
977 check_register_index(emit, operand0.operandType, index);
978
979 operand0 = setup_operand0_indexing(emit, operand0, file, indirect,
980 index2d, tempArrayId);
981
982 /* Emit tokens */
983 emit_dword(emit, operand0.value);
984 if (tempArrayId > 0) {
985 emit_dword(emit, tempArrayId);
986 }
987
988 emit_dword(emit, remap_temp_index(emit, file, index));
989
990 if (indirect) {
991 emit_indirect_register(emit, reg->Indirect.Index);
992 }
993 }
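/*
 * To make the token layout concrete (a sketch, not output captured from the
 * driver): writing TEMP[3].xy with no temp array, no indirection and no
 * register substitution emits exactly two dwords -- operand0 with
 * numComponents = 4_COMPONENT, selectionMode = MASK_MODE, mask = XY,
 * operandType = TEMP, indexDimension = 1D and index0Representation =
 * IMMEDIATE32, followed by the remapped temp index.
 */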
994
995
996 /**
997 * Translate a src register of a TGSI instruction and emit VGPU10 tokens.
998 * In quite a few cases, we do register substitution. For example, if
999 * the TGSI register is the front/back-face register, we replace that with
1000 * a temp register containing a value we computed earlier.
1001 */
1002 static void
1003 emit_src_register(struct svga_shader_emitter_v10 *emit,
1004 const struct tgsi_full_src_register *reg)
1005 {
1006 enum tgsi_file_type file = reg->Register.File;
1007 unsigned index = reg->Register.Index;
1008 const boolean indirect = reg->Register.Indirect;
1009 const unsigned tempArrayId = get_temp_array_id(emit, file, index);
1010 const boolean index2d = reg->Register.Dimension;
1011 const unsigned swizzleX = reg->Register.SwizzleX;
1012 const unsigned swizzleY = reg->Register.SwizzleY;
1013 const unsigned swizzleZ = reg->Register.SwizzleZ;
1014 const unsigned swizzleW = reg->Register.SwizzleW;
1015 const boolean absolute = reg->Register.Absolute;
1016 const boolean negate = reg->Register.Negate;
1017 bool is_prim_id = FALSE;
1018
1019 VGPU10OperandToken0 operand0;
1020 VGPU10OperandToken1 operand1;
1021
1022 if (emit->unit == PIPE_SHADER_FRAGMENT){
1023 if (file == TGSI_FILE_INPUT) {
1024 if (index == emit->fs.face_input_index) {
1025 /* Replace INPUT[FACE] with TEMP[FACE] */
1026 file = TGSI_FILE_TEMPORARY;
1027 index = emit->fs.face_tmp_index;
1028 }
1029 else if (index == emit->fs.fragcoord_input_index) {
1030 /* Replace INPUT[POSITION] with TEMP[POSITION] */
1031 file = TGSI_FILE_TEMPORARY;
1032 index = emit->fs.fragcoord_tmp_index;
1033 }
1034 else {
1035 /* We remap fragment shader inputs so that FS input indexes
1036 * match up with VS/GS output indexes.
1037 */
1038 index = emit->linkage.input_map[index];
1039 }
1040 }
1041 else if (file == TGSI_FILE_SYSTEM_VALUE) {
1042 if (index == emit->fs.sample_pos_sys_index) {
1043 assert(emit->version >= 41);
1044 /* Current sample position is in a temp register */
1045 file = TGSI_FILE_TEMPORARY;
1046 index = emit->fs.sample_pos_tmp_index;
1047 }
1048 else {
1049 /* Map the TGSI system value to a VGPU10 input register */
1050 assert(index < ARRAY_SIZE(emit->system_value_indexes));
1051 file = TGSI_FILE_INPUT;
1052 index = emit->system_value_indexes[index];
1053 }
1054 }
1055 }
1056 else if (emit->unit == PIPE_SHADER_GEOMETRY) {
1057 if (file == TGSI_FILE_INPUT) {
1058 is_prim_id = (index == emit->gs.prim_id_index);
1059 index = emit->linkage.input_map[index];
1060 }
1061 }
1062 else if (emit->unit == PIPE_SHADER_VERTEX) {
1063 if (file == TGSI_FILE_INPUT) {
1064 /* if input is adjusted... */
1065 if ((emit->key.vs.adjust_attrib_w_1 |
1066 emit->key.vs.adjust_attrib_itof |
1067 emit->key.vs.adjust_attrib_utof |
1068 emit->key.vs.attrib_is_bgra |
1069 emit->key.vs.attrib_puint_to_snorm |
1070 emit->key.vs.attrib_puint_to_uscaled |
1071 emit->key.vs.attrib_puint_to_sscaled) & (1 << index)) {
1072 file = TGSI_FILE_TEMPORARY;
1073 index = emit->vs.adjusted_input[index];
1074 }
1075 }
1076 else if (file == TGSI_FILE_SYSTEM_VALUE) {
1077 /* Map the TGSI system value to a VGPU10 input register */
1078 assert(index < ARRAY_SIZE(emit->system_value_indexes));
1079 file = TGSI_FILE_INPUT;
1080 index = emit->system_value_indexes[index];
1081 }
1082 }
1083
1084 operand0.value = operand1.value = 0;
1085
1086 if (is_prim_id) {
1087 /* NOTE: we should be using VGPU10_OPERAND_1_COMPONENT here, but
1088 * our virtual GPU accepts this as-is.
1089 */
1090 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
1091 operand0.operandType = VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID;
1092 }
1093 else {
1094 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1095 operand0.operandType = translate_register_file(file, tempArrayId > 0);
1096 }
1097
1098 operand0 = setup_operand0_indexing(emit, operand0, file, indirect,
1099 index2d, tempArrayId);
1100
1101 if (operand0.operandType != VGPU10_OPERAND_TYPE_IMMEDIATE32 &&
1102 operand0.operandType != VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID) {
1103 /* there's no swizzle for in-line immediates */
1104 if (swizzleX == swizzleY &&
1105 swizzleX == swizzleZ &&
1106 swizzleX == swizzleW) {
1107 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
1108 }
1109 else {
1110 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
1111 }
1112
1113 operand0.swizzleX = swizzleX;
1114 operand0.swizzleY = swizzleY;
1115 operand0.swizzleZ = swizzleZ;
1116 operand0.swizzleW = swizzleW;
1117
1118 if (absolute || negate) {
1119 operand0.extended = 1;
1120 operand1.extendedOperandType = VGPU10_EXTENDED_OPERAND_MODIFIER;
1121 if (absolute && !negate)
1122 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_ABS;
1123 if (!absolute && negate)
1124 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_NEG;
1125 if (absolute && negate)
1126 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_ABSNEG;
1127 }
1128 }
1129
1130 /* Emit the operand tokens */
1131 emit_dword(emit, operand0.value);
1132 if (operand0.extended)
1133 emit_dword(emit, operand1.value);
1134
1135 if (operand0.operandType == VGPU10_OPERAND_TYPE_IMMEDIATE32) {
1136 /* Emit the four float/int in-line immediate values */
1137 unsigned *c;
1138 assert(index < ARRAY_SIZE(emit->immediates));
1139 assert(file == TGSI_FILE_IMMEDIATE);
1140 assert(swizzleX < 4);
1141 assert(swizzleY < 4);
1142 assert(swizzleZ < 4);
1143 assert(swizzleW < 4);
1144 c = (unsigned *) emit->immediates[index];
1145 emit_dword(emit, c[swizzleX]);
1146 emit_dword(emit, c[swizzleY]);
1147 emit_dword(emit, c[swizzleZ]);
1148 emit_dword(emit, c[swizzleW]);
1149 }
1150 else if (operand0.indexDimension >= VGPU10_OPERAND_INDEX_1D) {
1151 /* Emit the register index(es) */
1152 if (index2d ||
1153 operand0.operandType == VGPU10_OPERAND_TYPE_CONSTANT_BUFFER) {
1154 emit_dword(emit, reg->Dimension.Index);
1155 }
1156
1157 if (tempArrayId > 0) {
1158 emit_dword(emit, tempArrayId);
1159 }
1160
1161 emit_dword(emit, remap_temp_index(emit, file, index));
1162
1163 if (indirect) {
1164 emit_indirect_register(emit, reg->Indirect.Index);
1165 }
1166 }
1167 }
1168
1169
1170 /**
1171 * Emit a resource operand (for use with a SAMPLE instruction).
1172 */
1173 static void
1174 emit_resource_register(struct svga_shader_emitter_v10 *emit,
1175 unsigned resource_number)
1176 {
1177 VGPU10OperandToken0 operand0;
1178
1179 check_register_index(emit, VGPU10_OPERAND_TYPE_RESOURCE, resource_number);
1180
1181 /* init */
1182 operand0.value = 0;
1183
1184 operand0.operandType = VGPU10_OPERAND_TYPE_RESOURCE;
1185 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1186 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1187 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
1188 operand0.swizzleX = VGPU10_COMPONENT_X;
1189 operand0.swizzleY = VGPU10_COMPONENT_Y;
1190 operand0.swizzleZ = VGPU10_COMPONENT_Z;
1191 operand0.swizzleW = VGPU10_COMPONENT_W;
1192
1193 emit_dword(emit, operand0.value);
1194 emit_dword(emit, resource_number);
1195 }
1196
1197
1198 /**
1199 * Emit a sampler operand (for use with a SAMPLE instruction).
1200 */
1201 static void
1202 emit_sampler_register(struct svga_shader_emitter_v10 *emit,
1203 unsigned sampler_number)
1204 {
1205 VGPU10OperandToken0 operand0;
1206
1207 check_register_index(emit, VGPU10_OPERAND_TYPE_SAMPLER, sampler_number);
1208
1209 /* init */
1210 operand0.value = 0;
1211
1212 operand0.operandType = VGPU10_OPERAND_TYPE_SAMPLER;
1213 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1214
1215 emit_dword(emit, operand0.value);
1216 emit_dword(emit, sampler_number);
1217 }
1218
1219
1220 /**
1221 * Emit an operand which reads the IS_FRONT_FACING register.
1222 */
1223 static void
1224 emit_face_register(struct svga_shader_emitter_v10 *emit)
1225 {
1226 VGPU10OperandToken0 operand0;
1227 unsigned index = emit->linkage.input_map[emit->fs.face_input_index];
1228
1229 /* init */
1230 operand0.value = 0;
1231
1232 operand0.operandType = VGPU10_OPERAND_TYPE_INPUT;
1233 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1234 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
1235 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1236
1237 operand0.swizzleX = VGPU10_COMPONENT_X;
1238 operand0.swizzleY = VGPU10_COMPONENT_X;
1239 operand0.swizzleZ = VGPU10_COMPONENT_X;
1240 operand0.swizzleW = VGPU10_COMPONENT_X;
1241
1242 emit_dword(emit, operand0.value);
1243 emit_dword(emit, index);
1244 }
1245
1246
1247 /**
1248 * Emit tokens for the "rasterizer" register used by the SAMPLE_POS
1249 * instruction.
1250 */
1251 static void
1252 emit_rasterizer_register(struct svga_shader_emitter_v10 *emit)
1253 {
1254 VGPU10OperandToken0 operand0;
1255
1256 /* init */
1257 operand0.value = 0;
1258
1259 /* No register index for the rasterizer (there's only one) */
1260 operand0.operandType = VGPU10_OPERAND_TYPE_RASTERIZER;
1261 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
1262 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1263 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
1264 operand0.swizzleX = VGPU10_COMPONENT_X;
1265 operand0.swizzleY = VGPU10_COMPONENT_Y;
1266 operand0.swizzleZ = VGPU10_COMPONENT_Z;
1267 operand0.swizzleW = VGPU10_COMPONENT_W;
1268
1269 emit_dword(emit, operand0.value);
1270 }
1271
1272
1273 /**
1274 * Emit the token for a VGPU10 opcode.
1275 * \param saturate clamp result to [0,1]?
1276 */
1277 static void
1278 emit_opcode(struct svga_shader_emitter_v10 *emit,
1279 VGPU10_OPCODE_TYPE vgpu10_opcode, boolean saturate)
1280 {
1281 VGPU10OpcodeToken0 token0;
1282
1283 token0.value = 0; /* init all fields to zero */
1284 token0.opcodeType = vgpu10_opcode;
1285 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1286 token0.saturate = saturate;
1287
1288 emit_dword(emit, token0.value);
1289 }
1290
1291
1292 /**
1293 * Emit the token for a VGPU10 resinfo instruction.
1294 * \param modifier return type modifier, _uint or _rcpFloat.
1295 * TODO: We may want to remove this parameter if it will
1296 * only ever be used as _uint.
1297 */
1298 static void
1299 emit_opcode_resinfo(struct svga_shader_emitter_v10 *emit,
1300 VGPU10_RESINFO_RETURN_TYPE modifier)
1301 {
1302 VGPU10OpcodeToken0 token0;
1303
1304 token0.value = 0; /* init all fields to zero */
1305 token0.opcodeType = VGPU10_OPCODE_RESINFO;
1306 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1307 token0.resinfoReturnType = modifier;
1308
1309 emit_dword(emit, token0.value);
1310 }
1311
1312
1313 /**
1314 * Emit opcode tokens for a texture sample instruction. Texture instructions
1315 * can be rather complicated (texel offsets, etc) so we have this specialized
1316 * function.
1317 */
1318 static void
1319 emit_sample_opcode(struct svga_shader_emitter_v10 *emit,
1320 unsigned vgpu10_opcode, boolean saturate,
1321 const int offsets[3])
1322 {
1323 VGPU10OpcodeToken0 token0;
1324 VGPU10OpcodeToken1 token1;
1325
1326 token0.value = 0; /* init all fields to zero */
1327 token0.opcodeType = vgpu10_opcode;
1328 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1329 token0.saturate = saturate;
1330
1331 if (offsets[0] || offsets[1] || offsets[2]) {
1332 assert(offsets[0] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1333 assert(offsets[1] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1334 assert(offsets[2] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1335 assert(offsets[0] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1336 assert(offsets[1] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1337 assert(offsets[2] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1338
1339 token0.extended = 1;
1340 token1.value = 0;
1341 token1.opcodeType = VGPU10_EXTENDED_OPCODE_SAMPLE_CONTROLS;
1342 token1.offsetU = offsets[0];
1343 token1.offsetV = offsets[1];
1344 token1.offsetW = offsets[2];
1345 }
1346
1347 emit_dword(emit, token0.value);
1348 if (token0.extended) {
1349 emit_dword(emit, token1.value);
1350 }
1351 }
1352
1353
1354 /**
1355 * Emit a DISCARD opcode token.
1356 * If nonzero is set, we'll discard the fragment if the X component is not 0.
1357 * Otherwise, we'll discard the fragment if the X component is 0.
1358 */
1359 static void
1360 emit_discard_opcode(struct svga_shader_emitter_v10 *emit, boolean nonzero)
1361 {
1362 VGPU10OpcodeToken0 opcode0;
1363
1364 opcode0.value = 0;
1365 opcode0.opcodeType = VGPU10_OPCODE_DISCARD;
1366 if (nonzero)
1367 opcode0.testBoolean = VGPU10_INSTRUCTION_TEST_NONZERO;
1368
1369 emit_dword(emit, opcode0.value);
1370 }
1371
1372
1373 /**
1374 * We need to call this before we begin emitting a VGPU10 instruction.
1375 */
1376 static void
1377 begin_emit_instruction(struct svga_shader_emitter_v10 *emit)
1378 {
1379 assert(emit->inst_start_token == 0);
1380 /* Save location of the instruction's VGPU10OpcodeToken0 token.
1381 * Note, we can't save a pointer because it would become invalid if
1382 * we have to realloc the output buffer.
1383 */
1384 emit->inst_start_token = emit_get_num_tokens(emit);
1385 }
1386
1387
1388 /**
1389 * We need to call this after we emit the last token of a VGPU10 instruction.
1390 * This function patches in the opcode token's instructionLength field.
1391 */
1392 static void
1393 end_emit_instruction(struct svga_shader_emitter_v10 *emit)
1394 {
1395 VGPU10OpcodeToken0 *tokens = (VGPU10OpcodeToken0 *) emit->buf;
1396 unsigned inst_length;
1397
1398 assert(emit->inst_start_token > 0);
1399
1400 if (emit->discard_instruction) {
1401 /* Back up the emit->ptr to where this instruction started so
1402 * that we discard the current instruction.
1403 */
1404 emit->ptr = (char *) (tokens + emit->inst_start_token);
1405 }
1406 else {
1407 /* Compute instruction length and patch that into the start of
1408 * the instruction.
1409 */
1410 inst_length = emit_get_num_tokens(emit) - emit->inst_start_token;
1411
1412 assert(inst_length > 0);
1413
1414 tokens[emit->inst_start_token].instructionLength = inst_length;
1415 }
1416
1417 emit->inst_start_token = 0; /* reset to zero for error checking */
1418 emit->discard_instruction = FALSE;
1419 }
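/*
 * The three functions above bracket every instruction.  A minimal sketch of
 * the usual emit sequence (dst/src are tgsi_full_dst/src_register values
 * built with the make_*_reg() helpers defined further below):
 *
 *    begin_emit_instruction(emit);
 *    emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
 *    emit_dst_register(emit, &dst);
 *    emit_src_register(emit, &src);
 *    end_emit_instruction(emit);   <-- patches instructionLength
 */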
1420
1421
1422 /**
1423 * Return index for a free temporary register.
1424 */
1425 static unsigned
1426 get_temp_index(struct svga_shader_emitter_v10 *emit)
1427 {
1428 assert(emit->internal_temp_count < MAX_INTERNAL_TEMPS);
1429 return emit->num_shader_temps + emit->internal_temp_count++;
1430 }
1431
1432
1433 /**
1434 * Release the temporaries which were generated by get_temp_index().
1435 */
1436 static void
1437 free_temp_indexes(struct svga_shader_emitter_v10 *emit)
1438 {
1439 emit->internal_temp_count = 0;
1440 }
1441
1442
1443 /**
1444 * Create a tgsi_full_src_register.
1445 */
1446 static struct tgsi_full_src_register
1447 make_src_reg(enum tgsi_file_type file, unsigned index)
1448 {
1449 struct tgsi_full_src_register reg;
1450
1451 memset(&reg, 0, sizeof(reg));
1452 reg.Register.File = file;
1453 reg.Register.Index = index;
1454 reg.Register.SwizzleX = TGSI_SWIZZLE_X;
1455 reg.Register.SwizzleY = TGSI_SWIZZLE_Y;
1456 reg.Register.SwizzleZ = TGSI_SWIZZLE_Z;
1457 reg.Register.SwizzleW = TGSI_SWIZZLE_W;
1458 return reg;
1459 }
1460
1461
1462 /**
1463 * Create a tgsi_full_src_register with a swizzle such that all four
1464 * vector components have the same scalar value.
1465 */
1466 static struct tgsi_full_src_register
1467 make_src_scalar_reg(enum tgsi_file_type file, unsigned index, unsigned component)
1468 {
1469 struct tgsi_full_src_register reg;
1470
1471 assert(component >= TGSI_SWIZZLE_X);
1472 assert(component <= TGSI_SWIZZLE_W);
1473
1474 memset(&reg, 0, sizeof(reg));
1475 reg.Register.File = file;
1476 reg.Register.Index = index;
1477 reg.Register.SwizzleX =
1478 reg.Register.SwizzleY =
1479 reg.Register.SwizzleZ =
1480 reg.Register.SwizzleW = component;
1481 return reg;
1482 }
1483
1484
1485 /**
1486 * Create a tgsi_full_src_register for a temporary.
1487 */
1488 static struct tgsi_full_src_register
1489 make_src_temp_reg(unsigned index)
1490 {
1491 return make_src_reg(TGSI_FILE_TEMPORARY, index);
1492 }
1493
1494
1495 /**
1496 * Create a tgsi_full_src_register for a constant.
1497 */
1498 static struct tgsi_full_src_register
1499 make_src_const_reg(unsigned index)
1500 {
1501 return make_src_reg(TGSI_FILE_CONSTANT, index);
1502 }
1503
1504
1505 /**
1506 * Create a tgsi_full_src_register for an immediate constant.
1507 */
1508 static struct tgsi_full_src_register
1509 make_src_immediate_reg(unsigned index)
1510 {
1511 return make_src_reg(TGSI_FILE_IMMEDIATE, index);
1512 }
1513
1514
1515 /**
1516 * Create a tgsi_full_dst_register.
1517 */
1518 static struct tgsi_full_dst_register
1519 make_dst_reg(enum tgsi_file_type file, unsigned index)
1520 {
1521 struct tgsi_full_dst_register reg;
1522
1523 memset(&reg, 0, sizeof(reg));
1524 reg.Register.File = file;
1525 reg.Register.Index = index;
1526 reg.Register.WriteMask = TGSI_WRITEMASK_XYZW;
1527 return reg;
1528 }
1529
1530
1531 /**
1532 * Create a tgsi_full_dst_register for a temporary.
1533 */
1534 static struct tgsi_full_dst_register
1535 make_dst_temp_reg(unsigned index)
1536 {
1537 return make_dst_reg(TGSI_FILE_TEMPORARY, index);
1538 }
1539
1540
1541 /**
1542 * Create a tgsi_full_dst_register for an output.
1543 */
1544 static struct tgsi_full_dst_register
1545 make_dst_output_reg(unsigned index)
1546 {
1547 return make_dst_reg(TGSI_FILE_OUTPUT, index);
1548 }
1549
1550
1551 /**
1552 * Create negated tgsi_full_src_register.
1553 */
1554 static struct tgsi_full_src_register
1555 negate_src(const struct tgsi_full_src_register *reg)
1556 {
1557 struct tgsi_full_src_register neg = *reg;
1558 neg.Register.Negate = !reg->Register.Negate;
1559 return neg;
1560 }
1561
1562 /**
1563 * Create absolute value of a tgsi_full_src_register.
1564 */
1565 static struct tgsi_full_src_register
1566 absolute_src(const struct tgsi_full_src_register *reg)
1567 {
1568 struct tgsi_full_src_register absolute = *reg;
1569 absolute.Register.Absolute = 1;
1570 return absolute;
1571 }
1572
1573
1574 /** Return the named swizzle term from the src register */
1575 static inline unsigned
1576 get_swizzle(const struct tgsi_full_src_register *reg, enum tgsi_swizzle term)
1577 {
1578 switch (term) {
1579 case TGSI_SWIZZLE_X:
1580 return reg->Register.SwizzleX;
1581 case TGSI_SWIZZLE_Y:
1582 return reg->Register.SwizzleY;
1583 case TGSI_SWIZZLE_Z:
1584 return reg->Register.SwizzleZ;
1585 case TGSI_SWIZZLE_W:
1586 return reg->Register.SwizzleW;
1587 default:
1588 assert(!"Bad swizzle");
1589 return TGSI_SWIZZLE_X;
1590 }
1591 }
1592
1593
1594 /**
1595 * Create swizzled tgsi_full_src_register.
1596 */
1597 static struct tgsi_full_src_register
1598 swizzle_src(const struct tgsi_full_src_register *reg,
1599 enum tgsi_swizzle swizzleX, enum tgsi_swizzle swizzleY,
1600 enum tgsi_swizzle swizzleZ, enum tgsi_swizzle swizzleW)
1601 {
1602 struct tgsi_full_src_register swizzled = *reg;
1603 /* Note: we swizzle the current swizzle */
1604 swizzled.Register.SwizzleX = get_swizzle(reg, swizzleX);
1605 swizzled.Register.SwizzleY = get_swizzle(reg, swizzleY);
1606 swizzled.Register.SwizzleZ = get_swizzle(reg, swizzleZ);
1607 swizzled.Register.SwizzleW = get_swizzle(reg, swizzleW);
1608 return swizzled;
1609 }
1610
1611
1612 /**
1613 * Create swizzled tgsi_full_src_register where all the swizzle
1614 * terms are the same.
1615 */
1616 static struct tgsi_full_src_register
1617 scalar_src(const struct tgsi_full_src_register *reg, enum tgsi_swizzle swizzle)
1618 {
1619 struct tgsi_full_src_register swizzled = *reg;
1620 /* Note: we swizzle the current swizzle */
1621 swizzled.Register.SwizzleX =
1622 swizzled.Register.SwizzleY =
1623 swizzled.Register.SwizzleZ =
1624 swizzled.Register.SwizzleW = get_swizzle(reg, swizzle);
1625 return swizzled;
1626 }
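/*
 * Because these helpers compose with the register's existing swizzle, the
 * result is a swizzle of a swizzle.  For example (illustrative only): if reg
 * already carries the swizzle .wzyx, then scalar_src(&reg, TGSI_SWIZZLE_Y)
 * yields .zzzz, and swizzle_src(&reg, X, X, Y, Y) yields .wwzz.
 */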
1627
1628
1629 /**
1630 * Create new tgsi_full_dst_register with writemask.
1631 * \param mask bitmask of TGSI_WRITEMASK_[XYZW]
1632 */
1633 static struct tgsi_full_dst_register
1634 writemask_dst(const struct tgsi_full_dst_register *reg, unsigned mask)
1635 {
1636 struct tgsi_full_dst_register masked = *reg;
1637 masked.Register.WriteMask = mask;
1638 return masked;
1639 }
1640
1641
1642 /**
1643 * Check if the register's swizzle is XXXX, YYYY, ZZZZ, or WWWW.
1644 */
1645 static boolean
1646 same_swizzle_terms(const struct tgsi_full_src_register *reg)
1647 {
1648 return (reg->Register.SwizzleX == reg->Register.SwizzleY &&
1649 reg->Register.SwizzleY == reg->Register.SwizzleZ &&
1650 reg->Register.SwizzleZ == reg->Register.SwizzleW);
1651 }
1652
1653
1654 /**
1655 * Search the vector for the value 'x' and return its position.
1656 */
1657 static int
1658 find_imm_in_vec4(const union tgsi_immediate_data vec[4],
1659 union tgsi_immediate_data x)
1660 {
1661 unsigned i;
1662 for (i = 0; i < 4; i++) {
1663 if (vec[i].Int == x.Int)
1664 return i;
1665 }
1666 return -1;
1667 }
1668
1669
1670 /**
1671 * Helper used by make_immediate_reg(), make_immediate_reg_4().
1672 */
1673 static int
1674 find_immediate(struct svga_shader_emitter_v10 *emit,
1675 union tgsi_immediate_data x, unsigned startIndex)
1676 {
1677 const unsigned endIndex = emit->num_immediates;
1678 unsigned i;
1679
1680 assert(emit->immediates_emitted);
1681
1682 /* Search immediates for x, y, z, w */
1683 for (i = startIndex; i < endIndex; i++) {
1684 if (x.Int == emit->immediates[i][0].Int ||
1685 x.Int == emit->immediates[i][1].Int ||
1686 x.Int == emit->immediates[i][2].Int ||
1687 x.Int == emit->immediates[i][3].Int) {
1688 return i;
1689 }
1690 }
1691 /* Should never try to use an immediate value that wasn't pre-declared */
1692 assert(!"find_immediate() failed!");
1693 return -1;
1694 }
1695
1696
1697 /**
1698 * Return a tgsi_full_src_register for an immediate/literal
1699 * union tgsi_immediate_data[4] value.
1700 * Note: the values must have been previously declared/allocated in
1701 * emit_pre_helpers(). And, all of x,y,z,w must be located in the same
1702 * vec4 immediate.
1703 */
1704 static struct tgsi_full_src_register
1705 make_immediate_reg_4(struct svga_shader_emitter_v10 *emit,
1706 const union tgsi_immediate_data imm[4])
1707 {
1708 struct tgsi_full_src_register reg;
1709 unsigned i;
1710
1711 for (i = 0; i < emit->num_common_immediates; i++) {
1712 /* search for first component value */
1713 int immpos = find_immediate(emit, imm[0], i);
1714 int x, y, z, w;
1715
1716 assert(immpos >= 0);
1717
1718 /* find remaining components within the immediate vector */
1719 x = find_imm_in_vec4(emit->immediates[immpos], imm[0]);
1720 y = find_imm_in_vec4(emit->immediates[immpos], imm[1]);
1721 z = find_imm_in_vec4(emit->immediates[immpos], imm[2]);
1722 w = find_imm_in_vec4(emit->immediates[immpos], imm[3]);
1723
1724 if (x >= 0 && y >= 0 && z >= 0 && w >= 0) {
1725 /* found them all */
1726 memset(&reg, 0, sizeof(reg));
1727 reg.Register.File = TGSI_FILE_IMMEDIATE;
1728 reg.Register.Index = immpos;
1729 reg.Register.SwizzleX = x;
1730 reg.Register.SwizzleY = y;
1731 reg.Register.SwizzleZ = z;
1732 reg.Register.SwizzleW = w;
1733 return reg;
1734 }
1735 /* else, keep searching */
1736 }
1737
1738 assert(!"Failed to find immediate register!");
1739
1740 /* Just return IMM[0].xxxx */
1741 memset(&reg, 0, sizeof(reg));
1742 reg.Register.File = TGSI_FILE_IMMEDIATE;
1743 return reg;
1744 }
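/*
 * Worked example (hypothetical immediate pool): if the common immediates
 * include IMM[1] = {0.0, 1.0, 0.5, -1.0}, then a request for
 * {0.5, 1.0, 0.5, 0.0} resolves to IMM[1] with swizzle .zyzx -- every
 * requested component must already live somewhere in a single pre-declared
 * vec4, which is why the values are allocated up front before translation.
 */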
1745
1746
1747 /**
1748 * Return a tgsi_full_src_register for an immediate/literal
1749 * union tgsi_immediate_data value of the form {value, value, value, value}.
1750 * \sa make_immediate_reg_4() regarding allowed values.
1751 */
1752 static struct tgsi_full_src_register
1753 make_immediate_reg(struct svga_shader_emitter_v10 *emit,
1754 union tgsi_immediate_data value)
1755 {
1756 struct tgsi_full_src_register reg;
1757 int immpos = find_immediate(emit, value, 0);
1758
1759 assert(immpos >= 0);
1760
1761 memset(&reg, 0, sizeof(reg));
1762 reg.Register.File = TGSI_FILE_IMMEDIATE;
1763 reg.Register.Index = immpos;
1764 reg.Register.SwizzleX =
1765 reg.Register.SwizzleY =
1766 reg.Register.SwizzleZ =
1767 reg.Register.SwizzleW = find_imm_in_vec4(emit->immediates[immpos], value);
1768
1769 return reg;
1770 }
1771
1772
1773 /**
1774 * Return a tgsi_full_src_register for an immediate/literal float[4] value.
1775 * \sa make_immediate_reg_4() regarding allowed values.
1776 */
1777 static struct tgsi_full_src_register
1778 make_immediate_reg_float4(struct svga_shader_emitter_v10 *emit,
1779 float x, float y, float z, float w)
1780 {
1781 union tgsi_immediate_data imm[4];
1782 imm[0].Float = x;
1783 imm[1].Float = y;
1784 imm[2].Float = z;
1785 imm[3].Float = w;
1786 return make_immediate_reg_4(emit, imm);
1787 }
1788
1789
1790 /**
1791 * Return a tgsi_full_src_register for an immediate/literal float value
1792 * of the form {value, value, value, value}.
1793 * \sa make_immediate_reg_4() regarding allowed values.
1794 */
1795 static struct tgsi_full_src_register
1796 make_immediate_reg_float(struct svga_shader_emitter_v10 *emit, float value)
1797 {
1798 union tgsi_immediate_data imm;
1799 imm.Float = value;
1800 return make_immediate_reg(emit, imm);
1801 }
1802
1803
1804 /**
1805 * Return a tgsi_full_src_register for an immediate/literal int[4] vector.
1806 */
1807 static struct tgsi_full_src_register
1808 make_immediate_reg_int4(struct svga_shader_emitter_v10 *emit,
1809 int x, int y, int z, int w)
1810 {
1811 union tgsi_immediate_data imm[4];
1812 imm[0].Int = x;
1813 imm[1].Int = y;
1814 imm[2].Int = z;
1815 imm[3].Int = w;
1816 return make_immediate_reg_4(emit, imm);
1817 }
1818
1819
1820 /**
1821 * Return a tgsi_full_src_register for an immediate/literal int value
1822 * of the form {value, value, value, value}.
1823 * \sa make_immediate_reg_4() regarding allowed values.
1824 */
1825 static struct tgsi_full_src_register
1826 make_immediate_reg_int(struct svga_shader_emitter_v10 *emit, int value)
1827 {
1828 union tgsi_immediate_data imm;
1829 imm.Int = value;
1830 return make_immediate_reg(emit, imm);
1831 }
1832
1833
1834 /**
1835 * Allocate space for a union tgsi_immediate_data[4] immediate.
1836 * \return the index/position of the immediate.
1837 */
1838 static unsigned
1839 alloc_immediate_4(struct svga_shader_emitter_v10 *emit,
1840 const union tgsi_immediate_data imm[4])
1841 {
1842 unsigned n = emit->num_immediates++;
1843 assert(!emit->immediates_emitted);
1844 assert(n < ARRAY_SIZE(emit->immediates));
1845 emit->immediates[n][0] = imm[0];
1846 emit->immediates[n][1] = imm[1];
1847 emit->immediates[n][2] = imm[2];
1848 emit->immediates[n][3] = imm[3];
1849 return n;
1850 }
1851
1852
1853 /**
1854 * Allocate space for a float[4] immediate.
1855 * \return the index/position of the immediate.
1856 */
1857 static unsigned
1858 alloc_immediate_float4(struct svga_shader_emitter_v10 *emit,
1859 float x, float y, float z, float w)
1860 {
1861 union tgsi_immediate_data imm[4];
1862 imm[0].Float = x;
1863 imm[1].Float = y;
1864 imm[2].Float = z;
1865 imm[3].Float = w;
1866 return alloc_immediate_4(emit, imm);
1867 }
1868
1869
1870 /**
1871 * Allocate space for an int[4] immediate.
1872 * \return the index/position of the immediate.
1873 */
1874 static unsigned
1875 alloc_immediate_int4(struct svga_shader_emitter_v10 *emit,
1876 int x, int y, int z, int w)
1877 {
1878 union tgsi_immediate_data imm[4];
1879 imm[0].Int = x;
1880 imm[1].Int = y;
1881 imm[2].Int = z;
1882 imm[3].Int = w;
1883 return alloc_immediate_4(emit, imm);
1884 }
1885
1886
1887 /**
1888 * Allocate a shader input to store a system value.
1889 */
1890 static unsigned
1891 alloc_system_value_index(struct svga_shader_emitter_v10 *emit, unsigned index)
1892 {
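/* System-value inputs are assigned register indexes just past the last
 * general shader input (linkage.input_map_max), so the first system value
 * is placed at input_map_max + 1.
 */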
1893 const unsigned n = emit->linkage.input_map_max + 1 + index;
1894 assert(index < ARRAY_SIZE(emit->system_value_indexes));
1895 emit->system_value_indexes[index] = n;
1896 return n;
1897 }
1898
1899
1900 /**
1901 * Translate a TGSI immediate value (union tgsi_immediate_data[4]) to VGPU10.
1902 */
1903 static boolean
1904 emit_vgpu10_immediate(struct svga_shader_emitter_v10 *emit,
1905 const struct tgsi_full_immediate *imm)
1906 {
1907 /* We don't actually emit any code here. We just save the
1908 * immediate values and emit them later.
1909 */
1910 alloc_immediate_4(emit, imm->u);
1911 return TRUE;
1912 }
1913
1914
1915 /**
1916 * Emit a VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER block
1917 * containing all the immediate values previously allocated
1918 * with alloc_immediate_4().
1919 */
1920 static boolean
1921 emit_vgpu10_immediates_block(struct svga_shader_emitter_v10 *emit)
1922 {
1923 VGPU10OpcodeToken0 token;
1924
1925 assert(!emit->immediates_emitted);
1926
1927 token.value = 0;
1928 token.opcodeType = VGPU10_OPCODE_CUSTOMDATA;
1929 token.customDataClass = VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER;
1930
1931 /* Note: no begin/end_emit_instruction() calls */
1932 emit_dword(emit, token.value);
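/* The length dword covers the whole customdata block: the two header
 * dwords (opcode + length) plus four dwords per vec4 immediate; for
 * example, two immediates give a length of 2 + 4 * 2 = 10 dwords.
 */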
1933 emit_dword(emit, 2 + 4 * emit->num_immediates);
1934 emit_dwords(emit, (unsigned *) emit->immediates, 4 * emit->num_immediates);
1935
1936 emit->immediates_emitted = TRUE;
1937
1938 return TRUE;
1939 }
1940
1941
1942 /**
1943 * Translate a fragment shader's TGSI_INTERPOLATE_x mode to a vgpu10
1944 * interpolation mode.
1945 * \return a VGPU10_INTERPOLATION_x value
1946 */
1947 static unsigned
1948 translate_interpolation(const struct svga_shader_emitter_v10 *emit,
1949 enum tgsi_interpolate_mode interp,
1950 enum tgsi_interpolate_loc interpolate_loc)
1951 {
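/* TGSI_INTERPOLATE_COLOR means the interpolation depends on the flatshade
 * state: constant (flat) interpolation when flat shading is enabled,
 * perspective interpolation otherwise.
 */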
1952 if (interp == TGSI_INTERPOLATE_COLOR) {
1953 interp = emit->key.fs.flatshade ?
1954 TGSI_INTERPOLATE_CONSTANT : TGSI_INTERPOLATE_PERSPECTIVE;
1955 }
1956
1957 switch (interp) {
1958 case TGSI_INTERPOLATE_CONSTANT:
1959 return VGPU10_INTERPOLATION_CONSTANT;
1960 case TGSI_INTERPOLATE_LINEAR:
1961 if (interpolate_loc == TGSI_INTERPOLATE_LOC_CENTROID) {
1962 return VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID;
1963 } else if (interpolate_loc == TGSI_INTERPOLATE_LOC_SAMPLE &&
1964 emit->version >= 41) {
1965 return VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_SAMPLE;
1966 } else {
1967 return VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE;
1968 }
1969 break;
1970 case TGSI_INTERPOLATE_PERSPECTIVE:
1971 if (interpolate_loc == TGSI_INTERPOLATE_LOC_CENTROID) {
1972 return VGPU10_INTERPOLATION_LINEAR_CENTROID;
1973 } else if (interpolate_loc == TGSI_INTERPOLATE_LOC_SAMPLE &&
1974 emit->version >= 41) {
1975 return VGPU10_INTERPOLATION_LINEAR_SAMPLE;
1976 } else {
1977 return VGPU10_INTERPOLATION_LINEAR;
1978 }
1979 break;
1980 default:
1981 assert(!"Unexpected interpolation mode");
1982 return VGPU10_INTERPOLATION_CONSTANT;
1983 }
1984 }
1985
1986
1987 /**
1988 * Translate a TGSI property to VGPU10.
1989 * Don't emit any instructions yet; we only need to gather the primitive property
1990 * information. The output primitive topology might be changed later. The
1991 * final property instructions will be emitted as part of the pre-helper code.
1992 */
1993 static boolean
1994 emit_vgpu10_property(struct svga_shader_emitter_v10 *emit,
1995 const struct tgsi_full_property *prop)
1996 {
1997 static const VGPU10_PRIMITIVE primType[] = {
1998 VGPU10_PRIMITIVE_POINT, /* PIPE_PRIM_POINTS */
1999 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINES */
2000 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINE_LOOP */
2001 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINE_STRIP */
2002 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLES */
2003 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLE_STRIP */
2004 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLE_FAN */
2005 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_QUADS */
2006 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_QUAD_STRIP */
2007 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_POLYGON */
2008 VGPU10_PRIMITIVE_LINE_ADJ, /* PIPE_PRIM_LINES_ADJACENCY */
2009 VGPU10_PRIMITIVE_LINE_ADJ, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
2010 VGPU10_PRIMITIVE_TRIANGLE_ADJ, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
2011 VGPU10_PRIMITIVE_TRIANGLE_ADJ /* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
2012 };
2013
2014 static const VGPU10_PRIMITIVE_TOPOLOGY primTopology[] = {
2015 VGPU10_PRIMITIVE_TOPOLOGY_POINTLIST, /* PIPE_PRIM_POINTS */
2016 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST, /* PIPE_PRIM_LINES */
2017 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST, /* PIPE_PRIM_LINE_LOOP */
2018 VGPU10_PRIMITIVE_TOPOLOGY_LINESTRIP, /* PIPE_PRIM_LINE_STRIP */
2019 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST, /* PIPE_PRIM_TRIANGLES */
2020 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, /* PIPE_PRIM_TRIANGLE_STRIP */
2021 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, /* PIPE_PRIM_TRIANGLE_FAN */
2022 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_QUADS */
2023 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_QUAD_STRIP */
2024 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_POLYGON */
2025 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ, /* PIPE_PRIM_LINES_ADJACENCY */
2026 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
2027 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
2028 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ /* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
2029 };
2030
2031 static const unsigned inputArraySize[] = {
2032 0, /* VGPU10_PRIMITIVE_UNDEFINED */
2033 1, /* VGPU10_PRIMITIVE_POINT */
2034 2, /* VGPU10_PRIMITIVE_LINE */
2035 3, /* VGPU10_PRIMITIVE_TRIANGLE */
2036 0,
2037 0,
2038 4, /* VGPU10_PRIMITIVE_LINE_ADJ */
2039 6 /* VGPU10_PRIMITIVE_TRIANGLE_ADJ */
2040 };
2041
2042 switch (prop->Property.PropertyName) {
2043 case TGSI_PROPERTY_GS_INPUT_PRIM:
2044 assert(prop->u[0].Data < ARRAY_SIZE(primType));
2045 emit->gs.prim_type = primType[prop->u[0].Data];
2046 assert(emit->gs.prim_type != VGPU10_PRIMITIVE_UNDEFINED);
2047 emit->gs.input_size = inputArraySize[emit->gs.prim_type];
2048 break;
2049
2050 case TGSI_PROPERTY_GS_OUTPUT_PRIM:
2051 assert(prop->u[0].Data < ARRAY_SIZE(primTopology));
2052 emit->gs.prim_topology = primTopology[prop->u[0].Data];
2053 assert(emit->gs.prim_topology != VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED);
2054 break;
2055
2056 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
2057 emit->gs.max_out_vertices = prop->u[0].Data;
2058 break;
2059
2060 default:
2061 break;
2062 }
2063
2064 return TRUE;
2065 }
2066
2067
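/**
 * Helper to emit a single property declaration: the opcode token,
 * optionally followed by one data dword (e.g. the max output vertex count).
 */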
2068 static void
2069 emit_property_instruction(struct svga_shader_emitter_v10 *emit,
2070 VGPU10OpcodeToken0 opcode0, unsigned nData,
2071 unsigned data)
2072 {
2073 begin_emit_instruction(emit);
2074 emit_dword(emit, opcode0.value);
2075 if (nData)
2076 emit_dword(emit, data);
2077 end_emit_instruction(emit);
2078 }
2079
2080
2081 /**
2082 * Emit property instructions
2083 */
2084 static void
2085 emit_property_instructions(struct svga_shader_emitter_v10 *emit)
2086 {
2087 VGPU10OpcodeToken0 opcode0;
2088
2089 assert(emit->unit == PIPE_SHADER_GEOMETRY);
2090
2091 /* emit input primitive type declaration */
2092 opcode0.value = 0;
2093 opcode0.opcodeType = VGPU10_OPCODE_DCL_GS_INPUT_PRIMITIVE;
2094 opcode0.primitive = emit->gs.prim_type;
2095 emit_property_instruction(emit, opcode0, 0, 0);
2096
2097 /* emit output primitive topology declaration */
2098 opcode0.value = 0;
2099 opcode0.opcodeType = VGPU10_OPCODE_DCL_GS_OUTPUT_PRIMITIVE_TOPOLOGY;
2100 opcode0.primitiveTopology = emit->gs.prim_topology;
2101 emit_property_instruction(emit, opcode0, 0, 0);
2102
2103 /* emit max output vertices */
2104 opcode0.value = 0;
2105 opcode0.opcodeType = VGPU10_OPCODE_DCL_MAX_OUTPUT_VERTEX_COUNT;
2106 emit_property_instruction(emit, opcode0, 1, emit->gs.max_out_vertices);
2107 }
2108
2109
2110 /**
2111 * Emit a vgpu10 declaration "instruction".
2112 * \param index the register index
2113 * \param size array size of the operand. In most cases, it is 1,
2114 * but for inputs to geometry shader, the array size varies
2115 * depending on the primitive type.
2116 */
2117 static void
2118 emit_decl_instruction(struct svga_shader_emitter_v10 *emit,
2119 VGPU10OpcodeToken0 opcode0,
2120 VGPU10OperandToken0 operand0,
2121 VGPU10NameToken name_token,
2122 unsigned index, unsigned size)
2123 {
2124 assert(opcode0.opcodeType);
2125 assert(operand0.mask);
2126
2127 begin_emit_instruction(emit);
2128 emit_dword(emit, opcode0.value);
2129
2130 emit_dword(emit, operand0.value);
2131
2132 if (operand0.indexDimension == VGPU10_OPERAND_INDEX_1D) {
2133 /* Next token is the index of the register to declare */
2134 emit_dword(emit, index);
2135 }
2136 else if (operand0.indexDimension >= VGPU10_OPERAND_INDEX_2D) {
2137 /* Next token is the size of the register */
2138 emit_dword(emit, size);
2139
2140 /* Followed by the index of the register */
2141 emit_dword(emit, index);
2142 }
2143
2144 if (name_token.value) {
2145 emit_dword(emit, name_token.value);
2146 }
2147
2148 end_emit_instruction(emit);
2149 }
2150
2151
2152 /**
2153 * Emit the declaration for a shader input.
2154 * \param opcodeType opcode type, one of VGPU10_OPCODE_DCL_INPUTx
2155 * \param operandType operand type, one of VGPU10_OPERAND_TYPE_INPUT_x
2156 * \param dim index dimension
2157 * \param index the input register index
2158 * \param size array size of the operand. In most cases, it is 1,
2159 * but for inputs to geometry shader, the array size varies
2160 * depending on the primitive type.
2161 * \param name one of VGPU10_NAME_x
2162 * \param numComp number of components
2163 * \param selMode component selection mode
2164 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2165 * \param interpMode interpolation mode
2166 */
2167 static void
2168 emit_input_declaration(struct svga_shader_emitter_v10 *emit,
2169 VGPU10_OPCODE_TYPE opcodeType,
2170 VGPU10_OPERAND_TYPE operandType,
2171 VGPU10_OPERAND_INDEX_DIMENSION dim,
2172 unsigned index, unsigned size,
2173 VGPU10_SYSTEM_NAME name,
2174 VGPU10_OPERAND_NUM_COMPONENTS numComp,
2175 VGPU10_OPERAND_4_COMPONENT_SELECTION_MODE selMode,
2176 unsigned usageMask,
2177 VGPU10_INTERPOLATION_MODE interpMode)
2178 {
2179 VGPU10OpcodeToken0 opcode0;
2180 VGPU10OperandToken0 operand0;
2181 VGPU10NameToken name_token;
2182
2183 assert(usageMask <= VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2184 assert(opcodeType == VGPU10_OPCODE_DCL_INPUT ||
2185 opcodeType == VGPU10_OPCODE_DCL_INPUT_SIV ||
2186 opcodeType == VGPU10_OPCODE_DCL_INPUT_PS ||
2187 opcodeType == VGPU10_OPCODE_DCL_INPUT_PS_SIV ||
2188 opcodeType == VGPU10_OPCODE_DCL_INPUT_PS_SGV);
2189 assert(operandType == VGPU10_OPERAND_TYPE_INPUT ||
2190 operandType == VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID);
2191 assert(numComp <= VGPU10_OPERAND_4_COMPONENT);
2192 assert(selMode <= VGPU10_OPERAND_4_COMPONENT_MASK_MODE);
2193 assert(dim <= VGPU10_OPERAND_INDEX_3D);
2194 assert(name == VGPU10_NAME_UNDEFINED ||
2195 name == VGPU10_NAME_POSITION ||
2196 name == VGPU10_NAME_INSTANCE_ID ||
2197 name == VGPU10_NAME_VERTEX_ID ||
2198 name == VGPU10_NAME_PRIMITIVE_ID ||
2199 name == VGPU10_NAME_IS_FRONT_FACE ||
2200 name == VGPU10_NAME_SAMPLE_INDEX);
2201
2202 assert(interpMode == VGPU10_INTERPOLATION_UNDEFINED ||
2203 interpMode == VGPU10_INTERPOLATION_CONSTANT ||
2204 interpMode == VGPU10_INTERPOLATION_LINEAR ||
2205 interpMode == VGPU10_INTERPOLATION_LINEAR_CENTROID ||
2206 interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE ||
2207 interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID ||
2208 interpMode == VGPU10_INTERPOLATION_LINEAR_SAMPLE ||
2209 interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_SAMPLE);
2210
2211 check_register_index(emit, opcodeType, index);
2212
2213 opcode0.value = operand0.value = name_token.value = 0;
2214
2215 opcode0.opcodeType = opcodeType;
2216 opcode0.interpolationMode = interpMode;
2217
2218 operand0.operandType = operandType;
2219 operand0.numComponents = numComp;
2220 operand0.selectionMode = selMode;
2221 operand0.mask = usageMask;
2222 operand0.indexDimension = dim;
2223 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2224 if (dim == VGPU10_OPERAND_INDEX_2D)
2225 operand0.index1Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2226
2227 name_token.name = name;
2228
2229 emit_decl_instruction(emit, opcode0, operand0, name_token, index, size);
2230 }
2231
2232
2233 /**
2234 * Emit the declaration for a shader output.
2235 * \param type one of VGPU10_OPCODE_DCL_OUTPUTx
2236 * \param index the output register index
2237 * \param name one of VGPU10_NAME_x
2238 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2239 */
2240 static void
2241 emit_output_declaration(struct svga_shader_emitter_v10 *emit,
2242 VGPU10_OPCODE_TYPE type, unsigned index,
2243 VGPU10_SYSTEM_NAME name,
2244 unsigned usageMask)
2245 {
2246 VGPU10OpcodeToken0 opcode0;
2247 VGPU10OperandToken0 operand0;
2248 VGPU10NameToken name_token;
2249
2250 assert(usageMask <= VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2251 assert(type == VGPU10_OPCODE_DCL_OUTPUT ||
2252 type == VGPU10_OPCODE_DCL_OUTPUT_SGV ||
2253 type == VGPU10_OPCODE_DCL_OUTPUT_SIV);
2254 assert(name == VGPU10_NAME_UNDEFINED ||
2255 name == VGPU10_NAME_POSITION ||
2256 name == VGPU10_NAME_PRIMITIVE_ID ||
2257 name == VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX ||
2258 name == VGPU10_NAME_CLIP_DISTANCE);
2259
2260 check_register_index(emit, type, index);
2261
2262 opcode0.value = operand0.value = name_token.value = 0;
2263
2264 opcode0.opcodeType = type;
2265 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT;
2266 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
2267 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
2268 operand0.mask = usageMask;
2269 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
2270 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2271
2272 name_token.name = name;
2273
2274 emit_decl_instruction(emit, opcode0, operand0, name_token, index, 1);
2275 }
2276
2277
2278 /**
2279 * Emit the declaration for the fragment depth output.
2280 */
2281 static void
2282 emit_fragdepth_output_declaration(struct svga_shader_emitter_v10 *emit)
2283 {
2284 VGPU10OpcodeToken0 opcode0;
2285 VGPU10OperandToken0 operand0;
2286 VGPU10NameToken name_token;
2287
2288 assert(emit->unit == PIPE_SHADER_FRAGMENT);
2289
2290 opcode0.value = operand0.value = name_token.value = 0;
2291
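/* The fragment depth output is a single-component register with no
 * register index, hence the 0-D index dimension below.
 */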
2292 opcode0.opcodeType = VGPU10_OPCODE_DCL_OUTPUT;
2293 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_DEPTH;
2294 operand0.numComponents = VGPU10_OPERAND_1_COMPONENT;
2295 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
2296 operand0.mask = VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
2297
2298 emit_decl_instruction(emit, opcode0, operand0, name_token, 0, 1);
2299 }
2300
2301
2302 /**
2303 * Emit the declaration for the fragment sample mask/coverage output.
2304 */
2305 static void
2306 emit_samplemask_output_declaration(struct svga_shader_emitter_v10 *emit)
2307 {
2308 VGPU10OpcodeToken0 opcode0;
2309 VGPU10OperandToken0 operand0;
2310 VGPU10NameToken name_token;
2311
2312 assert(emit->unit == PIPE_SHADER_FRAGMENT);
2313 assert(emit->version >= 41);
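/* The sample coverage mask output has no register index and requires
 * VGPU10.1 (version 41) or later, hence the assertion above.
 */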
2314
2315 opcode0.value = operand0.value = name_token.value = 0;
2316
2317 opcode0.opcodeType = VGPU10_OPCODE_DCL_OUTPUT;
2318 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_COVERAGE_MASK;
2319 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
2320 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
2321 operand0.mask = VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
2322
2323 emit_decl_instruction(emit, opcode0, operand0, name_token, 0, 1);
2324 }
2325
2326
2327 /**
2328 * Emit the declaration for a system value input/output.
2329 */
2330 static void
2331 emit_system_value_declaration(struct svga_shader_emitter_v10 *emit,
2332 enum tgsi_semantic semantic_name, unsigned index)
2333 {
2334 switch (semantic_name) {
2335 case TGSI_SEMANTIC_INSTANCEID:
2336 index = alloc_system_value_index(emit, index);
2337 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_SIV,
2338 VGPU10_OPERAND_TYPE_INPUT,
2339 VGPU10_OPERAND_INDEX_1D,
2340 index, 1,
2341 VGPU10_NAME_INSTANCE_ID,
2342 VGPU10_OPERAND_4_COMPONENT,
2343 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2344 VGPU10_OPERAND_4_COMPONENT_MASK_X,
2345 VGPU10_INTERPOLATION_UNDEFINED);
2346 break;
2347 case TGSI_SEMANTIC_VERTEXID:
2348 index = alloc_system_value_index(emit, index);
2349 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_SIV,
2350 VGPU10_OPERAND_TYPE_INPUT,
2351 VGPU10_OPERAND_INDEX_1D,
2352 index, 1,
2353 VGPU10_NAME_VERTEX_ID,
2354 VGPU10_OPERAND_4_COMPONENT,
2355 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2356 VGPU10_OPERAND_4_COMPONENT_MASK_X,
2357 VGPU10_INTERPOLATION_UNDEFINED);
2358 break;
2359 case TGSI_SEMANTIC_SAMPLEID:
2360 assert(emit->unit == PIPE_SHADER_FRAGMENT);
2361 emit->fs.sample_id_sys_index = index;
2362 index = alloc_system_value_index(emit, index);
2363 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_PS_SIV,
2364 VGPU10_OPERAND_TYPE_INPUT,
2365 VGPU10_OPERAND_INDEX_1D,
2366 index, 1,
2367 VGPU10_NAME_SAMPLE_INDEX,
2368 VGPU10_OPERAND_4_COMPONENT,
2369 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2370 VGPU10_OPERAND_4_COMPONENT_MASK_X,
2371 VGPU10_INTERPOLATION_CONSTANT);
2372 break;
2373 case TGSI_SEMANTIC_SAMPLEPOS:
2374 /* This system value contains the position of the current sample
2375 * when using per-sample shading. We implement this by calling
2376 * the VGPU10_OPCODE_SAMPLE_POS instruction with the current sample
2377 * index as the argument. See emit_sample_position_instructions().
2378 */
2379 assert(emit->version >= 41);
2380 emit->fs.sample_pos_sys_index = index;
2381 index = alloc_system_value_index(emit, index);
2382 break;
2383 default:
2384 debug_printf("unexpected system value semantic name %u\n",
2385 semantic_name);
2386 }
2387 }
2388
2389 /**
2390 * Translate a TGSI declaration to VGPU10.
2391 */
2392 static boolean
2393 emit_vgpu10_declaration(struct svga_shader_emitter_v10 *emit,
2394 const struct tgsi_full_declaration *decl)
2395 {
2396 switch (decl->Declaration.File) {
2397 case TGSI_FILE_INPUT:
2398 /* do nothing - see emit_input_declarations() */
2399 return TRUE;
2400
2401 case TGSI_FILE_OUTPUT:
2402 assert(decl->Range.First == decl->Range.Last);
2403 emit->output_usage_mask[decl->Range.First] = decl->Declaration.UsageMask;
2404 return TRUE;
2405
2406 case TGSI_FILE_TEMPORARY:
2407 /* Don't declare the temps here. Just keep track of how many
2408 * and emit the declaration later.
2409 */
2410 if (decl->Declaration.Array) {
2411 /* Indexed temporary array. Save the start index of the array
2412 * and the size of the array.
2413 */
2414 const unsigned arrayID = MIN2(decl->Array.ArrayID, MAX_TEMP_ARRAYS);
2415 unsigned i;
2416
2417 assert(arrayID < ARRAY_SIZE(emit->temp_arrays));
2418
2419 /* Save this array so we can emit the declaration for it later */
2420 emit->temp_arrays[arrayID].start = decl->Range.First;
2421 emit->temp_arrays[arrayID].size =
2422 decl->Range.Last - decl->Range.First + 1;
2423
2424 emit->num_temp_arrays = MAX2(emit->num_temp_arrays, arrayID + 1);
2425 assert(emit->num_temp_arrays <= MAX_TEMP_ARRAYS);
2426 emit->num_temp_arrays = MIN2(emit->num_temp_arrays, MAX_TEMP_ARRAYS);
2427
2428 /* Fill in the temp_map entries for this array */
2429 for (i = decl->Range.First; i <= decl->Range.Last; i++) {
2430 emit->temp_map[i].arrayId = arrayID;
2431 emit->temp_map[i].index = i - decl->Range.First;
2432 }
2433 }
2434
2435 /* for all temps, indexed or not, keep track of highest index */
2436 emit->num_shader_temps = MAX2(emit->num_shader_temps,
2437 decl->Range.Last + 1);
2438 return TRUE;
2439
2440 case TGSI_FILE_CONSTANT:
2441 /* Don't declare constants here. Just keep track and emit later. */
2442 {
2443 unsigned constbuf = 0, num_consts;
2444 if (decl->Declaration.Dimension) {
2445 constbuf = decl->Dim.Index2D;
2446 }
2447 /* If the constbuf index is out of bounds, the shader should never
2448 * have linked in the first place, so we should never reach this
2449 * point; the assertion below catches that case.
2450 */
2451 assert(constbuf < ARRAY_SIZE(emit->num_shader_consts));
2452
2453 num_consts = MAX2(emit->num_shader_consts[constbuf],
2454 decl->Range.Last + 1);
2455
2456 if (num_consts > VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT) {
2457 debug_printf("Warning: constant buffer is declared to size [%u]"
2458 " but [%u] is the limit.\n",
2459 num_consts,
2460 VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT);
2461 }
2462 /* The linker doesn't enforce the max UBO size so we clamp here */
2463 emit->num_shader_consts[constbuf] =
2464 MIN2(num_consts, VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT);
2465 }
2466 return TRUE;
2467
2468 case TGSI_FILE_IMMEDIATE:
2469 assert(!"TGSI_FILE_IMMEDIATE not handled yet!");
2470 return FALSE;
2471
2472 case TGSI_FILE_SYSTEM_VALUE:
2473 emit_system_value_declaration(emit, decl->Semantic.Name,
2474 decl->Range.First);
2475 return TRUE;
2476
2477 case TGSI_FILE_SAMPLER:
2478 /* Don't declare samplers here. Just keep track and emit later. */
2479 emit->num_samplers = MAX2(emit->num_samplers, decl->Range.Last + 1);
2480 return TRUE;
2481
2482 #if 0
2483 case TGSI_FILE_RESOURCE:
2484 /*opcode0.opcodeType = VGPU10_OPCODE_DCL_RESOURCE;*/
2485 /* XXX more, VGPU10_RETURN_TYPE_FLOAT */
2486 assert(!"TGSI_FILE_RESOURCE not handled yet");
2487 return FALSE;
2488 #endif
2489
2490 case TGSI_FILE_ADDRESS:
2491 emit->num_address_regs = MAX2(emit->num_address_regs,
2492 decl->Range.Last + 1);
2493 return TRUE;
2494
2495 case TGSI_FILE_SAMPLER_VIEW:
2496 {
2497 unsigned unit = decl->Range.First;
2498 assert(decl->Range.First == decl->Range.Last);
2499 emit->sampler_target[unit] = decl->SamplerView.Resource;
2500 /* Note: we can ignore YZW return types for now */
2501 emit->sampler_return_type[unit] = decl->SamplerView.ReturnTypeX;
2502 emit->sampler_view[unit] = TRUE;
2503 }
2504 return TRUE;
2505
2506 default:
2507 assert(!"Unexpected type of declaration");
2508 return FALSE;
2509 }
2510 }
2511
2512
2513
2514 /**
2515 * Emit all input declarations.
2516 */
2517 static boolean
2518 emit_input_declarations(struct svga_shader_emitter_v10 *emit)
2519 {
2520 unsigned i;
2521
2522 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2523
2524 for (i = 0; i < emit->linkage.num_inputs; i++) {
2525 enum tgsi_semantic semantic_name = emit->info.input_semantic_name[i];
2526 unsigned usage_mask = emit->info.input_usage_mask[i];
2527 unsigned index = emit->linkage.input_map[i];
2528 VGPU10_OPCODE_TYPE type;
2529 VGPU10_INTERPOLATION_MODE interpolationMode;
2530 VGPU10_SYSTEM_NAME name;
2531
2532 if (usage_mask == 0)
2533 continue; /* register is not actually used */
2534
2535 if (semantic_name == TGSI_SEMANTIC_POSITION) {
2536 /* fragment position input */
2537 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2538 interpolationMode = VGPU10_INTERPOLATION_LINEAR;
2539 name = VGPU10_NAME_POSITION;
2540 if (usage_mask & TGSI_WRITEMASK_W) {
2541 /* we need to replace use of 'w' with '1/w' */
2542 emit->fs.fragcoord_input_index = i;
2543 }
2544 }
2545 else if (semantic_name == TGSI_SEMANTIC_FACE) {
2546 /* fragment front-facing input */
2547 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2548 interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
2549 name = VGPU10_NAME_IS_FRONT_FACE;
2550 emit->fs.face_input_index = i;
2551 }
2552 else if (semantic_name == TGSI_SEMANTIC_PRIMID) {
2553 /* primitive ID */
2554 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2555 interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
2556 name = VGPU10_NAME_PRIMITIVE_ID;
2557 }
2558 else if (semantic_name == TGSI_SEMANTIC_SAMPLEID) {
2559 /* sample index / ID */
2560 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2561 interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
2562 name = VGPU10_NAME_SAMPLE_INDEX;
2563 }
2564 else {
2565 /* general fragment input */
2566 type = VGPU10_OPCODE_DCL_INPUT_PS;
2567 interpolationMode =
2568 translate_interpolation(emit,
2569 emit->info.input_interpolate[i],
2570 emit->info.input_interpolate_loc[i]);
2571
2572 /* keep track of whether flat interpolation mode is being used */
2573 emit->uses_flat_interp |=
2574 (interpolationMode == VGPU10_INTERPOLATION_CONSTANT);
2575
2576 name = VGPU10_NAME_UNDEFINED;
2577 }
2578
2579 emit_input_declaration(emit, type,
2580 VGPU10_OPERAND_TYPE_INPUT,
2581 VGPU10_OPERAND_INDEX_1D, index, 1,
2582 name,
2583 VGPU10_OPERAND_4_COMPONENT,
2584 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2585 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2586 interpolationMode);
2587 }
2588 }
2589 else if (emit->unit == PIPE_SHADER_GEOMETRY) {
2590
2591 for (i = 0; i < emit->info.num_inputs; i++) {
2592 enum tgsi_semantic semantic_name = emit->info.input_semantic_name[i];
2593 unsigned usage_mask = emit->info.input_usage_mask[i];
2594 unsigned index = emit->linkage.input_map[i];
2595 VGPU10_OPCODE_TYPE opcodeType, operandType;
2596 VGPU10_OPERAND_NUM_COMPONENTS numComp;
2597 VGPU10_OPERAND_4_COMPONENT_SELECTION_MODE selMode;
2598 VGPU10_SYSTEM_NAME name;
2599 VGPU10_OPERAND_INDEX_DIMENSION dim;
2600
2601 if (usage_mask == 0)
2602 continue; /* register is not actually used */
2603
2604 opcodeType = VGPU10_OPCODE_DCL_INPUT;
2605 operandType = VGPU10_OPERAND_TYPE_INPUT;
2606 numComp = VGPU10_OPERAND_4_COMPONENT;
2607 selMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
2608 name = VGPU10_NAME_UNDEFINED;
2609
2610 /* all geometry shader inputs are two dimensional except
2611 * gl_PrimitiveID
2612 */
2613 dim = VGPU10_OPERAND_INDEX_2D;
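/* With 2-D indexing a GS input is addressed as v[vertex][register],
 * where the first index selects one of the gs.input_size vertices.
 */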
2614
2615 if (semantic_name == TGSI_SEMANTIC_PRIMID) {
2616 /* Primitive ID */
2617 operandType = VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID;
2618 dim = VGPU10_OPERAND_INDEX_0D;
2619 numComp = VGPU10_OPERAND_0_COMPONENT;
2620 selMode = 0;
2621
2622 /* Also save the register index so we can check for primitive id
2623 * when emitting the src register. We need to modify the operand
2624 * type and index dimension when emitting the primitive id src reg.
2625 */
2626 emit->gs.prim_id_index = i;
2627 }
2628 else if (semantic_name == TGSI_SEMANTIC_POSITION) {
2629 /* vertex position input */
2630 opcodeType = VGPU10_OPCODE_DCL_INPUT_SIV;
2631 name = VGPU10_NAME_POSITION;
2632 }
2633
2634 emit_input_declaration(emit, opcodeType, operandType,
2635 dim, index,
2636 emit->gs.input_size,
2637 name,
2638 numComp, selMode,
2639 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2640 VGPU10_INTERPOLATION_UNDEFINED);
2641 }
2642 }
2643 else {
2644 assert(emit->unit == PIPE_SHADER_VERTEX);
2645
2646 for (i = 0; i < emit->info.file_max[TGSI_FILE_INPUT] + 1; i++) {
2647 unsigned usage_mask = emit->info.input_usage_mask[i];
2648 unsigned index = i;
2649
2650 if (usage_mask == 0)
2651 continue; /* register is not actually used */
2652
2653 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT,
2654 VGPU10_OPERAND_TYPE_INPUT,
2655 VGPU10_OPERAND_INDEX_1D, index, 1,
2656 VGPU10_NAME_UNDEFINED,
2657 VGPU10_OPERAND_4_COMPONENT,
2658 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2659 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2660 VGPU10_INTERPOLATION_UNDEFINED);
2661 }
2662 }
2663
2664 return TRUE;
2665 }
2666
2667
2668 /**
2669 * Emit all output declarations.
2670 */
2671 static boolean
2672 emit_output_declarations(struct svga_shader_emitter_v10 *emit)
2673 {
2674 unsigned i;
2675
2676 for (i = 0; i < emit->info.num_outputs; i++) {
2677 /*const unsigned usage_mask = emit->info.output_usage_mask[i];*/
2678 const enum tgsi_semantic semantic_name =
2679 emit->info.output_semantic_name[i];
2680 const unsigned semantic_index = emit->info.output_semantic_index[i];
2681 unsigned index = i;
2682
2683 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2684 if (semantic_name == TGSI_SEMANTIC_COLOR) {
2685 assert(semantic_index < ARRAY_SIZE(emit->fs.color_out_index));
2686
2687 emit->fs.color_out_index[semantic_index] = index;
2688
2689 emit->fs.num_color_outputs = MAX2(emit->fs.num_color_outputs,
2690 index + 1);
2691
2692 /* The semantic index is the shader's color output/buffer index */
2693 emit_output_declaration(emit,
2694 VGPU10_OPCODE_DCL_OUTPUT, semantic_index,
2695 VGPU10_NAME_UNDEFINED,
2696 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2697
2698 if (semantic_index == 0) {
2699 if (emit->key.fs.write_color0_to_n_cbufs > 1) {
2700 /* Emit declarations for the additional color outputs
2701 * for broadcasting.
2702 */
2703 unsigned j;
2704 for (j = 1; j < emit->key.fs.write_color0_to_n_cbufs; j++) {
2705 /* Allocate a new output index */
2706 unsigned idx = emit->info.num_outputs + j - 1;
2707 emit->fs.color_out_index[j] = idx;
2708 emit_output_declaration(emit,
2709 VGPU10_OPCODE_DCL_OUTPUT, idx,
2710 VGPU10_NAME_UNDEFINED,
2711 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2712 emit->info.output_semantic_index[idx] = j;
2713 }
2714
2715 emit->fs.num_color_outputs =
2716 emit->key.fs.write_color0_to_n_cbufs;
2717 }
2718 }
2719 else {
2720 assert(!emit->key.fs.write_color0_to_n_cbufs);
2721 }
2722 }
2723 else if (semantic_name == TGSI_SEMANTIC_POSITION) {
2724 /* Fragment depth output */
2725 emit_fragdepth_output_declaration(emit);
2726 }
2727 else if (semantic_name == TGSI_SEMANTIC_SAMPLEMASK) {
2728 /* Fragment sample mask output */
2729 emit_samplemask_output_declaration(emit);
2730 }
2731 else {
2732 assert(!"Bad output semantic name");
2733 }
2734 }
2735 else {
2736 /* VS or GS */
2737 VGPU10_COMPONENT_NAME name;
2738 VGPU10_OPCODE_TYPE type;
2739 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
2740
2741 switch (semantic_name) {
2742 case TGSI_SEMANTIC_POSITION:
2743 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2744 type = VGPU10_OPCODE_DCL_OUTPUT_SIV;
2745 name = VGPU10_NAME_POSITION;
2746 /* Save the index of the vertex position output register */
2747 emit->vposition.out_index = index;
2748 break;
2749 case TGSI_SEMANTIC_CLIPDIST:
2750 type = VGPU10_OPCODE_DCL_OUTPUT_SIV;
2751 name = VGPU10_NAME_CLIP_DISTANCE;
2752 /* save the starting index of the clip distance output register */
2753 if (semantic_index == 0)
2754 emit->clip_dist_out_index = index;
2755 writemask = emit->output_usage_mask[index];
2756 writemask = apply_clip_plane_mask(emit, writemask, semantic_index);
2757 if (writemask == 0x0) {
2758 continue; /* discard this do-nothing declaration */
2759 }
2760 break;
2761 case TGSI_SEMANTIC_PRIMID:
2762 assert(emit->unit == PIPE_SHADER_GEOMETRY);
2763 type = VGPU10_OPCODE_DCL_OUTPUT_SGV;
2764 name = VGPU10_NAME_PRIMITIVE_ID;
2765 break;
2766 case TGSI_SEMANTIC_LAYER:
2767 assert(emit->unit == PIPE_SHADER_GEOMETRY);
2768 type = VGPU10_OPCODE_DCL_OUTPUT_SGV;
2769 name = VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX;
2770 break;
2771 case TGSI_SEMANTIC_CLIPVERTEX:
2772 type = VGPU10_OPCODE_DCL_OUTPUT;
2773 name = VGPU10_NAME_UNDEFINED;
2774 emit->clip_vertex_out_index = index;
2775 break;
2776 default:
2777 /* generic output */
2778 type = VGPU10_OPCODE_DCL_OUTPUT;
2779 name = VGPU10_NAME_UNDEFINED;
2780 }
2781
2782 emit_output_declaration(emit, type, index, name, writemask);
2783 }
2784 }
2785
2786 if (emit->vposition.so_index != INVALID_INDEX &&
2787 emit->vposition.out_index != INVALID_INDEX) {
2788
2789 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2790
2791 /* Emit the declaration for the non-adjusted vertex position
2792 * for stream output purpose
2793 */
2794 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2795 emit->vposition.so_index,
2796 VGPU10_NAME_UNDEFINED,
2797 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2798 }
2799
2800 if (emit->clip_dist_so_index != INVALID_INDEX &&
2801 emit->clip_dist_out_index != INVALID_INDEX) {
2802
2803 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2804
2805 /* Emit the declaration for the clip distance shadow copy which
2806 * will be used for stream output purpose and for clip distance
2807 * varying variable
2808 */
2809 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2810 emit->clip_dist_so_index,
2811 VGPU10_NAME_UNDEFINED,
2812 emit->output_usage_mask[emit->clip_dist_out_index]);
2813
2814 if (emit->info.num_written_clipdistance > 4) {
2815 /* for the second clip distance register, each handles 4 planes */
2816 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2817 emit->clip_dist_so_index + 1,
2818 VGPU10_NAME_UNDEFINED,
2819 emit->output_usage_mask[emit->clip_dist_out_index+1]);
2820 }
2821 }
2822
2823 return TRUE;
2824 }
2825
2826
2827 /**
2828 * Emit the declaration for the temporary registers.
2829 */
2830 static boolean
2831 emit_temporaries_declaration(struct svga_shader_emitter_v10 *emit)
2832 {
2833 unsigned total_temps, reg, i;
2834
2835 total_temps = emit->num_shader_temps;
2836
2837 /* If there is indirect access to non-indexable temps in the shader,
2838 * convert those temps to indexable temps. This works around a bug
2839 * in the GLSL->TGSI translator exposed in piglit test
2840 * glsl-1.20/execution/fs-const-array-of-struct-of-array.shader_test.
2841 * Internal temps added by the driver remain as non-indexable temps.
2842 */
2843 if ((emit->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) &&
2844 emit->num_temp_arrays == 0) {
2845 unsigned arrayID;
2846
2847 arrayID = 1;
2848 emit->num_temp_arrays = arrayID + 1;
2849 emit->temp_arrays[arrayID].start = 0;
2850 emit->temp_arrays[arrayID].size = total_temps;
2851
2852 /* Fill in the temp_map entries for this temp array */
2853 for (i = 0; i < total_temps; i++) {
2854 emit->temp_map[i].arrayId = arrayID;
2855 emit->temp_map[i].index = i;
2856 }
2857 }
2858
2859 /* Allocate extra temps for specially-implemented instructions,
2860 * such as LIT.
2861 */
2862 total_temps += MAX_INTERNAL_TEMPS;
2863
2864 if (emit->unit == PIPE_SHADER_VERTEX || emit->unit == PIPE_SHADER_GEOMETRY) {
2865 if (emit->vposition.need_prescale || emit->key.vs.undo_viewport ||
2866 emit->key.clip_plane_enable ||
2867 emit->vposition.so_index != INVALID_INDEX) {
2868 emit->vposition.tmp_index = total_temps;
2869 total_temps += 1;
2870 }
2871
2872 if (emit->unit == PIPE_SHADER_VERTEX) {
2873 unsigned attrib_mask = (emit->key.vs.adjust_attrib_w_1 |
2874 emit->key.vs.adjust_attrib_itof |
2875 emit->key.vs.adjust_attrib_utof |
2876 emit->key.vs.attrib_is_bgra |
2877 emit->key.vs.attrib_puint_to_snorm |
2878 emit->key.vs.attrib_puint_to_uscaled |
2879 emit->key.vs.attrib_puint_to_sscaled);
2880 while (attrib_mask) {
2881 unsigned index = u_bit_scan(&attrib_mask);
2882 emit->vs.adjusted_input[index] = total_temps++;
2883 }
2884 }
2885
2886 if (emit->clip_mode == CLIP_DISTANCE) {
2887 /* We need to write the clip distance to a temporary register
2888 * first. Then it will be copied to the shadow copy for
2889 * the clip distance varying variable and stream output purpose.
2890 * It will also be copied to the actual CLIPDIST register
2891 * according to the enabled clip planes
2892 */
2893 emit->clip_dist_tmp_index = total_temps++;
2894 if (emit->info.num_written_clipdistance > 4)
2895 total_temps++; /* second clip register */
2896 }
2897 else if (emit->clip_mode == CLIP_VERTEX) {
2898 /* We need to convert the TGSI CLIPVERTEX output to one or more
2899 * clip distances. Allocate a temp reg for the clipvertex here.
2900 */
2901 assert(emit->info.writes_clipvertex > 0);
2902 emit->clip_vertex_tmp_index = total_temps;
2903 total_temps++;
2904 }
2905 }
2906 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
2907 if (emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS ||
2908 emit->key.fs.write_color0_to_n_cbufs > 1) {
2909 /* Allocate a temp to hold the output color */
2910 emit->fs.color_tmp_index = total_temps;
2911 total_temps += 1;
2912 }
2913
2914 if (emit->fs.face_input_index != INVALID_INDEX) {
2915 /* Allocate a temp for the +/-1 face register */
2916 emit->fs.face_tmp_index = total_temps;
2917 total_temps += 1;
2918 }
2919
2920 if (emit->fs.fragcoord_input_index != INVALID_INDEX) {
2921 /* Allocate a temp for modified fragment position register */
2922 emit->fs.fragcoord_tmp_index = total_temps;
2923 total_temps += 1;
2924 }
2925
2926 if (emit->fs.sample_pos_sys_index != INVALID_INDEX) {
2927 /* Allocate a temp for the sample position */
2928 emit->fs.sample_pos_tmp_index = total_temps++;
2929 }
2930 }
2931
2932 for (i = 0; i < emit->num_address_regs; i++) {
2933 emit->address_reg_index[i] = total_temps++;
2934 }
2935
2936 /* Initialize the temp_map array which maps TGSI temp indexes to VGPU10
2937 * temp indexes. Basically, we compact all the non-array temp register
2938 * indexes into a consecutive series.
2939 *
2940 * Before, we may have some TGSI declarations like:
2941 * DCL TEMP[0..1], LOCAL
2942 * DCL TEMP[2..4], ARRAY(1), LOCAL
2943 * DCL TEMP[5..7], ARRAY(2), LOCAL
2944 * plus, some extra temps, like TEMP[8], TEMP[9] for misc things
2945 *
2946 * After, we'll have a map like this:
2947 * temp_map[0] = { array 0, index 0 }
2948 * temp_map[1] = { array 0, index 1 }
2949 * temp_map[2] = { array 1, index 0 }
2950 * temp_map[3] = { array 1, index 1 }
2951 * temp_map[4] = { array 1, index 2 }
2952 * temp_map[5] = { array 2, index 0 }
2953 * temp_map[6] = { array 2, index 1 }
2954 * temp_map[7] = { array 2, index 2 }
2955 * temp_map[8] = { array 0, index 2 }
2956 * temp_map[9] = { array 0, index 3 }
2957 *
2958 * We'll declare two arrays of 3 elements, plus a set of four non-indexed
2959 * temps numbered 0..3
2960 *
2961 * Any time we emit a temporary register index, we'll have to use the
2962 * temp_map[] table to convert the TGSI index to the VGPU10 index.
2963 *
2964 * Finally, we recompute the total_temps value here.
2965 */
2966 reg = 0;
2967 for (i = 0; i < total_temps; i++) {
2968 if (emit->temp_map[i].arrayId == 0) {
2969 emit->temp_map[i].index = reg++;
2970 }
2971 }
2972
2973 if (0) {
2974 debug_printf("total_temps %u\n", total_temps);
2975 for (i = 0; i < total_temps; i++) {
2976 debug_printf("temp %u -> array %u index %u\n",
2977 i, emit->temp_map[i].arrayId, emit->temp_map[i].index);
2978 }
2979 }
2980
2981 total_temps = reg;
2982
2983 /* Emit declaration of ordinary temp registers */
2984 if (total_temps > 0) {
2985 VGPU10OpcodeToken0 opcode0;
2986
2987 opcode0.value = 0;
2988 opcode0.opcodeType = VGPU10_OPCODE_DCL_TEMPS;
2989
2990 begin_emit_instruction(emit);
2991 emit_dword(emit, opcode0.value);
2992 emit_dword(emit, total_temps);
2993 end_emit_instruction(emit);
2994 }
2995
2996 /* Emit declarations for indexable temp arrays. Skip 0th entry since
2997 * it's unused.
2998 */
2999 for (i = 1; i < emit->num_temp_arrays; i++) {
3000 unsigned num_temps = emit->temp_arrays[i].size;
3001
3002 if (num_temps > 0) {
3003 VGPU10OpcodeToken0 opcode0;
3004
3005 opcode0.value = 0;
3006 opcode0.opcodeType = VGPU10_OPCODE_DCL_INDEXABLE_TEMP;
3007
3008 begin_emit_instruction(emit);
3009 emit_dword(emit, opcode0.value);
3010 emit_dword(emit, i); /* which array */
3011 emit_dword(emit, num_temps);
3012 emit_dword(emit, 4); /* num components */
3013 end_emit_instruction(emit);
3014
3015 total_temps += num_temps;
3016 }
3017 }
3018
3019 /* Check that the grand total of all regular and indexed temps is
3020 * under the limit.
3021 */
3022 check_register_index(emit, VGPU10_OPCODE_DCL_TEMPS, total_temps - 1);
3023
3024 return TRUE;
3025 }
3026
3027
3028 static boolean
3029 emit_constant_declaration(struct svga_shader_emitter_v10 *emit)
3030 {
3031 VGPU10OpcodeToken0 opcode0;
3032 VGPU10OperandToken0 operand0;
3033 unsigned total_consts, i;
3034
3035 opcode0.value = 0;
3036 opcode0.opcodeType = VGPU10_OPCODE_DCL_CONSTANT_BUFFER;
3037 opcode0.accessPattern = VGPU10_CB_IMMEDIATE_INDEXED;
3038 /* XXX or, access pattern = VGPU10_CB_DYNAMIC_INDEXED */
3039
3040 operand0.value = 0;
3041 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
3042 operand0.indexDimension = VGPU10_OPERAND_INDEX_2D;
3043 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
3044 operand0.index1Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
3045 operand0.operandType = VGPU10_OPERAND_TYPE_CONSTANT_BUFFER;
3046 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
3047 operand0.swizzleX = 0;
3048 operand0.swizzleY = 1;
3049 operand0.swizzleZ = 2;
3050 operand0.swizzleW = 3;
3051
3052 /**
3053 * Emit declaration for constant buffer [0]. We also allocate
3054 * room for the extra constants here.
3055 */
3056 total_consts = emit->num_shader_consts[0];
3057
3058 /* Now, allocate constant slots for the "extra" constants.
3059 * Note: it's critical that these extra constant locations
3060 * exactly match what's emitted by the "extra" constants code
3061 * in svga_state_constants.c
3062 */
3063
3064 /* Vertex position scale/translation */
3065 if (emit->vposition.need_prescale) {
3066 emit->vposition.prescale_scale_index = total_consts++;
3067 emit->vposition.prescale_trans_index = total_consts++;
3068 }
3069
3070 if (emit->unit == PIPE_SHADER_VERTEX) {
3071 if (emit->key.vs.undo_viewport) {
3072 emit->vs.viewport_index = total_consts++;
3073 }
3074 }
3075
3076 /* user-defined clip planes */
3077 if (emit->key.clip_plane_enable) {
3078 unsigned n = util_bitcount(emit->key.clip_plane_enable);
3079 assert(emit->unit == PIPE_SHADER_VERTEX ||
3080 emit->unit == PIPE_SHADER_GEOMETRY);
3081 for (i = 0; i < n; i++) {
3082 emit->clip_plane_const[i] = total_consts++;
3083 }
3084 }
3085
3086 for (i = 0; i < emit->num_samplers; i++) {
3087
3088 if (emit->sampler_view[i]) {
3089
3090 /* Texcoord scale factors for RECT textures */
3091 if (emit->key.tex[i].unnormalized) {
3092 emit->texcoord_scale_index[i] = total_consts++;
3093 }
3094
3095 /* Texture buffer sizes */
3096 if (emit->sampler_target[i] == TGSI_TEXTURE_BUFFER) {
3097 emit->texture_buffer_size_index[i] = total_consts++;
3098 }
3099 }
3100 }
3101
3102 if (total_consts > 0) {
3103 begin_emit_instruction(emit);
3104 emit_dword(emit, opcode0.value);
3105 emit_dword(emit, operand0.value);
3106 emit_dword(emit, 0); /* which const buffer slot */
3107 emit_dword(emit, total_consts);
3108 end_emit_instruction(emit);
3109 }
3110
3111 /* Declare remaining constant buffers (UBOs) */
3112 for (i = 1; i < ARRAY_SIZE(emit->num_shader_consts); i++) {
3113 if (emit->num_shader_consts[i] > 0) {
3114 begin_emit_instruction(emit);
3115 emit_dword(emit, opcode0.value);
3116 emit_dword(emit, operand0.value);
3117 emit_dword(emit, i); /* which const buffer slot */
3118 emit_dword(emit, emit->num_shader_consts[i]);
3119 end_emit_instruction(emit);
3120 }
3121 }
3122
3123 return TRUE;
3124 }
3125
3126
3127 /**
3128 * Emit declarations for samplers.
3129 */
3130 static boolean
3131 emit_sampler_declarations(struct svga_shader_emitter_v10 *emit)
3132 {
3133 unsigned i;
3134
3135 for (i = 0; i < emit->num_samplers; i++) {
3136 VGPU10OpcodeToken0 opcode0;
3137 VGPU10OperandToken0 operand0;
3138
3139 opcode0.value = 0;
3140 opcode0.opcodeType = VGPU10_OPCODE_DCL_SAMPLER;
3141 opcode0.samplerMode = VGPU10_SAMPLER_MODE_DEFAULT;
3142
3143 operand0.value = 0;
3144 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
3145 operand0.operandType = VGPU10_OPERAND_TYPE_SAMPLER;
3146 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
3147 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
3148
3149 begin_emit_instruction(emit);
3150 emit_dword(emit, opcode0.value);
3151 emit_dword(emit, operand0.value);
3152 emit_dword(emit, i);
3153 end_emit_instruction(emit);
3154 }
3155
3156 return TRUE;
3157 }
3158
3159
3160 /**
3161 * Translate TGSI_TEXTURE_x to VGPU10_RESOURCE_DIMENSION_x.
3162 */
3163 static unsigned
3164 tgsi_texture_to_resource_dimension(enum tgsi_texture_type target,
3165 boolean is_array)
3166 {
3167 switch (target) {
3168 case TGSI_TEXTURE_BUFFER:
3169 return VGPU10_RESOURCE_DIMENSION_BUFFER;
3170 case TGSI_TEXTURE_1D:
3171 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D;
3172 case TGSI_TEXTURE_2D:
3173 case TGSI_TEXTURE_RECT:
3174 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
3175 case TGSI_TEXTURE_3D:
3176 return VGPU10_RESOURCE_DIMENSION_TEXTURE3D;
3177 case TGSI_TEXTURE_CUBE:
3178 case TGSI_TEXTURE_SHADOWCUBE:
3179 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBE;
3180 case TGSI_TEXTURE_SHADOW1D:
3181 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D;
3182 case TGSI_TEXTURE_SHADOW2D:
3183 case TGSI_TEXTURE_SHADOWRECT:
3184 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
3185 case TGSI_TEXTURE_1D_ARRAY:
3186 case TGSI_TEXTURE_SHADOW1D_ARRAY:
3187 return is_array ? VGPU10_RESOURCE_DIMENSION_TEXTURE1DARRAY
3188 : VGPU10_RESOURCE_DIMENSION_TEXTURE1D;
3189 case TGSI_TEXTURE_2D_ARRAY:
3190 case TGSI_TEXTURE_SHADOW2D_ARRAY:
3191 return is_array ? VGPU10_RESOURCE_DIMENSION_TEXTURE2DARRAY
3192 : VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
3193 case TGSI_TEXTURE_2D_MSAA:
3194 return VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS;
3195 case TGSI_TEXTURE_2D_ARRAY_MSAA:
3196 return is_array ? VGPU10_RESOURCE_DIMENSION_TEXTURE2DMSARRAY
3197 : VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS;
3198 case TGSI_TEXTURE_CUBE_ARRAY:
3199 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
3200 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBEARRAY;
3201 default:
3202 assert(!"Unexpected resource type");
3203 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
3204 }
3205 }
3206
3207
3208 /**
3209 * Given a tgsi_return_type, return true iff it is an integer type.
3210 */
3211 static boolean
3212 is_integer_type(enum tgsi_return_type type)
3213 {
3214 switch (type) {
3215 case TGSI_RETURN_TYPE_SINT:
3216 case TGSI_RETURN_TYPE_UINT:
3217 return TRUE;
3218 case TGSI_RETURN_TYPE_FLOAT:
3219 case TGSI_RETURN_TYPE_UNORM:
3220 case TGSI_RETURN_TYPE_SNORM:
3221 return FALSE;
3222 case TGSI_RETURN_TYPE_COUNT:
3223 default:
3224 assert(!"is_integer_type: Unknown tgsi_return_type");
3225 return FALSE;
3226 }
3227 }
3228
3229
3230 /**
3231 * Emit declarations for resources.
3232 * XXX When we're sure that all TGSI shaders will be generated with
3233 * sampler view declarations (Ex: DCL SVIEW[n], 2D, UINT) we may
3234 * rework this code.
3235 */
3236 static boolean
3237 emit_resource_declarations(struct svga_shader_emitter_v10 *emit)
3238 {
3239 unsigned i;
3240
3241 /* Emit resource decl for each sampler */
3242 for (i = 0; i < emit->num_samplers; i++) {
3243 VGPU10OpcodeToken0 opcode0;
3244 VGPU10OperandToken0 operand0;
3245 VGPU10ResourceReturnTypeToken return_type;
3246 VGPU10_RESOURCE_RETURN_TYPE rt;
3247
3248 opcode0.value = 0;
3249 opcode0.opcodeType = VGPU10_OPCODE_DCL_RESOURCE;
3250 opcode0.resourceDimension =
3251 tgsi_texture_to_resource_dimension(emit->sampler_target[i],
3252 emit->key.tex[i].is_array);
3253 opcode0.sampleCount = emit->key.tex[i].num_samples;
3254 operand0.value = 0;
3255 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
3256 operand0.operandType = VGPU10_OPERAND_TYPE_RESOURCE;
3257 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
3258 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
3259
3260 #if 1
3261 /* convert TGSI_RETURN_TYPE_x to VGPU10_RETURN_TYPE_x */
3262 STATIC_ASSERT(VGPU10_RETURN_TYPE_UNORM == TGSI_RETURN_TYPE_UNORM + 1);
3263 STATIC_ASSERT(VGPU10_RETURN_TYPE_SNORM == TGSI_RETURN_TYPE_SNORM + 1);
3264 STATIC_ASSERT(VGPU10_RETURN_TYPE_SINT == TGSI_RETURN_TYPE_SINT + 1);
3265 STATIC_ASSERT(VGPU10_RETURN_TYPE_UINT == TGSI_RETURN_TYPE_UINT + 1);
3266 STATIC_ASSERT(VGPU10_RETURN_TYPE_FLOAT == TGSI_RETURN_TYPE_FLOAT + 1);
3267 assert(emit->sampler_return_type[i] <= TGSI_RETURN_TYPE_FLOAT);
3268 rt = emit->sampler_return_type[i] + 1;
3269 #else
3270 switch (emit->sampler_return_type[i]) {
3271 case TGSI_RETURN_TYPE_UNORM: rt = VGPU10_RETURN_TYPE_UNORM; break;
3272 case TGSI_RETURN_TYPE_SNORM: rt = VGPU10_RETURN_TYPE_SNORM; break;
3273 case TGSI_RETURN_TYPE_SINT: rt = VGPU10_RETURN_TYPE_SINT; break;
3274 case TGSI_RETURN_TYPE_UINT: rt = VGPU10_RETURN_TYPE_UINT; break;
3275 case TGSI_RETURN_TYPE_FLOAT: rt = VGPU10_RETURN_TYPE_FLOAT; break;
3276 case TGSI_RETURN_TYPE_COUNT:
3277 default:
3278 rt = VGPU10_RETURN_TYPE_FLOAT;
3279 assert(!"emit_resource_declarations: Unknown tgsi_return_type");
3280 }
3281 #endif
3282
3283 return_type.value = 0;
3284 return_type.component0 = rt;
3285 return_type.component1 = rt;
3286 return_type.component2 = rt;
3287 return_type.component3 = rt;
3288
3289 begin_emit_instruction(emit);
3290 emit_dword(emit, opcode0.value);
3291 emit_dword(emit, operand0.value);
3292 emit_dword(emit, i);
3293 emit_dword(emit, return_type.value);
3294 end_emit_instruction(emit);
3295 }
3296
3297 return TRUE;
3298 }
3299
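/**
 * Emit a single instruction with one dst register and one src register.
 * The op2/op3 variants below take two and three src registers, respectively.
 */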
3300 static void
3301 emit_instruction_op1(struct svga_shader_emitter_v10 *emit,
3302 VGPU10_OPCODE_TYPE opcode,
3303 const struct tgsi_full_dst_register *dst,
3304 const struct tgsi_full_src_register *src,
3305 boolean saturate)
3306 {
3307 begin_emit_instruction(emit);
3308 emit_opcode(emit, opcode, saturate);
3309 emit_dst_register(emit, dst);
3310 emit_src_register(emit, src);
3311 end_emit_instruction(emit);
3312 }
3313
3314 static void
3315 emit_instruction_op2(struct svga_shader_emitter_v10 *emit,
3316 VGPU10_OPCODE_TYPE opcode,
3317 const struct tgsi_full_dst_register *dst,
3318 const struct tgsi_full_src_register *src1,
3319 const struct tgsi_full_src_register *src2,
3320 boolean saturate)
3321 {
3322 begin_emit_instruction(emit);
3323 emit_opcode(emit, opcode, saturate);
3324 emit_dst_register(emit, dst);
3325 emit_src_register(emit, src1);
3326 emit_src_register(emit, src2);
3327 end_emit_instruction(emit);
3328 }
3329
3330 static void
3331 emit_instruction_op3(struct svga_shader_emitter_v10 *emit,
3332 VGPU10_OPCODE_TYPE opcode,
3333 const struct tgsi_full_dst_register *dst,
3334 const struct tgsi_full_src_register *src1,
3335 const struct tgsi_full_src_register *src2,
3336 const struct tgsi_full_src_register *src3,
3337 boolean saturate)
3338 {
3339 begin_emit_instruction(emit);
3340 emit_opcode(emit, opcode, saturate);
3341 emit_dst_register(emit, dst);
3342 emit_src_register(emit, src1);
3343 emit_src_register(emit, src2);
3344 emit_src_register(emit, src3);
3345 end_emit_instruction(emit);
3346 }
3347
3348 /**
3349 * Emit the actual clip distance instructions to be used for clipping
3350 * by copying the clip distance from the temporary registers to the
3351 * CLIPDIST registers written with the enabled planes mask.
3352 * Also copy the clip distance from the temporary to the clip distance
3353 * shadow copy register which will be referenced by the input shader
3354 */
3355 static void
3356 emit_clip_distance_instructions(struct svga_shader_emitter_v10 *emit)
3357 {
3358 struct tgsi_full_src_register tmp_clip_dist_src;
3359 struct tgsi_full_dst_register clip_dist_dst;
3360
3361 unsigned i;
3362 unsigned clip_plane_enable = emit->key.clip_plane_enable;
3363 unsigned clip_dist_tmp_index = emit->clip_dist_tmp_index;
3364 int num_written_clipdist = emit->info.num_written_clipdistance;
3365
3366 assert(emit->clip_dist_out_index != INVALID_INDEX);
3367 assert(emit->clip_dist_tmp_index != INVALID_INDEX);
3368
3369 /**
3370 * Temporarily reset the temporary clip dist register index so
3371 * that the copy to the real clip dist register will not
3372 * attempt to copy to the temporary register again
3373 */
3374 emit->clip_dist_tmp_index = INVALID_INDEX;
3375
3376 for (i = 0; i < 2 && num_written_clipdist > 0; i++, num_written_clipdist-=4) {
3377
3378 tmp_clip_dist_src = make_src_temp_reg(clip_dist_tmp_index + i);
3379
3380 /**
3381 * Copy to the shadow copy for use by the varying variable and
3382 * stream output. All clip distances are written regardless of
3383 * which clipping planes are enabled.
3384 */
3385 clip_dist_dst = make_dst_reg(TGSI_FILE_OUTPUT,
3386 emit->clip_dist_so_index + i);
3387
3388 /* MOV clip_dist_so, tmp_clip_dist */
3389 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &clip_dist_dst,
3390 &tmp_clip_dist_src, FALSE);
3391
3392 /**
3393 * Copy the clip distances for the enabled clipping planes to the
3394 * CLIPDIST registers used for hardware clipping
3395 */
3396 if (clip_plane_enable & 0xf) {
3397 clip_dist_dst = make_dst_reg(TGSI_FILE_OUTPUT,
3398 emit->clip_dist_out_index + i);
3399 clip_dist_dst = writemask_dst(&clip_dist_dst, clip_plane_enable & 0xf);
3400
3401 /* MOV CLIPDIST, tmp_clip_dist */
3402 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &clip_dist_dst,
3403 &tmp_clip_dist_src, FALSE);
3404 }
3405 /* four clip planes per clip register */
3406 clip_plane_enable >>= 4;
3407 }
3408 /**
3409 * set the temporary clip dist register index back to the
3410 * temporary index for the next vertex
3411 */
3412 emit->clip_dist_tmp_index = clip_dist_tmp_index;
3413 }
3414
3415 /* Declare clip distance output registers for user-defined clip planes
3416 * or the TGSI_CLIPVERTEX output.
3417 */
3418 static void
3419 emit_clip_distance_declarations(struct svga_shader_emitter_v10 *emit)
3420 {
3421 unsigned num_clip_planes = util_bitcount(emit->key.clip_plane_enable);
3422 unsigned index = emit->num_outputs;
3423 unsigned plane_mask;
3424
3425 assert(emit->unit == PIPE_SHADER_VERTEX ||
3426 emit->unit == PIPE_SHADER_GEOMETRY);
3427 assert(num_clip_planes <= 8);
3428
3429 if (emit->clip_mode != CLIP_LEGACY &&
3430 emit->clip_mode != CLIP_VERTEX) {
3431 return;
3432 }
3433
3434 if (num_clip_planes == 0)
3435 return;
3436
3437 /* Declare one or two clip output registers. The number of components
3438 * in the mask reflects the number of clip planes. For example, if 5
3439 * clip planes are needed, we'll declare outputs similar to:
3440 * dcl_output_siv o2.xyzw, clip_distance
3441 * dcl_output_siv o3.x, clip_distance
3442 */
3443 emit->clip_dist_out_index = index; /* save the starting clip dist reg index */
3444
3445 plane_mask = (1 << num_clip_planes) - 1;
3446 if (plane_mask & 0xf) {
3447 unsigned cmask = plane_mask & VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
3448 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT_SIV, index,
3449 VGPU10_NAME_CLIP_DISTANCE, cmask);
3450 emit->num_outputs++;
3451 }
3452 if (plane_mask & 0xf0) {
3453 unsigned cmask = (plane_mask >> 4) & VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
3454 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT_SIV, index + 1,
3455 VGPU10_NAME_CLIP_DISTANCE, cmask);
3456 emit->num_outputs++;
3457 }
3458 }
3459
3460
3461 /**
3462 * Emit the instructions for writing to the clip distance registers
3463 * to handle legacy/automatic clip planes.
3464 * For each clip plane, the distance is the dot product of the vertex
3465 * position (found in TEMP[vpos_tmp_index]) and the clip plane coefficients.
3466 * This is not used when the shader has an explicit CLIPVERTEX or CLIPDISTANCE
3467 * output registers already declared.
3468 */
3469 static void
3470 emit_clip_distance_from_vpos(struct svga_shader_emitter_v10 *emit,
3471 unsigned vpos_tmp_index)
3472 {
3473 unsigned i, num_clip_planes = util_bitcount(emit->key.clip_plane_enable);
3474
3475 assert(emit->clip_mode == CLIP_LEGACY);
3476 assert(num_clip_planes <= 8);
3477
3478 assert(emit->unit == PIPE_SHADER_VERTEX ||
3479 emit->unit == PIPE_SHADER_GEOMETRY);
3480
3481 for (i = 0; i < num_clip_planes; i++) {
3482 struct tgsi_full_dst_register dst;
3483 struct tgsi_full_src_register plane_src, vpos_src;
3484 unsigned reg_index = emit->clip_dist_out_index + i / 4;
3485 unsigned comp = i % 4;
3486 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_X << comp;
3487
3488 /* create dst, src regs */
3489 dst = make_dst_reg(TGSI_FILE_OUTPUT, reg_index);
3490 dst = writemask_dst(&dst, writemask);
3491
3492 plane_src = make_src_const_reg(emit->clip_plane_const[i]);
3493 vpos_src = make_src_temp_reg(vpos_tmp_index);
3494
3495 /* DP4 clip_dist, plane, vpos */
3496 emit_instruction_op2(emit, VGPU10_OPCODE_DP4, &dst,
3497 &plane_src, &vpos_src, FALSE);
3498 }
3499 }
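
/* For illustration: with three user clip planes enabled, the loop above
 * emits roughly
 *   DP4 OUTPUT[clip_dist_out_index].x, CONST[clip_plane_const[0]], TEMP[vpos_tmp_index]
 *   DP4 OUTPUT[clip_dist_out_index].y, CONST[clip_plane_const[1]], TEMP[vpos_tmp_index]
 *   DP4 OUTPUT[clip_dist_out_index].z, CONST[clip_plane_const[2]], TEMP[vpos_tmp_index]
 * with a fifth or later plane spilling into OUTPUT[clip_dist_out_index + 1].
 */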
3500
3501
3502 /**
3503 * Emit the instructions for computing the clip distance results from
3504 * the clip vertex temporary.
3505 * For each clip plane, the distance is the dot product of the clip vertex
3506 * position (found in a temp reg) and the clip plane coefficients.
3507 */
3508 static void
3509 emit_clip_vertex_instructions(struct svga_shader_emitter_v10 *emit)
3510 {
3511 const unsigned num_clip = util_bitcount(emit->key.clip_plane_enable);
3512 unsigned i;
3513 struct tgsi_full_dst_register dst;
3514 struct tgsi_full_src_register clipvert_src;
3515 const unsigned clip_vertex_tmp = emit->clip_vertex_tmp_index;
3516
3517 assert(emit->unit == PIPE_SHADER_VERTEX ||
3518 emit->unit == PIPE_SHADER_GEOMETRY);
3519
3520 assert(emit->clip_mode == CLIP_VERTEX);
3521
3522 clipvert_src = make_src_temp_reg(clip_vertex_tmp);
3523
3524 for (i = 0; i < num_clip; i++) {
3525 struct tgsi_full_src_register plane_src;
3526 unsigned reg_index = emit->clip_dist_out_index + i / 4;
3527 unsigned comp = i % 4;
3528 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_X << comp;
3529
3530 /* create dst, src regs */
3531 dst = make_dst_reg(TGSI_FILE_OUTPUT, reg_index);
3532 dst = writemask_dst(&dst, writemask);
3533
3534 plane_src = make_src_const_reg(emit->clip_plane_const[i]);
3535
3536       /* DP4 clip_dist, plane, clipvert */
3537 emit_instruction_op2(emit, VGPU10_OPCODE_DP4, &dst,
3538 &plane_src, &clipvert_src, FALSE);
3539 }
3540
3541 /* copy temporary clip vertex register to the clip vertex register */
3542
3543 assert(emit->clip_vertex_out_index != INVALID_INDEX);
3544
3545 /**
3546     * Temporarily reset the temporary clip vertex register index so
3547     * that the copy to the clip vertex register will not attempt
3548     * to copy to the temporary register again.
3549 */
3550 emit->clip_vertex_tmp_index = INVALID_INDEX;
3551
3552 /* MOV clip_vertex, clip_vertex_tmp */
3553 dst = make_dst_reg(TGSI_FILE_OUTPUT, emit->clip_vertex_out_index);
3554 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
3555 &dst, &clipvert_src, FALSE);
3556
3557 /**
3558 * set the temporary clip vertex register index back to the
3559 * temporary index for the next vertex
3560 */
3561 emit->clip_vertex_tmp_index = clip_vertex_tmp;
3562 }
3563
3564 /**
3565 * Emit code to convert RGBA to BGRA
3566 */
3567 static void
3568 emit_swap_r_b(struct svga_shader_emitter_v10 *emit,
3569 const struct tgsi_full_dst_register *dst,
3570 const struct tgsi_full_src_register *src)
3571 {
3572 struct tgsi_full_src_register bgra_src =
3573 swizzle_src(src, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X, TGSI_SWIZZLE_W);
3574
3575 begin_emit_instruction(emit);
3576 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
3577 emit_dst_register(emit, dst);
3578 emit_src_register(emit, &bgra_src);
3579 end_emit_instruction(emit);
3580 }
3581
3582
3583 /** Convert from 10_10_10_2_unorm to 10_10_10_2_snorm */
3584 static void
3585 emit_puint_to_snorm(struct svga_shader_emitter_v10 *emit,
3586 const struct tgsi_full_dst_register *dst,
3587 const struct tgsi_full_src_register *src)
3588 {
3589 struct tgsi_full_src_register half = make_immediate_reg_float(emit, 0.5f);
3590 struct tgsi_full_src_register two =
3591 make_immediate_reg_float4(emit, 2.0f, 2.0f, 2.0f, 3.0f);
3592 struct tgsi_full_src_register neg_two =
3593 make_immediate_reg_float4(emit, -2.0f, -2.0f, -2.0f, -1.66666f);
3594
3595 unsigned val_tmp = get_temp_index(emit);
3596 struct tgsi_full_dst_register val_dst = make_dst_temp_reg(val_tmp);
3597 struct tgsi_full_src_register val_src = make_src_temp_reg(val_tmp);
3598
3599 unsigned bias_tmp = get_temp_index(emit);
3600 struct tgsi_full_dst_register bias_dst = make_dst_temp_reg(bias_tmp);
3601 struct tgsi_full_src_register bias_src = make_src_temp_reg(bias_tmp);
3602
3603 /* val = src * 2.0 */
3604 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &val_dst,
3605 src, &two, FALSE);
3606
3607    /* bias = src >= 0.5 */
3608 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &bias_dst,
3609 src, &half, FALSE);
3610
3611 /* bias = bias & -2.0 */
3612 emit_instruction_op2(emit, VGPU10_OPCODE_AND, &bias_dst,
3613 &bias_src, &neg_two, FALSE);
3614
3615 /* dst = val + bias */
3616 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, dst,
3617 &val_src, &bias_src, FALSE);
3618
3619 free_temp_indexes(emit);
3620 }
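
/* Worked example: for src.x = 0.75 (unorm) the sequence above computes
 * val.x = 1.5 and a GE mask of 0xffffffff (0.75 >= 0.5), so the AND
 * selects bias.x = -2.0 and dst.x = 1.5 - 2.0 = -0.5, approximating the
 * snorm reinterpretation of the same 10-bit value.
 */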
3621
3622
3623 /** Convert from 10_10_10_2_unorm to 10_10_10_2_uscaled */
3624 static void
3625 emit_puint_to_uscaled(struct svga_shader_emitter_v10 *emit,
3626 const struct tgsi_full_dst_register *dst,
3627 const struct tgsi_full_src_register *src)
3628 {
3629 struct tgsi_full_src_register scale =
3630 make_immediate_reg_float4(emit, 1023.0f, 1023.0f, 1023.0f, 3.0f);
3631
3632 /* dst = src * scale */
3633 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, dst, src, &scale, FALSE);
3634 }
3635
3636
3637 /** Convert from R32_UINT to 10_10_10_2_sscaled */
3638 static void
3639 emit_puint_to_sscaled(struct svga_shader_emitter_v10 *emit,
3640 const struct tgsi_full_dst_register *dst,
3641 const struct tgsi_full_src_register *src)
3642 {
3643 struct tgsi_full_src_register lshift =
3644 make_immediate_reg_int4(emit, 22, 12, 2, 0);
3645 struct tgsi_full_src_register rshift =
3646 make_immediate_reg_int4(emit, 22, 22, 22, 30);
3647
3648 struct tgsi_full_src_register src_xxxx = scalar_src(src, TGSI_SWIZZLE_X);
3649
3650 unsigned tmp = get_temp_index(emit);
3651 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3652 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3653
3654 /*
3655     * r = (pixel << 22) >> 22;   # signed int in [-512, 511]
3656     * g = (pixel << 12) >> 22;   # signed int in [-512, 511]
3657     * b = (pixel <<  2) >> 22;   # signed int in [-512, 511]
3658     * a = (pixel <<  0) >> 30;   # signed int in [-2, 1]
3659 * dst = i_to_f(r,g,b,a); # convert to float
3660 */
3661 emit_instruction_op2(emit, VGPU10_OPCODE_ISHL, &tmp_dst,
3662 &src_xxxx, &lshift, FALSE);
3663 emit_instruction_op2(emit, VGPU10_OPCODE_ISHR, &tmp_dst,
3664 &tmp_src, &rshift, FALSE);
3665 emit_instruction_op1(emit, VGPU10_OPCODE_ITOF, dst, &tmp_src, FALSE);
3666
3667 free_temp_indexes(emit);
3668 }
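
/* Worked example: for a packed pixel of 0x000003FF in src.x, the shifts
 * above give r = (0x3FF << 22) >> 22 = -1 and g = b = a = 0, so ITOF
 * writes dst = (-1.0, 0.0, 0.0, 0.0).
 */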
3669
3670
3671 /**
3672 * Emit code for TGSI_OPCODE_ARL or TGSI_OPCODE_UARL instruction.
3673 */
3674 static boolean
3675 emit_arl_uarl(struct svga_shader_emitter_v10 *emit,
3676 const struct tgsi_full_instruction *inst)
3677 {
3678 unsigned index = inst->Dst[0].Register.Index;
3679 struct tgsi_full_dst_register dst;
3680 VGPU10_OPCODE_TYPE opcode;
3681
3682 assert(index < MAX_VGPU10_ADDR_REGS);
3683 dst = make_dst_temp_reg(emit->address_reg_index[index]);
3684
3685 /* ARL dst, s0
3686 * Translates into:
3687 * FTOI address_tmp, s0
3688 *
3689 * UARL dst, s0
3690 * Translates into:
3691 * MOV address_tmp, s0
3692 */
3693 if (inst->Instruction.Opcode == TGSI_OPCODE_ARL)
3694 opcode = VGPU10_OPCODE_FTOI;
3695 else
3696 opcode = VGPU10_OPCODE_MOV;
3697
3698 emit_instruction_op1(emit, opcode, &dst, &inst->Src[0], FALSE);
3699
3700 return TRUE;
3701 }
3702
3703
3704 /**
3705 * Emit code for TGSI_OPCODE_CAL instruction.
3706 */
3707 static boolean
3708 emit_cal(struct svga_shader_emitter_v10 *emit,
3709 const struct tgsi_full_instruction *inst)
3710 {
3711 unsigned label = inst->Label.Label;
3712 VGPU10OperandToken0 operand;
3713 operand.value = 0;
3714 operand.operandType = VGPU10_OPERAND_TYPE_LABEL;
3715
3716 begin_emit_instruction(emit);
3717 emit_dword(emit, operand.value);
3718 emit_dword(emit, label);
3719 end_emit_instruction(emit);
3720
3721 return TRUE;
3722 }
3723
3724
3725 /**
3726 * Emit code for TGSI_OPCODE_IABS instruction.
3727 */
3728 static boolean
3729 emit_iabs(struct svga_shader_emitter_v10 *emit,
3730 const struct tgsi_full_instruction *inst)
3731 {
3732 /* dst.x = (src0.x < 0) ? -src0.x : src0.x
3733 * dst.y = (src0.y < 0) ? -src0.y : src0.y
3734 * dst.z = (src0.z < 0) ? -src0.z : src0.z
3735 * dst.w = (src0.w < 0) ? -src0.w : src0.w
3736 *
3737 * Translates into
3738 * IMAX dst, src, neg(src)
3739 */
3740 struct tgsi_full_src_register neg_src = negate_src(&inst->Src[0]);
3741 emit_instruction_op2(emit, VGPU10_OPCODE_IMAX, &inst->Dst[0],
3742 &inst->Src[0], &neg_src, FALSE);
3743
3744 return TRUE;
3745 }
3746
3747
3748 /**
3749 * Emit code for TGSI_OPCODE_CMP instruction.
3750 */
3751 static boolean
3752 emit_cmp(struct svga_shader_emitter_v10 *emit,
3753 const struct tgsi_full_instruction *inst)
3754 {
3755 /* dst.x = (src0.x < 0) ? src1.x : src2.x
3756 * dst.y = (src0.y < 0) ? src1.y : src2.y
3757 * dst.z = (src0.z < 0) ? src1.z : src2.z
3758 * dst.w = (src0.w < 0) ? src1.w : src2.w
3759 *
3760 * Translates into
3761 * LT tmp, src0, 0.0
3762 * MOVC dst, tmp, src1, src2
3763 */
3764 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
3765 unsigned tmp = get_temp_index(emit);
3766 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3767 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3768
3769 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst,
3770 &inst->Src[0], &zero, FALSE);
3771 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0],
3772 &tmp_src, &inst->Src[1], &inst->Src[2],
3773 inst->Instruction.Saturate);
3774
3775 free_temp_indexes(emit);
3776
3777 return TRUE;
3778 }
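
/* Worked example: for src0 = (-1, 2, 0, -0.5) the LT sets
 * tmp = (0xffffffff, 0, 0, 0xffffffff), so the MOVC selects
 * dst = (src1.x, src2.y, src2.z, src1.w).
 */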
3779
3780
3781 /**
3782 * Emit code for TGSI_OPCODE_DST instruction.
3783 */
3784 static boolean
3785 emit_dst(struct svga_shader_emitter_v10 *emit,
3786 const struct tgsi_full_instruction *inst)
3787 {
3788 /*
3789 * dst.x = 1
3790 * dst.y = src0.y * src1.y
3791 * dst.z = src0.z
3792 * dst.w = src1.w
3793 */
3794
3795 struct tgsi_full_src_register s0_yyyy =
3796 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
3797 struct tgsi_full_src_register s0_zzzz =
3798 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Z);
3799 struct tgsi_full_src_register s1_yyyy =
3800 scalar_src(&inst->Src[1], TGSI_SWIZZLE_Y);
3801 struct tgsi_full_src_register s1_wwww =
3802 scalar_src(&inst->Src[1], TGSI_SWIZZLE_W);
3803
3804 /*
3805     * If dst is the same register as either src0 or src1, we need
3806     * to write to a temporary and insert an extra move at the end.
3807 */
3808 unsigned tmp_move = get_temp_index(emit);
3809 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
3810 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
3811
3812 /* MOV dst.x, 1.0 */
3813 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
3814 struct tgsi_full_dst_register dst_x =
3815 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
3816 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
3817
3818 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &one, FALSE);
3819 }
3820
3821 /* MUL dst.y, s0.y, s1.y */
3822 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
3823 struct tgsi_full_dst_register dst_y =
3824 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
3825
3826 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &dst_y, &s0_yyyy,
3827 &s1_yyyy, inst->Instruction.Saturate);
3828 }
3829
3830 /* MOV dst.z, s0.z */
3831 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
3832 struct tgsi_full_dst_register dst_z =
3833 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
3834
3835 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_z, &s0_zzzz,
3836 inst->Instruction.Saturate);
3837 }
3838
3839 /* MOV dst.w, s1.w */
3840 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
3841 struct tgsi_full_dst_register dst_w =
3842 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
3843
3844 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &s1_wwww,
3845 inst->Instruction.Saturate);
3846 }
3847
3848 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
3849 FALSE);
3850 free_temp_indexes(emit);
3851
3852 return TRUE;
3853 }
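
/* For illustration: DST is classically used for distance attenuation.
 * With src0 = (-, d*d, d*d, -) and src1 = (-, 1/d, -, 1/d) the moves
 * above produce dst = (1, d, d*d, 1/d).
 */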
3854
3855
3856
3857 /**
3858 * Emit code for TGSI_OPCODE_ENDPRIM (GS only)
3859 */
3860 static boolean
3861 emit_endprim(struct svga_shader_emitter_v10 *emit,
3862 const struct tgsi_full_instruction *inst)
3863 {
3864 assert(emit->unit == PIPE_SHADER_GEOMETRY);
3865
3866 /* We can't use emit_simple() because the TGSI instruction has one
3867 * operand (vertex stream number) which we must ignore for VGPU10.
3868 */
3869 begin_emit_instruction(emit);
3870 emit_opcode(emit, VGPU10_OPCODE_CUT, FALSE);
3871 end_emit_instruction(emit);
3872 return TRUE;
3873 }
3874
3875
3876 /**
3877 * Emit code for TGSI_OPCODE_EX2 (2^x) instruction.
3878 */
3879 static boolean
3880 emit_ex2(struct svga_shader_emitter_v10 *emit,
3881 const struct tgsi_full_instruction *inst)
3882 {
3883 /* Note that TGSI_OPCODE_EX2 computes only one value from src.x
3884 * while VGPU10 computes four values.
3885 *
3886 * dst = EX2(src):
3887 * dst.xyzw = 2.0 ^ src.x
3888 */
3889
3890 struct tgsi_full_src_register src_xxxx =
3891 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
3892 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
3893
3894    /* EXP dst, s0.xxxx */
3895 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &inst->Dst[0], &src_xxxx,
3896 inst->Instruction.Saturate);
3897
3898 return TRUE;
3899 }
3900
3901
3902 /**
3903 * Emit code for TGSI_OPCODE_EXP instruction.
3904 */
3905 static boolean
3906 emit_exp(struct svga_shader_emitter_v10 *emit,
3907 const struct tgsi_full_instruction *inst)
3908 {
3909 /*
3910 * dst.x = 2 ^ floor(s0.x)
3911 * dst.y = s0.x - floor(s0.x)
3912 * dst.z = 2 ^ s0.x
3913 * dst.w = 1.0
3914 */
3915
3916 struct tgsi_full_src_register src_xxxx =
3917 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
3918 unsigned tmp = get_temp_index(emit);
3919 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3920 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3921
3922 /*
3923     * If dst is the same register as src, we need to write
3924     * to a temporary and insert an extra move at the end.
3925 */
3926 unsigned tmp_move = get_temp_index(emit);
3927 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
3928 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
3929
3930 /* only use X component of temp reg */
3931 tmp_dst = writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
3932 tmp_src = scalar_src(&tmp_src, TGSI_SWIZZLE_X);
3933
3934 /* ROUND_NI tmp.x, s0.x */
3935 emit_instruction_op1(emit, VGPU10_OPCODE_ROUND_NI, &tmp_dst,
3936 &src_xxxx, FALSE); /* round to -infinity */
3937
3938 /* EXP dst.x, tmp.x */
3939 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
3940 struct tgsi_full_dst_register dst_x =
3941 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
3942
3943 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &dst_x, &tmp_src,
3944 inst->Instruction.Saturate);
3945 }
3946
3947 /* ADD dst.y, s0.x, -tmp */
3948 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
3949 struct tgsi_full_dst_register dst_y =
3950 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
3951 struct tgsi_full_src_register neg_tmp_src = negate_src(&tmp_src);
3952
3953 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &dst_y, &src_xxxx,
3954 &neg_tmp_src, inst->Instruction.Saturate);
3955 }
3956
3957 /* EXP dst.z, s0.x */
3958 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
3959 struct tgsi_full_dst_register dst_z =
3960 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
3961
3962 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &dst_z, &src_xxxx,
3963 inst->Instruction.Saturate);
3964 }
3965
3966 /* MOV dst.w, 1.0 */
3967 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
3968 struct tgsi_full_dst_register dst_w =
3969 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
3970 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
3971
3972 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one,
3973 FALSE);
3974 }
3975
3976 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
3977 FALSE);
3978
3979 free_temp_indexes(emit);
3980
3981 return TRUE;
3982 }
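
/* Worked example: for src.x = 2.5 the code above yields
 * dst.x = 2^floor(2.5) = 4.0, dst.y = 2.5 - 2.0 = 0.5,
 * dst.z = 2^2.5 ~= 5.657 and dst.w = 1.0.
 */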
3983
3984
3985 /**
3986 * Emit code for TGSI_OPCODE_IF instruction.
3987 */
3988 static boolean
3989 emit_if(struct svga_shader_emitter_v10 *emit,
3990 const struct tgsi_full_instruction *inst)
3991 {
3992 VGPU10OpcodeToken0 opcode0;
3993
3994 /* The src register should be a scalar */
3995 assert(inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleY &&
3996 inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleZ &&
3997 inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleW);
3998
3999 /* The only special thing here is that we need to set the
4000 * VGPU10_INSTRUCTION_TEST_NONZERO flag since we want to test if
4001 * src.x is non-zero.
4002 */
4003 opcode0.value = 0;
4004 opcode0.opcodeType = VGPU10_OPCODE_IF;
4005 opcode0.testBoolean = VGPU10_INSTRUCTION_TEST_NONZERO;
4006
4007 begin_emit_instruction(emit);
4008 emit_dword(emit, opcode0.value);
4009 emit_src_register(emit, &inst->Src[0]);
4010 end_emit_instruction(emit);
4011
4012 return TRUE;
4013 }
4014
4015
4016 /**
4017 * Emit code for TGSI_OPCODE_KILL_IF instruction (kill fragment if any of
4018 * the register components are negative).
4019 */
4020 static boolean
4021 emit_kill_if(struct svga_shader_emitter_v10 *emit,
4022 const struct tgsi_full_instruction *inst)
4023 {
4024 unsigned tmp = get_temp_index(emit);
4025 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4026 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4027
4028 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4029
4030 struct tgsi_full_dst_register tmp_dst_x =
4031 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4032 struct tgsi_full_src_register tmp_src_xxxx =
4033 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4034
4035 /* tmp = src[0] < 0.0 */
4036 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[0],
4037 &zero, FALSE);
4038
4039 if (!same_swizzle_terms(&inst->Src[0])) {
4040 /* If the swizzle is not XXXX, YYYY, ZZZZ or WWWW we need to
4041 * logically OR the swizzle terms. Most uses of KILL_IF only
4042 * test one channel so it's good to avoid these extra steps.
4043 */
4044 struct tgsi_full_src_register tmp_src_yyyy =
4045 scalar_src(&tmp_src, TGSI_SWIZZLE_Y);
4046 struct tgsi_full_src_register tmp_src_zzzz =
4047 scalar_src(&tmp_src, TGSI_SWIZZLE_Z);
4048 struct tgsi_full_src_register tmp_src_wwww =
4049 scalar_src(&tmp_src, TGSI_SWIZZLE_W);
4050
4051 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
4052 &tmp_src_yyyy, FALSE);
4053 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
4054 &tmp_src_zzzz, FALSE);
4055 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
4056 &tmp_src_wwww, FALSE);
4057 }
4058
4059 begin_emit_instruction(emit);
4060    emit_discard_opcode(emit, TRUE); /* discard if tmp.x is non-zero */
4061 emit_src_register(emit, &tmp_src_xxxx);
4062 end_emit_instruction(emit);
4063
4064 free_temp_indexes(emit);
4065
4066 return TRUE;
4067 }
4068
4069
4070 /**
4071 * Emit code for TGSI_OPCODE_KILL instruction (unconditional discard).
4072 */
4073 static boolean
4074 emit_kill(struct svga_shader_emitter_v10 *emit,
4075 const struct tgsi_full_instruction *inst)
4076 {
4077 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4078
4079    /* DISCARD if 0.0 is zero, i.e. always */
4080 begin_emit_instruction(emit);
4081 emit_discard_opcode(emit, FALSE);
4082 emit_src_register(emit, &zero);
4083 end_emit_instruction(emit);
4084
4085 return TRUE;
4086 }
4087
4088
4089 /**
4090 * Emit code for TGSI_OPCODE_LG2 instruction.
4091 */
4092 static boolean
4093 emit_lg2(struct svga_shader_emitter_v10 *emit,
4094 const struct tgsi_full_instruction *inst)
4095 {
4096 /* Note that TGSI_OPCODE_LG2 computes only one value from src.x
4097 * while VGPU10 computes four values.
4098 *
4099 * dst = LG2(src):
4100 * dst.xyzw = log2(src.x)
4101 */
4102
4103 struct tgsi_full_src_register src_xxxx =
4104 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4105 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4106
4107    /* LOG dst, s0.xxxx */
4108 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &inst->Dst[0], &src_xxxx,
4109 inst->Instruction.Saturate);
4110
4111 return TRUE;
4112 }
4113
4114
4115 /**
4116 * Emit code for TGSI_OPCODE_LIT instruction.
4117 */
4118 static boolean
4119 emit_lit(struct svga_shader_emitter_v10 *emit,
4120 const struct tgsi_full_instruction *inst)
4121 {
4122 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4123
4124 /*
4125     * If dst is the same register as src, we need to write
4126     * to a temporary and insert an extra move at the end.
4127 */
4128 unsigned tmp_move = get_temp_index(emit);
4129 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
4130 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
4131
4132 /*
4133 * dst.x = 1
4134 * dst.y = max(src.x, 0)
4135     * dst.z = (src.x > 0) ? max(src.y, 0)^{clamp(src.w, -128, 128)} : 0
4136 * dst.w = 1
4137 */
4138
4139 /* MOV dst.x, 1.0 */
4140 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
4141 struct tgsi_full_dst_register dst_x =
4142 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
4143 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &one, FALSE);
4144 }
4145
4146 /* MOV dst.w, 1.0 */
4147 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
4148 struct tgsi_full_dst_register dst_w =
4149 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
4150 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one, FALSE);
4151 }
4152
4153 /* MAX dst.y, src.x, 0.0 */
4154 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
4155 struct tgsi_full_dst_register dst_y =
4156 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
4157 struct tgsi_full_src_register zero =
4158 make_immediate_reg_float(emit, 0.0f);
4159 struct tgsi_full_src_register src_xxxx =
4160 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4161 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4162
4163 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &dst_y, &src_xxxx,
4164 &zero, inst->Instruction.Saturate);
4165 }
4166
4167 /*
4168 * tmp1 = clamp(src.w, -128, 128);
4169 * MAX tmp1, src.w, -128
4170 * MIN tmp1, tmp1, 128
4171 *
4172       * tmp2 = max(src.y, 0);
4173 * MAX tmp2, src.y, 0
4174 *
4175 * tmp1 = pow(tmp2, tmp1);
4176 * LOG tmp2, tmp2
4177 * MUL tmp1, tmp2, tmp1
4178 * EXP tmp1, tmp1
4179 *
4180 * tmp1 = (src.w == 0) ? 1 : tmp1;
4181 * EQ tmp2, 0, src.w
4182 * MOVC tmp1, tmp2, 1.0, tmp1
4183 *
4184 * dst.z = (0 < src.x) ? tmp1 : 0;
4185 * LT tmp2, 0, src.x
4186 * MOVC dst.z, tmp2, tmp1, 0.0
4187 */
4188 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
4189 struct tgsi_full_dst_register dst_z =
4190 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
4191
4192 unsigned tmp1 = get_temp_index(emit);
4193 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4194 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4195 unsigned tmp2 = get_temp_index(emit);
4196 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4197 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4198
4199 struct tgsi_full_src_register src_xxxx =
4200 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
4201 struct tgsi_full_src_register src_yyyy =
4202 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
4203 struct tgsi_full_src_register src_wwww =
4204 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
4205
4206 struct tgsi_full_src_register zero =
4207 make_immediate_reg_float(emit, 0.0f);
4208 struct tgsi_full_src_register lowerbound =
4209 make_immediate_reg_float(emit, -128.0f);
4210 struct tgsi_full_src_register upperbound =
4211 make_immediate_reg_float(emit, 128.0f);
4212
4213 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &tmp1_dst, &src_wwww,
4214 &lowerbound, FALSE);
4215 emit_instruction_op2(emit, VGPU10_OPCODE_MIN, &tmp1_dst, &tmp1_src,
4216 &upperbound, FALSE);
4217 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &tmp2_dst, &src_yyyy,
4218 &zero, FALSE);
4219
4220 /* POW tmp1, tmp2, tmp1 */
4221 /* LOG tmp2, tmp2 */
4222 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp2_dst, &tmp2_src,
4223 FALSE);
4224
4225 /* MUL tmp1, tmp2, tmp1 */
4226 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp1_dst, &tmp2_src,
4227 &tmp1_src, FALSE);
4228
4229 /* EXP tmp1, tmp1 */
4230 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &tmp1_dst, &tmp1_src,
4231 FALSE);
4232
4233 /* EQ tmp2, 0, src.w */
4234 emit_instruction_op2(emit, VGPU10_OPCODE_EQ, &tmp2_dst, &zero,
4235 &src_wwww, FALSE);
4236       /* MOVC tmp1, tmp2, 1.0, tmp1 */
4237 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &tmp1_dst,
4238 &tmp2_src, &one, &tmp1_src, FALSE);
4239
4240 /* LT tmp2, 0, src.x */
4241 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp2_dst, &zero,
4242 &src_xxxx, FALSE);
4243 /* MOVC dst.z, tmp2, tmp1, 0.0 */
4244 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &dst_z,
4245 &tmp2_src, &tmp1_src, &zero, FALSE);
4246 }
4247
4248 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
4249 FALSE);
4250 free_temp_indexes(emit);
4251
4252 return TRUE;
4253 }
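
/* Worked example: for src = (0.5, 0.25, 0.0, 4.0), i.e. N.L = 0.5,
 * N.H = 0.25 and shininess = 4, the code above produces
 * dst = (1.0, 0.5, 0.25^4 = 0.00390625, 1.0).
 */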
4254
4255
4256 /**
4257 * Emit Level Of Detail Query (LODQ) instruction.
4258 */
4259 static boolean
4260 emit_lodq(struct svga_shader_emitter_v10 *emit,
4261 const struct tgsi_full_instruction *inst)
4262 {
4263 const uint unit = inst->Src[1].Register.Index;
4264
4265 assert(emit->version >= 41);
4266
4267 /* LOD dst, coord, resource, sampler */
4268 begin_emit_instruction(emit);
4269 emit_opcode(emit, VGPU10_OPCODE_LOD, FALSE);
4270 emit_dst_register(emit, &inst->Dst[0]);
4271 emit_src_register(emit, &inst->Src[0]); /* coord */
4272 emit_resource_register(emit, unit);
4273 emit_sampler_register(emit, unit);
4274 end_emit_instruction(emit);
4275
4276 return TRUE;
4277 }
4278
4279
4280 /**
4281 * Emit code for TGSI_OPCODE_LOG instruction.
4282 */
4283 static boolean
4284 emit_log(struct svga_shader_emitter_v10 *emit,
4285 const struct tgsi_full_instruction *inst)
4286 {
4287 /*
4288 * dst.x = floor(lg2(abs(s0.x)))
4289 * dst.y = abs(s0.x) / (2 ^ floor(lg2(abs(s0.x))))
4290 * dst.z = lg2(abs(s0.x))
4291 * dst.w = 1.0
4292 */
4293
4294 struct tgsi_full_src_register src_xxxx =
4295 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
4296 unsigned tmp = get_temp_index(emit);
4297 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4298 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4299 struct tgsi_full_src_register abs_src_xxxx = absolute_src(&src_xxxx);
4300
4301 /* only use X component of temp reg */
4302 tmp_dst = writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4303 tmp_src = scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4304
4305 /* LOG tmp.x, abs(s0.x) */
4306 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XYZ) {
4307 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp_dst,
4308 &abs_src_xxxx, FALSE);
4309 }
4310
4311 /* MOV dst.z, tmp.x */
4312 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
4313 struct tgsi_full_dst_register dst_z =
4314 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_Z);
4315
4316 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_z,
4317 &tmp_src, inst->Instruction.Saturate);
4318 }
4319
4320 /* FLR tmp.x, tmp.x */
4321 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY) {
4322 emit_instruction_op1(emit, VGPU10_OPCODE_ROUND_NI, &tmp_dst,
4323 &tmp_src, FALSE);
4324 }
4325
4326 /* MOV dst.x, tmp.x */
4327 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
4328 struct tgsi_full_dst_register dst_x =
4329 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_X);
4330
4331 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &tmp_src,
4332 inst->Instruction.Saturate);
4333 }
4334
4335 /* EXP tmp.x, tmp.x */
4336 /* DIV dst.y, abs(s0.x), tmp.x */
4337 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
4338 struct tgsi_full_dst_register dst_y =
4339 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_Y);
4340
4341 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &tmp_dst, &tmp_src,
4342 FALSE);
4343 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &dst_y, &abs_src_xxxx,
4344 &tmp_src, inst->Instruction.Saturate);
4345 }
4346
4347 /* MOV dst.w, 1.0 */
4348 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
4349 struct tgsi_full_dst_register dst_w =
4350 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_W);
4351 struct tgsi_full_src_register one =
4352 make_immediate_reg_float(emit, 1.0f);
4353
4354 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one, FALSE);
4355 }
4356
4357 free_temp_indexes(emit);
4358
4359 return TRUE;
4360 }
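
/* Worked example: for src.x = 10.0 the code above yields
 * dst.x = floor(log2(10)) = 3.0, dst.y = 10.0 / 2^3 = 1.25,
 * dst.z = log2(10) ~= 3.3219 and dst.w = 1.0.
 */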
4361
4362
4363 /**
4364 * Emit code for TGSI_OPCODE_LRP instruction.
4365 */
4366 static boolean
4367 emit_lrp(struct svga_shader_emitter_v10 *emit,
4368 const struct tgsi_full_instruction *inst)
4369 {
4370 /* dst = LRP(s0, s1, s2):
4371 * dst = s0 * (s1 - s2) + s2
4372 * Translates into:
4373    * ADD tmp, s1, -s2;     tmp = s1 - s2
4374    * MAD dst, s0, tmp, s2; dst = s0 * tmp + s2
4375 */
4376 unsigned tmp = get_temp_index(emit);
4377 struct tgsi_full_src_register src_tmp = make_src_temp_reg(tmp);
4378 struct tgsi_full_dst_register dst_tmp = make_dst_temp_reg(tmp);
4379 struct tgsi_full_src_register neg_src2 = negate_src(&inst->Src[2]);
4380
4381 /* ADD tmp, s1, -s2 */
4382 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &dst_tmp,
4383 &inst->Src[1], &neg_src2, FALSE);
4384
4385    /* MAD dst, s0, tmp, s2 */
4386 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &inst->Dst[0],
4387 &inst->Src[0], &src_tmp, &inst->Src[2],
4388 inst->Instruction.Saturate);
4389
4390 free_temp_indexes(emit);
4391
4392 return TRUE;
4393 }
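
/* Worked example: LRP with s0 = 0.25, s1 = 10.0, s2 = 2.0 emits
 * ADD tmp, 10.0, -2.0 (tmp = 8.0) and MAD dst, 0.25, tmp, 2.0,
 * giving dst = 4.0, i.e. 25% of the way from s2 toward s1.
 */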
4394
4395
4396 /**
4397 * Emit code for TGSI_OPCODE_POW instruction.
4398 */
4399 static boolean
4400 emit_pow(struct svga_shader_emitter_v10 *emit,
4401 const struct tgsi_full_instruction *inst)
4402 {
4403 /* Note that TGSI_OPCODE_POW computes only one value from src0.x and
4404 * src1.x while VGPU10 computes four values.
4405 *
4406 * dst = POW(src0, src1):
4407 * dst.xyzw = src0.x ^ src1.x
4408 */
4409 unsigned tmp = get_temp_index(emit);
4410 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4411 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4412 struct tgsi_full_src_register src0_xxxx =
4413 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4414 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4415 struct tgsi_full_src_register src1_xxxx =
4416 swizzle_src(&inst->Src[1], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4417 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4418
4419 /* LOG tmp, s0.xxxx */
4420 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp_dst, &src0_xxxx,
4421 FALSE);
4422
4423 /* MUL tmp, tmp, s1.xxxx */
4424 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst, &tmp_src,
4425 &src1_xxxx, FALSE);
4426
4427    /* EXP dst, tmp */
4428 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &inst->Dst[0],
4429 &tmp_src, inst->Instruction.Saturate);
4430
4431 /* free tmp */
4432 free_temp_indexes(emit);
4433
4434 return TRUE;
4435 }
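
/* Worked example: POW with src0.x = 2.0 and src1.x = 10.0 computes
 * LOG tmp = 1.0, MUL tmp = 10.0 and EXP dst = 2^10 = 1024.0,
 * broadcast to all four channels of dst.
 */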
4436
4437
4438 /**
4439 * Emit code for TGSI_OPCODE_RCP (reciprocal) instruction.
4440 */
4441 static boolean
4442 emit_rcp(struct svga_shader_emitter_v10 *emit,
4443 const struct tgsi_full_instruction *inst)
4444 {
4445 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4446
4447 unsigned tmp = get_temp_index(emit);
4448 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4449 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4450
4451 struct tgsi_full_dst_register tmp_dst_x =
4452 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4453 struct tgsi_full_src_register tmp_src_xxxx =
4454 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4455
4456 /* DIV tmp.x, 1.0, s0 */
4457 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &tmp_dst_x, &one,
4458 &inst->Src[0], FALSE);
4459
4460 /* MOV dst, tmp.xxxx */
4461 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4462 &tmp_src_xxxx, inst->Instruction.Saturate);
4463
4464 free_temp_indexes(emit);
4465
4466 return TRUE;
4467 }
4468
4469
4470 /**
4471 * Emit code for TGSI_OPCODE_RSQ instruction.
4472 */
4473 static boolean
4474 emit_rsq(struct svga_shader_emitter_v10 *emit,
4475 const struct tgsi_full_instruction *inst)
4476 {
4477 /* dst = RSQ(src):
4478 * dst.xyzw = 1 / sqrt(src.x)
4479 * Translates into:
4480 * RSQ tmp, src.x
4481 * MOV dst, tmp.xxxx
4482 */
4483
4484 unsigned tmp = get_temp_index(emit);
4485 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4486 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4487
4488 struct tgsi_full_dst_register tmp_dst_x =
4489 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4490 struct tgsi_full_src_register tmp_src_xxxx =
4491 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4492
4493 /* RSQ tmp, src.x */
4494 emit_instruction_op1(emit, VGPU10_OPCODE_RSQ, &tmp_dst_x,
4495 &inst->Src[0], FALSE);
4496
4497 /* MOV dst, tmp.xxxx */
4498 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4499 &tmp_src_xxxx, inst->Instruction.Saturate);
4500
4501 /* free tmp */
4502 free_temp_indexes(emit);
4503
4504 return TRUE;
4505 }
4506
4507
4508 /**
4509 * Emit code for TGSI_OPCODE_SEQ (Set Equal) instruction.
4510 */
4511 static boolean
4512 emit_seq(struct svga_shader_emitter_v10 *emit,
4513 const struct tgsi_full_instruction *inst)
4514 {
4515 /* dst = SEQ(s0, s1):
4516 * dst = s0 == s1 ? 1.0 : 0.0 (per component)
4517 * Translates into:
4518    *   EQ tmp, s0, s1;           tmp = s0 == s1 ? 0xffffffff : 0 (per comp)
4519 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4520 */
4521 unsigned tmp = get_temp_index(emit);
4522 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4523 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4524 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4525 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4526
4527 /* EQ tmp, s0, s1 */
4528 emit_instruction_op2(emit, VGPU10_OPCODE_EQ, &tmp_dst, &inst->Src[0],
4529 &inst->Src[1], FALSE);
4530
4531 /* MOVC dst, tmp, one, zero */
4532 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4533 &one, &zero, FALSE);
4534
4535 free_temp_indexes(emit);
4536
4537 return TRUE;
4538 }
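
/* Worked example: SEQ with s0 = (1, 2, 3, 4) and s1 = (1, 0, 3, 0)
 * sets tmp = (0xffffffff, 0, 0xffffffff, 0), so the MOVC writes
 * dst = (1.0, 0.0, 1.0, 0.0). SGE/SGT/SLE/SLT/SNE below follow the
 * same compare-then-MOVC pattern.
 */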
4539
4540
4541 /**
4542 * Emit code for TGSI_OPCODE_SGE (Set Greater than or Equal) instruction.
4543 */
4544 static boolean
4545 emit_sge(struct svga_shader_emitter_v10 *emit,
4546 const struct tgsi_full_instruction *inst)
4547 {
4548 /* dst = SGE(s0, s1):
4549 * dst = s0 >= s1 ? 1.0 : 0.0 (per component)
4550 * Translates into:
4551    *   GE tmp, s0, s1;           tmp = s0 >= s1 ? 0xffffffff : 0 (per comp)
4552 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4553 */
4554 unsigned tmp = get_temp_index(emit);
4555 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4556 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4557 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4558 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4559
4560 /* GE tmp, s0, s1 */
4561 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &tmp_dst, &inst->Src[0],
4562 &inst->Src[1], FALSE);
4563
4564 /* MOVC dst, tmp, one, zero */
4565 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4566 &one, &zero, FALSE);
4567
4568 free_temp_indexes(emit);
4569
4570 return TRUE;
4571 }
4572
4573
4574 /**
4575 * Emit code for TGSI_OPCODE_SGT (Set Greater than) instruction.
4576 */
4577 static boolean
4578 emit_sgt(struct svga_shader_emitter_v10 *emit,
4579 const struct tgsi_full_instruction *inst)
4580 {
4581 /* dst = SGT(s0, s1):
4582 * dst = s0 > s1 ? 1.0 : 0.0 (per component)
4583 * Translates into:
4584 * LT tmp, s1, s0; tmp = s1 < s0 ? 0xffffffff : 0 (per comp)
4585 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4586 */
4587 unsigned tmp = get_temp_index(emit);
4588 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4589 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4590 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4591 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4592
4593 /* LT tmp, s1, s0 */
4594 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[1],
4595 &inst->Src[0], FALSE);
4596
4597 /* MOVC dst, tmp, one, zero */
4598 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4599 &one, &zero, FALSE);
4600
4601 free_temp_indexes(emit);
4602
4603 return TRUE;
4604 }
4605
4606
4607 /**
4608 * Emit code for TGSI_OPCODE_SIN and TGSI_OPCODE_COS instructions.
4609 */
4610 static boolean
4611 emit_sincos(struct svga_shader_emitter_v10 *emit,
4612 const struct tgsi_full_instruction *inst)
4613 {
4614 unsigned tmp = get_temp_index(emit);
4615 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4616 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4617
4618 struct tgsi_full_src_register tmp_src_xxxx =
4619 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4620 struct tgsi_full_dst_register tmp_dst_x =
4621 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4622
4623 begin_emit_instruction(emit);
4624 emit_opcode(emit, VGPU10_OPCODE_SINCOS, FALSE);
4625
4626    if (inst->Instruction.Opcode == TGSI_OPCODE_SIN)
4627 {
4628 emit_dst_register(emit, &tmp_dst_x); /* first destination register */
4629 emit_null_dst_register(emit); /* second destination register */
4630 }
4631 else {
4632 emit_null_dst_register(emit);
4633 emit_dst_register(emit, &tmp_dst_x);
4634 }
4635
4636 emit_src_register(emit, &inst->Src[0]);
4637 end_emit_instruction(emit);
4638
4639 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4640 &tmp_src_xxxx, inst->Instruction.Saturate);
4641
4642 free_temp_indexes(emit);
4643
4644 return TRUE;
4645 }
4646
4647
4648 /**
4649 * Emit code for TGSI_OPCODE_SLE (Set Less than or Equal) instruction.
4650 */
4651 static boolean
4652 emit_sle(struct svga_shader_emitter_v10 *emit,
4653 const struct tgsi_full_instruction *inst)
4654 {
4655 /* dst = SLE(s0, s1):
4656 * dst = s0 <= s1 ? 1.0 : 0.0 (per component)
4657 * Translates into:
4658    *   GE tmp, s1, s0;           tmp = s1 >= s0 ? 0xffffffff : 0 (per comp)
4659 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4660 */
4661 unsigned tmp = get_temp_index(emit);
4662 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4663 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4664 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4665 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4666
4667 /* GE tmp, s1, s0 */
4668 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &tmp_dst, &inst->Src[1],
4669 &inst->Src[0], FALSE);
4670
4671 /* MOVC dst, tmp, one, zero */
4672 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4673 &one, &zero, FALSE);
4674
4675 free_temp_indexes(emit);
4676
4677 return TRUE;
4678 }
4679
4680
4681 /**
4682 * Emit code for TGSI_OPCODE_SLT (Set Less than) instruction.
4683 */
4684 static boolean
4685 emit_slt(struct svga_shader_emitter_v10 *emit,
4686 const struct tgsi_full_instruction *inst)
4687 {
4688 /* dst = SLT(s0, s1):
4689 * dst = s0 < s1 ? 1.0 : 0.0 (per component)
4690 * Translates into:
4691 * LT tmp, s0, s1; tmp = s0 < s1 ? 0xffffffff : 0 (per comp)
4692 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4693 */
4694 unsigned tmp = get_temp_index(emit);
4695 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4696 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4697 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4698 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4699
4700 /* LT tmp, s0, s1 */
4701 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[0],
4702 &inst->Src[1], FALSE);
4703
4704 /* MOVC dst, tmp, one, zero */
4705 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4706 &one, &zero, FALSE);
4707
4708 free_temp_indexes(emit);
4709
4710 return TRUE;
4711 }
4712
4713
4714 /**
4715 * Emit code for TGSI_OPCODE_SNE (Set Not Equal) instruction.
4716 */
4717 static boolean
4718 emit_sne(struct svga_shader_emitter_v10 *emit,
4719 const struct tgsi_full_instruction *inst)
4720 {
4721 /* dst = SNE(s0, s1):
4722 * dst = s0 != s1 ? 1.0 : 0.0 (per component)
4723 * Translates into:
4724    *   NE tmp, s0, s1;           tmp = s0 != s1 ? 0xffffffff : 0 (per comp)
4725 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4726 */
4727 unsigned tmp = get_temp_index(emit);
4728 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4729 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4730 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4731 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4732
4733 /* NE tmp, s0, s1 */
4734 emit_instruction_op2(emit, VGPU10_OPCODE_NE, &tmp_dst, &inst->Src[0],
4735 &inst->Src[1], FALSE);
4736
4737 /* MOVC dst, tmp, one, zero */
4738 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4739 &one, &zero, FALSE);
4740
4741 free_temp_indexes(emit);
4742
4743 return TRUE;
4744 }
4745
4746
4747 /**
4748 * Emit code for TGSI_OPCODE_SSG (Set Sign) instruction.
4749 */
4750 static boolean
4751 emit_ssg(struct svga_shader_emitter_v10 *emit,
4752 const struct tgsi_full_instruction *inst)
4753 {
4754 /* dst.x = (src.x > 0.0) ? 1.0 : (src.x < 0.0) ? -1.0 : 0.0
4755 * dst.y = (src.y > 0.0) ? 1.0 : (src.y < 0.0) ? -1.0 : 0.0
4756 * dst.z = (src.z > 0.0) ? 1.0 : (src.z < 0.0) ? -1.0 : 0.0
4757 * dst.w = (src.w > 0.0) ? 1.0 : (src.w < 0.0) ? -1.0 : 0.0
4758 * Translates into:
4759 * LT tmp1, src, zero; tmp1 = src < zero ? 0xffffffff : 0 (per comp)
4760 * MOVC tmp2, tmp1, -1.0, 0.0; tmp2 = tmp1 ? -1.0 : 0.0 (per component)
4761 * LT tmp1, zero, src; tmp1 = zero < src ? 0xffffffff : 0 (per comp)
4762 * MOVC dst, tmp1, 1.0, tmp2; dst = tmp1 ? 1.0 : tmp2 (per component)
4763 */
4764 struct tgsi_full_src_register zero =
4765 make_immediate_reg_float(emit, 0.0f);
4766 struct tgsi_full_src_register one =
4767 make_immediate_reg_float(emit, 1.0f);
4768 struct tgsi_full_src_register neg_one =
4769 make_immediate_reg_float(emit, -1.0f);
4770
4771 unsigned tmp1 = get_temp_index(emit);
4772 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4773 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4774
4775 unsigned tmp2 = get_temp_index(emit);
4776 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4777 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4778
4779 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp1_dst, &inst->Src[0],
4780 &zero, FALSE);
4781 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &tmp2_dst, &tmp1_src,
4782 &neg_one, &zero, FALSE);
4783 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp1_dst, &zero,
4784 &inst->Src[0], FALSE);
4785 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp1_src,
4786 &one, &tmp2_src, FALSE);
4787
4788 free_temp_indexes(emit);
4789
4790 return TRUE;
4791 }
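
/* Worked example: SSG with src = (-2.0, 0.0, 3.0, 0.5) produces
 * dst = (-1.0, 0.0, 1.0, 1.0): the first LT/MOVC pair selects -1.0 for
 * negative components and the second pair overrides with 1.0 where
 * 0 < src.
 */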
4792
4793
4794 /**
4795 * Emit code for TGSI_OPCODE_ISSG (Integer Set Sign) instruction.
4796 */
4797 static boolean
4798 emit_issg(struct svga_shader_emitter_v10 *emit,
4799 const struct tgsi_full_instruction *inst)
4800 {
4801 /* dst.x = (src.x > 0) ? 1 : (src.x < 0) ? -1 : 0
4802 * dst.y = (src.y > 0) ? 1 : (src.y < 0) ? -1 : 0
4803 * dst.z = (src.z > 0) ? 1 : (src.z < 0) ? -1 : 0
4804 * dst.w = (src.w > 0) ? 1 : (src.w < 0) ? -1 : 0
4805 * Translates into:
4806 * ILT tmp1, src, 0 tmp1 = src < 0 ? -1 : 0 (per component)
4807 * ILT tmp2, 0, src tmp2 = 0 < src ? -1 : 0 (per component)
4808 * IADD dst, tmp1, neg(tmp2) dst = tmp1 - tmp2 (per component)
4809 */
4810 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4811
4812 unsigned tmp1 = get_temp_index(emit);
4813 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4814 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4815
4816 unsigned tmp2 = get_temp_index(emit);
4817 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4818 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4819
4820 struct tgsi_full_src_register neg_tmp2 = negate_src(&tmp2_src);
4821
4822 emit_instruction_op2(emit, VGPU10_OPCODE_ILT, &tmp1_dst,
4823 &inst->Src[0], &zero, FALSE);
4824 emit_instruction_op2(emit, VGPU10_OPCODE_ILT, &tmp2_dst,
4825 &zero, &inst->Src[0], FALSE);
4826 emit_instruction_op2(emit, VGPU10_OPCODE_IADD, &inst->Dst[0],
4827 &tmp1_src, &neg_tmp2, FALSE);
4828
4829 free_temp_indexes(emit);
4830
4831 return TRUE;
4832 }
4833
4834
4835 /**
4836 * Emit a comparison instruction. The dest register will get
4837 * 0 or ~0 values depending on the outcome of comparing src0 to src1.
4838 */
4839 static void
4840 emit_comparison(struct svga_shader_emitter_v10 *emit,
4841 SVGA3dCmpFunc func,
4842 const struct tgsi_full_dst_register *dst,
4843 const struct tgsi_full_src_register *src0,
4844 const struct tgsi_full_src_register *src1)
4845 {
4846 struct tgsi_full_src_register immediate;
4847 VGPU10OpcodeToken0 opcode0;
4848 boolean swapSrc = FALSE;
4849
4850 /* Sanity checks for svga vs. gallium enums */
4851 STATIC_ASSERT(SVGA3D_CMP_LESS == (PIPE_FUNC_LESS + 1));
4852 STATIC_ASSERT(SVGA3D_CMP_GREATEREQUAL == (PIPE_FUNC_GEQUAL + 1));
4853
4854 opcode0.value = 0;
4855
4856 switch (func) {
4857 case SVGA3D_CMP_NEVER:
4858 immediate = make_immediate_reg_int(emit, 0);
4859 /* MOV dst, {0} */
4860 begin_emit_instruction(emit);
4861 emit_dword(emit, VGPU10_OPCODE_MOV);
4862 emit_dst_register(emit, dst);
4863 emit_src_register(emit, &immediate);
4864 end_emit_instruction(emit);
4865 return;
4866 case SVGA3D_CMP_ALWAYS:
4867 immediate = make_immediate_reg_int(emit, -1);
4868 /* MOV dst, {-1} */
4869 begin_emit_instruction(emit);
4870 emit_dword(emit, VGPU10_OPCODE_MOV);
4871 emit_dst_register(emit, dst);
4872 emit_src_register(emit, &immediate);
4873 end_emit_instruction(emit);
4874 return;
4875 case SVGA3D_CMP_LESS:
4876 opcode0.opcodeType = VGPU10_OPCODE_LT;
4877 break;
4878 case SVGA3D_CMP_EQUAL:
4879 opcode0.opcodeType = VGPU10_OPCODE_EQ;
4880 break;
4881 case SVGA3D_CMP_LESSEQUAL:
4882 opcode0.opcodeType = VGPU10_OPCODE_GE;
4883 swapSrc = TRUE;
4884 break;
4885 case SVGA3D_CMP_GREATER:
4886 opcode0.opcodeType = VGPU10_OPCODE_LT;
4887 swapSrc = TRUE;
4888 break;
4889 case SVGA3D_CMP_NOTEQUAL:
4890 opcode0.opcodeType = VGPU10_OPCODE_NE;
4891 break;
4892 case SVGA3D_CMP_GREATEREQUAL:
4893 opcode0.opcodeType = VGPU10_OPCODE_GE;
4894 break;
4895 default:
4896 assert(!"Unexpected comparison mode");
4897 opcode0.opcodeType = VGPU10_OPCODE_EQ;
4898 }
4899
4900 begin_emit_instruction(emit);
4901 emit_dword(emit, opcode0.value);
4902 emit_dst_register(emit, dst);
4903 if (swapSrc) {
4904 emit_src_register(emit, src1);
4905 emit_src_register(emit, src0);
4906 }
4907 else {
4908 emit_src_register(emit, src0);
4909 emit_src_register(emit, src1);
4910 }
4911 end_emit_instruction(emit);
4912 }
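
/* For illustration: emit_comparison(emit, SVGA3D_CMP_LESSEQUAL, dst, s0, s1)
 * emits "GE dst, s1, s0" since there is no LE opcode, and
 * SVGA3D_CMP_GREATER likewise becomes "LT dst, s1, s0" with swapped sources.
 */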
4913
4914
4915 /**
4916 * Get texel/address offsets for a texture instruction.
4917 */
4918 static void
4919 get_texel_offsets(const struct svga_shader_emitter_v10 *emit,
4920 const struct tgsi_full_instruction *inst, int offsets[3])
4921 {
4922 if (inst->Texture.NumOffsets == 1) {
4923    /* According to the OpenGL Shading Language spec, the offsets are only
4924 * fetched from a previously-declared immediate/literal.
4925 */
4926 const struct tgsi_texture_offset *off = inst->TexOffsets;
4927 const unsigned index = off[0].Index;
4928 const unsigned swizzleX = off[0].SwizzleX;
4929 const unsigned swizzleY = off[0].SwizzleY;
4930 const unsigned swizzleZ = off[0].SwizzleZ;
4931 const union tgsi_immediate_data *imm = emit->immediates[index];
4932
4933 assert(inst->TexOffsets[0].File == TGSI_FILE_IMMEDIATE);
4934
4935 offsets[0] = imm[swizzleX].Int;
4936 offsets[1] = imm[swizzleY].Int;
4937 offsets[2] = imm[swizzleZ].Int;
4938 }
4939 else {
4940 offsets[0] = offsets[1] = offsets[2] = 0;
4941 }
4942 }
4943
4944
4945 /**
4946 * Set up the coordinate register for texture sampling.
4947 * When we're sampling from a RECT texture we have to scale the
4948 * unnormalized coordinate to a normalized coordinate.
4949 * We do that by multiplying the coordinate by an "extra" constant.
4950 * An alternative would be to use the RESINFO instruction to query the
4951 * texture's size.
4952 */
4953 static struct tgsi_full_src_register
4954 setup_texcoord(struct svga_shader_emitter_v10 *emit,
4955 unsigned unit,
4956 const struct tgsi_full_src_register *coord)
4957 {
4958 if (emit->key.tex[unit].unnormalized) {
4959 unsigned scale_index = emit->texcoord_scale_index[unit];
4960 unsigned tmp = get_temp_index(emit);
4961 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4962 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4963 struct tgsi_full_src_register scale_src = make_src_const_reg(scale_index);
4964
4965 if (emit->key.tex[unit].texel_bias) {
4966       /* To fix a texture coordinate rounding issue, a 0.0001 offset is
4967        * added. This fixes the piglit test fbo-blit-scaled-linear. */
4968 struct tgsi_full_src_register offset =
4969 make_immediate_reg_float(emit, 0.0001f);
4970
4971 /* ADD tmp, coord, offset */
4972 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp_dst,
4973 coord, &offset, FALSE);
4974 /* MUL tmp, tmp, scale */
4975 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst,
4976 &tmp_src, &scale_src, FALSE);
4977 }
4978 else {
4979 /* MUL tmp, coord, const[] */
4980 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst,
4981 coord, &scale_src, FALSE);
4982 }
4983 return tmp_src;
4984 }
4985 else {
4986 /* use texcoord as-is */
4987 return *coord;
4988 }
4989 }
4990
4991
4992 /**
4993 * For SAMPLE_C instructions, emit the extra src register which indicates
4994  * the reference/comparison value.
4995 */
4996 static void
4997 emit_tex_compare_refcoord(struct svga_shader_emitter_v10 *emit,
4998 enum tgsi_texture_type target,
4999 const struct tgsi_full_src_register *coord)
5000 {
5001 struct tgsi_full_src_register coord_src_ref;
5002 int component;
5003
5004 assert(tgsi_is_shadow_target(target));
5005
5006 component = tgsi_util_get_shadow_ref_src_index(target) % 4;
5007 assert(component >= 0);
5008
5009 coord_src_ref = scalar_src(coord, component);
5010
5011 emit_src_register(emit, &coord_src_ref);
5012 }
5013
5014
5015 /**
5016 * Info for implementing texture swizzles.
5017 * The begin_tex_swizzle(), get_tex_swizzle_dst() and end_tex_swizzle()
5018 * functions use this to encapsulate the extra steps needed to perform
5019 * a texture swizzle, or shadow/depth comparisons.
5020  * The shadow/depth comparison is only done here for the cases where
5021 * there's no VGPU10 opcode (like texture bias lookup w/ shadow compare).
5022 */
5023 struct tex_swizzle_info
5024 {
5025 boolean swizzled;
5026 boolean shadow_compare;
5027 unsigned unit;
5028 enum tgsi_texture_type texture_target; /**< TGSI_TEXTURE_x */
5029 struct tgsi_full_src_register tmp_src;
5030 struct tgsi_full_dst_register tmp_dst;
5031 const struct tgsi_full_dst_register *inst_dst;
5032 const struct tgsi_full_src_register *coord_src;
5033 };
5034
5035
5036 /**
5037 * Do setup for handling texture swizzles or shadow compares.
5038 * \param unit the texture unit
5039 * \param inst the TGSI texture instruction
5040 * \param shadow_compare do shadow/depth comparison?
5041 * \param swz returns the swizzle info
5042 */
5043 static void
5044 begin_tex_swizzle(struct svga_shader_emitter_v10 *emit,
5045 unsigned unit,
5046 const struct tgsi_full_instruction *inst,
5047 boolean shadow_compare,
5048 struct tex_swizzle_info *swz)
5049 {
5050 swz->swizzled = (emit->key.tex[unit].swizzle_r != TGSI_SWIZZLE_X ||
5051 emit->key.tex[unit].swizzle_g != TGSI_SWIZZLE_Y ||
5052 emit->key.tex[unit].swizzle_b != TGSI_SWIZZLE_Z ||
5053 emit->key.tex[unit].swizzle_a != TGSI_SWIZZLE_W);
5054
5055 swz->shadow_compare = shadow_compare;
5056 swz->texture_target = inst->Texture.Texture;
5057
5058 if (swz->swizzled || shadow_compare) {
5059 /* Allocate temp register for the result of the SAMPLE instruction
5060 * and the source of the MOV/compare/swizzle instructions.
5061 */
5062 unsigned tmp = get_temp_index(emit);
5063 swz->tmp_src = make_src_temp_reg(tmp);
5064 swz->tmp_dst = make_dst_temp_reg(tmp);
5065
5066 swz->unit = unit;
5067 }
5068 swz->inst_dst = &inst->Dst[0];
5069 swz->coord_src = &inst->Src[0];
5070
5071 emit->fs.shadow_compare_units |= shadow_compare << unit;
5072 }
5073
5074
5075 /**
5076 * Returns the register to put the SAMPLE instruction results into.
5077 * This will either be the original instruction dst reg (if no swizzle
5078  * and no shadow comparison) or a temporary reg otherwise.
5079 */
5080 static const struct tgsi_full_dst_register *
5081 get_tex_swizzle_dst(const struct tex_swizzle_info *swz)
5082 {
5083 return (swz->swizzled || swz->shadow_compare)
5084 ? &swz->tmp_dst : swz->inst_dst;
5085 }
5086
5087
5088 /**
5089  * This emits the instructions that actually implement a texture swizzle
5090 * and/or shadow comparison.
5091 */
5092 static void
5093 end_tex_swizzle(struct svga_shader_emitter_v10 *emit,
5094 const struct tex_swizzle_info *swz)
5095 {
5096 if (swz->shadow_compare) {
5097 /* Emit extra instructions to compare the fetched texel value against
5098 * a texture coordinate component. The result of the comparison
5099 * is 0.0 or 1.0.
5100 */
5101 struct tgsi_full_src_register coord_src;
5102 struct tgsi_full_src_register texel_src =
5103 scalar_src(&swz->tmp_src, TGSI_SWIZZLE_X);
5104 struct tgsi_full_src_register one =
5105 make_immediate_reg_float(emit, 1.0f);
5106 /* convert gallium comparison func to SVGA comparison func */
5107 SVGA3dCmpFunc compare_func = emit->key.tex[swz->unit].compare_func + 1;
5108
5109 assert(emit->unit == PIPE_SHADER_FRAGMENT);
5110
5111 int component =
5112 tgsi_util_get_shadow_ref_src_index(swz->texture_target) % 4;
5113 assert(component >= 0);
5114 coord_src = scalar_src(swz->coord_src, component);
5115
5116 /* COMPARE tmp, coord, texel */
5117 emit_comparison(emit, compare_func,
5118 &swz->tmp_dst, &coord_src, &texel_src);
5119
5120 /* AND dest, tmp, {1.0} */
5121 begin_emit_instruction(emit);
5122 emit_opcode(emit, VGPU10_OPCODE_AND, FALSE);
5123 if (swz->swizzled) {
5124 emit_dst_register(emit, &swz->tmp_dst);
5125 }
5126 else {
5127 emit_dst_register(emit, swz->inst_dst);
5128 }
5129 emit_src_register(emit, &swz->tmp_src);
5130 emit_src_register(emit, &one);
5131 end_emit_instruction(emit);
5132 }
5133
5134 if (swz->swizzled) {
5135 unsigned swz_r = emit->key.tex[swz->unit].swizzle_r;
5136 unsigned swz_g = emit->key.tex[swz->unit].swizzle_g;
5137 unsigned swz_b = emit->key.tex[swz->unit].swizzle_b;
5138 unsigned swz_a = emit->key.tex[swz->unit].swizzle_a;
5139 unsigned writemask_0 = 0, writemask_1 = 0;
5140 boolean int_tex = is_integer_type(emit->sampler_return_type[swz->unit]);
5141
5142 /* Swizzle w/out zero/one terms */
5143 struct tgsi_full_src_register src_swizzled =
5144 swizzle_src(&swz->tmp_src,
5145 swz_r < PIPE_SWIZZLE_0 ? swz_r : PIPE_SWIZZLE_X,
5146 swz_g < PIPE_SWIZZLE_0 ? swz_g : PIPE_SWIZZLE_Y,
5147 swz_b < PIPE_SWIZZLE_0 ? swz_b : PIPE_SWIZZLE_Z,
5148 swz_a < PIPE_SWIZZLE_0 ? swz_a : PIPE_SWIZZLE_W);
5149
5150 /* MOV dst, color(tmp).<swizzle> */
5151 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
5152 swz->inst_dst, &src_swizzled, FALSE);
5153
5154 /* handle swizzle zero terms */
5155 writemask_0 = (((swz_r == PIPE_SWIZZLE_0) << 0) |
5156 ((swz_g == PIPE_SWIZZLE_0) << 1) |
5157 ((swz_b == PIPE_SWIZZLE_0) << 2) |
5158 ((swz_a == PIPE_SWIZZLE_0) << 3));
5159 writemask_0 &= swz->inst_dst->Register.WriteMask;
5160
5161 if (writemask_0) {
5162 struct tgsi_full_src_register zero = int_tex ?
5163 make_immediate_reg_int(emit, 0) :
5164 make_immediate_reg_float(emit, 0.0f);
5165 struct tgsi_full_dst_register dst =
5166 writemask_dst(swz->inst_dst, writemask_0);
5167
5168 /* MOV dst.writemask_0, {0,0,0,0} */
5169 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
5170 &dst, &zero, FALSE);
5171 }
5172
5173 /* handle swizzle one terms */
5174 writemask_1 = (((swz_r == PIPE_SWIZZLE_1) << 0) |
5175 ((swz_g == PIPE_SWIZZLE_1) << 1) |
5176 ((swz_b == PIPE_SWIZZLE_1) << 2) |
5177 ((swz_a == PIPE_SWIZZLE_1) << 3));
5178 writemask_1 &= swz->inst_dst->Register.WriteMask;
5179
5180 if (writemask_1) {
5181 struct tgsi_full_src_register one = int_tex ?
5182 make_immediate_reg_int(emit, 1) :
5183 make_immediate_reg_float(emit, 1.0f);
5184 struct tgsi_full_dst_register dst =
5185 writemask_dst(swz->inst_dst, writemask_1);
5186
5187 /* MOV dst.writemask_1, {1,1,1,1} */
5188 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst, &one, FALSE);
5189 }
5190 }
5191 }
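
/* For illustration: with a swizzle of (Z, Y, X, ONE) on a float texture,
 * the code above emits MOV dst, tmp.zyxw followed by MOV dst.w, 1.0;
 * zero/one swizzle terms are always patched with separate MOVs of
 * immediate 0 or 1.
 */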
5192
5193
5194 /**
5195 * Emit code for TGSI_OPCODE_SAMPLE instruction.
5196 */
5197 static boolean
5198 emit_sample(struct svga_shader_emitter_v10 *emit,
5199 const struct tgsi_full_instruction *inst)
5200 {
5201 const unsigned resource_unit = inst->Src[1].Register.Index;
5202 const unsigned sampler_unit = inst->Src[2].Register.Index;
5203 struct tgsi_full_src_register coord;
5204 int offsets[3];
5205 struct tex_swizzle_info swz_info;
5206
5207 begin_tex_swizzle(emit, sampler_unit, inst, FALSE, &swz_info);
5208
5209 get_texel_offsets(emit, inst, offsets);
5210
5211 coord = setup_texcoord(emit, resource_unit, &inst->Src[0]);
5212
5213 /* SAMPLE dst, coord(s0), resource, sampler */
5214 begin_emit_instruction(emit);
5215
5216 /* NOTE: for non-fragment shaders, we should use VGPU10_OPCODE_SAMPLE_L
5217 * with LOD=0. But our virtual GPU accepts this as-is.
5218 */
5219 emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE,
5220 inst->Instruction.Saturate, offsets);
5221 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5222 emit_src_register(emit, &coord);
5223 emit_resource_register(emit, resource_unit);
5224 emit_sampler_register(emit, sampler_unit);
5225 end_emit_instruction(emit);
5226
5227 end_tex_swizzle(emit, &swz_info);
5228
5229 free_temp_indexes(emit);
5230
5231 return TRUE;
5232 }
5233
5234
5235 /**
5236 * Check if a texture instruction is valid.
5237 * An example of an invalid texture instruction is doing shadow comparison
5238 * with an integer-valued texture.
5239 * If we detect an invalid texture instruction, we replace it with:
5240 * MOV dst, {1,1,1,1};
5241 * \return TRUE if valid, FALSE if invalid.
5242 */
5243 static boolean
5244 is_valid_tex_instruction(struct svga_shader_emitter_v10 *emit,
5245 const struct tgsi_full_instruction *inst)
5246 {
5247 const unsigned unit = inst->Src[1].Register.Index;
5248 const enum tgsi_texture_type target = inst->Texture.Texture;
5249 boolean valid = TRUE;
5250
5251 if (tgsi_is_shadow_target(target) &&
5252 is_integer_type(emit->sampler_return_type[unit])) {
5253 debug_printf("Invalid SAMPLE_C with an integer texture!\n");
5254 valid = FALSE;
5255 }
5256 /* XXX might check for other conditions in the future here */
5257
5258 if (!valid) {
5259 /* emit a MOV dst, {1,1,1,1} instruction. */
5260 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
5261 begin_emit_instruction(emit);
5262 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
5263 emit_dst_register(emit, &inst->Dst[0]);
5264 emit_src_register(emit, &one);
5265 end_emit_instruction(emit);
5266 }
5267
5268 return valid;
5269 }
5270
5271
5272 /**
5273 * Emit code for TGSI_OPCODE_TEX (simple texture lookup)
5274 */
5275 static boolean
5276 emit_tex(struct svga_shader_emitter_v10 *emit,
5277 const struct tgsi_full_instruction *inst)
5278 {
5279 const uint unit = inst->Src[1].Register.Index;
5280 const enum tgsi_texture_type target = inst->Texture.Texture;
5281 VGPU10_OPCODE_TYPE opcode;
5282 struct tgsi_full_src_register coord;
5283 int offsets[3];
5284 struct tex_swizzle_info swz_info;
5285
5286 /* check that the sampler returns a float */
5287 if (!is_valid_tex_instruction(emit, inst))
5288 return TRUE;
5289
5290 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5291
5292 get_texel_offsets(emit, inst, offsets);
5293
5294 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5295
5296 /* SAMPLE dst, coord(s0), resource, sampler */
5297 begin_emit_instruction(emit);
5298
5299 if (tgsi_is_shadow_target(target))
5300 opcode = VGPU10_OPCODE_SAMPLE_C;
5301 else
5302 opcode = VGPU10_OPCODE_SAMPLE;
5303
5304 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5305 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5306 emit_src_register(emit, &coord);
5307 emit_resource_register(emit, unit);
5308 emit_sampler_register(emit, unit);
5309 if (opcode == VGPU10_OPCODE_SAMPLE_C) {
5310 emit_tex_compare_refcoord(emit, target, &coord);
5311 }
5312 end_emit_instruction(emit);
5313
5314 end_tex_swizzle(emit, &swz_info);
5315
5316 free_temp_indexes(emit);
5317
5318 return TRUE;
5319 }
5320
5321 /**
5322 * Emit code for TGSI_OPCODE_TG4 (texture lookup for texture gather)
5323 */
5324 static boolean
5325 emit_tg4(struct svga_shader_emitter_v10 *emit,
5326 const struct tgsi_full_instruction *inst)
5327 {
5328 const uint unit = inst->Src[2].Register.Index;
5329 unsigned target = inst->Texture.Texture;
5330 struct tgsi_full_src_register coord;
5331 int offsets[3];
5332 struct tex_swizzle_info swz_info;
5333
5334 /* check that the sampler returns a float */
5335 if (!is_valid_tex_instruction(emit, inst))
5336 return TRUE;
5337
5338 if (target == TGSI_TEXTURE_CUBE_ARRAY) {
5339 debug_printf("TGSI_TEXTURE_CUBE_ARRAY is not supported\n");
5340 return TRUE;
5341 }
5342
5343 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5344
5345 get_texel_offsets(emit, inst, offsets);
5346
5347 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5348
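/* NOTE: VGPU10's GATHER4 always fetches the resource's red/X component,
 * so the TGSI component-select operand (Src[1]) is not used here.
 */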
5349 /* Gather dst, coord, resource, sampler */
5350 begin_emit_instruction(emit);
5351 emit_sample_opcode(emit, VGPU10_OPCODE_GATHER4,
5352 inst->Instruction.Saturate, offsets);
5353 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5354 emit_src_register(emit, &coord);
5355 emit_resource_register(emit, unit);
5356 emit_sampler_register(emit, unit);
5357 end_emit_instruction(emit);
5358
5359 end_tex_swizzle(emit, &swz_info);
5360
5361 free_temp_indexes(emit);
5362
5363 return TRUE;
5364 }
5365
5366
5367
5368 /**
5369 * Emit code for TGSI_OPCODE_TEX2 (texture lookup for shadow cube map arrays)
5370 */
5371 static boolean
5372 emit_tex2(struct svga_shader_emitter_v10 *emit,
5373 const struct tgsi_full_instruction *inst)
5374 {
5375 const uint unit = inst->Src[2].Register.Index;
5376 unsigned target = inst->Texture.Texture;
5377 struct tgsi_full_src_register coord, ref;
5378 int offsets[3];
5379 struct tex_swizzle_info swz_info;
5380
5381 /* check that the sampler returns a float */
5382 if (!is_valid_tex_instruction(emit, inst))
5383 return TRUE;
5384
5385 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5386
5387 get_texel_offsets(emit, inst, offsets);
5388
5389 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5390 ref = scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
5391
5392 /* SAMPLE_C dst, coord, resource, sampler, ref */
5393 begin_emit_instruction(emit);
5394 emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE_C,
5395 inst->Instruction.Saturate, offsets);
5396 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5397 emit_src_register(emit, &coord);
5398 emit_resource_register(emit, unit);
5399 emit_sampler_register(emit, unit);
5400 emit_tex_compare_refcoord(emit, target, &ref);
5401 end_emit_instruction(emit);
5402
5403 end_tex_swizzle(emit, &swz_info);
5404
5405 free_temp_indexes(emit);
5406
5407 return TRUE;
5408 }
5409
5410
5411 /**
5412 * Emit code for TGSI_OPCODE_TXP (projective texture)
5413 */
5414 static boolean
5415 emit_txp(struct svga_shader_emitter_v10 *emit,
5416 const struct tgsi_full_instruction *inst)
5417 {
5418 const uint unit = inst->Src[1].Register.Index;
5419 const enum tgsi_texture_type target = inst->Texture.Texture;
5420 VGPU10_OPCODE_TYPE opcode;
5421 int offsets[3];
5422 unsigned tmp = get_temp_index(emit);
5423 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
5424 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
5425 struct tgsi_full_src_register src0_wwww =
5426 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5427 struct tgsi_full_src_register coord;
5428 struct tex_swizzle_info swz_info;
5429
5430 /* check that the sampler returns a float */
5431 if (!is_valid_tex_instruction(emit, inst))
5432 return TRUE;
5433
5434 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5435
5436 get_texel_offsets(emit, inst, offsets);
5437
5438 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5439
5440 /* DIV tmp, coord, coord.wwww */
5441 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &tmp_dst,
5442 &coord, &src0_wwww, FALSE);
5443
5444 /* SAMPLE dst, coord(tmp), resource, sampler */
5445 begin_emit_instruction(emit);
5446
5447 if (tgsi_is_shadow_target(target))
5448 /* NOTE: for non-fragment shaders, we should use
5449 * VGPU10_OPCODE_SAMPLE_C_LZ, but our virtual GPU accepts this as-is.
5450 */
5451 opcode = VGPU10_OPCODE_SAMPLE_C;
5452 else
5453 opcode = VGPU10_OPCODE_SAMPLE;
5454
5455 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5456 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5457 emit_src_register(emit, &tmp_src); /* projected coord */
5458 emit_resource_register(emit, unit);
5459 emit_sampler_register(emit, unit);
5460 if (opcode == VGPU10_OPCODE_SAMPLE_C) {
5461 emit_tex_compare_refcoord(emit, target, &tmp_src);
5462 }
5463 end_emit_instruction(emit);
5464
5465 end_tex_swizzle(emit, &swz_info);
5466
5467 free_temp_indexes(emit);
5468
5469 return TRUE;
5470 }
5471
5472
5473 /**
5474 * Emit code for TGSI_OPCODE_TXD (explicit derivatives)
5475 */
5476 static boolean
5477 emit_txd(struct svga_shader_emitter_v10 *emit,
5478 const struct tgsi_full_instruction *inst)
5479 {
5480 const uint unit = inst->Src[3].Register.Index;
5481 const enum tgsi_texture_type target = inst->Texture.Texture;
5482 int offsets[3];
5483 struct tgsi_full_src_register coord;
5484 struct tex_swizzle_info swz_info;
5485
5486 begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
5487 &swz_info);
5488
5489 get_texel_offsets(emit, inst, offsets);
5490
5491 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5492
5493 /* SAMPLE_D dst, coord(s0), resource, sampler, Xderiv(s1), Yderiv(s2) */
5494 begin_emit_instruction(emit);
5495 emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE_D,
5496 inst->Instruction.Saturate, offsets);
5497 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5498 emit_src_register(emit, &coord);
5499 emit_resource_register(emit, unit);
5500 emit_sampler_register(emit, unit);
5501 emit_src_register(emit, &inst->Src[1]); /* Xderiv */
5502 emit_src_register(emit, &inst->Src[2]); /* Yderiv */
5503 end_emit_instruction(emit);
5504
5505 end_tex_swizzle(emit, &swz_info);
5506
5507 free_temp_indexes(emit);
5508
5509 return TRUE;
5510 }
5511
5512
5513 /**
5514 * Emit code for TGSI_OPCODE_TXF (texel fetch)
5515 */
5516 static boolean
5517 emit_txf(struct svga_shader_emitter_v10 *emit,
5518 const struct tgsi_full_instruction *inst)
5519 {
5520 const uint unit = inst->Src[1].Register.Index;
5521 const boolean msaa = tgsi_is_msaa_target(inst->Texture.Texture);
5522 int offsets[3];
5523 struct tex_swizzle_info swz_info;
5524
5525 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5526
5527 get_texel_offsets(emit, inst, offsets);
5528
5529 if (msaa) {
5530 /* Fetch one sample from an MSAA texture */
5531 struct tgsi_full_src_register sampleIndex =
5532 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5533 /* LD_MS dst, coord(s0), resource, sampleIndex */
5534 begin_emit_instruction(emit);
5535 emit_sample_opcode(emit, VGPU10_OPCODE_LD_MS,
5536 inst->Instruction.Saturate, offsets);
5537 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5538 emit_src_register(emit, &inst->Src[0]);
5539 emit_resource_register(emit, unit);
5540 emit_src_register(emit, &sampleIndex);
5541 end_emit_instruction(emit);
5542 }
5543 else {
5544 /* Fetch one texel specified by integer coordinate */
5545 /* LD dst, coord(s0), resource */
5546 begin_emit_instruction(emit);
5547 emit_sample_opcode(emit, VGPU10_OPCODE_LD,
5548 inst->Instruction.Saturate, offsets);
5549 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5550 emit_src_register(emit, &inst->Src[0]);
5551 emit_resource_register(emit, unit);
5552 end_emit_instruction(emit);
5553 }
5554
5555 end_tex_swizzle(emit, &swz_info);
5556
5557 free_temp_indexes(emit);
5558
5559 return TRUE;
5560 }
5561
5562
5563 /**
5564 * Emit code for TGSI_OPCODE_TXL (explicit LOD) or TGSI_OPCODE_TXB (LOD bias)
5565 * or TGSI_OPCODE_TXB2 (for cube shadow maps).
5566 */
5567 static boolean
5568 emit_txl_txb(struct svga_shader_emitter_v10 *emit,
5569 const struct tgsi_full_instruction *inst)
5570 {
5571 const enum tgsi_texture_type target = inst->Texture.Texture;
5572 VGPU10_OPCODE_TYPE opcode;
5573 unsigned unit;
5574 int offsets[3];
5575 struct tgsi_full_src_register coord, lod_bias;
5576 struct tex_swizzle_info swz_info;
5577
5578 assert(inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
5579 inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
5580 inst->Instruction.Opcode == TGSI_OPCODE_TXB2);
5581
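/* For TXB2 the bias is in src1.x because src0.w is already occupied
 * (e.g. cube shadow maps keep the comparison value there); for TXL/TXB
 * the LOD/bias is in src0.w.
 */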
5582 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2) {
5583 lod_bias = scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
5584 unit = inst->Src[2].Register.Index;
5585 }
5586 else {
5587 lod_bias = scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5588 unit = inst->Src[1].Register.Index;
5589 }
5590
5591 begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
5592 &swz_info);
5593
5594 get_texel_offsets(emit, inst, offsets);
5595
5596 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5597
5598 /* SAMPLE_L/B dst, coord(s0), resource, sampler, lod(s3) */
5599 begin_emit_instruction(emit);
5600 if (inst->Instruction.Opcode == TGSI_OPCODE_TXL) {
5601 opcode = VGPU10_OPCODE_SAMPLE_L;
5602 }
5603 else {
5604 opcode = VGPU10_OPCODE_SAMPLE_B;
5605 }
5606 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5607 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5608 emit_src_register(emit, &coord);
5609 emit_resource_register(emit, unit);
5610 emit_sampler_register(emit, unit);
5611 emit_src_register(emit, &lod_bias);
5612 end_emit_instruction(emit);
5613
5614 end_tex_swizzle(emit, &swz_info);
5615
5616 free_temp_indexes(emit);
5617
5618 return TRUE;
5619 }
5620
5621
5622 /**
5623 * Emit code for TGSI_OPCODE_TXL2 (explicit LOD) for cubemap array.
5624 */
5625 static boolean
5626 emit_txl2(struct svga_shader_emitter_v10 *emit,
5627 const struct tgsi_full_instruction *inst)
5628 {
5629 unsigned target = inst->Texture.Texture;
5630 unsigned opcode, unit;
5631 int offsets[3];
5632 struct tgsi_full_src_register coord, lod;
5633 struct tex_swizzle_info swz_info;
5634
5635 assert(inst->Instruction.Opcode == TGSI_OPCODE_TXL2);
5636
5637 lod = scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
5638 unit = inst->Src[2].Register.Index;
5639
5640 begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
5641 &swz_info);
5642
5643 get_texel_offsets(emit, inst, offsets);
5644
5645 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5646
5647 /* SAMPLE_L dst, coord(s0), resource, sampler, lod(s3) */
5648 begin_emit_instruction(emit);
5649 opcode = VGPU10_OPCODE_SAMPLE_L;
5650 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5651 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5652 emit_src_register(emit, &coord);
5653 emit_resource_register(emit, unit);
5654 emit_sampler_register(emit, unit);
5655 emit_src_register(emit, &lod);
5656 end_emit_instruction(emit);
5657
5658 end_tex_swizzle(emit, &swz_info);
5659
5660 free_temp_indexes(emit);
5661
5662 return TRUE;
5663 }
5664
5665
5666 /**
5667 * Emit code for TGSI_OPCODE_TXQ (texture query) instruction.
5668 */
5669 static boolean
5670 emit_txq(struct svga_shader_emitter_v10 *emit,
5671 const struct tgsi_full_instruction *inst)
5672 {
5673 const uint unit = inst->Src[1].Register.Index;
5674
5675 if (emit->sampler_target[unit] == TGSI_TEXTURE_BUFFER) {
5676 /* RESINFO does not support querying texture buffers, so we instead
5677 * store texture buffer sizes in shader constants, then copy them to
5678 * implement TXQ instead of emitting RESINFO.
5679 * MOV dst, const[texture_buffer_size_index[unit]]
5680 */
5681 struct tgsi_full_src_register size_src =
5682 make_src_const_reg(emit->texture_buffer_size_index[unit]);
5683 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &size_src,
5684 FALSE);
5685 } else {
5686 /* RESINFO dst, srcMipLevel, resource */
5687 begin_emit_instruction(emit);
5688 emit_opcode_resinfo(emit, VGPU10_RESINFO_RETURN_UINT);
5689 emit_dst_register(emit, &inst->Dst[0]);
5690 emit_src_register(emit, &inst->Src[0]);
5691 emit_resource_register(emit, unit);
5692 end_emit_instruction(emit);
5693 }
5694
5695 free_temp_indexes(emit);
5696
5697 return TRUE;
5698 }
5699
5700
5701 /**
5702 * Emit a simple instruction (like ADD, MUL, MIN, etc).
5703 */
5704 static boolean
5705 emit_simple(struct svga_shader_emitter_v10 *emit,
5706 const struct tgsi_full_instruction *inst)
5707 {
5708 const enum tgsi_opcode opcode = inst->Instruction.Opcode;
5709 const struct tgsi_opcode_info *op = tgsi_get_opcode_info(opcode);
5710 unsigned i;
5711
5712 begin_emit_instruction(emit);
5713 emit_opcode(emit, translate_opcode(opcode), inst->Instruction.Saturate);
5714 for (i = 0; i < op->num_dst; i++) {
5715 emit_dst_register(emit, &inst->Dst[i]);
5716 }
5717 for (i = 0; i < op->num_src; i++) {
5718 emit_src_register(emit, &inst->Src[i]);
5719 }
5720 end_emit_instruction(emit);
5721
5722 return TRUE;
5723 }
5724
5725
5726 /**
5727 * We only special case the MOV instruction to try to detect constant
5728 * color writes in the fragment shader.
5729 */
5730 static boolean
5731 emit_mov(struct svga_shader_emitter_v10 *emit,
5732 const struct tgsi_full_instruction *inst)
5733 {
5734 const struct tgsi_full_src_register *src = &inst->Src[0];
5735 const struct tgsi_full_dst_register *dst = &inst->Dst[0];
5736
5737 if (emit->unit == PIPE_SHADER_FRAGMENT &&
5738 dst->Register.File == TGSI_FILE_OUTPUT &&
5739 dst->Register.Index == 0 &&
5740 src->Register.File == TGSI_FILE_CONSTANT &&
5741 !src->Register.Indirect) {
5742 emit->constant_color_output = TRUE;
5743 }
5744
5745 return emit_simple(emit, inst);
5746 }
5747
5748
5749 /**
5750 * Emit a simple VGPU10 instruction which writes to multiple dest registers,
5751 * where TGSI only uses one dest register.
5752 */
5753 static boolean
5754 emit_simple_1dst(struct svga_shader_emitter_v10 *emit,
5755 const struct tgsi_full_instruction *inst,
5756 unsigned dst_count,
5757 unsigned dst_index)
5758 {
5759 const enum tgsi_opcode opcode = inst->Instruction.Opcode;
5760 const struct tgsi_opcode_info *op = tgsi_get_opcode_info(opcode);
5761 unsigned i;
5762
5763 begin_emit_instruction(emit);
5764 emit_opcode(emit, translate_opcode(opcode), inst->Instruction.Saturate);
5765
5766 for (i = 0; i < dst_count; i++) {
5767 if (i == dst_index) {
5768 emit_dst_register(emit, &inst->Dst[0]);
5769 } else {
5770 emit_null_dst_register(emit);
5771 }
5772 }
5773
5774 for (i = 0; i < op->num_src; i++) {
5775 emit_src_register(emit, &inst->Src[i]);
5776 }
5777 end_emit_instruction(emit);
5778
5779 return TRUE;
5780 }
5781
5782
5783 /**
5784 * Translate a single TGSI instruction to VGPU10.
5785 */
5786 static boolean
5787 emit_vgpu10_instruction(struct svga_shader_emitter_v10 *emit,
5788 unsigned inst_number,
5789 const struct tgsi_full_instruction *inst)
5790 {
5791 const enum tgsi_opcode opcode = inst->Instruction.Opcode;
5792
5793 switch (opcode) {
5794 case TGSI_OPCODE_ADD:
5795 case TGSI_OPCODE_AND:
5796 case TGSI_OPCODE_BGNLOOP:
5797 case TGSI_OPCODE_BRK:
5798 case TGSI_OPCODE_CEIL:
5799 case TGSI_OPCODE_CONT:
5800 case TGSI_OPCODE_DDX:
5801 case TGSI_OPCODE_DDY:
5802 case TGSI_OPCODE_DIV:
5803 case TGSI_OPCODE_DP2:
5804 case TGSI_OPCODE_DP3:
5805 case TGSI_OPCODE_DP4:
5806 case TGSI_OPCODE_ELSE:
5807 case TGSI_OPCODE_ENDIF:
5808 case TGSI_OPCODE_ENDLOOP:
5809 case TGSI_OPCODE_ENDSUB:
5810 case TGSI_OPCODE_F2I:
5811 case TGSI_OPCODE_F2U:
5812 case TGSI_OPCODE_FLR:
5813 case TGSI_OPCODE_FRC:
5814 case TGSI_OPCODE_FSEQ:
5815 case TGSI_OPCODE_FSGE:
5816 case TGSI_OPCODE_FSLT:
5817 case TGSI_OPCODE_FSNE:
5818 case TGSI_OPCODE_I2F:
5819 case TGSI_OPCODE_IMAX:
5820 case TGSI_OPCODE_IMIN:
5821 case TGSI_OPCODE_INEG:
5822 case TGSI_OPCODE_ISGE:
5823 case TGSI_OPCODE_ISHR:
5824 case TGSI_OPCODE_ISLT:
5825 case TGSI_OPCODE_MAD:
5826 case TGSI_OPCODE_MAX:
5827 case TGSI_OPCODE_MIN:
5828 case TGSI_OPCODE_MUL:
5829 case TGSI_OPCODE_NOP:
5830 case TGSI_OPCODE_NOT:
5831 case TGSI_OPCODE_OR:
5832 case TGSI_OPCODE_RET:
5833 case TGSI_OPCODE_UADD:
5834 case TGSI_OPCODE_USEQ:
5835 case TGSI_OPCODE_USGE:
5836 case TGSI_OPCODE_USLT:
5837 case TGSI_OPCODE_UMIN:
5838 case TGSI_OPCODE_UMAD:
5839 case TGSI_OPCODE_UMAX:
5840 case TGSI_OPCODE_ROUND:
5841 case TGSI_OPCODE_SQRT:
5842 case TGSI_OPCODE_SHL:
5843 case TGSI_OPCODE_TRUNC:
5844 case TGSI_OPCODE_U2F:
5845 case TGSI_OPCODE_UCMP:
5846 case TGSI_OPCODE_USHR:
5847 case TGSI_OPCODE_USNE:
5848 case TGSI_OPCODE_XOR:
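/* util_pstipple_create_fragment_shader() returns a transformed shader
 * which samples the stipple pattern texture and kills the fragment
 * accordingly; the sampler unit it chose is returned in 'unit'.
 */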
5849 /* simple instructions */
5850 return emit_simple(emit, inst);
5851
5852 case TGSI_OPCODE_MOV:
5853 return emit_mov(emit, inst);
5854 case TGSI_OPCODE_EMIT:
5855 return emit_vertex(emit, inst);
5856 case TGSI_OPCODE_ENDPRIM:
5857 return emit_endprim(emit, inst);
5858 case TGSI_OPCODE_IABS:
5859 return emit_iabs(emit, inst);
5860 case TGSI_OPCODE_ARL:
5861 /* fall-through */
5862 case TGSI_OPCODE_UARL:
5863 return emit_arl_uarl(emit, inst);
5864 case TGSI_OPCODE_BGNSUB:
5865 /* no-op */
5866 return TRUE;
5867 case TGSI_OPCODE_CAL:
5868 return emit_cal(emit, inst);
5869 case TGSI_OPCODE_CMP:
5870 return emit_cmp(emit, inst);
5871 case TGSI_OPCODE_COS:
5872 return emit_sincos(emit, inst);
5873 case TGSI_OPCODE_DST:
5874 return emit_dst(emit, inst);
5875 case TGSI_OPCODE_EX2:
5876 return emit_ex2(emit, inst);
5877 case TGSI_OPCODE_EXP:
5878 return emit_exp(emit, inst);
5879 case TGSI_OPCODE_IF:
5880 return emit_if(emit, inst);
5881 case TGSI_OPCODE_KILL:
5882 return emit_kill(emit, inst);
5883 case TGSI_OPCODE_KILL_IF:
5884 return emit_kill_if(emit, inst);
5885 case TGSI_OPCODE_LG2:
5886 return emit_lg2(emit, inst);
5887 case TGSI_OPCODE_LIT:
5888 return emit_lit(emit, inst);
5889 case TGSI_OPCODE_LODQ:
5890 return emit_lodq(emit, inst);
5891 case TGSI_OPCODE_LOG:
5892 return emit_log(emit, inst);
5893 case TGSI_OPCODE_LRP:
5894 return emit_lrp(emit, inst);
5895 case TGSI_OPCODE_POW:
5896 return emit_pow(emit, inst);
5897 case TGSI_OPCODE_RCP:
5898 return emit_rcp(emit, inst);
5899 case TGSI_OPCODE_RSQ:
5900 return emit_rsq(emit, inst);
5901 case TGSI_OPCODE_SAMPLE:
5902 return emit_sample(emit, inst);
5903 case TGSI_OPCODE_SEQ:
5904 return emit_seq(emit, inst);
5905 case TGSI_OPCODE_SGE:
5906 return emit_sge(emit, inst);
5907 case TGSI_OPCODE_SGT:
5908 return emit_sgt(emit, inst);
5909 case TGSI_OPCODE_SIN:
5910 return emit_sincos(emit, inst);
5911 case TGSI_OPCODE_SLE:
5912 return emit_sle(emit, inst);
5913 case TGSI_OPCODE_SLT:
5914 return emit_slt(emit, inst);
5915 case TGSI_OPCODE_SNE:
5916 return emit_sne(emit, inst);
5917 case TGSI_OPCODE_SSG:
5918 return emit_ssg(emit, inst);
5919 case TGSI_OPCODE_ISSG:
5920 return emit_issg(emit, inst);
5921 case TGSI_OPCODE_TEX:
5922 return emit_tex(emit, inst);
5923 case TGSI_OPCODE_TG4:
5924 return emit_tg4(emit, inst);
5925 case TGSI_OPCODE_TEX2:
5926 return emit_tex2(emit, inst);
5927 case TGSI_OPCODE_TXP:
5928 return emit_txp(emit, inst);
5929 case TGSI_OPCODE_TXB:
5930 case TGSI_OPCODE_TXB2:
5931 case TGSI_OPCODE_TXL:
5932 return emit_txl_txb(emit, inst);
5933 case TGSI_OPCODE_TXD:
5934 return emit_txd(emit, inst);
5935 case TGSI_OPCODE_TXF:
5936 return emit_txf(emit, inst);
5937 case TGSI_OPCODE_TXL2:
5938 return emit_txl2(emit, inst);
5939 case TGSI_OPCODE_TXQ:
5940 return emit_txq(emit, inst);
5941 case TGSI_OPCODE_UIF:
5942 return emit_if(emit, inst);
5943 case TGSI_OPCODE_UMUL_HI:
5944 case TGSI_OPCODE_IMUL_HI:
5945 case TGSI_OPCODE_UDIV:
5946 case TGSI_OPCODE_IDIV:
5947 /* These cases use only the FIRST of two destination registers */
5948 return emit_simple_1dst(emit, inst, 2, 0);
5949 case TGSI_OPCODE_UMUL:
5950 case TGSI_OPCODE_UMOD:
5951 case TGSI_OPCODE_MOD:
5952 /* These cases use only the SECOND of two destination registers */
5953 return emit_simple_1dst(emit, inst, 2, 1);
5954 case TGSI_OPCODE_END:
5955 if (!emit_post_helpers(emit))
5956 return FALSE;
5957 return emit_simple(emit, inst);
5958
5959 default:
5960 debug_printf("Unimplemented tgsi instruction %s\n",
5961 tgsi_get_opcode_name(opcode));
5962 return FALSE;
5963 }
5964
5965 return TRUE;
5966 }
5967
5968
5969 /**
5970 * Emit the extra instructions to adjust the vertex position.
5971 * There are two possible adjustments:
5972 * 1. Converting from Gallium to VGPU10 coordinate space by applying the
5973 * "prescale" and "pretranslate" values.
5974 * 2. Undoing the viewport transformation when we use the swtnl/draw path.
5975 * \param vs_pos_tmp_index which temporary register contains the vertex pos.
5976 */
5977 static void
5978 emit_vpos_instructions(struct svga_shader_emitter_v10 *emit,
5979 unsigned vs_pos_tmp_index)
5980 {
5981 struct tgsi_full_src_register tmp_pos_src;
5982 struct tgsi_full_dst_register pos_dst;
5983
5984 /* Don't bother to emit any extra vertex instructions if vertex position is
5985 * not written out
5986 */
5987 if (emit->vposition.out_index == INVALID_INDEX)
5988 return;
5989
5990 tmp_pos_src = make_src_temp_reg(vs_pos_tmp_index);
5991 pos_dst = make_dst_output_reg(emit->vposition.out_index);
5992
5993 /* If the non-adjusted vertex position register index
5994 * is valid, copy the vertex position from the temporary
5995 * vertex position register before it is modified by the
5996 * prescale computation.
5997 */
5998 if (emit->vposition.so_index != INVALID_INDEX) {
5999 struct tgsi_full_dst_register pos_so_dst =
6000 make_dst_output_reg(emit->vposition.so_index);
6001
6002 /* MOV pos_so, tmp_pos */
6003 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &pos_so_dst,
6004 &tmp_pos_src, FALSE);
6005 }
6006
6007 if (emit->vposition.need_prescale) {
6008 /* This code adjusts the vertex position to match the VGPU10 convention.
6009 * If p is the position computed by the shader (usually by applying the
6010 * modelview and projection matrices), the new position q is computed by:
6011 *
6012 * q.x = p.w * trans.x + p.x * scale.x
6013 * q.y = p.w * trans.y + p.y * scale.y
6014 * q.z = p.w * trans.z + p.z * scale.z
6015 * q.w = p.w * trans.w + p.w
6016 */
6017 struct tgsi_full_src_register tmp_pos_src_w =
6018 scalar_src(&tmp_pos_src, TGSI_SWIZZLE_W);
6019 struct tgsi_full_dst_register tmp_pos_dst =
6020 make_dst_temp_reg(vs_pos_tmp_index);
6021 struct tgsi_full_dst_register tmp_pos_dst_xyz =
6022 writemask_dst(&tmp_pos_dst, TGSI_WRITEMASK_XYZ);
6023
6024 struct tgsi_full_src_register prescale_scale =
6025 make_src_const_reg(emit->vposition.prescale_scale_index);
6026 struct tgsi_full_src_register prescale_trans =
6027 make_src_const_reg(emit->vposition.prescale_trans_index);
6028
6029 /* MUL tmp_pos.xyz, tmp_pos, prescale.scale */
6030 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_pos_dst_xyz,
6031 &tmp_pos_src, &prescale_scale, FALSE);
6032
6033 /* MAD pos, tmp_pos.wwww, prescale.trans, tmp_pos */
6034 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &pos_dst, &tmp_pos_src_w,
6035 &prescale_trans, &tmp_pos_src, FALSE);
6036 }
6037 else if (emit->key.vs.undo_viewport) {
6038 /* This code computes the final vertex position from the temporary
6039 * vertex position by undoing the viewport transformation and the
6040 * divide-by-W operation (we convert window coords back to clip coords).
6041 * This is needed when we use the 'draw' module for fallbacks.
6042 * If p is the temp pos in window coords, then the NDC coord q is:
6043 * q.x = (p.x - vp.x_trans) / vp.x_scale * p.w
6044 * q.y = (p.y - vp.y_trans) / vp.y_scale * p.w
6045 * q.z = p.z * p.w
6046 * q.w = p.w
6047 * CONST[vs_viewport_index] contains:
6048 * { 1/vp.x_scale, 1/vp.y_scale, -vp.x_trans, -vp.y_trans }
6049 */
6050 struct tgsi_full_dst_register tmp_pos_dst =
6051 make_dst_temp_reg(vs_pos_tmp_index);
6052 struct tgsi_full_dst_register tmp_pos_dst_xy =
6053 writemask_dst(&tmp_pos_dst, TGSI_WRITEMASK_XY);
6054 struct tgsi_full_src_register tmp_pos_src_wwww =
6055 scalar_src(&tmp_pos_src, TGSI_SWIZZLE_W);
6056
6057 struct tgsi_full_dst_register pos_dst_xyz =
6058 writemask_dst(&pos_dst, TGSI_WRITEMASK_XYZ);
6059 struct tgsi_full_dst_register pos_dst_w =
6060 writemask_dst(&pos_dst, TGSI_WRITEMASK_W);
6061
6062 struct tgsi_full_src_register vp_xyzw =
6063 make_src_const_reg(emit->vs.viewport_index);
6064 struct tgsi_full_src_register vp_zwww =
6065 swizzle_src(&vp_xyzw, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W,
6066 TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);
6067
6068 /* ADD tmp_pos.xy, tmp_pos.xy, viewport.zwww */
6069 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp_pos_dst_xy,
6070 &tmp_pos_src, &vp_zwww, FALSE);
6071
6072 /* MUL tmp_pos.xy, tmp_pos.xyzw, viewport.xyzw */
6073 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_pos_dst_xy,
6074 &tmp_pos_src, &vp_xyzw, FALSE);
6075
6076 /* MUL pos.xyz, tmp_pos.xyz, tmp_pos.www */
6077 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &pos_dst_xyz,
6078 &tmp_pos_src, &tmp_pos_src_wwww, FALSE);
6079
6080 /* MOV pos.w, tmp_pos.w */
6081 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &pos_dst_w,
6082 &tmp_pos_src, FALSE);
6083 }
6084 else if (vs_pos_tmp_index != INVALID_INDEX) {
6085 /* This code is to handle the case where the temporary vertex
6086 * position register is created when the vertex shader has stream
6087 * output and prescale is disabled because rasterization is to be
6088 * discarded.
6089 */
6090 struct tgsi_full_dst_register pos_dst =
6091 make_dst_output_reg(emit->vposition.out_index);
6092
6093 /* MOV pos, tmp_pos */
6094 begin_emit_instruction(emit);
6095 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
6096 emit_dst_register(emit, &pos_dst);
6097 emit_src_register(emit, &tmp_pos_src);
6098 end_emit_instruction(emit);
6099 }
6100 }
6101
6102 static void
6103 emit_clipping_instructions(struct svga_shader_emitter_v10 *emit)
6104 {
6105 if (emit->clip_mode == CLIP_DISTANCE) {
6106 /* Copy from copy distance temporary to CLIPDIST & the shadow copy */
6107 emit_clip_distance_instructions(emit);
6108
6109 } else if (emit->clip_mode == CLIP_VERTEX) {
6110 /* Convert TGSI CLIPVERTEX to CLIPDIST */
6111 emit_clip_vertex_instructions(emit);
6112 }
6113
6114 /**
6115 * Emit the vertex position and handle legacy user clip planes only if
6116 * there is a valid vertex position register index.
6117 * This covers the case where the shader doesn't output a vertex
6118 * position: in that case there is no need to emit any more vertex
6119 * instructions.
6120 */
6121 if (emit->vposition.out_index == INVALID_INDEX)
6122 return;
6123
6124 /**
6125 * Emit per-vertex clipping instructions for legacy user defined clip planes.
6126 * NOTE: we must emit the clip distance instructions before the
6127 * emit_vpos_instructions() call since the later function will change
6128 * the TEMP[vs_pos_tmp_index] value.
6129 */
6130 if (emit->clip_mode == CLIP_LEGACY) {
6131 /* Emit CLIPDIST for legacy user defined clip planes */
6132 emit_clip_distance_from_vpos(emit, emit->vposition.tmp_index);
6133 }
6134 }
6135
6136
6137 /**
6138 * Emit extra per-vertex instructions. This includes clip-coordinate
6139 * space conversion and computing clip distances. This is called for
6140 * each GS emit-vertex instruction and at the end of VS translation.
6141 */
6142 static void
6143 emit_vertex_instructions(struct svga_shader_emitter_v10 *emit)
6144 {
6145 const unsigned vs_pos_tmp_index = emit->vposition.tmp_index;
6146
6147 /* Emit clipping instructions based on clipping mode */
6148 emit_clipping_instructions(emit);
6149
6150 /**
6151 * Reset the temporary vertex position register index
6152 * so that emit_dst_register() will use the real vertex position output
6153 */
6154 emit->vposition.tmp_index = INVALID_INDEX;
6155
6156 /* Emit vertex position instructions */
6157 emit_vpos_instructions(emit, vs_pos_tmp_index);
6158
6159 /* Restore original vposition.tmp_index value for the next GS vertex.
6160 * It doesn't matter for VS.
6161 */
6162 emit->vposition.tmp_index = vs_pos_tmp_index;
6163 }
6164
6165 /**
6166 * Translate the TGSI_OPCODE_EMIT GS instruction.
6167 */
6168 static boolean
6169 emit_vertex(struct svga_shader_emitter_v10 *emit,
6170 const struct tgsi_full_instruction *inst)
6171 {
6172 boolean ret = TRUE;
6173
6174 assert(emit->unit == PIPE_SHADER_GEOMETRY);
6175
6176 emit_vertex_instructions(emit);
6177
6178 /* We can't use emit_simple() because the TGSI instruction has one
6179 * operand (vertex stream number) which we must ignore for VGPU10.
6180 */
6181 begin_emit_instruction(emit);
6182 emit_opcode(emit, VGPU10_OPCODE_EMIT, FALSE);
6183 end_emit_instruction(emit);
6184
6185 return ret;
6186 }
6187
6188
6189 /**
6190 * Emit the extra code to convert from VGPU10's boolean front-face
6191 * register to TGSI's signed front-face register.
6192 *
6193 * TODO: Make temporary front-face register a scalar.
6194 */
6195 static void
6196 emit_frontface_instructions(struct svga_shader_emitter_v10 *emit)
6197 {
6198 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6199
6200 if (emit->fs.face_input_index != INVALID_INDEX) {
6201 /* convert vgpu10 boolean face register to gallium +/-1 value */
6202 struct tgsi_full_dst_register tmp_dst =
6203 make_dst_temp_reg(emit->fs.face_tmp_index);
6204 struct tgsi_full_src_register one =
6205 make_immediate_reg_float(emit, 1.0f);
6206 struct tgsi_full_src_register neg_one =
6207 make_immediate_reg_float(emit, -1.0f);
6208
6209 /* MOVC face_tmp, IS_FRONT_FACE.x, 1.0, -1.0 */
6210 begin_emit_instruction(emit);
6211 emit_opcode(emit, VGPU10_OPCODE_MOVC, FALSE);
6212 emit_dst_register(emit, &tmp_dst);
6213 emit_face_register(emit);
6214 emit_src_register(emit, &one);
6215 emit_src_register(emit, &neg_one);
6216 end_emit_instruction(emit);
6217 }
6218 }
6219
6220
6221 /**
6222 * Emit the extra code to convert from VGPU10's fragcoord.w value to 1/w.
6223 */
6224 static void
6225 emit_fragcoord_instructions(struct svga_shader_emitter_v10 *emit)
6226 {
6227 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6228
6229 if (emit->fs.fragcoord_input_index != INVALID_INDEX) {
6230 struct tgsi_full_dst_register tmp_dst =
6231 make_dst_temp_reg(emit->fs.fragcoord_tmp_index);
6232 struct tgsi_full_dst_register tmp_dst_xyz =
6233 writemask_dst(&tmp_dst, TGSI_WRITEMASK_XYZ);
6234 struct tgsi_full_dst_register tmp_dst_w =
6235 writemask_dst(&tmp_dst, TGSI_WRITEMASK_W);
6236 struct tgsi_full_src_register one =
6237 make_immediate_reg_float(emit, 1.0f);
6238 struct tgsi_full_src_register fragcoord =
6239 make_src_reg(TGSI_FILE_INPUT, emit->fs.fragcoord_input_index);
6240
6241 /* save the input index */
6242 unsigned fragcoord_input_index = emit->fs.fragcoord_input_index;
6243 /* set to invalid to prevent substitution in emit_src_register() */
6244 emit->fs.fragcoord_input_index = INVALID_INDEX;
6245
6246 /* MOV fragcoord_tmp.xyz, fragcoord.xyz */
6247 begin_emit_instruction(emit);
6248 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
6249 emit_dst_register(emit, &tmp_dst_xyz);
6250 emit_src_register(emit, &fragcoord);
6251 end_emit_instruction(emit);
6252
6253 /* DIV fragcoord_tmp.w, 1.0, fragcoord.w */
6254 begin_emit_instruction(emit);
6255 emit_opcode(emit, VGPU10_OPCODE_DIV, FALSE);
6256 emit_dst_register(emit, &tmp_dst_w);
6257 emit_src_register(emit, &one);
6258 emit_src_register(emit, &fragcoord);
6259 end_emit_instruction(emit);
6260
6261 /* restore saved value */
6262 emit->fs.fragcoord_input_index = fragcoord_input_index;
6263 }
6264 }
6265
6266
6267 /**
6268 * Emit the extra code to get the current sample position value and
6269 * put it into a temp register.
6270 */
6271 static void
6272 emit_sample_position_instructions(struct svga_shader_emitter_v10 *emit)
6273 {
6274 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6275
6276 if (emit->fs.sample_pos_sys_index != INVALID_INDEX) {
6277 assert(emit->version >= 41);
6278
6279 struct tgsi_full_dst_register tmp_dst =
6280 make_dst_temp_reg(emit->fs.sample_pos_tmp_index);
6281 struct tgsi_full_src_register half =
6282 make_immediate_reg_float4(emit, 0.5, 0.5, 0.0, 0.0);
6283
6284 struct tgsi_full_src_register tmp_src =
6285 make_src_temp_reg(emit->fs.sample_pos_tmp_index);
6286 struct tgsi_full_src_register sample_index_reg =
6287 make_src_scalar_reg(TGSI_FILE_SYSTEM_VALUE,
6288 emit->fs.sample_id_sys_index, TGSI_SWIZZLE_X);
6289
6290 /* The first src register is a shader resource (if we want a
6291 * multisampled resource sample position) or the rasterizer register
6292 * (if we want the current sample position in the color buffer). We
6293 * want the latter.
6294 */
6295
6296 /* SAMPLE_POS dst, RASTERIZER, sampleIndex */
6297 begin_emit_instruction(emit);
6298 emit_opcode(emit, VGPU10_OPCODE_SAMPLE_POS, FALSE);
6299 emit_dst_register(emit, &tmp_dst);
6300 emit_rasterizer_register(emit);
6301 emit_src_register(emit, &sample_index_reg);
6302 end_emit_instruction(emit);
6303
6304 /* Convert from D3D coords to GL coords by adding 0.5 bias */
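/* (D3D sample positions are offsets in [-0.5, 0.5]; GL expects [0, 1]) */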
6305 /* ADD dst, dst, half */
6306 begin_emit_instruction(emit);
6307 emit_opcode(emit, VGPU10_OPCODE_ADD, FALSE);
6308 emit_dst_register(emit, &tmp_dst);
6309 emit_src_register(emit, &tmp_src);
6310 emit_src_register(emit, &half);
6311 end_emit_instruction(emit);
6312 }
6313 }
6314
6315
6316 /**
6317 * Emit extra instructions to adjust VS inputs/attributes. This can
6318 * mean casting a vertex attribute from int to float or setting the
6319 * W component to 1, or both.
6320 */
6321 static void
6322 emit_vertex_attrib_instructions(struct svga_shader_emitter_v10 *emit)
6323 {
6324 const unsigned save_w_1_mask = emit->key.vs.adjust_attrib_w_1;
6325 const unsigned save_itof_mask = emit->key.vs.adjust_attrib_itof;
6326 const unsigned save_utof_mask = emit->key.vs.adjust_attrib_utof;
6327 const unsigned save_is_bgra_mask = emit->key.vs.attrib_is_bgra;
6328 const unsigned save_puint_to_snorm_mask = emit->key.vs.attrib_puint_to_snorm;
6329 const unsigned save_puint_to_uscaled_mask = emit->key.vs.attrib_puint_to_uscaled;
6330 const unsigned save_puint_to_sscaled_mask = emit->key.vs.attrib_puint_to_sscaled;
6331
6332 unsigned adjust_mask = (save_w_1_mask |
6333 save_itof_mask |
6334 save_utof_mask |
6335 save_is_bgra_mask |
6336 save_puint_to_snorm_mask |
6337 save_puint_to_uscaled_mask |
6338 save_puint_to_sscaled_mask);
6339
6340 assert(emit->unit == PIPE_SHADER_VERTEX);
6341
6342 if (adjust_mask) {
6343 struct tgsi_full_src_register one =
6344 make_immediate_reg_float(emit, 1.0f);
6345
6346 struct tgsi_full_src_register one_int =
6347 make_immediate_reg_int(emit, 1);
6348
6349 /* We need to turn off these bitmasks while emitting the
6350 * instructions below, then restore them afterward.
6351 */
6352 emit->key.vs.adjust_attrib_w_1 = 0;
6353 emit->key.vs.adjust_attrib_itof = 0;
6354 emit->key.vs.adjust_attrib_utof = 0;
6355 emit->key.vs.attrib_is_bgra = 0;
6356 emit->key.vs.attrib_puint_to_snorm = 0;
6357 emit->key.vs.attrib_puint_to_uscaled = 0;
6358 emit->key.vs.attrib_puint_to_sscaled = 0;
6359
6360 while (adjust_mask) {
6361 unsigned index = u_bit_scan(&adjust_mask);
6362
6363 /* skip the instruction if this vertex attribute is not being used */
6364 if (emit->info.input_usage_mask[index] == 0)
6365 continue;
6366
6367 unsigned tmp = emit->vs.adjusted_input[index];
6368 struct tgsi_full_src_register input_src =
6369 make_src_reg(TGSI_FILE_INPUT, index);
6370
6371 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
6372 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
6373 struct tgsi_full_dst_register tmp_dst_w =
6374 writemask_dst(&tmp_dst, TGSI_WRITEMASK_W);
6375
6376 /* ITOF/UTOF/MOV tmp, input[index] */
6377 if (save_itof_mask & (1 << index)) {
6378 emit_instruction_op1(emit, VGPU10_OPCODE_ITOF,
6379 &tmp_dst, &input_src, FALSE);
6380 }
6381 else if (save_utof_mask & (1 << index)) {
6382 emit_instruction_op1(emit, VGPU10_OPCODE_UTOF,
6383 &tmp_dst, &input_src, FALSE);
6384 }
6385 else if (save_puint_to_snorm_mask & (1 << index)) {
6386 emit_puint_to_snorm(emit, &tmp_dst, &input_src);
6387 }
6388 else if (save_puint_to_uscaled_mask & (1 << index)) {
6389 emit_puint_to_uscaled(emit, &tmp_dst, &input_src);
6390 }
6391 else if (save_puint_to_sscaled_mask & (1 << index)) {
6392 emit_puint_to_sscaled(emit, &tmp_dst, &input_src);
6393 }
6394 else {
6395 assert((save_w_1_mask | save_is_bgra_mask) & (1 << index));
6396 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6397 &tmp_dst, &input_src, FALSE);
6398 }
6399
6400 if (save_is_bgra_mask & (1 << index)) {
6401 emit_swap_r_b(emit, &tmp_dst, &tmp_src);
6402 }
6403
6404 if (save_w_1_mask & (1 << index)) {
6405 /* MOV tmp.w, 1.0 */
6406 if (emit->key.vs.attrib_is_pure_int & (1 << index)) {
6407 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6408 &tmp_dst_w, &one_int, FALSE);
6409 }
6410 else {
6411 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6412 &tmp_dst_w, &one, FALSE);
6413 }
6414 }
6415 }
6416
6417 emit->key.vs.adjust_attrib_w_1 = save_w_1_mask;
6418 emit->key.vs.adjust_attrib_itof = save_itof_mask;
6419 emit->key.vs.adjust_attrib_utof = save_utof_mask;
6420 emit->key.vs.attrib_is_bgra = save_is_bgra_mask;
6421 emit->key.vs.attrib_puint_to_snorm = save_puint_to_snorm_mask;
6422 emit->key.vs.attrib_puint_to_uscaled = save_puint_to_uscaled_mask;
6423 emit->key.vs.attrib_puint_to_sscaled = save_puint_to_sscaled_mask;
6424 }
6425 }
6426
6427
6428 /**
6429 * Some common values like 0.0, 1.0, 0.5, etc. are frequently needed
6430 * to implement some instructions. We pre-allocate those values here
6431 * in the immediate constant buffer.
6432 */
6433 static void
6434 alloc_common_immediates(struct svga_shader_emitter_v10 *emit)
6435 {
6436 unsigned n = 0;
6437
6438 emit->common_immediate_pos[n++] =
6439 alloc_immediate_float4(emit, 0.0f, 1.0f, 0.5f, -1.0f);
6440
6441 if (emit->info.opcode_count[TGSI_OPCODE_LIT] > 0) {
6442 emit->common_immediate_pos[n++] =
6443 alloc_immediate_float4(emit, 128.0f, -128.0f, 0.0f, 0.0f);
6444 }
6445
6446 emit->common_immediate_pos[n++] =
6447 alloc_immediate_int4(emit, 0, 1, 0, -1);
6448
6449 if (emit->key.vs.attrib_puint_to_snorm) {
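/* constants presumably consumed by emit_puint_to_snorm() when converting
 * packed-uint vertex attributes to snorm values
 */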
6450 emit->common_immediate_pos[n++] =
6451 alloc_immediate_float4(emit, -2.0f, 2.0f, 3.0f, -1.66666f);
6452 }
6453
6454 if (emit->key.vs.attrib_puint_to_uscaled) {
6455 emit->common_immediate_pos[n++] =
6456 alloc_immediate_float4(emit, 1023.0f, 3.0f, 0.0f, 0.0f);
6457 }
6458
6459 if (emit->key.vs.attrib_puint_to_sscaled) {
6460 emit->common_immediate_pos[n++] =
6461 alloc_immediate_int4(emit, 22, 12, 2, 0);
6462
6463 emit->common_immediate_pos[n++] =
6464 alloc_immediate_int4(emit, 22, 30, 0, 0);
6465 }
6466
6467 unsigned i;
6468
6469 for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
6470 if (emit->key.tex[i].texel_bias) {
6471 /* Replace the 0.0f slots if more immediate float values are needed */
6472 emit->common_immediate_pos[n++] =
6473 alloc_immediate_float4(emit, 0.0001f, 0.0f, 0.0f, 0.0f);
6474 break;
6475 }
6476 }
6477
6478 assert(n <= ARRAY_SIZE(emit->common_immediate_pos));
6479 emit->num_common_immediates = n;
6480 }
6481
6482
6483 /**
6484 * Emit any extra/helper declarations/code that we might need between
6485 * the declaration section and code section.
6486 */
6487 static boolean
6488 emit_pre_helpers(struct svga_shader_emitter_v10 *emit)
6489 {
6490 /* Properties */
6491 if (emit->unit == PIPE_SHADER_GEOMETRY)
6492 emit_property_instructions(emit);
6493
6494 /* Declare inputs */
6495 if (!emit_input_declarations(emit))
6496 return FALSE;
6497
6498 /* Declare outputs */
6499 if (!emit_output_declarations(emit))
6500 return FALSE;
6501
6502 /* Declare temporary registers */
6503 emit_temporaries_declaration(emit);
6504
6505 /* Declare constant registers */
6506 emit_constant_declaration(emit);
6507
6508 /* Declare samplers and resources */
6509 emit_sampler_declarations(emit);
6510 emit_resource_declarations(emit);
6511
6512 /* Declare clip distance output registers */
6513 if (emit->unit == PIPE_SHADER_VERTEX ||
6514 emit->unit == PIPE_SHADER_GEOMETRY) {
6515 emit_clip_distance_declarations(emit);
6516 }
6517
6518 alloc_common_immediates(emit);
6519
6520 if (emit->unit == PIPE_SHADER_FRAGMENT &&
6521 emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS) {
6522 float alpha = emit->key.fs.alpha_ref;
6523 emit->fs.alpha_ref_index =
6524 alloc_immediate_float4(emit, alpha, alpha, alpha, alpha);
6525 }
6526
6527 /* Now, emit the constant block containing all the immediates
6528 * declared by the shader, as well as the extra ones seen above.
6529 */
6530 emit_vgpu10_immediates_block(emit);
6531
6532 if (emit->unit == PIPE_SHADER_FRAGMENT) {
6533 emit_frontface_instructions(emit);
6534 emit_fragcoord_instructions(emit);
6535 emit_sample_position_instructions(emit);
6536 }
6537 else if (emit->unit == PIPE_SHADER_VERTEX) {
6538 emit_vertex_attrib_instructions(emit);
6539 }
6540
6541 return TRUE;
6542 }
6543
6544
6545 /**
6546 * The device has no direct support for the pipe_blend_state::alpha_to_one
6547 * option so we implement it here with shader code.
6548 *
6549 * Note that this is kind of pointless, actually. Here we're clobbering
6550 * the alpha value with 1.0. So if alpha-to-coverage is enabled, we'll wind
6551 * up with 100% coverage. That's almost certainly not what the user wants.
6552 * The work-around is to add extra shader code to compute coverage from alpha
6553 * and write it to the coverage output register (if the user's shader doesn't
6554 * do so already). We'll probably do that in the future.
6555 */
6556 static void
6557 emit_alpha_to_one_instructions(struct svga_shader_emitter_v10 *emit,
6558 unsigned fs_color_tmp_index)
6559 {
6560 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
6561 unsigned i;
6562
6563 /* Note: it's not 100% clear from the spec if we're supposed to clobber
6564 * the alpha for all render targets. But that's what NVIDIA does and
6565 * that's what Piglit tests.
6566 */
6567 for (i = 0; i < emit->fs.num_color_outputs; i++) {
6568 struct tgsi_full_dst_register color_dst;
6569
6570 if (fs_color_tmp_index != INVALID_INDEX && i == 0) {
6571 /* write to the temp color register */
6572 color_dst = make_dst_temp_reg(fs_color_tmp_index);
6573 }
6574 else {
6575 /* write directly to the color[i] output */
6576 color_dst = make_dst_output_reg(emit->fs.color_out_index[i]);
6577 }
6578
6579 color_dst = writemask_dst(&color_dst, TGSI_WRITEMASK_W);
6580
6581 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst, &one, FALSE);
6582 }
6583 }
6584
6585
6586 /**
6587 * Emit alpha test code. This compares TEMP[fs_color_tmp_index].w
6588 * against the alpha reference value and discards the fragment if the
6589 * comparison fails.
6590 */
6591 static void
6592 emit_alpha_test_instructions(struct svga_shader_emitter_v10 *emit,
6593 unsigned fs_color_tmp_index)
6594 {
6595 /* compare output color's alpha to alpha ref and kill */
6596 unsigned tmp = get_temp_index(emit);
6597 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
6598 struct tgsi_full_src_register tmp_src_x =
6599 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
6600 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
6601 struct tgsi_full_src_register color_src =
6602 make_src_temp_reg(fs_color_tmp_index);
6603 struct tgsi_full_src_register color_src_w =
6604 scalar_src(&color_src, TGSI_SWIZZLE_W);
6605 struct tgsi_full_src_register ref_src =
6606 make_src_immediate_reg(emit->fs.alpha_ref_index);
6607 struct tgsi_full_dst_register color_dst =
6608 make_dst_output_reg(emit->fs.color_out_index[0]);
6609
6610 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6611
6612 /* dst = src0 'alpha_func' src1 */
6613 emit_comparison(emit, emit->key.fs.alpha_func, &tmp_dst,
6614 &color_src_w, &ref_src);
6615
6616 /* DISCARD if dst.x == 0 */
6617 begin_emit_instruction(emit);
6618 emit_discard_opcode(emit, FALSE); /* discard if src0.x is zero */
6619 emit_src_register(emit, &tmp_src_x);
6620 end_emit_instruction(emit);
6621
6622 /* If we don't need to broadcast the color below, emit the final color here.
6623 */
6624 if (emit->key.fs.write_color0_to_n_cbufs <= 1) {
6625 /* MOV output.color, tempcolor */
6626 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst,
6627 &color_src, FALSE); /* XXX saturate? */
6628 }
6629
6630 free_temp_indexes(emit);
6631 }
6632
6633
6634 /**
6635 * Emit instructions for writing a single color output to multiple
6636 * color buffers.
6637 * This is used when the TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS property
6638 * is set (or when key.fs.white_fragments is true) and the number of
6639 * render targets is greater than one.
6640 * \param fs_color_tmp_index index of the temp register that holds the
6641 * color to broadcast.
6642 */
6643 static void
6644 emit_broadcast_color_instructions(struct svga_shader_emitter_v10 *emit,
6645 unsigned fs_color_tmp_index)
6646 {
6647 const unsigned n = emit->key.fs.write_color0_to_n_cbufs;
6648 unsigned i;
6649 struct tgsi_full_src_register color_src;
6650
6651 if (emit->key.fs.white_fragments) {
6652 /* set all color outputs to white */
6653 color_src = make_immediate_reg_float(emit, 1.0f);
6654 }
6655 else {
6656 /* set all color outputs to TEMP[fs_color_tmp_index] */
6657 assert(fs_color_tmp_index != INVALID_INDEX);
6658 color_src = make_src_temp_reg(fs_color_tmp_index);
6659 }
6660
6661 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6662
6663 for (i = 0; i < n; i++) {
6664 unsigned output_reg = emit->fs.color_out_index[i];
6665 struct tgsi_full_dst_register color_dst =
6666 make_dst_output_reg(output_reg);
6667
6668 /* Fill in this semantic here since we'll use it later in
6669 * emit_dst_register().
6670 */
6671 emit->info.output_semantic_name[output_reg] = TGSI_SEMANTIC_COLOR;
6672
6673 /* MOV output.color[i], tempcolor */
6674 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst,
6675 &color_src, FALSE); /* XXX saturate? */
6676 }
6677 }
6678
6679
6680 /**
6681 * Emit extra helper code after the original shader code, but before the
6682 * last END/RET instruction.
6683 * For vertex shaders this means emitting the extra code to apply the
6684 * prescale scale/translation.
6685 */
6686 static boolean
6687 emit_post_helpers(struct svga_shader_emitter_v10 *emit)
6688 {
6689 if (emit->unit == PIPE_SHADER_VERTEX) {
6690 emit_vertex_instructions(emit);
6691 }
6692 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
6693 const unsigned fs_color_tmp_index = emit->fs.color_tmp_index;
6694
6695 assert(!(emit->key.fs.white_fragments &&
6696 emit->key.fs.write_color0_to_n_cbufs == 0));
6697
6698 /* We no longer want emit_dst_register() to substitute the
6699 * temporary fragment color register for the real color output.
6700 */
6701 emit->fs.color_tmp_index = INVALID_INDEX;
6702
6703 if (emit->key.fs.alpha_to_one) {
6704 emit_alpha_to_one_instructions(emit, fs_color_tmp_index);
6705 }
6706 if (emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS) {
6707 emit_alpha_test_instructions(emit, fs_color_tmp_index);
6708 }
6709 if (emit->key.fs.write_color0_to_n_cbufs > 1 ||
6710 emit->key.fs.white_fragments) {
6711 emit_broadcast_color_instructions(emit, fs_color_tmp_index);
6712 }
6713 }
6714
6715 return TRUE;
6716 }
6717
6718
6719 /**
6720 * Translate the TGSI tokens into VGPU10 tokens.
6721 */
6722 static boolean
6723 emit_vgpu10_instructions(struct svga_shader_emitter_v10 *emit,
6724 const struct tgsi_token *tokens)
6725 {
6726 struct tgsi_parse_context parse;
6727 boolean ret = TRUE;
6728 boolean pre_helpers_emitted = FALSE;
6729 unsigned inst_number = 0;
6730
6731 tgsi_parse_init(&parse, tokens);
6732
6733 while (!tgsi_parse_end_of_tokens(&parse)) {
6734 tgsi_parse_token(&parse);
6735
6736 switch (parse.FullToken.Token.Type) {
6737 case TGSI_TOKEN_TYPE_IMMEDIATE:
6738 ret = emit_vgpu10_immediate(emit, &parse.FullToken.FullImmediate);
6739 if (!ret)
6740 goto done;
6741 break;
6742
6743 case TGSI_TOKEN_TYPE_DECLARATION:
6744 ret = emit_vgpu10_declaration(emit, &parse.FullToken.FullDeclaration);
6745 if (!ret)
6746 goto done;
6747 break;
6748
6749 case TGSI_TOKEN_TYPE_INSTRUCTION:
6750 if (!pre_helpers_emitted) {
6751 ret = emit_pre_helpers(emit);
6752 if (!ret)
6753 goto done;
6754 pre_helpers_emitted = TRUE;
6755 }
6756 ret = emit_vgpu10_instruction(emit, inst_number++,
6757 &parse.FullToken.FullInstruction);
6758 if (!ret)
6759 goto done;
6760 break;
6761
6762 case TGSI_TOKEN_TYPE_PROPERTY:
6763 ret = emit_vgpu10_property(emit, &parse.FullToken.FullProperty);
6764 if (!ret)
6765 goto done;
6766 break;
6767
6768 default:
6769 break;
6770 }
6771 }
6772
6773 done:
6774 tgsi_parse_free(&parse);
6775 return ret;
6776 }
6777
6778
6779 /**
6780 * Emit the first VGPU10 shader tokens.
6781 */
6782 static boolean
6783 emit_vgpu10_header(struct svga_shader_emitter_v10 *emit)
6784 {
6785 VGPU10ProgramToken ptoken;
6786
6787 /* First token: VGPU10ProgramToken (version info, program type (VS,GS,PS)) */
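/* emit->version is 40 or 41, so this encodes shader model 4.0 or 4.1 */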
6788 ptoken.majorVersion = emit->version / 10;
6789 ptoken.minorVersion = emit->version % 10;
6790 ptoken.programType = translate_shader_type(emit->unit);
6791 if (!emit_dword(emit, ptoken.value))
6792 return FALSE;
6793
6794 /* Second token: total length of shader, in tokens. We can't fill this
6795 * in until we're all done. Emit zero for now.
6796 */
6797 return emit_dword(emit, 0);
6798 }
6799
6800
6801 static boolean
6802 emit_vgpu10_tail(struct svga_shader_emitter_v10 *emit)
6803 {
6804 VGPU10ProgramToken *tokens;
6805
6806 /* Replace the second token with total shader length */
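/* (this is the length dword which emit_vgpu10_header() wrote as zero) */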
6807 tokens = (VGPU10ProgramToken *) emit->buf;
6808 tokens[1].value = emit_get_num_tokens(emit);
6809
6810 return TRUE;
6811 }
6812
6813
6814 /**
6815 * Modify the FS to read the BCOLORs and use the FACE register
6816 * to choose between the front/back colors.
6817 */
6818 static const struct tgsi_token *
6819 transform_fs_twoside(const struct tgsi_token *tokens)
6820 {
6821 if (0) {
6822 debug_printf("Before tgsi_add_two_side ------------------\n");
6823 tgsi_dump(tokens,0);
6824 }
6825 tokens = tgsi_add_two_side(tokens);
6826 if (0) {
6827 debug_printf("After tgsi_add_two_side ------------------\n");
6828 tgsi_dump(tokens, 0);
6829 }
6830 return tokens;
6831 }
6832
6833
6834 /**
6835 * Modify the FS to do polygon stipple.
6836 */
6837 static const struct tgsi_token *
6838 transform_fs_pstipple(struct svga_shader_emitter_v10 *emit,
6839 const struct tgsi_token *tokens)
6840 {
6841 const struct tgsi_token *new_tokens;
6842 unsigned unit;
6843
6844 if (0) {
6845 debug_printf("Before pstipple ------------------\n");
6846 tgsi_dump(tokens,0);
6847 }
6848
6849 new_tokens = util_pstipple_create_fragment_shader(tokens, &unit, 0,
6850 TGSI_FILE_INPUT);
6851
6852 emit->fs.pstipple_sampler_unit = unit;
6853
6854 /* Setup texture state for stipple */
6855 emit->sampler_target[unit] = TGSI_TEXTURE_2D;
6856 emit->key.tex[unit].swizzle_r = TGSI_SWIZZLE_X;
6857 emit->key.tex[unit].swizzle_g = TGSI_SWIZZLE_Y;
6858 emit->key.tex[unit].swizzle_b = TGSI_SWIZZLE_Z;
6859 emit->key.tex[unit].swizzle_a = TGSI_SWIZZLE_W;
6860
6861 if (0) {
6862 debug_printf("After pstipple ------------------\n");
6863 tgsi_dump(new_tokens, 0);
6864 }
6865
6866 return new_tokens;
6867 }
6868
6869 /**
6870 * Modify the FS to support anti-aliased points.
6871 */
6872 static const struct tgsi_token *
6873 transform_fs_aapoint(const struct tgsi_token *tokens,
6874 int aa_coord_index)
6875 {
6876 if (0) {
6877 debug_printf("Before tgsi_add_aa_point ------------------\n");
6878 tgsi_dump(tokens,0);
6879 }
6880 tokens = tgsi_add_aa_point(tokens, aa_coord_index);
6881 if (0) {
6882 debug_printf("After tgsi_add_aa_point ------------------\n");
6883 tgsi_dump(tokens, 0);
6884 }
6885 return tokens;
6886 }
6887
6888 /**
6889 * This is the main entry point for the TGSI -> VGPU10 translator.
6890 */
6891 struct svga_shader_variant *
6892 svga_tgsi_vgpu10_translate(struct svga_context *svga,
6893 const struct svga_shader *shader,
6894 const struct svga_compile_key *key,
6895 enum pipe_shader_type unit)
6896 {
6897 struct svga_shader_variant *variant = NULL;
6898 struct svga_shader_emitter_v10 *emit;
6899 const struct tgsi_token *tokens = shader->tokens;
6900 struct svga_vertex_shader *vs = svga->curr.vs;
6901 struct svga_geometry_shader *gs = svga->curr.gs;
6902
6903 assert(unit == PIPE_SHADER_VERTEX ||
6904 unit == PIPE_SHADER_GEOMETRY ||
6905 unit == PIPE_SHADER_FRAGMENT);
6906
6907 /* These two flags cannot be used together */
6908 assert(key->vs.need_prescale + key->vs.undo_viewport <= 1);
6909
6910 SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_TGSIVGPU10TRANSLATE);
6911 /*
6912 * Setup the code emitter
6913 */
6914 emit = alloc_emitter();
6915 if (!emit)
6916 goto done;
6917
6918 emit->unit = unit;
6919 emit->version = svga_have_sm4_1(svga) ? 41 : 40;
6920
6921 emit->key = *key;
6922
6923 emit->vposition.need_prescale = (emit->key.vs.need_prescale ||
6924 emit->key.gs.need_prescale);
6925 emit->vposition.tmp_index = INVALID_INDEX;
6926 emit->vposition.so_index = INVALID_INDEX;
6927 emit->vposition.out_index = INVALID_INDEX;
6928
6929 emit->fs.color_tmp_index = INVALID_INDEX;
6930 emit->fs.face_input_index = INVALID_INDEX;
6931 emit->fs.fragcoord_input_index = INVALID_INDEX;
6932 emit->fs.sample_id_sys_index = INVALID_INDEX;
6933 emit->fs.sample_pos_sys_index = INVALID_INDEX;
6934
6935 emit->gs.prim_id_index = INVALID_INDEX;
6936
6937 emit->clip_dist_out_index = INVALID_INDEX;
6938 emit->clip_dist_tmp_index = INVALID_INDEX;
6939 emit->clip_dist_so_index = INVALID_INDEX;
6940 emit->clip_vertex_out_index = INVALID_INDEX;
6941
6942 if (emit->key.fs.alpha_func == SVGA3D_CMP_INVALID) {
6943 emit->key.fs.alpha_func = SVGA3D_CMP_ALWAYS;
6944 }
6945
6946 if (unit == PIPE_SHADER_FRAGMENT) {
6947 if (key->fs.light_twoside) {
6948 tokens = transform_fs_twoside(tokens);
6949 }
6950 if (key->fs.pstipple) {
6951 const struct tgsi_token *new_tokens =
6952 transform_fs_pstipple(emit, tokens);
6953 if (tokens != shader->tokens) {
6954 /* free the two-sided shader tokens */
6955 tgsi_free_tokens(tokens);
6956 }
6957 tokens = new_tokens;
6958 }
6959 if (key->fs.aa_point) {
6960 tokens = transform_fs_aapoint(tokens, key->fs.aa_point_coord_index);
6961 }
6962 }
6963
6964 if (SVGA_DEBUG & DEBUG_TGSI) {
6965 debug_printf("#####################################\n");
6966 debug_printf("### TGSI Shader %u\n", shader->id);
6967 tgsi_dump(tokens, 0);
6968 }
6969
6970 /**
6971 * Rescan the shader if the token string differs from the one stored in
6972 * the shader object; otherwise the cached shader info is already up to date.
6973 */
6974 if (tokens != shader->tokens) {
6975 tgsi_scan_shader(tokens, &emit->info);
6976 } else {
6977 emit->info = shader->info;
6978 }
6979
6980 emit->num_outputs = emit->info.num_outputs;
6981
6982 if (unit == PIPE_SHADER_FRAGMENT) {
6983 /* Compute FS input remapping to match the output from VS/GS */
6984 if (gs) {
6985 svga_link_shaders(&gs->base.info, &emit->info, &emit->linkage);
6986 } else {
6987 assert(vs);
6988 svga_link_shaders(&vs->base.info, &emit->info, &emit->linkage);
6989 }
6990 } else if (unit == PIPE_SHADER_GEOMETRY) {
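/* The GS consumes the VS outputs, so link its inputs against the VS info */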
6991 assert(vs);
6992 svga_link_shaders(&vs->base.info, &emit->info, &emit->linkage);
6993 }
6994
6995 /* Since the vertex shader does not go through the linker to establish
6996 * the input map, make sure the highest input register index is set
6997 * properly here.
6998 */
6999 emit->linkage.input_map_max = MAX2((int)emit->linkage.input_map_max,
7000 emit->info.file_max[TGSI_FILE_INPUT]);
7001
7002 determine_clipping_mode(emit);
7003
7004 if (unit == PIPE_SHADER_GEOMETRY || unit == PIPE_SHADER_VERTEX) {
7005 if (shader->stream_output != NULL || emit->clip_mode == CLIP_DISTANCE) {
7006 /* If there are stream output declarations associated with this shader,
7007 * or the shader writes to ClipDistance, then reserve extra registers
7008 * for the non-adjusted vertex position and the ClipDistance shadow
7009 * copy.
7010 */
7011 emit->vposition.so_index = emit->num_outputs++;
7012
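/* More than four clip distance values need a second shadow register */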
7013 if (emit->clip_mode == CLIP_DISTANCE) {
7014 emit->clip_dist_so_index = emit->num_outputs++;
7015 if (emit->info.num_written_clipdistance > 4)
7016 emit->num_outputs++;
7017 }
7018 }
7019 }
7020
7021 /*
7022 * Do actual shader translation.
7023 */
7024 if (!emit_vgpu10_header(emit)) {
7025 debug_printf("svga: emit VGPU10 header failed\n");
7026 goto cleanup;
7027 }
7028
7029 if (!emit_vgpu10_instructions(emit, tokens)) {
7030 debug_printf("svga: emit VGPU10 instructions failed\n");
7031 goto cleanup;
7032 }
7033
7034 if (!emit_vgpu10_tail(emit)) {
7035 debug_printf("svga: emit VGPU10 tail failed\n");
7036 goto cleanup;
7037 }
7038
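/* Fail the translation if any register index overflowed the limits */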
7039 if (emit->register_overflow) {
7040 goto cleanup;
7041 }
7042
7043 /*
7044 * Create and initialize the 'variant' object.
7045 */
7046 variant = svga_new_shader_variant(svga);
7047 if (!variant)
7048 goto cleanup;
7049
7050 variant->shader = shader;
7051 variant->nr_tokens = emit_get_num_tokens(emit);
7052 variant->tokens = (const unsigned *)emit->buf;
7053 emit->buf = NULL; /* buffer is no longer owned by the emitter context */
7054 memcpy(&variant->key, key, sizeof(*key));
7055 variant->id = UTIL_BITMASK_INVALID_INDEX;
7056
7057 /* The extra constants start right after the constants declared in the
7058 * shader, so the starting offset equals the declared constant count.
7059 */
7060 variant->extra_const_start = emit->num_shader_consts[0];
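/* For example, if the shader declares constants c[0]..c[N-1], the
 * driver-supplied extra constants start at index N.
 */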
7061 if (key->gs.wide_point) {
7062 /**
7063 * The inverse-viewport-scale constant added to the transformed shader
7064 * is supplied by the driver, so the extra constant starting offset
7065 * needs to be reduced by 1.
7066 */
7067 assert(variant->extra_const_start > 0);
7068 variant->extra_const_start--;
7069 }
7070
7071 variant->pstipple_sampler_unit = emit->fs.pstipple_sampler_unit;
7072
7073 /* If there was exactly one write to a fragment shader output register
7074 * and it came from a constant buffer, we know all fragments will have
7075 * the same color (except for blending).
7076 */
7077 variant->constant_color_output =
7078 emit->constant_color_output && emit->num_output_writes == 1;
7079
7080 /* Keep track in the variant of whether flat interpolation is used
7081 * for any of the varyings.
7082 */
7083 variant->uses_flat_interp = emit->uses_flat_interp;
7084
7085 variant->fs_shadow_compare_units = emit->fs.shadow_compare_units;
7086
7087 if (tokens != shader->tokens) {
7088 tgsi_free_tokens(tokens);
7089 }
7090
7091 cleanup:
7092 free_emitter(emit);
7093
7094 done:
7095 SVGA_STATS_TIME_POP(svga_sws(svga));
7096 return variant;
7097 }