gallium: replace DRM_CONF_THROTTLE with PIPE_CAP_MAX_FRAMES_IN_FLIGHT
[mesa.git] / src / gallium / drivers / svga / svga_tgsi_vgpu10.c
1 /**********************************************************
2 * Copyright 1998-2013 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 /**
27 * @file svga_tgsi_vgpu10.c
28 *
29 * TGSI -> VGPU10 shader translation.
30 *
31 * \author Mingcheng Chen
32 * \author Brian Paul
33 */
34
35 #include "pipe/p_compiler.h"
36 #include "pipe/p_shader_tokens.h"
37 #include "pipe/p_defines.h"
38 #include "tgsi/tgsi_build.h"
39 #include "tgsi/tgsi_dump.h"
40 #include "tgsi/tgsi_info.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_scan.h"
43 #include "tgsi/tgsi_two_side.h"
44 #include "tgsi/tgsi_aa_point.h"
45 #include "tgsi/tgsi_util.h"
46 #include "util/u_math.h"
47 #include "util/u_memory.h"
48 #include "util/u_bitmask.h"
49 #include "util/u_debug.h"
50 #include "util/u_pstipple.h"
51
52 #include "svga_context.h"
53 #include "svga_debug.h"
54 #include "svga_link.h"
55 #include "svga_shader.h"
56 #include "svga_tgsi.h"
57
58 #include "VGPU10ShaderTokens.h"
59
60
61 #define INVALID_INDEX 99999
62 #define MAX_INTERNAL_TEMPS 3
63 #define MAX_SYSTEM_VALUES 4
64 #define MAX_IMMEDIATE_COUNT \
65 (VGPU10_MAX_IMMEDIATE_CONSTANT_BUFFER_ELEMENT_COUNT/4)
66 #define MAX_TEMP_ARRAYS 64 /* Enough? */
67
68
69 /**
70  * Clipping is complicated.  There are four different cases which we
71 * handle during VS/GS shader translation:
72 */
73 enum clipping_mode
74 {
75 CLIP_NONE, /**< No clipping enabled */
76 CLIP_LEGACY, /**< The shader has no clipping declarations or code but
77 * one or more user-defined clip planes are enabled. We
78 * generate extra code to emit clip distances.
79 */
80 CLIP_DISTANCE, /**< The shader already declares clip distance output
81 * registers and has code to write to them.
82 */
83 CLIP_VERTEX /**< The shader declares a clip vertex output register and
84 * has code that writes to the register. We convert the
85 * clipvertex position into one or more clip distances.
86 */
87 };
88
89
90 struct svga_shader_emitter_v10
91 {
92 /* The token output buffer */
93 unsigned size;
94 char *buf;
95 char *ptr;
96
97 /* Information about the shader and state (does not change) */
98 struct svga_compile_key key;
99 struct tgsi_shader_info info;
100 unsigned unit;
101 unsigned version; /**< Either 40 or 41 at this time */
102
103 unsigned inst_start_token;
104 boolean discard_instruction; /**< throw away current instruction? */
105
106 union tgsi_immediate_data immediates[MAX_IMMEDIATE_COUNT][4];
107 unsigned num_immediates; /**< Number of immediates emitted */
108 unsigned common_immediate_pos[8]; /**< literals for common immediates */
109 unsigned num_common_immediates;
110 boolean immediates_emitted;
111
112 unsigned num_outputs; /**< includes any extra outputs */
113 /** The first extra output is reserved for
114 * non-adjusted vertex position for
115 * stream output purposes
116 */
117
118 /* Temporary Registers */
119 unsigned num_shader_temps; /**< num of temps used by original shader */
120 unsigned internal_temp_count; /**< currently allocated internal temps */
121 struct {
122 unsigned start, size;
123 } temp_arrays[MAX_TEMP_ARRAYS];
124 unsigned num_temp_arrays;
125
126 /** Map TGSI temp registers to VGPU10 temp array IDs and indexes */
127 struct {
128 unsigned arrayId, index;
129 } temp_map[VGPU10_MAX_TEMPS]; /**< arrayId, element */
130
131 /** Number of constants used by the original shader for each constant buffer.
132 * The size should probably always match that of svga_state.constbufs.
133 */
134 unsigned num_shader_consts[SVGA_MAX_CONST_BUFS];
135
136 /* Samplers */
137 unsigned num_samplers;
138 boolean sampler_view[PIPE_MAX_SAMPLERS]; /**< True if sampler view exists */
139 ubyte sampler_target[PIPE_MAX_SAMPLERS]; /**< TGSI_TEXTURE_x */
140 ubyte sampler_return_type[PIPE_MAX_SAMPLERS]; /**< TGSI_RETURN_TYPE_x */
141
142 /* Address regs (really implemented with temps) */
143 unsigned num_address_regs;
144 unsigned address_reg_index[MAX_VGPU10_ADDR_REGS];
145
146 /* Output register usage masks */
147 ubyte output_usage_mask[PIPE_MAX_SHADER_OUTPUTS];
148
149 /* To map TGSI system value index to VGPU shader input indexes */
150 ubyte system_value_indexes[MAX_SYSTEM_VALUES];
151
152 struct {
153 /* vertex position scale/translation */
154 unsigned out_index; /**< the real position output reg */
155 unsigned tmp_index; /**< the fake/temp position output reg */
156 unsigned so_index; /**< the non-adjusted position output reg */
157 unsigned prescale_scale_index, prescale_trans_index;
158 boolean need_prescale;
159 } vposition;
160
161 /* For vertex shaders only */
162 struct {
163 /* viewport constant */
164 unsigned viewport_index;
165
166 /* temp index of adjusted vertex attributes */
167 unsigned adjusted_input[PIPE_MAX_SHADER_INPUTS];
168 } vs;
169
170 /* For fragment shaders only */
171 struct {
172 unsigned color_out_index[PIPE_MAX_COLOR_BUFS]; /**< the real color output regs */
173 unsigned num_color_outputs;
174 unsigned color_tmp_index; /**< fake/temp color output reg */
175 unsigned alpha_ref_index; /**< immediate constant for alpha ref */
176
177 /* front-face */
178 unsigned face_input_index; /**< real fragment shader face reg (bool) */
179 unsigned face_tmp_index; /**< temp face reg converted to -1 / +1 */
180
181 unsigned pstipple_sampler_unit;
182
183 unsigned fragcoord_input_index; /**< real fragment position input reg */
184 unsigned fragcoord_tmp_index; /**< 1/w modified position temp reg */
185
186 /** Which texture units are doing shadow comparison in the FS code */
187 unsigned shadow_compare_units;
188
189 unsigned sample_id_sys_index; /**< TGSI index of sample id sys value */
190
191 unsigned sample_pos_sys_index; /**< TGSI index of sample pos sys value */
192 unsigned sample_pos_tmp_index; /**< which temp reg has the sample pos */
193 } fs;
194
195 /* For geometry shaders only */
196 struct {
197 VGPU10_PRIMITIVE prim_type;/**< VGPU10 primitive type */
198 VGPU10_PRIMITIVE_TOPOLOGY prim_topology; /**< VGPU10 primitive topology */
199 unsigned input_size; /**< size of input arrays */
200 unsigned prim_id_index; /**< primitive id register index */
201 unsigned max_out_vertices; /**< maximum number of output vertices */
202 } gs;
203
204 /* For vertex or geometry shaders */
205 enum clipping_mode clip_mode;
206 unsigned clip_dist_out_index; /**< clip distance output register index */
207 unsigned clip_dist_tmp_index; /**< clip distance temporary register */
208 unsigned clip_dist_so_index; /**< clip distance shadow copy */
209
210 /** Index of temporary holding the clipvertex coordinate */
211 unsigned clip_vertex_out_index; /**< clip vertex output register index */
212 unsigned clip_vertex_tmp_index; /**< clip vertex temporary index */
213
214 /* user clip plane constant slot indexes */
215 unsigned clip_plane_const[PIPE_MAX_CLIP_PLANES];
216
217 unsigned num_output_writes;
218 boolean constant_color_output;
219
220 boolean uses_flat_interp;
221
222 /* For all shaders: const reg index for RECT coord scaling */
223 unsigned texcoord_scale_index[PIPE_MAX_SAMPLERS];
224
225 /* For all shaders: const reg index for texture buffer size */
226 unsigned texture_buffer_size_index[PIPE_MAX_SAMPLERS];
227
228 /* VS/GS/FS Linkage info */
229 struct shader_linkage linkage;
230
231 bool register_overflow; /**< Set if we exceed a VGPU10 register limit */
232 };
233
234
235 static boolean
236 emit_post_helpers(struct svga_shader_emitter_v10 *emit);
237
238 static boolean
239 emit_vertex(struct svga_shader_emitter_v10 *emit,
240 const struct tgsi_full_instruction *inst);
241
242 static char err_buf[128];
243
244 static boolean
245 expand(struct svga_shader_emitter_v10 *emit)
246 {
247 char *new_buf;
248 unsigned newsize = emit->size * 2;
249
250 if (emit->buf != err_buf)
251 new_buf = REALLOC(emit->buf, emit->size, newsize);
252 else
253 new_buf = NULL;
254
255 if (!new_buf) {
256 emit->ptr = err_buf;
257 emit->buf = err_buf;
258 emit->size = sizeof(err_buf);
259 return FALSE;
260 }
261
262 emit->size = newsize;
263 emit->ptr = new_buf + (emit->ptr - emit->buf);
264 emit->buf = new_buf;
265 return TRUE;
266 }
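/* Illustrative note: alloc_emitter() below starts with size = 512 and calls
 * expand(), so the first real allocation is 1024 bytes; every later expand()
 * doubles the size again (2048, 4096, ...).  If REALLOC() fails, the emitter
 * is pointed at the static err_buf so subsequent emit_dword() calls still
 * have somewhere to write, and the FALSE return records the failure.
 */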
267
268 /**
269 * Create and initialize a new svga_shader_emitter_v10 object.
270 */
271 static struct svga_shader_emitter_v10 *
272 alloc_emitter(void)
273 {
274 struct svga_shader_emitter_v10 *emit = CALLOC(1, sizeof(*emit));
275
276 if (!emit)
277 return NULL;
278
279 /* to initialize the output buffer */
280 emit->size = 512;
281 if (!expand(emit)) {
282 FREE(emit);
283 return NULL;
284 }
285 return emit;
286 }
287
288 /**
289 * Free an svga_shader_emitter_v10 object.
290 */
291 static void
292 free_emitter(struct svga_shader_emitter_v10 *emit)
293 {
294 assert(emit);
295 FREE(emit->buf); /* will be NULL if translation succeeded */
296 FREE(emit);
297 }
298
299 static inline boolean
300 reserve(struct svga_shader_emitter_v10 *emit,
301 unsigned nr_dwords)
302 {
303 while (emit->ptr - emit->buf + nr_dwords * sizeof(uint32) >= emit->size) {
304 if (!expand(emit))
305 return FALSE;
306 }
307
308 return TRUE;
309 }
310
311 static boolean
312 emit_dword(struct svga_shader_emitter_v10 *emit, uint32 dword)
313 {
314 if (!reserve(emit, 1))
315 return FALSE;
316
317 *(uint32 *)emit->ptr = dword;
318 emit->ptr += sizeof dword;
319 return TRUE;
320 }
321
322 static boolean
323 emit_dwords(struct svga_shader_emitter_v10 *emit,
324 const uint32 *dwords,
325 unsigned nr)
326 {
327 if (!reserve(emit, nr))
328 return FALSE;
329
330 memcpy(emit->ptr, dwords, nr * sizeof *dwords);
331 emit->ptr += nr * sizeof *dwords;
332 return TRUE;
333 }
334
335 /** Return the number of tokens in the emitter's buffer */
336 static unsigned
337 emit_get_num_tokens(const struct svga_shader_emitter_v10 *emit)
338 {
339 return (emit->ptr - emit->buf) / sizeof(unsigned);
340 }
341
342
343 /**
344 * Check for register overflow. If we overflow we'll set an
345 * error flag. This function can be called for register declarations
346 * or for registers used as src/dst instruction operands.
347 * \param operandType register type. One of VGPU10_OPERAND_TYPE_x
348 * or VGPU10_OPCODE_DCL_x
349 * \param index the register index
350 */
351 static void
352 check_register_index(struct svga_shader_emitter_v10 *emit,
353 unsigned operandType, unsigned index)
354 {
355 bool overflow_before = emit->register_overflow;
356
357 switch (operandType) {
358 case VGPU10_OPERAND_TYPE_TEMP:
359 case VGPU10_OPERAND_TYPE_INDEXABLE_TEMP:
360 case VGPU10_OPCODE_DCL_TEMPS:
361 if (index >= VGPU10_MAX_TEMPS) {
362 emit->register_overflow = TRUE;
363 }
364 break;
365 case VGPU10_OPERAND_TYPE_CONSTANT_BUFFER:
366 case VGPU10_OPCODE_DCL_CONSTANT_BUFFER:
367 if (index >= VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT) {
368 emit->register_overflow = TRUE;
369 }
370 break;
371 case VGPU10_OPERAND_TYPE_INPUT:
372 case VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID:
373 case VGPU10_OPCODE_DCL_INPUT:
374 case VGPU10_OPCODE_DCL_INPUT_SGV:
375 case VGPU10_OPCODE_DCL_INPUT_SIV:
376 case VGPU10_OPCODE_DCL_INPUT_PS:
377 case VGPU10_OPCODE_DCL_INPUT_PS_SGV:
378 case VGPU10_OPCODE_DCL_INPUT_PS_SIV:
379 if ((emit->unit == PIPE_SHADER_VERTEX &&
380 index >= VGPU10_MAX_VS_INPUTS) ||
381 (emit->unit == PIPE_SHADER_GEOMETRY &&
382 index >= VGPU10_MAX_GS_INPUTS) ||
383 (emit->unit == PIPE_SHADER_FRAGMENT &&
384 index >= VGPU10_MAX_FS_INPUTS)) {
385 emit->register_overflow = TRUE;
386 }
387 break;
388 case VGPU10_OPERAND_TYPE_OUTPUT:
389 case VGPU10_OPCODE_DCL_OUTPUT:
390 case VGPU10_OPCODE_DCL_OUTPUT_SGV:
391 case VGPU10_OPCODE_DCL_OUTPUT_SIV:
392 if ((emit->unit == PIPE_SHADER_VERTEX &&
393 index >= VGPU10_MAX_VS_OUTPUTS) ||
394 (emit->unit == PIPE_SHADER_GEOMETRY &&
395 index >= VGPU10_MAX_GS_OUTPUTS) ||
396 (emit->unit == PIPE_SHADER_FRAGMENT &&
397 index >= VGPU10_MAX_FS_OUTPUTS)) {
398 emit->register_overflow = TRUE;
399 }
400 break;
401 case VGPU10_OPERAND_TYPE_SAMPLER:
402 case VGPU10_OPCODE_DCL_SAMPLER:
403 if (index >= VGPU10_MAX_SAMPLERS) {
404 emit->register_overflow = TRUE;
405 }
406 break;
407 case VGPU10_OPERAND_TYPE_RESOURCE:
408 case VGPU10_OPCODE_DCL_RESOURCE:
409 if (index >= VGPU10_MAX_RESOURCES) {
410 emit->register_overflow = TRUE;
411 }
412 break;
413 case VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER:
414 if (index >= MAX_IMMEDIATE_COUNT) {
415 emit->register_overflow = TRUE;
416 }
417 break;
418 case VGPU10_OPERAND_TYPE_OUTPUT_COVERAGE_MASK:
419 /* nothing */
420 break;
421 default:
422 assert(0);
423 ; /* nothing */
424 }
425
426 if (emit->register_overflow && !overflow_before) {
427 debug_printf("svga: vgpu10 register overflow (reg %u, index %u)\n",
428 operandType, index);
429 }
430 }
431
432
433 /**
434 * Examine misc state to determine the clipping mode.
435 */
436 static void
437 determine_clipping_mode(struct svga_shader_emitter_v10 *emit)
438 {
439 if (emit->info.num_written_clipdistance > 0) {
440 emit->clip_mode = CLIP_DISTANCE;
441 }
442 else if (emit->info.writes_clipvertex) {
443 emit->clip_mode = CLIP_VERTEX;
444 }
445 else if (emit->key.clip_plane_enable) {
446 emit->clip_mode = CLIP_LEGACY;
447 }
448 else {
449 emit->clip_mode = CLIP_NONE;
450 }
451 }
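/* Example (illustrative): a shader that writes gl_ClipDistance[] in GLSL
 * arrives here with num_written_clipdistance > 0 and selects CLIP_DISTANCE;
 * one that writes gl_ClipVertex selects CLIP_VERTEX; a shader with no
 * clipping code at all but with user clip planes enabled in the compile key
 * falls back to CLIP_LEGACY; otherwise CLIP_NONE.
 */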
452
453
454 /**
455 * For clip distance register declarations and clip distance register
456 * writes we need to mask the declaration usage or instruction writemask
457 * (respectively) against the set of actually enabled clip planes.
458 *
459 * The piglit test spec/glsl-1.30/execution/clipping/vs-clip-distance-enables
460 * has a VS that writes to all 8 clip distance registers, but the plane enable
461 * flags are a subset of that.
462 *
463 * This function is used to apply the plane enable flags to the register
464 * declaration or instruction writemask.
465 *
466 * \param writemask the declaration usage mask or instruction writemask
467 * \param clip_reg_index which clip plane register is being declared/written.
468 * The legal values are 0 and 1 (four clip planes per
469 * register, for a total of 8 clip planes)
470 */
471 static unsigned
472 apply_clip_plane_mask(struct svga_shader_emitter_v10 *emit,
473 unsigned writemask, unsigned clip_reg_index)
474 {
475 unsigned shift;
476
477 assert(clip_reg_index < 2);
478
479 /* four clip planes per clip register: */
480 shift = clip_reg_index * 4;
481 writemask &= ((emit->key.clip_plane_enable >> shift) & 0xf);
482
483 return writemask;
484 }
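/* Worked example (illustrative): with clip_plane_enable == 0x3f (planes 0-5
 * enabled) and clip_reg_index == 1, shift is 4 and the writemask is ANDed
 * with 0x3, so only the x/y components of the second clip-distance register
 * (planes 4 and 5) end up being written.
 */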
485
486
487 /**
488 * Translate gallium shader type into VGPU10 type.
489 */
490 static VGPU10_PROGRAM_TYPE
491 translate_shader_type(unsigned type)
492 {
493 switch (type) {
494 case PIPE_SHADER_VERTEX:
495 return VGPU10_VERTEX_SHADER;
496 case PIPE_SHADER_GEOMETRY:
497 return VGPU10_GEOMETRY_SHADER;
498 case PIPE_SHADER_FRAGMENT:
499 return VGPU10_PIXEL_SHADER;
500 default:
501 assert(!"Unexpected shader type");
502 return VGPU10_VERTEX_SHADER;
503 }
504 }
505
506
507 /**
508 * Translate a TGSI_OPCODE_x into a VGPU10_OPCODE_x
509 * Note: we only need to translate the opcodes for "simple" instructions,
510 * as seen below. All other opcodes are handled/translated specially.
511 */
512 static VGPU10_OPCODE_TYPE
513 translate_opcode(enum tgsi_opcode opcode)
514 {
515 switch (opcode) {
516 case TGSI_OPCODE_MOV:
517 return VGPU10_OPCODE_MOV;
518 case TGSI_OPCODE_MUL:
519 return VGPU10_OPCODE_MUL;
520 case TGSI_OPCODE_ADD:
521 return VGPU10_OPCODE_ADD;
522 case TGSI_OPCODE_DP3:
523 return VGPU10_OPCODE_DP3;
524 case TGSI_OPCODE_DP4:
525 return VGPU10_OPCODE_DP4;
526 case TGSI_OPCODE_MIN:
527 return VGPU10_OPCODE_MIN;
528 case TGSI_OPCODE_MAX:
529 return VGPU10_OPCODE_MAX;
530 case TGSI_OPCODE_MAD:
531 return VGPU10_OPCODE_MAD;
532 case TGSI_OPCODE_SQRT:
533 return VGPU10_OPCODE_SQRT;
534 case TGSI_OPCODE_FRC:
535 return VGPU10_OPCODE_FRC;
536 case TGSI_OPCODE_FLR:
537 return VGPU10_OPCODE_ROUND_NI;
538 case TGSI_OPCODE_FSEQ:
539 return VGPU10_OPCODE_EQ;
540 case TGSI_OPCODE_FSGE:
541 return VGPU10_OPCODE_GE;
542 case TGSI_OPCODE_FSNE:
543 return VGPU10_OPCODE_NE;
544 case TGSI_OPCODE_DDX:
545 return VGPU10_OPCODE_DERIV_RTX;
546 case TGSI_OPCODE_DDY:
547 return VGPU10_OPCODE_DERIV_RTY;
548 case TGSI_OPCODE_RET:
549 return VGPU10_OPCODE_RET;
550 case TGSI_OPCODE_DIV:
551 return VGPU10_OPCODE_DIV;
552 case TGSI_OPCODE_IDIV:
553 return VGPU10_OPCODE_IDIV;
554 case TGSI_OPCODE_DP2:
555 return VGPU10_OPCODE_DP2;
556 case TGSI_OPCODE_BRK:
557 return VGPU10_OPCODE_BREAK;
558 case TGSI_OPCODE_IF:
559 return VGPU10_OPCODE_IF;
560 case TGSI_OPCODE_ELSE:
561 return VGPU10_OPCODE_ELSE;
562 case TGSI_OPCODE_ENDIF:
563 return VGPU10_OPCODE_ENDIF;
564 case TGSI_OPCODE_CEIL:
565 return VGPU10_OPCODE_ROUND_PI;
566 case TGSI_OPCODE_I2F:
567 return VGPU10_OPCODE_ITOF;
568 case TGSI_OPCODE_NOT:
569 return VGPU10_OPCODE_NOT;
570 case TGSI_OPCODE_TRUNC:
571 return VGPU10_OPCODE_ROUND_Z;
572 case TGSI_OPCODE_SHL:
573 return VGPU10_OPCODE_ISHL;
574 case TGSI_OPCODE_AND:
575 return VGPU10_OPCODE_AND;
576 case TGSI_OPCODE_OR:
577 return VGPU10_OPCODE_OR;
578 case TGSI_OPCODE_XOR:
579 return VGPU10_OPCODE_XOR;
580 case TGSI_OPCODE_CONT:
581 return VGPU10_OPCODE_CONTINUE;
582 case TGSI_OPCODE_EMIT:
583 return VGPU10_OPCODE_EMIT;
584 case TGSI_OPCODE_ENDPRIM:
585 return VGPU10_OPCODE_CUT;
586 case TGSI_OPCODE_BGNLOOP:
587 return VGPU10_OPCODE_LOOP;
588 case TGSI_OPCODE_ENDLOOP:
589 return VGPU10_OPCODE_ENDLOOP;
590 case TGSI_OPCODE_ENDSUB:
591 return VGPU10_OPCODE_RET;
592 case TGSI_OPCODE_NOP:
593 return VGPU10_OPCODE_NOP;
594 case TGSI_OPCODE_END:
595 return VGPU10_OPCODE_RET;
596 case TGSI_OPCODE_F2I:
597 return VGPU10_OPCODE_FTOI;
598 case TGSI_OPCODE_IMAX:
599 return VGPU10_OPCODE_IMAX;
600 case TGSI_OPCODE_IMIN:
601 return VGPU10_OPCODE_IMIN;
602 case TGSI_OPCODE_UDIV:
603 case TGSI_OPCODE_UMOD:
604 case TGSI_OPCODE_MOD:
605 return VGPU10_OPCODE_UDIV;
606 case TGSI_OPCODE_IMUL_HI:
607 return VGPU10_OPCODE_IMUL;
608 case TGSI_OPCODE_INEG:
609 return VGPU10_OPCODE_INEG;
610 case TGSI_OPCODE_ISHR:
611 return VGPU10_OPCODE_ISHR;
612 case TGSI_OPCODE_ISGE:
613 return VGPU10_OPCODE_IGE;
614 case TGSI_OPCODE_ISLT:
615 return VGPU10_OPCODE_ILT;
616 case TGSI_OPCODE_F2U:
617 return VGPU10_OPCODE_FTOU;
618 case TGSI_OPCODE_UADD:
619 return VGPU10_OPCODE_IADD;
620 case TGSI_OPCODE_U2F:
621 return VGPU10_OPCODE_UTOF;
622 case TGSI_OPCODE_UCMP:
623 return VGPU10_OPCODE_MOVC;
624 case TGSI_OPCODE_UMAD:
625 return VGPU10_OPCODE_UMAD;
626 case TGSI_OPCODE_UMAX:
627 return VGPU10_OPCODE_UMAX;
628 case TGSI_OPCODE_UMIN:
629 return VGPU10_OPCODE_UMIN;
630 case TGSI_OPCODE_UMUL:
631 case TGSI_OPCODE_UMUL_HI:
632 return VGPU10_OPCODE_UMUL;
633 case TGSI_OPCODE_USEQ:
634 return VGPU10_OPCODE_IEQ;
635 case TGSI_OPCODE_USGE:
636 return VGPU10_OPCODE_UGE;
637 case TGSI_OPCODE_USHR:
638 return VGPU10_OPCODE_USHR;
639 case TGSI_OPCODE_USLT:
640 return VGPU10_OPCODE_ULT;
641 case TGSI_OPCODE_USNE:
642 return VGPU10_OPCODE_INE;
643 case TGSI_OPCODE_SWITCH:
644 return VGPU10_OPCODE_SWITCH;
645 case TGSI_OPCODE_CASE:
646 return VGPU10_OPCODE_CASE;
647 case TGSI_OPCODE_DEFAULT:
648 return VGPU10_OPCODE_DEFAULT;
649 case TGSI_OPCODE_ENDSWITCH:
650 return VGPU10_OPCODE_ENDSWITCH;
651 case TGSI_OPCODE_FSLT:
652 return VGPU10_OPCODE_LT;
653 case TGSI_OPCODE_ROUND:
654 return VGPU10_OPCODE_ROUND_NE;
655 case TGSI_OPCODE_SAMPLE_POS:
656 /* Note: we never actually get this opcode because there's no GLSL
657 * function to query multisample resource sample positions. There's
658 * only the TGSI_SEMANTIC_SAMPLEPOS system value which contains the
659 * position of the current sample in the render target.
660 */
661 /* FALL-THROUGH */
662 case TGSI_OPCODE_SAMPLE_INFO:
663 /* NOTE: we never actually get this opcode because the GLSL compiler
664 * implements the gl_NumSamples variable with a simple constant in the
665 * constant buffer.
666 */
667 /* FALL-THROUGH */
668 default:
669 assert(!"Unexpected TGSI opcode in translate_opcode()");
670 return VGPU10_OPCODE_NOP;
671 }
672 }
673
674
675 /**
676 * Translate a TGSI register file type into a VGPU10 operand type.
677 * \param array is the TGSI_FILE_TEMPORARY register an array?
678 */
679 static VGPU10_OPERAND_TYPE
680 translate_register_file(enum tgsi_file_type file, boolean array)
681 {
682 switch (file) {
683 case TGSI_FILE_CONSTANT:
684 return VGPU10_OPERAND_TYPE_CONSTANT_BUFFER;
685 case TGSI_FILE_INPUT:
686 return VGPU10_OPERAND_TYPE_INPUT;
687 case TGSI_FILE_OUTPUT:
688 return VGPU10_OPERAND_TYPE_OUTPUT;
689 case TGSI_FILE_TEMPORARY:
690 return array ? VGPU10_OPERAND_TYPE_INDEXABLE_TEMP
691 : VGPU10_OPERAND_TYPE_TEMP;
692 case TGSI_FILE_IMMEDIATE:
693 /* all immediates are 32-bit values at this time, so
694 * VGPU10_OPERAND_TYPE_IMMEDIATE64 is not needed yet.
695 */
696 return VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER;
697 case TGSI_FILE_SAMPLER:
698 return VGPU10_OPERAND_TYPE_SAMPLER;
699 case TGSI_FILE_SYSTEM_VALUE:
700 return VGPU10_OPERAND_TYPE_INPUT;
701
702 /* XXX TODO more cases to finish */
703
704 default:
705 assert(!"Bad tgsi register file!");
706 return VGPU10_OPERAND_TYPE_NULL;
707 }
708 }
709
710
711 /**
712 * Emit a null dst register
713 */
714 static void
715 emit_null_dst_register(struct svga_shader_emitter_v10 *emit)
716 {
717 VGPU10OperandToken0 operand;
718
719 operand.value = 0;
720 operand.operandType = VGPU10_OPERAND_TYPE_NULL;
721 operand.numComponents = VGPU10_OPERAND_0_COMPONENT;
722
723 emit_dword(emit, operand.value);
724 }
725
726
727 /**
728 * If the given register is a temporary, return the array ID.
729 * Else return zero.
730 */
731 static unsigned
732 get_temp_array_id(const struct svga_shader_emitter_v10 *emit,
733 enum tgsi_file_type file, unsigned index)
734 {
735 if (file == TGSI_FILE_TEMPORARY) {
736 return emit->temp_map[index].arrayId;
737 }
738 else {
739 return 0;
740 }
741 }
742
743
744 /**
745 * If the given register is a temporary, convert the index from a TGSI
746 * TEMPORARY index to a VGPU10 temp index.
747 */
748 static unsigned
749 remap_temp_index(const struct svga_shader_emitter_v10 *emit,
750 enum tgsi_file_type file, unsigned index)
751 {
752 if (file == TGSI_FILE_TEMPORARY) {
753 return emit->temp_map[index].index;
754 }
755 else {
756 return index;
757 }
758 }
759
760
761 /**
762 * Set up the operand0 fields related to indexing (1D, 2D, relative, etc).
763 * Note: the operandType field must already be initialized.
764 */
765 static VGPU10OperandToken0
766 setup_operand0_indexing(struct svga_shader_emitter_v10 *emit,
767 VGPU10OperandToken0 operand0,
768 enum tgsi_file_type file,
769 boolean indirect, boolean index2D,
770 unsigned tempArrayID)
771 {
772 unsigned indexDim, index0Rep, index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
773
774 /*
775 * Compute index dimensions
776 */
777 if (operand0.operandType == VGPU10_OPERAND_TYPE_IMMEDIATE32 ||
778 operand0.operandType == VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID) {
779 /* there's no swizzle for in-line immediates */
780 indexDim = VGPU10_OPERAND_INDEX_0D;
781 assert(operand0.selectionMode == 0);
782 }
783 else {
784 if (index2D ||
785 tempArrayID > 0 ||
786 operand0.operandType == VGPU10_OPERAND_TYPE_CONSTANT_BUFFER) {
787 indexDim = VGPU10_OPERAND_INDEX_2D;
788 }
789 else {
790 indexDim = VGPU10_OPERAND_INDEX_1D;
791 }
792 }
793
794 /*
795 * Compute index representations (immediate, relative, etc).
796 */
797 if (tempArrayID > 0) {
798 assert(file == TGSI_FILE_TEMPORARY);
799 /* First index is the array ID, second index is the array element */
800 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
801 if (indirect) {
802 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
803 }
804 else {
805 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
806 }
807 }
808 else if (indirect) {
809 if (file == TGSI_FILE_CONSTANT) {
810 /* index[0] indicates which constant buffer while index[1] indicates
811 * the position in the constant buffer.
812 */
813 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
814 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
815 }
816 else {
817 /* All other register files are 1-dimensional */
818 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
819 }
820 }
821 else {
822 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
823 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
824 }
825
826 operand0.indexDimension = indexDim;
827 operand0.index0Representation = index0Rep;
828 operand0.index1Representation = index1Rep;
829
830 return operand0;
831 }
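/* Examples (illustrative): CONST[2][7] becomes a 2D operand with two
 * immediate indices (buffer 2, element 7).  A temp that lives in a temp
 * array (tempArrayID > 0) is also 2D: index 0 is the array ID, index 1 the
 * element, and the element index becomes IMMEDIATE32_PLUS_RELATIVE when
 * addressed indirectly.  A plain, non-indirect TEMP[n] or OUTPUT[n] stays
 * 1D with a single immediate index.
 */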
832
833
834 /**
835 * Emit the operand for expressing an address register for indirect indexing.
836 * Note that the address register is really just a temp register.
837 * \param addr_reg_index which address register to use
838 */
839 static void
840 emit_indirect_register(struct svga_shader_emitter_v10 *emit,
841 unsigned addr_reg_index)
842 {
843 unsigned tmp_reg_index;
844 VGPU10OperandToken0 operand0;
845
846 assert(addr_reg_index < MAX_VGPU10_ADDR_REGS);
847
848 tmp_reg_index = emit->address_reg_index[addr_reg_index];
849
850 /* operand0 is a simple temporary register, selecting one component */
851 operand0.value = 0;
852 operand0.operandType = VGPU10_OPERAND_TYPE_TEMP;
853 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
854 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
855 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
856 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
857 operand0.swizzleX = 0;
858 operand0.swizzleY = 1;
859 operand0.swizzleZ = 2;
860 operand0.swizzleW = 3;
861
862 emit_dword(emit, operand0.value);
863 emit_dword(emit, remap_temp_index(emit, TGSI_FILE_TEMPORARY, tmp_reg_index));
864 }
865
866
867 /**
868 * Translate the dst register of a TGSI instruction and emit VGPU10 tokens.
869 * \param emit the emitter context
870 * \param reg the TGSI dst register to translate
871 */
872 static void
873 emit_dst_register(struct svga_shader_emitter_v10 *emit,
874 const struct tgsi_full_dst_register *reg)
875 {
876 enum tgsi_file_type file = reg->Register.File;
877 unsigned index = reg->Register.Index;
878 const enum tgsi_semantic sem_name = emit->info.output_semantic_name[index];
879 const unsigned sem_index = emit->info.output_semantic_index[index];
880 unsigned writemask = reg->Register.WriteMask;
881 const boolean indirect = reg->Register.Indirect;
882 const unsigned tempArrayId = get_temp_array_id(emit, file, index);
883 const boolean index2d = reg->Register.Dimension;
884 VGPU10OperandToken0 operand0;
885
886 if (file == TGSI_FILE_OUTPUT) {
887 if (emit->unit == PIPE_SHADER_VERTEX ||
888 emit->unit == PIPE_SHADER_GEOMETRY) {
889 if (index == emit->vposition.out_index &&
890 emit->vposition.tmp_index != INVALID_INDEX) {
891 /* replace OUTPUT[POS] with TEMP[POS]. We need to store the
892 * vertex position result in a temporary so that we can modify
893 * it in the post_helper() code.
894 */
895 file = TGSI_FILE_TEMPORARY;
896 index = emit->vposition.tmp_index;
897 }
898 else if (sem_name == TGSI_SEMANTIC_CLIPDIST &&
899 emit->clip_dist_tmp_index != INVALID_INDEX) {
900 /* replace OUTPUT[CLIPDIST] with TEMP[CLIPDIST].
901 * We store the clip distance in a temporary first, then
902 * we'll copy it to the shadow copy and to CLIPDIST with the
903 * enabled planes mask in emit_clip_distance_instructions().
904 */
905 file = TGSI_FILE_TEMPORARY;
906 index = emit->clip_dist_tmp_index + sem_index;
907 }
908 else if (sem_name == TGSI_SEMANTIC_CLIPVERTEX &&
909 emit->clip_vertex_tmp_index != INVALID_INDEX) {
910 /* replace the CLIPVERTEX output register with a temporary */
911 assert(emit->clip_mode == CLIP_VERTEX);
912 assert(sem_index == 0);
913 file = TGSI_FILE_TEMPORARY;
914 index = emit->clip_vertex_tmp_index;
915 }
916 }
917 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
918 if (sem_name == TGSI_SEMANTIC_POSITION) {
919 /* Fragment depth output register */
920 operand0.value = 0;
921 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_DEPTH;
922 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
923 operand0.numComponents = VGPU10_OPERAND_1_COMPONENT;
924 emit_dword(emit, operand0.value);
925 return;
926 }
927 else if (sem_name == TGSI_SEMANTIC_SAMPLEMASK) {
928 /* Fragment sample mask output */
929 operand0.value = 0;
930 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_COVERAGE_MASK;
931 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
932 operand0.numComponents = VGPU10_OPERAND_1_COMPONENT;
933 emit_dword(emit, operand0.value);
934 return;
935 }
936 else if (index == emit->fs.color_out_index[0] &&
937 emit->fs.color_tmp_index != INVALID_INDEX) {
938 /* replace OUTPUT[COLOR] with TEMP[COLOR]. We need to store the
939 * fragment color result in a temporary so that we can read it
940 * in the post_helper() code.
941 */
942 file = TGSI_FILE_TEMPORARY;
943 index = emit->fs.color_tmp_index;
944 }
945 else {
946 /* Typically, for fragment shaders, the output register index
947 * matches the color semantic index. But not when we write to
948 * the fragment depth register. In that case, OUT[0] will be
949 * fragdepth and OUT[1] will be the 0th color output. We need
950 * to use the semantic index for color outputs.
951 */
952 assert(sem_name == TGSI_SEMANTIC_COLOR);
953 index = emit->info.output_semantic_index[index];
954
955 emit->num_output_writes++;
956 }
957 }
958 }
959
960 /* init operand tokens to all zero */
961 operand0.value = 0;
962
963 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
964
965 /* the operand has a writemask */
966 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
967
968 /* Which of the four dest components to write to. Note that we can use a
969 * simple assignment here since TGSI writemasks match VGPU10 writemasks.
970 */
971 STATIC_ASSERT(TGSI_WRITEMASK_X == VGPU10_OPERAND_4_COMPONENT_MASK_X);
972 operand0.mask = writemask;
973
974 /* translate TGSI register file type to VGPU10 operand type */
975 operand0.operandType = translate_register_file(file, tempArrayId > 0);
976
977 check_register_index(emit, operand0.operandType, index);
978
979 operand0 = setup_operand0_indexing(emit, operand0, file, indirect,
980 index2d, tempArrayId);
981
982 /* Emit tokens */
983 emit_dword(emit, operand0.value);
984 if (tempArrayId > 0) {
985 emit_dword(emit, tempArrayId);
986 }
987
988 emit_dword(emit, remap_temp_index(emit, file, index));
989
990 if (indirect) {
991 emit_indirect_register(emit, reg->Indirect.Index);
992 }
993 }
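/* Illustrative example of the substitution above: in a vertex shader that
 * needs prescale, a TGSI write to OUT[vposition.out_index] is redirected to
 * TEMP[vposition.tmp_index]; the post-helper code later applies the
 * scale/translation and copies the result to the real position output.
 * CLIPDIST, CLIPVERTEX and FS color outputs are handled the same way.
 */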
994
995
996 /**
997 * Translate a src register of a TGSI instruction and emit VGPU10 tokens.
998 * In quite a few cases, we do register substitution. For example, if
999 * the TGSI register is the front/back-face register, we replace that with
1000 * a temp register containing a value we computed earlier.
1001 */
1002 static void
1003 emit_src_register(struct svga_shader_emitter_v10 *emit,
1004 const struct tgsi_full_src_register *reg)
1005 {
1006 enum tgsi_file_type file = reg->Register.File;
1007 unsigned index = reg->Register.Index;
1008 const boolean indirect = reg->Register.Indirect;
1009 const unsigned tempArrayId = get_temp_array_id(emit, file, index);
1010 const boolean index2d = reg->Register.Dimension;
1011 const unsigned swizzleX = reg->Register.SwizzleX;
1012 const unsigned swizzleY = reg->Register.SwizzleY;
1013 const unsigned swizzleZ = reg->Register.SwizzleZ;
1014 const unsigned swizzleW = reg->Register.SwizzleW;
1015 const boolean absolute = reg->Register.Absolute;
1016 const boolean negate = reg->Register.Negate;
1017 bool is_prim_id = FALSE;
1018
1019 VGPU10OperandToken0 operand0;
1020 VGPU10OperandToken1 operand1;
1021
1022 if (emit->unit == PIPE_SHADER_FRAGMENT){
1023 if (file == TGSI_FILE_INPUT) {
1024 if (index == emit->fs.face_input_index) {
1025 /* Replace INPUT[FACE] with TEMP[FACE] */
1026 file = TGSI_FILE_TEMPORARY;
1027 index = emit->fs.face_tmp_index;
1028 }
1029 else if (index == emit->fs.fragcoord_input_index) {
1030 /* Replace INPUT[POSITION] with TEMP[POSITION] */
1031 file = TGSI_FILE_TEMPORARY;
1032 index = emit->fs.fragcoord_tmp_index;
1033 }
1034 else {
1035 /* We remap fragment shader inputs so that FS input indexes
1036 * match up with VS/GS output indexes.
1037 */
1038 index = emit->linkage.input_map[index];
1039 }
1040 }
1041 else if (file == TGSI_FILE_SYSTEM_VALUE) {
1042 if (index == emit->fs.sample_pos_sys_index) {
1043 assert(emit->version >= 41);
1044 /* Current sample position is in a temp register */
1045 file = TGSI_FILE_TEMPORARY;
1046 index = emit->fs.sample_pos_tmp_index;
1047 }
1048 else {
1049 /* Map the TGSI system value to a VGPU10 input register */
1050 assert(index < ARRAY_SIZE(emit->system_value_indexes));
1051 file = TGSI_FILE_INPUT;
1052 index = emit->system_value_indexes[index];
1053 }
1054 }
1055 }
1056 else if (emit->unit == PIPE_SHADER_GEOMETRY) {
1057 if (file == TGSI_FILE_INPUT) {
1058 is_prim_id = (index == emit->gs.prim_id_index);
1059 index = emit->linkage.input_map[index];
1060 }
1061 }
1062 else if (emit->unit == PIPE_SHADER_VERTEX) {
1063 if (file == TGSI_FILE_INPUT) {
1064 /* if input is adjusted... */
1065 if ((emit->key.vs.adjust_attrib_w_1 |
1066 emit->key.vs.adjust_attrib_itof |
1067 emit->key.vs.adjust_attrib_utof |
1068 emit->key.vs.attrib_is_bgra |
1069 emit->key.vs.attrib_puint_to_snorm |
1070 emit->key.vs.attrib_puint_to_uscaled |
1071 emit->key.vs.attrib_puint_to_sscaled) & (1 << index)) {
1072 file = TGSI_FILE_TEMPORARY;
1073 index = emit->vs.adjusted_input[index];
1074 }
1075 }
1076 else if (file == TGSI_FILE_SYSTEM_VALUE) {
1077 /* Map the TGSI system value to a VGPU10 input register */
1078 assert(index < ARRAY_SIZE(emit->system_value_indexes));
1079 file = TGSI_FILE_INPUT;
1080 index = emit->system_value_indexes[index];
1081 }
1082 }
1083
1084 operand0.value = operand1.value = 0;
1085
1086 if (is_prim_id) {
1087 /* NOTE: we should be using VGPU10_OPERAND_1_COMPONENT here, but
1088 * our virtual GPU accepts this as-is.
1089 */
1090 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
1091 operand0.operandType = VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID;
1092 }
1093 else {
1094 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1095 operand0.operandType = translate_register_file(file, tempArrayId > 0);
1096 }
1097
1098 operand0 = setup_operand0_indexing(emit, operand0, file, indirect,
1099 index2d, tempArrayId);
1100
1101 if (operand0.operandType != VGPU10_OPERAND_TYPE_IMMEDIATE32 &&
1102 operand0.operandType != VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID) {
1103 /* there's no swizzle for in-line immediates */
1104 if (swizzleX == swizzleY &&
1105 swizzleX == swizzleZ &&
1106 swizzleX == swizzleW) {
1107 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
1108 }
1109 else {
1110 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
1111 }
1112
1113 operand0.swizzleX = swizzleX;
1114 operand0.swizzleY = swizzleY;
1115 operand0.swizzleZ = swizzleZ;
1116 operand0.swizzleW = swizzleW;
1117
1118 if (absolute || negate) {
1119 operand0.extended = 1;
1120 operand1.extendedOperandType = VGPU10_EXTENDED_OPERAND_MODIFIER;
1121 if (absolute && !negate)
1122 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_ABS;
1123 if (!absolute && negate)
1124 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_NEG;
1125 if (absolute && negate)
1126 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_ABSNEG;
1127 }
1128 }
1129
1130 /* Emit the operand tokens */
1131 emit_dword(emit, operand0.value);
1132 if (operand0.extended)
1133 emit_dword(emit, operand1.value);
1134
1135 if (operand0.operandType == VGPU10_OPERAND_TYPE_IMMEDIATE32) {
1136 /* Emit the four float/int in-line immediate values */
1137 unsigned *c;
1138 assert(index < ARRAY_SIZE(emit->immediates));
1139 assert(file == TGSI_FILE_IMMEDIATE);
1140 assert(swizzleX < 4);
1141 assert(swizzleY < 4);
1142 assert(swizzleZ < 4);
1143 assert(swizzleW < 4);
1144 c = (unsigned *) emit->immediates[index];
1145 emit_dword(emit, c[swizzleX]);
1146 emit_dword(emit, c[swizzleY]);
1147 emit_dword(emit, c[swizzleZ]);
1148 emit_dword(emit, c[swizzleW]);
1149 }
1150 else if (operand0.indexDimension >= VGPU10_OPERAND_INDEX_1D) {
1151 /* Emit the register index(es) */
1152 if (index2d ||
1153 operand0.operandType == VGPU10_OPERAND_TYPE_CONSTANT_BUFFER) {
1154 emit_dword(emit, reg->Dimension.Index);
1155 }
1156
1157 if (tempArrayId > 0) {
1158 emit_dword(emit, tempArrayId);
1159 }
1160
1161 emit_dword(emit, remap_temp_index(emit, file, index));
1162
1163 if (indirect) {
1164 emit_indirect_register(emit, reg->Indirect.Index);
1165 }
1166 }
1167 }
1168
1169
1170 /**
1171 * Emit a resource operand (for use with a SAMPLE instruction).
1172 */
1173 static void
1174 emit_resource_register(struct svga_shader_emitter_v10 *emit,
1175 unsigned resource_number)
1176 {
1177 VGPU10OperandToken0 operand0;
1178
1179 check_register_index(emit, VGPU10_OPERAND_TYPE_RESOURCE, resource_number);
1180
1181 /* init */
1182 operand0.value = 0;
1183
1184 operand0.operandType = VGPU10_OPERAND_TYPE_RESOURCE;
1185 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1186 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1187 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
1188 operand0.swizzleX = VGPU10_COMPONENT_X;
1189 operand0.swizzleY = VGPU10_COMPONENT_Y;
1190 operand0.swizzleZ = VGPU10_COMPONENT_Z;
1191 operand0.swizzleW = VGPU10_COMPONENT_W;
1192
1193 emit_dword(emit, operand0.value);
1194 emit_dword(emit, resource_number);
1195 }
1196
1197
1198 /**
1199 * Emit a sampler operand (for use with a SAMPLE instruction).
1200 */
1201 static void
1202 emit_sampler_register(struct svga_shader_emitter_v10 *emit,
1203 unsigned sampler_number)
1204 {
1205 VGPU10OperandToken0 operand0;
1206
1207 check_register_index(emit, VGPU10_OPERAND_TYPE_SAMPLER, sampler_number);
1208
1209 /* init */
1210 operand0.value = 0;
1211
1212 operand0.operandType = VGPU10_OPERAND_TYPE_SAMPLER;
1213 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1214
1215 emit_dword(emit, operand0.value);
1216 emit_dword(emit, sampler_number);
1217 }
1218
1219
1220 /**
1221 * Emit an operand which reads the IS_FRONT_FACING register.
1222 */
1223 static void
1224 emit_face_register(struct svga_shader_emitter_v10 *emit)
1225 {
1226 VGPU10OperandToken0 operand0;
1227 unsigned index = emit->linkage.input_map[emit->fs.face_input_index];
1228
1229 /* init */
1230 operand0.value = 0;
1231
1232 operand0.operandType = VGPU10_OPERAND_TYPE_INPUT;
1233 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1234 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
1235 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1236
1237 operand0.swizzleX = VGPU10_COMPONENT_X;
1238 operand0.swizzleY = VGPU10_COMPONENT_X;
1239 operand0.swizzleZ = VGPU10_COMPONENT_X;
1240 operand0.swizzleW = VGPU10_COMPONENT_X;
1241
1242 emit_dword(emit, operand0.value);
1243 emit_dword(emit, index);
1244 }
1245
1246
1247 /**
1248 * Emit tokens for the "rasterizer" register used by the SAMPLE_POS
1249 * instruction.
1250 */
1251 static void
1252 emit_rasterizer_register(struct svga_shader_emitter_v10 *emit)
1253 {
1254 VGPU10OperandToken0 operand0;
1255
1256 /* init */
1257 operand0.value = 0;
1258
1259 /* No register index for rasterizer index (there's only one) */
1260 operand0.operandType = VGPU10_OPERAND_TYPE_RASTERIZER;
1261 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
1262 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1263 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
1264 operand0.swizzleX = VGPU10_COMPONENT_X;
1265 operand0.swizzleY = VGPU10_COMPONENT_Y;
1266 operand0.swizzleZ = VGPU10_COMPONENT_Z;
1267 operand0.swizzleW = VGPU10_COMPONENT_W;
1268
1269 emit_dword(emit, operand0.value);
1270 }
1271
1272
1273 /**
1274 * Emit the token for a VGPU10 opcode.
1275 * \param saturate clamp result to [0,1]?
1276 */
1277 static void
1278 emit_opcode(struct svga_shader_emitter_v10 *emit,
1279 VGPU10_OPCODE_TYPE vgpu10_opcode, boolean saturate)
1280 {
1281 VGPU10OpcodeToken0 token0;
1282
1283 token0.value = 0; /* init all fields to zero */
1284 token0.opcodeType = vgpu10_opcode;
1285 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1286 token0.saturate = saturate;
1287
1288 emit_dword(emit, token0.value);
1289 }
1290
1291
1292 /**
1293 * Emit the token for a VGPU10 resinfo instruction.
1294 * \param modifier return type modifier, _uint or _rcpFloat.
1295 * TODO: We may want to remove this parameter if it will
1296 * only ever be used as _uint.
1297 */
1298 static void
1299 emit_opcode_resinfo(struct svga_shader_emitter_v10 *emit,
1300 VGPU10_RESINFO_RETURN_TYPE modifier)
1301 {
1302 VGPU10OpcodeToken0 token0;
1303
1304 token0.value = 0; /* init all fields to zero */
1305 token0.opcodeType = VGPU10_OPCODE_RESINFO;
1306 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1307 token0.resinfoReturnType = modifier;
1308
1309 emit_dword(emit, token0.value);
1310 }
1311
1312
1313 /**
1314 * Emit opcode tokens for a texture sample instruction. Texture instructions
1315 * can be rather complicated (texel offsets, etc) so we have this specialized
1316 * function.
1317 */
1318 static void
1319 emit_sample_opcode(struct svga_shader_emitter_v10 *emit,
1320 unsigned vgpu10_opcode, boolean saturate,
1321 const int offsets[3])
1322 {
1323 VGPU10OpcodeToken0 token0;
1324 VGPU10OpcodeToken1 token1;
1325
1326 token0.value = 0; /* init all fields to zero */
1327 token0.opcodeType = vgpu10_opcode;
1328 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1329 token0.saturate = saturate;
1330
1331 if (offsets[0] || offsets[1] || offsets[2]) {
1332 assert(offsets[0] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1333 assert(offsets[1] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1334 assert(offsets[2] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1335 assert(offsets[0] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1336 assert(offsets[1] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1337 assert(offsets[2] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1338
1339 token0.extended = 1;
1340 token1.value = 0;
1341 token1.opcodeType = VGPU10_EXTENDED_OPCODE_SAMPLE_CONTROLS;
1342 token1.offsetU = offsets[0];
1343 token1.offsetV = offsets[1];
1344 token1.offsetW = offsets[2];
1345 }
1346
1347 emit_dword(emit, token0.value);
1348 if (token0.extended) {
1349 emit_dword(emit, token1.value);
1350 }
1351 }
1352
1353
1354 /**
1355 * Emit a DISCARD opcode token.
1356 * If nonzero is set, we'll discard the fragment if the X component is not 0.
1357 * Otherwise, we'll discard the fragment if the X component is 0.
1358 */
1359 static void
1360 emit_discard_opcode(struct svga_shader_emitter_v10 *emit, boolean nonzero)
1361 {
1362 VGPU10OpcodeToken0 opcode0;
1363
1364 opcode0.value = 0;
1365 opcode0.opcodeType = VGPU10_OPCODE_DISCARD;
1366 if (nonzero)
1367 opcode0.testBoolean = VGPU10_INSTRUCTION_TEST_NONZERO;
1368
1369 emit_dword(emit, opcode0.value);
1370 }
1371
1372
1373 /**
1374 * We need to call this before we begin emitting a VGPU10 instruction.
1375 */
1376 static void
1377 begin_emit_instruction(struct svga_shader_emitter_v10 *emit)
1378 {
1379 assert(emit->inst_start_token == 0);
1380 /* Save location of the instruction's VGPU10OpcodeToken0 token.
1381 * Note, we can't save a pointer because it would become invalid if
1382 * we have to realloc the output buffer.
1383 */
1384 emit->inst_start_token = emit_get_num_tokens(emit);
1385 }
1386
1387
1388 /**
1389 * We need to call this after we emit the last token of a VGPU10 instruction.
1390 * This function patches in the opcode token's instructionLength field.
1391 */
1392 static void
1393 end_emit_instruction(struct svga_shader_emitter_v10 *emit)
1394 {
1395 VGPU10OpcodeToken0 *tokens = (VGPU10OpcodeToken0 *) emit->buf;
1396 unsigned inst_length;
1397
1398 assert(emit->inst_start_token > 0);
1399
1400 if (emit->discard_instruction) {
1401 /* Back up the emit->ptr to where this instruction started so
1402 * that we discard the current instruction.
1403 */
1404 emit->ptr = (char *) (tokens + emit->inst_start_token);
1405 }
1406 else {
1407 /* Compute instruction length and patch that into the start of
1408 * the instruction.
1409 */
1410 inst_length = emit_get_num_tokens(emit) - emit->inst_start_token;
1411
1412 assert(inst_length > 0);
1413
1414 tokens[emit->inst_start_token].instructionLength = inst_length;
1415 }
1416
1417 emit->inst_start_token = 0; /* reset to zero for error checking */
1418 emit->discard_instruction = FALSE;
1419 }
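/* Illustrative usage sketch (the pattern used for most instructions in this
 * file), e.g. translating a simple MOV:
 *
 *    begin_emit_instruction(emit);
 *    emit_opcode(emit, VGPU10_OPCODE_MOV, inst->Instruction.Saturate);
 *    emit_dst_register(emit, &inst->Dst[0]);
 *    emit_src_register(emit, &inst->Src[0]);
 *    end_emit_instruction(emit);
 *
 * end_emit_instruction() then patches the instructionLength field of the
 * token written by emit_opcode(), or rewinds emit->ptr if the current
 * instruction was flagged for discard.
 */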
1420
1421
1422 /**
1423 * Return index for a free temporary register.
1424 */
1425 static unsigned
1426 get_temp_index(struct svga_shader_emitter_v10 *emit)
1427 {
1428 assert(emit->internal_temp_count < MAX_INTERNAL_TEMPS);
1429 return emit->num_shader_temps + emit->internal_temp_count++;
1430 }
1431
1432
1433 /**
1434 * Release the temporaries which were generated by get_temp_index().
1435 */
1436 static void
1437 free_temp_indexes(struct svga_shader_emitter_v10 *emit)
1438 {
1439 emit->internal_temp_count = 0;
1440 }
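/* Illustrative usage sketch: internal temps are typically grabbed and
 * released within the translation of a single TGSI instruction, e.g.
 *
 *    unsigned tmp = get_temp_index(emit);
 *    struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
 *    struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
 *    ... emit code that writes tmp_dst and reads tmp_src ...
 *    free_temp_indexes(emit);
 *
 * so no more than MAX_INTERNAL_TEMPS (3) internal temps are ever live at
 * once (see the register helpers defined just below).
 */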
1441
1442
1443 /**
1444 * Create a tgsi_full_src_register.
1445 */
1446 static struct tgsi_full_src_register
1447 make_src_reg(enum tgsi_file_type file, unsigned index)
1448 {
1449 struct tgsi_full_src_register reg;
1450
1451 memset(&reg, 0, sizeof(reg));
1452 reg.Register.File = file;
1453 reg.Register.Index = index;
1454 reg.Register.SwizzleX = TGSI_SWIZZLE_X;
1455 reg.Register.SwizzleY = TGSI_SWIZZLE_Y;
1456 reg.Register.SwizzleZ = TGSI_SWIZZLE_Z;
1457 reg.Register.SwizzleW = TGSI_SWIZZLE_W;
1458 return reg;
1459 }
1460
1461
1462 /**
1463 * Create a tgsi_full_src_register with a swizzle such that all four
1464 * vector components have the same scalar value.
1465 */
1466 static struct tgsi_full_src_register
1467 make_src_scalar_reg(enum tgsi_file_type file, unsigned index, unsigned component)
1468 {
1469 struct tgsi_full_src_register reg;
1470
1471 assert(component >= TGSI_SWIZZLE_X);
1472 assert(component <= TGSI_SWIZZLE_W);
1473
1474 memset(&reg, 0, sizeof(reg));
1475 reg.Register.File = file;
1476 reg.Register.Index = index;
1477 reg.Register.SwizzleX =
1478 reg.Register.SwizzleY =
1479 reg.Register.SwizzleZ =
1480 reg.Register.SwizzleW = component;
1481 return reg;
1482 }
1483
1484
1485 /**
1486 * Create a tgsi_full_src_register for a temporary.
1487 */
1488 static struct tgsi_full_src_register
1489 make_src_temp_reg(unsigned index)
1490 {
1491 return make_src_reg(TGSI_FILE_TEMPORARY, index);
1492 }
1493
1494
1495 /**
1496 * Create a tgsi_full_src_register for a constant.
1497 */
1498 static struct tgsi_full_src_register
1499 make_src_const_reg(unsigned index)
1500 {
1501 return make_src_reg(TGSI_FILE_CONSTANT, index);
1502 }
1503
1504
1505 /**
1506 * Create a tgsi_full_src_register for an immediate constant.
1507 */
1508 static struct tgsi_full_src_register
1509 make_src_immediate_reg(unsigned index)
1510 {
1511 return make_src_reg(TGSI_FILE_IMMEDIATE, index);
1512 }
1513
1514
1515 /**
1516 * Create a tgsi_full_dst_register.
1517 */
1518 static struct tgsi_full_dst_register
1519 make_dst_reg(enum tgsi_file_type file, unsigned index)
1520 {
1521 struct tgsi_full_dst_register reg;
1522
1523 memset(&reg, 0, sizeof(reg));
1524 reg.Register.File = file;
1525 reg.Register.Index = index;
1526 reg.Register.WriteMask = TGSI_WRITEMASK_XYZW;
1527 return reg;
1528 }
1529
1530
1531 /**
1532 * Create a tgsi_full_dst_register for a temporary.
1533 */
1534 static struct tgsi_full_dst_register
1535 make_dst_temp_reg(unsigned index)
1536 {
1537 return make_dst_reg(TGSI_FILE_TEMPORARY, index);
1538 }
1539
1540
1541 /**
1542 * Create a tgsi_full_dst_register for an output.
1543 */
1544 static struct tgsi_full_dst_register
1545 make_dst_output_reg(unsigned index)
1546 {
1547 return make_dst_reg(TGSI_FILE_OUTPUT, index);
1548 }
1549
1550
1551 /**
1552 * Create negated tgsi_full_src_register.
1553 */
1554 static struct tgsi_full_src_register
1555 negate_src(const struct tgsi_full_src_register *reg)
1556 {
1557 struct tgsi_full_src_register neg = *reg;
1558 neg.Register.Negate = !reg->Register.Negate;
1559 return neg;
1560 }
1561
1562 /**
1563 * Create absolute value of a tgsi_full_src_register.
1564 */
1565 static struct tgsi_full_src_register
1566 absolute_src(const struct tgsi_full_src_register *reg)
1567 {
1568 struct tgsi_full_src_register absolute = *reg;
1569 absolute.Register.Absolute = 1;
1570 return absolute;
1571 }
1572
1573
1574 /** Return the named swizzle term from the src register */
1575 static inline unsigned
1576 get_swizzle(const struct tgsi_full_src_register *reg, enum tgsi_swizzle term)
1577 {
1578 switch (term) {
1579 case TGSI_SWIZZLE_X:
1580 return reg->Register.SwizzleX;
1581 case TGSI_SWIZZLE_Y:
1582 return reg->Register.SwizzleY;
1583 case TGSI_SWIZZLE_Z:
1584 return reg->Register.SwizzleZ;
1585 case TGSI_SWIZZLE_W:
1586 return reg->Register.SwizzleW;
1587 default:
1588 assert(!"Bad swizzle");
1589 return TGSI_SWIZZLE_X;
1590 }
1591 }
1592
1593
1594 /**
1595 * Create swizzled tgsi_full_src_register.
1596 */
1597 static struct tgsi_full_src_register
1598 swizzle_src(const struct tgsi_full_src_register *reg,
1599 enum tgsi_swizzle swizzleX, enum tgsi_swizzle swizzleY,
1600 enum tgsi_swizzle swizzleZ, enum tgsi_swizzle swizzleW)
1601 {
1602 struct tgsi_full_src_register swizzled = *reg;
1603 /* Note: we swizzle the current swizzle */
1604 swizzled.Register.SwizzleX = get_swizzle(reg, swizzleX);
1605 swizzled.Register.SwizzleY = get_swizzle(reg, swizzleY);
1606 swizzled.Register.SwizzleZ = get_swizzle(reg, swizzleZ);
1607 swizzled.Register.SwizzleW = get_swizzle(reg, swizzleW);
1608 return swizzled;
1609 }
1610
1611
1612 /**
1613 * Create swizzled tgsi_full_src_register where all the swizzle
1614 * terms are the same.
1615 */
1616 static struct tgsi_full_src_register
1617 scalar_src(const struct tgsi_full_src_register *reg, enum tgsi_swizzle swizzle)
1618 {
1619 struct tgsi_full_src_register swizzled = *reg;
1620 /* Note: we swizzle the current swizzle */
1621 swizzled.Register.SwizzleX =
1622 swizzled.Register.SwizzleY =
1623 swizzled.Register.SwizzleZ =
1624 swizzled.Register.SwizzleW = get_swizzle(reg, swizzle);
1625 return swizzled;
1626 }
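/* Example (illustrative): because these helpers swizzle the *current*
 * swizzle, if reg already carries .yzwx then
 * scalar_src(&reg, TGSI_SWIZZLE_X) yields .yyyy, and
 * swizzle_src(&reg, TGSI_SWIZZLE_W, TGSI_SWIZZLE_W,
 *             TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z) yields .xxww.
 */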
1627
1628
1629 /**
1630 * Create new tgsi_full_dst_register with writemask.
1631 * \param mask bitmask of TGSI_WRITEMASK_[XYZW]
1632 */
1633 static struct tgsi_full_dst_register
1634 writemask_dst(const struct tgsi_full_dst_register *reg, unsigned mask)
1635 {
1636 struct tgsi_full_dst_register masked = *reg;
1637 masked.Register.WriteMask = mask;
1638 return masked;
1639 }
1640
1641
1642 /**
1643 * Check if the register's swizzle is XXXX, YYYY, ZZZZ, or WWWW.
1644 */
1645 static boolean
1646 same_swizzle_terms(const struct tgsi_full_src_register *reg)
1647 {
1648 return (reg->Register.SwizzleX == reg->Register.SwizzleY &&
1649 reg->Register.SwizzleY == reg->Register.SwizzleZ &&
1650 reg->Register.SwizzleZ == reg->Register.SwizzleW);
1651 }
1652
1653
1654 /**
1655 * Search the vector for the value 'x' and return its position.
1656 */
1657 static int
1658 find_imm_in_vec4(const union tgsi_immediate_data vec[4],
1659 union tgsi_immediate_data x)
1660 {
1661 unsigned i;
1662 for (i = 0; i < 4; i++) {
1663 if (vec[i].Int == x.Int)
1664 return i;
1665 }
1666 return -1;
1667 }
1668
1669
1670 /**
1671 * Helper used by make_immediate_reg(), make_immediate_reg_4().
1672 */
1673 static int
1674 find_immediate(struct svga_shader_emitter_v10 *emit,
1675 union tgsi_immediate_data x, unsigned startIndex)
1676 {
1677 const unsigned endIndex = emit->num_immediates;
1678 unsigned i;
1679
1680 assert(emit->immediates_emitted);
1681
1682 /* Search immediates for x, y, z, w */
1683 for (i = startIndex; i < endIndex; i++) {
1684 if (x.Int == emit->immediates[i][0].Int ||
1685 x.Int == emit->immediates[i][1].Int ||
1686 x.Int == emit->immediates[i][2].Int ||
1687 x.Int == emit->immediates[i][3].Int) {
1688 return i;
1689 }
1690 }
1691 /* Should never try to use an immediate value that wasn't pre-declared */
1692 assert(!"find_immediate() failed!");
1693 return -1;
1694 }
1695
1696
1697 /**
1698 * Return a tgsi_full_src_register for an immediate/literal
1699 * union tgsi_immediate_data[4] value.
1700 * Note: the values must have been previously declared/allocated in
1701 * emit_pre_helpers(). And, all of x,y,z,w must be located in the same
1702 * vec4 immediate.
1703 */
1704 static struct tgsi_full_src_register
1705 make_immediate_reg_4(struct svga_shader_emitter_v10 *emit,
1706 const union tgsi_immediate_data imm[4])
1707 {
1708 struct tgsi_full_src_register reg;
1709 unsigned i;
1710
1711 for (i = 0; i < emit->num_common_immediates; i++) {
1712 /* search for first component value */
1713 int immpos = find_immediate(emit, imm[0], i);
1714 int x, y, z, w;
1715
1716 assert(immpos >= 0);
1717
1718 /* find remaining components within the immediate vector */
1719 x = find_imm_in_vec4(emit->immediates[immpos], imm[0]);
1720 y = find_imm_in_vec4(emit->immediates[immpos], imm[1]);
1721 z = find_imm_in_vec4(emit->immediates[immpos], imm[2]);
1722 w = find_imm_in_vec4(emit->immediates[immpos], imm[3]);
1723
1724 if (x >=0 && y >= 0 && z >= 0 && w >= 0) {
1725 /* found them all */
1726 memset(&reg, 0, sizeof(reg));
1727 reg.Register.File = TGSI_FILE_IMMEDIATE;
1728 reg.Register.Index = immpos;
1729 reg.Register.SwizzleX = x;
1730 reg.Register.SwizzleY = y;
1731 reg.Register.SwizzleZ = z;
1732 reg.Register.SwizzleW = w;
1733 return reg;
1734 }
1735 /* else, keep searching */
1736 }
1737
1738 assert(!"Failed to find immediate register!");
1739
1740 /* Just return IMM[0].xxxx */
1741 memset(&reg, 0, sizeof(reg));
1742 reg.Register.File = TGSI_FILE_IMMEDIATE;
1743 return reg;
1744 }
1745
1746
1747 /**
1748 * Return a tgsi_full_src_register for an immediate/literal
1749 * union tgsi_immediate_data value of the form {value, value, value, value}.
1750 * \sa make_immediate_reg_4() regarding allowed values.
1751 */
1752 static struct tgsi_full_src_register
1753 make_immediate_reg(struct svga_shader_emitter_v10 *emit,
1754 union tgsi_immediate_data value)
1755 {
1756 struct tgsi_full_src_register reg;
1757 int immpos = find_immediate(emit, value, 0);
1758
1759 assert(immpos >= 0);
1760
1761 memset(&reg, 0, sizeof(reg));
1762 reg.Register.File = TGSI_FILE_IMMEDIATE;
1763 reg.Register.Index = immpos;
1764 reg.Register.SwizzleX =
1765 reg.Register.SwizzleY =
1766 reg.Register.SwizzleZ =
1767 reg.Register.SwizzleW = find_imm_in_vec4(emit->immediates[immpos], value);
1768
1769 return reg;
1770 }
1771
1772
1773 /**
1774 * Return a tgsi_full_src_register for an immediate/literal float[4] value.
1775 * \sa make_immediate_reg_4() regarding allowed values.
1776 */
1777 static struct tgsi_full_src_register
1778 make_immediate_reg_float4(struct svga_shader_emitter_v10 *emit,
1779 float x, float y, float z, float w)
1780 {
1781 union tgsi_immediate_data imm[4];
1782 imm[0].Float = x;
1783 imm[1].Float = y;
1784 imm[2].Float = z;
1785 imm[3].Float = w;
1786 return make_immediate_reg_4(emit, imm);
1787 }
1788
1789
1790 /**
1791 * Return a tgsi_full_src_register for an immediate/literal float value
1792 * of the form {value, value, value, value}.
1793 * \sa make_immediate_reg_4() regarding allowed values.
1794 */
1795 static struct tgsi_full_src_register
1796 make_immediate_reg_float(struct svga_shader_emitter_v10 *emit, float value)
1797 {
1798 union tgsi_immediate_data imm;
1799 imm.Float = value;
1800 return make_immediate_reg(emit, imm);
1801 }
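/* Example (illustrative): if the pre-declared common immediates happen to
 * include IMM[0] = {0.0, 1.0, 0.5, -1.0}, then
 * make_immediate_reg_float(emit, 0.5f) returns IMM[0].zzzz and
 * make_immediate_reg_float4(emit, 0.0f, 1.0f, 1.0f, 0.0f) returns
 * IMM[0].xyyx.  Requesting a value that was never allocated up front trips
 * the "find_immediate() failed!" assertion instead.
 */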
1802
1803
1804 /**
1805 * Return a tgsi_full_src_register for an immediate/literal int[4] vector.
1806 */
1807 static struct tgsi_full_src_register
1808 make_immediate_reg_int4(struct svga_shader_emitter_v10 *emit,
1809 int x, int y, int z, int w)
1810 {
1811 union tgsi_immediate_data imm[4];
1812 imm[0].Int = x;
1813 imm[1].Int = y;
1814 imm[2].Int = z;
1815 imm[3].Int = w;
1816 return make_immediate_reg_4(emit, imm);
1817 }
1818
1819
1820 /**
1821 * Return a tgsi_full_src_register for an immediate/literal int value
1822 * of the form {value, value, value, value}.
1823 * \sa make_immediate_reg_4() regarding allowed values.
1824 */
1825 static struct tgsi_full_src_register
1826 make_immediate_reg_int(struct svga_shader_emitter_v10 *emit, int value)
1827 {
1828 union tgsi_immediate_data imm;
1829 imm.Int = value;
1830 return make_immediate_reg(emit, imm);
1831 }
1832
1833
1834 /**
1835 * Allocate space for a union tgsi_immediate_data[4] immediate.
1836 * \return the index/position of the immediate.
1837 */
1838 static unsigned
1839 alloc_immediate_4(struct svga_shader_emitter_v10 *emit,
1840 const union tgsi_immediate_data imm[4])
1841 {
1842 unsigned n = emit->num_immediates++;
1843 assert(!emit->immediates_emitted);
1844 assert(n < ARRAY_SIZE(emit->immediates));
1845 emit->immediates[n][0] = imm[0];
1846 emit->immediates[n][1] = imm[1];
1847 emit->immediates[n][2] = imm[2];
1848 emit->immediates[n][3] = imm[3];
1849 return n;
1850 }
1851
1852
1853 /**
1854 * Allocate space for a float[4] immediate.
1855 * \return the index/position of the immediate.
1856 */
1857 static unsigned
1858 alloc_immediate_float4(struct svga_shader_emitter_v10 *emit,
1859 float x, float y, float z, float w)
1860 {
1861 union tgsi_immediate_data imm[4];
1862 imm[0].Float = x;
1863 imm[1].Float = y;
1864 imm[2].Float = z;
1865 imm[3].Float = w;
1866 return alloc_immediate_4(emit, imm);
1867 }
1868
1869
1870 /**
1871 * Allocate space for an int[4] immediate.
1872 * \return the index/position of the immediate.
1873 */
1874 static unsigned
1875 alloc_immediate_int4(struct svga_shader_emitter_v10 *emit,
1876 int x, int y, int z, int w)
1877 {
1878 union tgsi_immediate_data imm[4];
1879 imm[0].Int = x;
1880 imm[1].Int = y;
1881 imm[2].Int = z;
1882 imm[3].Int = w;
1883 return alloc_immediate_4(emit, imm);
1884 }
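
/* Illustrative sketch (hypothetical values): the alloc_immediate_*()
 * helpers above reserve immediate vectors up front, and the
 * make_immediate_reg_*() helpers later return swizzled IMM[] sources
 * that refer back to them.  For example:
 *
 *    alloc_immediate_float4(emit, 0.0f, 1.0f, 0.5f, -1.0f);
 *    ...
 *    struct tgsi_full_src_register half =
 *       make_immediate_reg_float(emit, 0.5f);
 *
 * where 'half' reads the .z component (replicated to all channels) of
 * whichever immediate vector holds 0.5f.  make_immediate_reg() asserts
 * if the value was never allocated, so the two steps must agree on the
 * values they use.
 */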
1885
1886
1887 /**
1888 * Allocate a shader input to store a system value.
1889 */
1890 static unsigned
1891 alloc_system_value_index(struct svga_shader_emitter_v10 *emit, unsigned index)
1892 {
1893 const unsigned n = emit->linkage.input_map_max + 1 + index;
1894 assert(index < ARRAY_SIZE(emit->system_value_indexes));
1895 emit->system_value_indexes[index] = n;
1896 return n;
1897 }
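
/* Worked example (illustrative): if the highest translated input register
 * is linkage.input_map_max = 3, system value 0 is assigned to input
 * register 3 + 1 + 0 = 4, system value 1 to register 5, and so on; i.e.
 * system values are placed just past the regular shader inputs.
 */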
1898
1899
1900 /**
1901 * Translate a TGSI immediate value (union tgsi_immediate_data[4]) to VGPU10.
1902 */
1903 static boolean
1904 emit_vgpu10_immediate(struct svga_shader_emitter_v10 *emit,
1905 const struct tgsi_full_immediate *imm)
1906 {
1907 /* We don't actually emit any code here. We just save the
1908 * immediate values and emit them later.
1909 */
1910 alloc_immediate_4(emit, imm->u);
1911 return TRUE;
1912 }
1913
1914
1915 /**
1916 * Emit a VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER block
1917 * containing all the immediate values previously allocated
1918 * with alloc_immediate_4().
1919 */
1920 static boolean
1921 emit_vgpu10_immediates_block(struct svga_shader_emitter_v10 *emit)
1922 {
1923 VGPU10OpcodeToken0 token;
1924
1925 assert(!emit->immediates_emitted);
1926
1927 token.value = 0;
1928 token.opcodeType = VGPU10_OPCODE_CUSTOMDATA;
1929 token.customDataClass = VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER;
1930
1931 /* Note: no begin/end_emit_instruction() calls */
1932 emit_dword(emit, token.value);
1933 emit_dword(emit, 2 + 4 * emit->num_immediates);
1934 emit_dwords(emit, (unsigned *) emit->immediates, 4 * emit->num_immediates);
1935
1936 emit->immediates_emitted = TRUE;
1937
1938 return TRUE;
1939 }
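
/* Sketch of the resulting DWORD layout (illustrative, assuming two
 * immediate vectors were allocated):
 *
 *    DWORD 0     CUSTOMDATA opcode token
 *                (customDataClass = DCL_IMMEDIATE_CONSTANT_BUFFER)
 *    DWORD 1     block length = 2 + 4 * num_immediates = 10
 *    DWORD 2..9  raw x/y/z/w bit patterns of IMM[0] and IMM[1]
 *
 * Instructions emitted later address these values as IMM[i] sources.
 */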
1940
1941
1942 /**
1943 * Translate a fragment shader's TGSI_INTERPOLATE_x mode to a vgpu10
1944 * interpolation mode.
1945 * \return a VGPU10_INTERPOLATION_x value
1946 */
1947 static unsigned
1948 translate_interpolation(const struct svga_shader_emitter_v10 *emit,
1949 enum tgsi_interpolate_mode interp,
1950 enum tgsi_interpolate_loc interpolate_loc)
1951 {
1952 if (interp == TGSI_INTERPOLATE_COLOR) {
1953 interp = emit->key.fs.flatshade ?
1954 TGSI_INTERPOLATE_CONSTANT : TGSI_INTERPOLATE_PERSPECTIVE;
1955 }
1956
1957 switch (interp) {
1958 case TGSI_INTERPOLATE_CONSTANT:
1959 return VGPU10_INTERPOLATION_CONSTANT;
1960 case TGSI_INTERPOLATE_LINEAR:
1961 if (interpolate_loc == TGSI_INTERPOLATE_LOC_CENTROID) {
1962 return VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID;
1963 } else if (interpolate_loc == TGSI_INTERPOLATE_LOC_SAMPLE &&
1964 emit->version >= 41) {
1965 return VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_SAMPLE;
1966 } else {
1967 return VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE;
1968 }
1969 break;
1970 case TGSI_INTERPOLATE_PERSPECTIVE:
1971 if (interpolate_loc == TGSI_INTERPOLATE_LOC_CENTROID) {
1972 return VGPU10_INTERPOLATION_LINEAR_CENTROID;
1973 } else if (interpolate_loc == TGSI_INTERPOLATE_LOC_SAMPLE &&
1974 emit->version >= 41) {
1975 return VGPU10_INTERPOLATION_LINEAR_SAMPLE;
1976 } else {
1977 return VGPU10_INTERPOLATION_LINEAR;
1978 }
1979 break;
1980 default:
1981 assert(!"Unexpected interpolation mode");
1982 return VGPU10_INTERPOLATION_CONSTANT;
1983 }
1984 }
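
/* A few concrete mappings produced above (illustrative):
 *
 *    TGSI_INTERPOLATE_COLOR, flatshade enabled  -> CONSTANT
 *    TGSI_INTERPOLATE_LINEAR at LOC_CENTROID    -> LINEAR_NOPERSPECTIVE_CENTROID
 *    TGSI_INTERPOLATE_PERSPECTIVE at LOC_SAMPLE -> LINEAR_SAMPLE if the
 *                                                  device is SM4.1 (version
 *                                                  >= 41), else LINEAR
 */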
1985
1986
1987 /**
1988 * Translate a TGSI property to VGPU10.
1989 * Don't emit any instructions yet; we only need to gather the primitive property
1990 * information. The output primitive topology might be changed later. The
1991 * final property instructions will be emitted as part of the pre-helper code.
1992 */
1993 static boolean
1994 emit_vgpu10_property(struct svga_shader_emitter_v10 *emit,
1995 const struct tgsi_full_property *prop)
1996 {
1997 static const VGPU10_PRIMITIVE primType[] = {
1998 VGPU10_PRIMITIVE_POINT, /* PIPE_PRIM_POINTS */
1999 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINES */
2000 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINE_LOOP */
2001 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINE_STRIP */
2002 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLES */
2003 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLE_STRIP */
2004 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLE_FAN */
2005 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_QUADS */
2006 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_QUAD_STRIP */
2007 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_POLYGON */
2008 VGPU10_PRIMITIVE_LINE_ADJ, /* PIPE_PRIM_LINES_ADJACENCY */
2009 VGPU10_PRIMITIVE_LINE_ADJ, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
2010 VGPU10_PRIMITIVE_TRIANGLE_ADJ, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
2011 VGPU10_PRIMITIVE_TRIANGLE_ADJ /* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
2012 };
2013
2014 static const VGPU10_PRIMITIVE_TOPOLOGY primTopology[] = {
2015 VGPU10_PRIMITIVE_TOPOLOGY_POINTLIST, /* PIPE_PRIM_POINTS */
2016 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST, /* PIPE_PRIM_LINES */
2017 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST, /* PIPE_PRIM_LINE_LOOP */
2018 VGPU10_PRIMITIVE_TOPOLOGY_LINESTRIP, /* PIPE_PRIM_LINE_STRIP */
2019 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST, /* PIPE_PRIM_TRIANGLES */
2020 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, /* PIPE_PRIM_TRIANGLE_STRIP */
2021 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, /* PIPE_PRIM_TRIANGLE_FAN */
2022 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_QUADS */
2023 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_QUAD_STRIP */
2024 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_POLYGON */
2025 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ, /* PIPE_PRIM_LINES_ADJACENCY */
2026 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
2027 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
2028 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ /* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
2029 };
2030
2031 static const unsigned inputArraySize[] = {
2032 0, /* VGPU10_PRIMITIVE_UNDEFINED */
2033 1, /* VGPU10_PRIMITIVE_POINT */
2034 2, /* VGPU10_PRIMITIVE_LINE */
2035 3, /* VGPU10_PRIMITIVE_TRIANGLE */
2036 0,
2037 0,
2038 4, /* VGPU10_PRIMITIVE_LINE_ADJ */
2039 6 /* VGPU10_PRIMITIVE_TRIANGLE_ADJ */
2040 };
2041
2042 switch (prop->Property.PropertyName) {
2043 case TGSI_PROPERTY_GS_INPUT_PRIM:
2044 assert(prop->u[0].Data < ARRAY_SIZE(primType));
2045 emit->gs.prim_type = primType[prop->u[0].Data];
2046 assert(emit->gs.prim_type != VGPU10_PRIMITIVE_UNDEFINED);
2047 emit->gs.input_size = inputArraySize[emit->gs.prim_type];
2048 break;
2049
2050 case TGSI_PROPERTY_GS_OUTPUT_PRIM:
2051 assert(prop->u[0].Data < ARRAY_SIZE(primTopology));
2052 emit->gs.prim_topology = primTopology[prop->u[0].Data];
2053 assert(emit->gs.prim_topology != VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED);
2054 break;
2055
2056 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
2057 emit->gs.max_out_vertices = prop->u[0].Data;
2058 break;
2059
2060 default:
2061 break;
2062 }
2063
2064 return TRUE;
2065 }
2066
2067
2068 static void
2069 emit_property_instruction(struct svga_shader_emitter_v10 *emit,
2070 VGPU10OpcodeToken0 opcode0, unsigned nData,
2071 unsigned data)
2072 {
2073 begin_emit_instruction(emit);
2074 emit_dword(emit, opcode0.value);
2075 if (nData)
2076 emit_dword(emit, data);
2077 end_emit_instruction(emit);
2078 }
2079
2080
2081 /**
2082 * Emit property instructions
2083 */
2084 static void
2085 emit_property_instructions(struct svga_shader_emitter_v10 *emit)
2086 {
2087 VGPU10OpcodeToken0 opcode0;
2088
2089 assert(emit->unit == PIPE_SHADER_GEOMETRY);
2090
2091 /* emit input primitive type declaration */
2092 opcode0.value = 0;
2093 opcode0.opcodeType = VGPU10_OPCODE_DCL_GS_INPUT_PRIMITIVE;
2094 opcode0.primitive = emit->gs.prim_type;
2095 emit_property_instruction(emit, opcode0, 0, 0);
2096
2097 /* emit output primitive topology declaration */
2098 opcode0.value = 0;
2099 opcode0.opcodeType = VGPU10_OPCODE_DCL_GS_OUTPUT_PRIMITIVE_TOPOLOGY;
2100 opcode0.primitiveTopology = emit->gs.prim_topology;
2101 emit_property_instruction(emit, opcode0, 0, 0);
2102
2103 /* emit max output vertices */
2104 opcode0.value = 0;
2105 opcode0.opcodeType = VGPU10_OPCODE_DCL_MAX_OUTPUT_VERTEX_COUNT;
2106 emit_property_instruction(emit, opcode0, 1, emit->gs.max_out_vertices);
2107 }
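
/* Illustrative example: for a geometry shader whose TGSI properties give
 * GS_INPUT_PRIM = PIPE_PRIM_TRIANGLES, GS_OUTPUT_PRIM =
 * PIPE_PRIM_TRIANGLE_STRIP and GS_MAX_OUTPUT_VERTICES = 4, the three
 * instructions above declare:
 *
 *    DCL_GS_INPUT_PRIMITIVE            primitive = VGPU10_PRIMITIVE_TRIANGLE
 *    DCL_GS_OUTPUT_PRIMITIVE_TOPOLOGY  topology  = TOPOLOGY_TRIANGLESTRIP
 *    DCL_MAX_OUTPUT_VERTEX_COUNT       count     = 4
 *
 * and emit->gs.input_size will have been set to 3 (one input array entry
 * per triangle vertex).
 */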
2108
2109
2110 /**
2111 * Emit a vgpu10 declaration "instruction".
2112 * \param index the register index
2113 * \param size array size of the operand. In most cases, it is 1,
2114 * but for inputs to geometry shader, the array size varies
2115 * depending on the primitive type.
2116 */
2117 static void
2118 emit_decl_instruction(struct svga_shader_emitter_v10 *emit,
2119 VGPU10OpcodeToken0 opcode0,
2120 VGPU10OperandToken0 operand0,
2121 VGPU10NameToken name_token,
2122 unsigned index, unsigned size)
2123 {
2124 assert(opcode0.opcodeType);
2125 assert(operand0.mask ||
2126 (operand0.operandType == VGPU10_OPERAND_TYPE_OUTPUT_DEPTH) ||
2127 (operand0.operandType == VGPU10_OPERAND_TYPE_OUTPUT_COVERAGE_MASK));
2128
2129 begin_emit_instruction(emit);
2130 emit_dword(emit, opcode0.value);
2131
2132 emit_dword(emit, operand0.value);
2133
2134 if (operand0.indexDimension == VGPU10_OPERAND_INDEX_1D) {
2135 /* Next token is the index of the register to declare */
2136 emit_dword(emit, index);
2137 }
2138 else if (operand0.indexDimension >= VGPU10_OPERAND_INDEX_2D) {
2139 /* Next token is the size of the register */
2140 emit_dword(emit, size);
2141
2142 /* Followed by the index of the register */
2143 emit_dword(emit, index);
2144 }
2145
2146 if (name_token.value) {
2147 emit_dword(emit, name_token.value);
2148 }
2149
2150 end_emit_instruction(emit);
2151 }
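
/* Token layouts produced by emit_decl_instruction() (illustrative):
 *
 *    1D operand (e.g. a VS input):  opcode0, operand0, index [, name]
 *    2D operand (e.g. a GS input):  opcode0, operand0, size, index [, name]
 *
 * The trailing system-name token is only present when name_token is
 * non-zero, i.e. for the *_SIV / *_SGV style declarations.
 */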
2152
2153
2154 /**
2155 * Emit the declaration for a shader input.
2156 * \param opcodeType opcode type, one of VGPU10_OPCODE_DCL_INPUTx
2157 * \param operandType operand type, one of VGPU10_OPERAND_TYPE_INPUT_x
2158 * \param dim index dimension
2159 * \param index the input register index
2160 * \param size array size of the operand. In most cases, it is 1,
2161 * but for inputs to geometry shader, the array size varies
2162 * depending on the primitive type.
2163 * \param name one of VGPU10_NAME_x
2164 * \param numComp number of components
2165 * \param selMode component selection mode
2166 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2167 * \param interpMode interpolation mode
2168 */
2169 static void
2170 emit_input_declaration(struct svga_shader_emitter_v10 *emit,
2171 VGPU10_OPCODE_TYPE opcodeType,
2172 VGPU10_OPERAND_TYPE operandType,
2173 VGPU10_OPERAND_INDEX_DIMENSION dim,
2174 unsigned index, unsigned size,
2175 VGPU10_SYSTEM_NAME name,
2176 VGPU10_OPERAND_NUM_COMPONENTS numComp,
2177 VGPU10_OPERAND_4_COMPONENT_SELECTION_MODE selMode,
2178 unsigned usageMask,
2179 VGPU10_INTERPOLATION_MODE interpMode)
2180 {
2181 VGPU10OpcodeToken0 opcode0;
2182 VGPU10OperandToken0 operand0;
2183 VGPU10NameToken name_token;
2184
2185 assert(usageMask <= VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2186 assert(opcodeType == VGPU10_OPCODE_DCL_INPUT ||
2187 opcodeType == VGPU10_OPCODE_DCL_INPUT_SIV ||
2188 opcodeType == VGPU10_OPCODE_DCL_INPUT_PS ||
2189 opcodeType == VGPU10_OPCODE_DCL_INPUT_PS_SIV ||
2190 opcodeType == VGPU10_OPCODE_DCL_INPUT_PS_SGV);
2191 assert(operandType == VGPU10_OPERAND_TYPE_INPUT ||
2192 operandType == VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID);
2193 assert(numComp <= VGPU10_OPERAND_4_COMPONENT);
2194 assert(selMode <= VGPU10_OPERAND_4_COMPONENT_MASK_MODE);
2195 assert(dim <= VGPU10_OPERAND_INDEX_3D);
2196 assert(name == VGPU10_NAME_UNDEFINED ||
2197 name == VGPU10_NAME_POSITION ||
2198 name == VGPU10_NAME_INSTANCE_ID ||
2199 name == VGPU10_NAME_VERTEX_ID ||
2200 name == VGPU10_NAME_PRIMITIVE_ID ||
2201 name == VGPU10_NAME_IS_FRONT_FACE ||
2202 name == VGPU10_NAME_SAMPLE_INDEX);
2203
2204 assert(interpMode == VGPU10_INTERPOLATION_UNDEFINED ||
2205 interpMode == VGPU10_INTERPOLATION_CONSTANT ||
2206 interpMode == VGPU10_INTERPOLATION_LINEAR ||
2207 interpMode == VGPU10_INTERPOLATION_LINEAR_CENTROID ||
2208 interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE ||
2209 interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID ||
2210 interpMode == VGPU10_INTERPOLATION_LINEAR_SAMPLE ||
2211 interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_SAMPLE);
2212
2213 check_register_index(emit, opcodeType, index);
2214
2215 opcode0.value = operand0.value = name_token.value = 0;
2216
2217 opcode0.opcodeType = opcodeType;
2218 opcode0.interpolationMode = interpMode;
2219
2220 operand0.operandType = operandType;
2221 operand0.numComponents = numComp;
2222 operand0.selectionMode = selMode;
2223 operand0.mask = usageMask;
2224 operand0.indexDimension = dim;
2225 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2226 if (dim == VGPU10_OPERAND_INDEX_2D)
2227 operand0.index1Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2228
2229 name_token.name = name;
2230
2231 emit_decl_instruction(emit, opcode0, operand0, name_token, index, size);
2232 }
2233
2234
2235 /**
2236 * Emit the declaration for a shader output.
2237 * \param type one of VGPU10_OPCODE_DCL_OUTPUTx
2238 * \param index the output register index
2239 * \param name one of VGPU10_NAME_x
2240 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2241 */
2242 static void
2243 emit_output_declaration(struct svga_shader_emitter_v10 *emit,
2244 VGPU10_OPCODE_TYPE type, unsigned index,
2245 VGPU10_SYSTEM_NAME name,
2246 unsigned usageMask)
2247 {
2248 VGPU10OpcodeToken0 opcode0;
2249 VGPU10OperandToken0 operand0;
2250 VGPU10NameToken name_token;
2251
2252 assert(usageMask <= VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2253 assert(type == VGPU10_OPCODE_DCL_OUTPUT ||
2254 type == VGPU10_OPCODE_DCL_OUTPUT_SGV ||
2255 type == VGPU10_OPCODE_DCL_OUTPUT_SIV);
2256 assert(name == VGPU10_NAME_UNDEFINED ||
2257 name == VGPU10_NAME_POSITION ||
2258 name == VGPU10_NAME_PRIMITIVE_ID ||
2259 name == VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX ||
2260 name == VGPU10_NAME_CLIP_DISTANCE);
2261
2262 check_register_index(emit, type, index);
2263
2264 opcode0.value = operand0.value = name_token.value = 0;
2265
2266 opcode0.opcodeType = type;
2267 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT;
2268 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
2269 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
2270 operand0.mask = usageMask;
2271 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
2272 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2273
2274 name_token.name = name;
2275
2276 emit_decl_instruction(emit, opcode0, operand0, name_token, index, 1);
2277 }
2278
2279
2280 /**
2281 * Emit the declaration for the fragment depth output.
2282 */
2283 static void
2284 emit_fragdepth_output_declaration(struct svga_shader_emitter_v10 *emit)
2285 {
2286 VGPU10OpcodeToken0 opcode0;
2287 VGPU10OperandToken0 operand0;
2288 VGPU10NameToken name_token;
2289
2290 assert(emit->unit == PIPE_SHADER_FRAGMENT);
2291
2292 opcode0.value = operand0.value = name_token.value = 0;
2293
2294 opcode0.opcodeType = VGPU10_OPCODE_DCL_OUTPUT;
2295 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_DEPTH;
2296 operand0.numComponents = VGPU10_OPERAND_1_COMPONENT;
2297 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
2298 operand0.mask = 0;
2299
2300 emit_decl_instruction(emit, opcode0, operand0, name_token, 0, 1);
2301 }
2302
2303
2304 /**
2305 * Emit the declaration for the fragment sample mask/coverage output.
2306 */
2307 static void
2308 emit_samplemask_output_declaration(struct svga_shader_emitter_v10 *emit)
2309 {
2310 VGPU10OpcodeToken0 opcode0;
2311 VGPU10OperandToken0 operand0;
2312 VGPU10NameToken name_token;
2313
2314 assert(emit->unit == PIPE_SHADER_FRAGMENT);
2315 assert(emit->version >= 41);
2316
2317 opcode0.value = operand0.value = name_token.value = 0;
2318
2319 opcode0.opcodeType = VGPU10_OPCODE_DCL_OUTPUT;
2320 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_COVERAGE_MASK;
2321 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
2322 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
2323 operand0.mask = 0;
2324
2325 emit_decl_instruction(emit, opcode0, operand0, name_token, 0, 1);
2326 }
2327
2328
2329 /**
2330 * Emit the declaration for a system value input/output.
2331 */
2332 static void
2333 emit_system_value_declaration(struct svga_shader_emitter_v10 *emit,
2334 enum tgsi_semantic semantic_name, unsigned index)
2335 {
2336 switch (semantic_name) {
2337 case TGSI_SEMANTIC_INSTANCEID:
2338 index = alloc_system_value_index(emit, index);
2339 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_SIV,
2340 VGPU10_OPERAND_TYPE_INPUT,
2341 VGPU10_OPERAND_INDEX_1D,
2342 index, 1,
2343 VGPU10_NAME_INSTANCE_ID,
2344 VGPU10_OPERAND_4_COMPONENT,
2345 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2346 VGPU10_OPERAND_4_COMPONENT_MASK_X,
2347 VGPU10_INTERPOLATION_UNDEFINED);
2348 break;
2349 case TGSI_SEMANTIC_VERTEXID:
2350 index = alloc_system_value_index(emit, index);
2351 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_SIV,
2352 VGPU10_OPERAND_TYPE_INPUT,
2353 VGPU10_OPERAND_INDEX_1D,
2354 index, 1,
2355 VGPU10_NAME_VERTEX_ID,
2356 VGPU10_OPERAND_4_COMPONENT,
2357 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2358 VGPU10_OPERAND_4_COMPONENT_MASK_X,
2359 VGPU10_INTERPOLATION_UNDEFINED);
2360 break;
2361 case TGSI_SEMANTIC_SAMPLEID:
2362 assert(emit->unit == PIPE_SHADER_FRAGMENT);
2363 emit->fs.sample_id_sys_index = index;
2364 index = alloc_system_value_index(emit, index);
2365 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_PS_SIV,
2366 VGPU10_OPERAND_TYPE_INPUT,
2367 VGPU10_OPERAND_INDEX_1D,
2368 index, 1,
2369 VGPU10_NAME_SAMPLE_INDEX,
2370 VGPU10_OPERAND_4_COMPONENT,
2371 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2372 VGPU10_OPERAND_4_COMPONENT_MASK_X,
2373 VGPU10_INTERPOLATION_CONSTANT);
2374 break;
2375 case TGSI_SEMANTIC_SAMPLEPOS:
2376 /* This system value contains the position of the current sample
2377 * when using per-sample shading. We implement this by calling
2378 * the VGPU10_OPCODE_SAMPLE_POS instruction with the current sample
2379 * index as the argument. See emit_sample_position_instructions().
2380 */
2381 assert(emit->version >= 41);
2382 emit->fs.sample_pos_sys_index = index;
2383 index = alloc_system_value_index(emit, index);
2384 break;
2385 default:
2386 debug_printf("unexpected sytem value semantic index %u\n",
2387 semantic_name);
2388 }
2389 }
2390
2391 /**
2392 * Translate a TGSI declaration to VGPU10.
2393 */
2394 static boolean
2395 emit_vgpu10_declaration(struct svga_shader_emitter_v10 *emit,
2396 const struct tgsi_full_declaration *decl)
2397 {
2398 switch (decl->Declaration.File) {
2399 case TGSI_FILE_INPUT:
2400 /* do nothing - see emit_input_declarations() */
2401 return TRUE;
2402
2403 case TGSI_FILE_OUTPUT:
2404 assert(decl->Range.First == decl->Range.Last);
2405 emit->output_usage_mask[decl->Range.First] = decl->Declaration.UsageMask;
2406 return TRUE;
2407
2408 case TGSI_FILE_TEMPORARY:
2409 /* Don't declare the temps here. Just keep track of how many
2410 * and emit the declaration later.
2411 */
2412 if (decl->Declaration.Array) {
2413 /* Indexed temporary array. Save the start index of the array
2414 * and the size of the array.
2415 */
2416 const unsigned arrayID = MIN2(decl->Array.ArrayID, MAX_TEMP_ARRAYS);
2417 unsigned i;
2418
2419 assert(arrayID < ARRAY_SIZE(emit->temp_arrays));
2420
2421 /* Save this array so we can emit the declaration for it later */
2422 emit->temp_arrays[arrayID].start = decl->Range.First;
2423 emit->temp_arrays[arrayID].size =
2424 decl->Range.Last - decl->Range.First + 1;
2425
2426 emit->num_temp_arrays = MAX2(emit->num_temp_arrays, arrayID + 1);
2427 assert(emit->num_temp_arrays <= MAX_TEMP_ARRAYS);
2428 emit->num_temp_arrays = MIN2(emit->num_temp_arrays, MAX_TEMP_ARRAYS);
2429
2430 /* Fill in the temp_map entries for this array */
2431 for (i = decl->Range.First; i <= decl->Range.Last; i++) {
2432 emit->temp_map[i].arrayId = arrayID;
2433 emit->temp_map[i].index = i - decl->Range.First;
2434 }
2435 }
2436
2437 /* for all temps, indexed or not, keep track of highest index */
2438 emit->num_shader_temps = MAX2(emit->num_shader_temps,
2439 decl->Range.Last + 1);
2440 return TRUE;
2441
2442 case TGSI_FILE_CONSTANT:
2443 /* Don't declare constants here. Just keep track and emit later. */
2444 {
2445 unsigned constbuf = 0, num_consts;
2446 if (decl->Declaration.Dimension) {
2447 constbuf = decl->Dim.Index2D;
2448 }
2449 /* A constbuf index that is out of bounds should have prevented the
2450 * shader from linking in the first place, so we should never get
2451 * here; the assertion below catches that case.
2452 */
2453 assert(constbuf < ARRAY_SIZE(emit->num_shader_consts));
2454
2455 num_consts = MAX2(emit->num_shader_consts[constbuf],
2456 decl->Range.Last + 1);
2457
2458 if (num_consts > VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT) {
2459 debug_printf("Warning: constant buffer is declared to size [%u]"
2460 " but [%u] is the limit.\n",
2461 num_consts,
2462 VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT);
2463 }
2464 /* The linker doesn't enforce the max UBO size so we clamp here */
2465 emit->num_shader_consts[constbuf] =
2466 MIN2(num_consts, VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT);
2467 }
2468 return TRUE;
2469
2470 case TGSI_FILE_IMMEDIATE:
2471 assert(!"TGSI_FILE_IMMEDIATE not handled yet!");
2472 return FALSE;
2473
2474 case TGSI_FILE_SYSTEM_VALUE:
2475 emit_system_value_declaration(emit, decl->Semantic.Name,
2476 decl->Range.First);
2477 return TRUE;
2478
2479 case TGSI_FILE_SAMPLER:
2480 /* Don't declare samplers here. Just keep track and emit later. */
2481 emit->num_samplers = MAX2(emit->num_samplers, decl->Range.Last + 1);
2482 return TRUE;
2483
2484 #if 0
2485 case TGSI_FILE_RESOURCE:
2486 /*opcode0.opcodeType = VGPU10_OPCODE_DCL_RESOURCE;*/
2487 /* XXX more, VGPU10_RETURN_TYPE_FLOAT */
2488 assert(!"TGSI_FILE_RESOURCE not handled yet");
2489 return FALSE;
2490 #endif
2491
2492 case TGSI_FILE_ADDRESS:
2493 emit->num_address_regs = MAX2(emit->num_address_regs,
2494 decl->Range.Last + 1);
2495 return TRUE;
2496
2497 case TGSI_FILE_SAMPLER_VIEW:
2498 {
2499 unsigned unit = decl->Range.First;
2500 assert(decl->Range.First == decl->Range.Last);
2501 emit->sampler_target[unit] = decl->SamplerView.Resource;
2502 /* Note: we can ignore YZW return types for now */
2503 emit->sampler_return_type[unit] = decl->SamplerView.ReturnTypeX;
2504 emit->sampler_view[unit] = TRUE;
2505 }
2506 return TRUE;
2507
2508 default:
2509 assert(!"Unexpected type of declaration");
2510 return FALSE;
2511 }
2512 }
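
/* Worked example for the TGSI_FILE_TEMPORARY case above: a declaration
 * such as
 *    DCL TEMP[2..4], ARRAY(1), LOCAL
 * records temp_arrays[1].start = 2 and temp_arrays[1].size = 3, fills in
 * temp_map[2..4] = {array 1, index 0..2}, and raises num_shader_temps to
 * at least 5.  The matching VGPU10_OPCODE_DCL_INDEXABLE_TEMP instruction
 * is emitted later by emit_temporaries_declaration().
 */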
2513
2514
2515
2516 /**
2517 * Emit all input declarations.
2518 */
2519 static boolean
2520 emit_input_declarations(struct svga_shader_emitter_v10 *emit)
2521 {
2522 unsigned i;
2523
2524 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2525
2526 for (i = 0; i < emit->linkage.num_inputs; i++) {
2527 enum tgsi_semantic semantic_name = emit->info.input_semantic_name[i];
2528 unsigned usage_mask = emit->info.input_usage_mask[i];
2529 unsigned index = emit->linkage.input_map[i];
2530 VGPU10_OPCODE_TYPE type;
2531 VGPU10_INTERPOLATION_MODE interpolationMode;
2532 VGPU10_SYSTEM_NAME name;
2533
2534 if (usage_mask == 0)
2535 continue; /* register is not actually used */
2536
2537 if (semantic_name == TGSI_SEMANTIC_POSITION) {
2538 /* fragment position input */
2539 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2540 interpolationMode = VGPU10_INTERPOLATION_LINEAR;
2541 name = VGPU10_NAME_POSITION;
2542 if (usage_mask & TGSI_WRITEMASK_W) {
2543 /* we need to replace use of 'w' with '1/w' */
2544 emit->fs.fragcoord_input_index = i;
2545 }
2546 }
2547 else if (semantic_name == TGSI_SEMANTIC_FACE) {
2548 /* fragment front-facing input */
2549 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2550 interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
2551 name = VGPU10_NAME_IS_FRONT_FACE;
2552 emit->fs.face_input_index = i;
2553 }
2554 else if (semantic_name == TGSI_SEMANTIC_PRIMID) {
2555 /* primitive ID */
2556 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2557 interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
2558 name = VGPU10_NAME_PRIMITIVE_ID;
2559 }
2560 else if (semantic_name == TGSI_SEMANTIC_SAMPLEID) {
2561 /* sample index / ID */
2562 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2563 interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
2564 name = VGPU10_NAME_SAMPLE_INDEX;
2565 }
2566 else {
2567 /* general fragment input */
2568 type = VGPU10_OPCODE_DCL_INPUT_PS;
2569 interpolationMode =
2570 translate_interpolation(emit,
2571 emit->info.input_interpolate[i],
2572 emit->info.input_interpolate_loc[i]);
2573
2574 /* keep track of whether flat interpolation mode is being used */
2575 emit->uses_flat_interp |=
2576 (interpolationMode == VGPU10_INTERPOLATION_CONSTANT);
2577
2578 name = VGPU10_NAME_UNDEFINED;
2579 }
2580
2581 emit_input_declaration(emit, type,
2582 VGPU10_OPERAND_TYPE_INPUT,
2583 VGPU10_OPERAND_INDEX_1D, index, 1,
2584 name,
2585 VGPU10_OPERAND_4_COMPONENT,
2586 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2587 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2588 interpolationMode);
2589 }
2590 }
2591 else if (emit->unit == PIPE_SHADER_GEOMETRY) {
2592
2593 for (i = 0; i < emit->info.num_inputs; i++) {
2594 enum tgsi_semantic semantic_name = emit->info.input_semantic_name[i];
2595 unsigned usage_mask = emit->info.input_usage_mask[i];
2596 unsigned index = emit->linkage.input_map[i];
2597 VGPU10_OPCODE_TYPE opcodeType, operandType;
2598 VGPU10_OPERAND_NUM_COMPONENTS numComp;
2599 VGPU10_OPERAND_4_COMPONENT_SELECTION_MODE selMode;
2600 VGPU10_SYSTEM_NAME name;
2601 VGPU10_OPERAND_INDEX_DIMENSION dim;
2602
2603 if (usage_mask == 0)
2604 continue; /* register is not actually used */
2605
2606 opcodeType = VGPU10_OPCODE_DCL_INPUT;
2607 operandType = VGPU10_OPERAND_TYPE_INPUT;
2608 numComp = VGPU10_OPERAND_4_COMPONENT;
2609 selMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
2610 name = VGPU10_NAME_UNDEFINED;
2611
2612 /* all geometry shader inputs are two dimensional except
2613 * gl_PrimitiveID
2614 */
2615 dim = VGPU10_OPERAND_INDEX_2D;
2616
2617 if (semantic_name == TGSI_SEMANTIC_PRIMID) {
2618 /* Primitive ID */
2619 operandType = VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID;
2620 dim = VGPU10_OPERAND_INDEX_0D;
2621 numComp = VGPU10_OPERAND_0_COMPONENT;
2622 selMode = 0;
2623
2624 /* Also save the register index so we can check for the primitive
2625 * id when emitting a src register. We need to modify the operand
2626 * type and index dimension when emitting the primitive id src reg.
2627 */
2628 emit->gs.prim_id_index = i;
2629 }
2630 else if (semantic_name == TGSI_SEMANTIC_POSITION) {
2631 /* vertex position input */
2632 opcodeType = VGPU10_OPCODE_DCL_INPUT_SIV;
2633 name = VGPU10_NAME_POSITION;
2634 }
2635
2636 emit_input_declaration(emit, opcodeType, operandType,
2637 dim, index,
2638 emit->gs.input_size,
2639 name,
2640 numComp, selMode,
2641 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2642 VGPU10_INTERPOLATION_UNDEFINED);
2643 }
2644 }
2645 else {
2646 assert(emit->unit == PIPE_SHADER_VERTEX);
2647
2648 for (i = 0; i < emit->info.file_max[TGSI_FILE_INPUT] + 1; i++) {
2649 unsigned usage_mask = emit->info.input_usage_mask[i];
2650 unsigned index = i;
2651
2652 if (usage_mask == 0)
2653 continue; /* register is not actually used */
2654
2655 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT,
2656 VGPU10_OPERAND_TYPE_INPUT,
2657 VGPU10_OPERAND_INDEX_1D, index, 1,
2658 VGPU10_NAME_UNDEFINED,
2659 VGPU10_OPERAND_4_COMPONENT,
2660 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2661 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2662 VGPU10_INTERPOLATION_UNDEFINED);
2663 }
2664 }
2665
2666 return TRUE;
2667 }
2668
2669
2670 /**
2671 * Emit all output declarations.
2672 */
2673 static boolean
2674 emit_output_declarations(struct svga_shader_emitter_v10 *emit)
2675 {
2676 unsigned i;
2677
2678 for (i = 0; i < emit->info.num_outputs; i++) {
2679 /*const unsigned usage_mask = emit->info.output_usage_mask[i];*/
2680 const enum tgsi_semantic semantic_name =
2681 emit->info.output_semantic_name[i];
2682 const unsigned semantic_index = emit->info.output_semantic_index[i];
2683 unsigned index = i;
2684
2685 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2686 if (semantic_name == TGSI_SEMANTIC_COLOR) {
2687 assert(semantic_index < ARRAY_SIZE(emit->fs.color_out_index));
2688
2689 emit->fs.color_out_index[semantic_index] = index;
2690
2691 emit->fs.num_color_outputs = MAX2(emit->fs.num_color_outputs,
2692 index + 1);
2693
2694 /* The semantic index is the shader's color output/buffer index */
2695 emit_output_declaration(emit,
2696 VGPU10_OPCODE_DCL_OUTPUT, semantic_index,
2697 VGPU10_NAME_UNDEFINED,
2698 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2699
2700 if (semantic_index == 0) {
2701 if (emit->key.fs.write_color0_to_n_cbufs > 1) {
2702 /* Emit declarations for the additional color outputs
2703 * for broadcasting.
2704 */
2705 unsigned j;
2706 for (j = 1; j < emit->key.fs.write_color0_to_n_cbufs; j++) {
2707 /* Allocate a new output index */
2708 unsigned idx = emit->info.num_outputs + j - 1;
2709 emit->fs.color_out_index[j] = idx;
2710 emit_output_declaration(emit,
2711 VGPU10_OPCODE_DCL_OUTPUT, idx,
2712 VGPU10_NAME_UNDEFINED,
2713 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2714 emit->info.output_semantic_index[idx] = j;
2715 }
2716
2717 emit->fs.num_color_outputs =
2718 emit->key.fs.write_color0_to_n_cbufs;
2719 }
2720 }
2721 else {
2722 assert(!emit->key.fs.write_color0_to_n_cbufs);
2723 }
2724 }
2725 else if (semantic_name == TGSI_SEMANTIC_POSITION) {
2726 /* Fragment depth output */
2727 emit_fragdepth_output_declaration(emit);
2728 }
2729 else if (semantic_name == TGSI_SEMANTIC_SAMPLEMASK) {
2730 /* Fragment sample mask output */
2731 emit_samplemask_output_declaration(emit);
2732 }
2733 else {
2734 assert(!"Bad output semantic name");
2735 }
2736 }
2737 else {
2738 /* VS or GS */
2739 VGPU10_COMPONENT_NAME name;
2740 VGPU10_OPCODE_TYPE type;
2741 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
2742
2743 switch (semantic_name) {
2744 case TGSI_SEMANTIC_POSITION:
2745 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2746 type = VGPU10_OPCODE_DCL_OUTPUT_SIV;
2747 name = VGPU10_NAME_POSITION;
2748 /* Save the index of the vertex position output register */
2749 emit->vposition.out_index = index;
2750 break;
2751 case TGSI_SEMANTIC_CLIPDIST:
2752 type = VGPU10_OPCODE_DCL_OUTPUT_SIV;
2753 name = VGPU10_NAME_CLIP_DISTANCE;
2754 /* save the starting index of the clip distance output register */
2755 if (semantic_index == 0)
2756 emit->clip_dist_out_index = index;
2757 writemask = emit->output_usage_mask[index];
2758 writemask = apply_clip_plane_mask(emit, writemask, semantic_index);
2759 if (writemask == 0x0) {
2760 continue; /* discard this do-nothing declaration */
2761 }
2762 break;
2763 case TGSI_SEMANTIC_PRIMID:
2764 assert(emit->unit == PIPE_SHADER_GEOMETRY);
2765 type = VGPU10_OPCODE_DCL_OUTPUT_SGV;
2766 name = VGPU10_NAME_PRIMITIVE_ID;
2767 break;
2768 case TGSI_SEMANTIC_LAYER:
2769 assert(emit->unit == PIPE_SHADER_GEOMETRY);
2770 type = VGPU10_OPCODE_DCL_OUTPUT_SGV;
2771 name = VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX;
2772 break;
2773 case TGSI_SEMANTIC_CLIPVERTEX:
2774 type = VGPU10_OPCODE_DCL_OUTPUT;
2775 name = VGPU10_NAME_UNDEFINED;
2776 emit->clip_vertex_out_index = index;
2777 break;
2778 default:
2779 /* generic output */
2780 type = VGPU10_OPCODE_DCL_OUTPUT;
2781 name = VGPU10_NAME_UNDEFINED;
2782 }
2783
2784 emit_output_declaration(emit, type, index, name, writemask);
2785 }
2786 }
2787
2788 if (emit->vposition.so_index != INVALID_INDEX &&
2789 emit->vposition.out_index != INVALID_INDEX) {
2790
2791 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2792
2793 /* Emit the declaration for the non-adjusted vertex position
2794 * for stream output purposes.
2795 */
2796 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2797 emit->vposition.so_index,
2798 VGPU10_NAME_UNDEFINED,
2799 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2800 }
2801
2802 if (emit->clip_dist_so_index != INVALID_INDEX &&
2803 emit->clip_dist_out_index != INVALID_INDEX) {
2804
2805 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2806
2807 /* Emit the declaration for the clip distance shadow copy which
2808 * will be used for stream output and for the clip distance
2809 * varying variable.
2810 */
2811 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2812 emit->clip_dist_so_index,
2813 VGPU10_NAME_UNDEFINED,
2814 emit->output_usage_mask[emit->clip_dist_out_index]);
2815
2816 if (emit->info.num_written_clipdistance > 4) {
2817 /* for the second clip distance register, each handles 4 planes */
2818 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2819 emit->clip_dist_so_index + 1,
2820 VGPU10_NAME_UNDEFINED,
2821 emit->output_usage_mask[emit->clip_dist_out_index+1]);
2822 }
2823 }
2824
2825 return TRUE;
2826 }
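
/* Illustrative example of the color-broadcast path above: with
 * key.fs.write_color0_to_n_cbufs == 3 and a fragment shader that only
 * writes COLOR[0], three color outputs are declared (the original plus
 * two extra registers allocated at num_outputs and num_outputs + 1) and
 * fs.color_out_index[0..2] records them so that later code can replicate
 * color 0 to each bound color buffer.
 */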
2827
2828
2829 /**
2830 * Emit the declaration for the temporary registers.
2831 */
2832 static boolean
2833 emit_temporaries_declaration(struct svga_shader_emitter_v10 *emit)
2834 {
2835 unsigned total_temps, reg, i;
2836
2837 total_temps = emit->num_shader_temps;
2838
2839 /* If there is indirect access to non-indexable temps in the shader,
2840 * convert those temps to indexable temps. This works around a bug
2841 * in the GLSL->TGSI translator exposed in piglit test
2842 * glsl-1.20/execution/fs-const-array-of-struct-of-array.shader_test.
2843 * Internal temps added by the driver remain as non-indexable temps.
2844 */
2845 if ((emit->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) &&
2846 emit->num_temp_arrays == 0) {
2847 unsigned arrayID;
2848
2849 arrayID = 1;
2850 emit->num_temp_arrays = arrayID + 1;
2851 emit->temp_arrays[arrayID].start = 0;
2852 emit->temp_arrays[arrayID].size = total_temps;
2853
2854 /* Fill in the temp_map entries for this temp array */
2855 for (i = 0; i < total_temps; i++) {
2856 emit->temp_map[i].arrayId = arrayID;
2857 emit->temp_map[i].index = i;
2858 }
2859 }
2860
2861 /* Allocate extra temps for specially-implemented instructions,
2862 * such as LIT.
2863 */
2864 total_temps += MAX_INTERNAL_TEMPS;
2865
2866 if (emit->unit == PIPE_SHADER_VERTEX || emit->unit == PIPE_SHADER_GEOMETRY) {
2867 if (emit->vposition.need_prescale || emit->key.vs.undo_viewport ||
2868 emit->key.clip_plane_enable ||
2869 emit->vposition.so_index != INVALID_INDEX) {
2870 emit->vposition.tmp_index = total_temps;
2871 total_temps += 1;
2872 }
2873
2874 if (emit->unit == PIPE_SHADER_VERTEX) {
2875 unsigned attrib_mask = (emit->key.vs.adjust_attrib_w_1 |
2876 emit->key.vs.adjust_attrib_itof |
2877 emit->key.vs.adjust_attrib_utof |
2878 emit->key.vs.attrib_is_bgra |
2879 emit->key.vs.attrib_puint_to_snorm |
2880 emit->key.vs.attrib_puint_to_uscaled |
2881 emit->key.vs.attrib_puint_to_sscaled);
2882 while (attrib_mask) {
2883 unsigned index = u_bit_scan(&attrib_mask);
2884 emit->vs.adjusted_input[index] = total_temps++;
2885 }
2886 }
2887
2888 if (emit->clip_mode == CLIP_DISTANCE) {
2889 /* We need to write the clip distance to a temporary register
2890 * first. Then it will be copied to the shadow copy used for
2891 * the clip distance varying variable and for stream output.
2892 * It will also be copied to the actual CLIPDIST register
2893 * according to the enabled clip planes.
2894 */
2895 emit->clip_dist_tmp_index = total_temps++;
2896 if (emit->info.num_written_clipdistance > 4)
2897 total_temps++; /* second clip register */
2898 }
2899 else if (emit->clip_mode == CLIP_VERTEX) {
2900 /* We need to convert the TGSI CLIPVERTEX output to one or more
2901 * clip distances. Allocate a temp reg for the clipvertex here.
2902 */
2903 assert(emit->info.writes_clipvertex > 0);
2904 emit->clip_vertex_tmp_index = total_temps;
2905 total_temps++;
2906 }
2907 }
2908 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
2909 if (emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS ||
2910 emit->key.fs.write_color0_to_n_cbufs > 1) {
2911 /* Allocate a temp to hold the output color */
2912 emit->fs.color_tmp_index = total_temps;
2913 total_temps += 1;
2914 }
2915
2916 if (emit->fs.face_input_index != INVALID_INDEX) {
2917 /* Allocate a temp for the +/-1 face register */
2918 emit->fs.face_tmp_index = total_temps;
2919 total_temps += 1;
2920 }
2921
2922 if (emit->fs.fragcoord_input_index != INVALID_INDEX) {
2923 /* Allocate a temp for modified fragment position register */
2924 emit->fs.fragcoord_tmp_index = total_temps;
2925 total_temps += 1;
2926 }
2927
2928 if (emit->fs.sample_pos_sys_index != INVALID_INDEX) {
2929 /* Allocate a temp for the sample position */
2930 emit->fs.sample_pos_tmp_index = total_temps++;
2931 }
2932 }
2933
2934 for (i = 0; i < emit->num_address_regs; i++) {
2935 emit->address_reg_index[i] = total_temps++;
2936 }
2937
2938 /* Initialize the temp_map array which maps TGSI temp indexes to VGPU10
2939 * temp indexes. Basically, we compact all the non-array temp register
2940 * indexes into a consecutive series.
2941 *
2942 * Before, we may have some TGSI declarations like:
2943 * DCL TEMP[0..1], LOCAL
2944 * DCL TEMP[2..4], ARRAY(1), LOCAL
2945 * DCL TEMP[5..7], ARRAY(2), LOCAL
2946 * plus, some extra temps, like TEMP[8], TEMP[9] for misc things
2947 *
2948 * After, we'll have a map like this:
2949 * temp_map[0] = { array 0, index 0 }
2950 * temp_map[1] = { array 0, index 1 }
2951 * temp_map[2] = { array 1, index 0 }
2952 * temp_map[3] = { array 1, index 1 }
2953 * temp_map[4] = { array 1, index 2 }
2954 * temp_map[5] = { array 2, index 0 }
2955 * temp_map[6] = { array 2, index 1 }
2956 * temp_map[7] = { array 2, index 2 }
2957 * temp_map[8] = { array 0, index 2 }
2958 * temp_map[9] = { array 0, index 3 }
2959 *
2960 * We'll declare two arrays of 3 elements, plus a set of four non-indexed
2961 * temps numbered 0..3
2962 *
2963 * Any time we emit a temporary register index, we'll have to use the
2964 * temp_map[] table to convert the TGSI index to the VGPU10 index.
2965 *
2966 * Finally, we recompute the total_temps value here.
2967 */
2968 reg = 0;
2969 for (i = 0; i < total_temps; i++) {
2970 if (emit->temp_map[i].arrayId == 0) {
2971 emit->temp_map[i].index = reg++;
2972 }
2973 }
2974
2975 if (0) {
2976 debug_printf("total_temps %u\n", total_temps);
2977 for (i = 0; i < total_temps; i++) {
2978 debug_printf("temp %u -> array %u index %u\n",
2979 i, emit->temp_map[i].arrayId, emit->temp_map[i].index);
2980 }
2981 }
2982
2983 total_temps = reg;
2984
2985 /* Emit declaration of ordinary temp registers */
2986 if (total_temps > 0) {
2987 VGPU10OpcodeToken0 opcode0;
2988
2989 opcode0.value = 0;
2990 opcode0.opcodeType = VGPU10_OPCODE_DCL_TEMPS;
2991
2992 begin_emit_instruction(emit);
2993 emit_dword(emit, opcode0.value);
2994 emit_dword(emit, total_temps);
2995 end_emit_instruction(emit);
2996 }
2997
2998 /* Emit declarations for indexable temp arrays. Skip 0th entry since
2999 * it's unused.
3000 */
3001 for (i = 1; i < emit->num_temp_arrays; i++) {
3002 unsigned num_temps = emit->temp_arrays[i].size;
3003
3004 if (num_temps > 0) {
3005 VGPU10OpcodeToken0 opcode0;
3006
3007 opcode0.value = 0;
3008 opcode0.opcodeType = VGPU10_OPCODE_DCL_INDEXABLE_TEMP;
3009
3010 begin_emit_instruction(emit);
3011 emit_dword(emit, opcode0.value);
3012 emit_dword(emit, i); /* which array */
3013 emit_dword(emit, num_temps);
3014 emit_dword(emit, 4); /* num components */
3015 end_emit_instruction(emit);
3016
3017 total_temps += num_temps;
3018 }
3019 }
3020
3021 /* Check that the grand total of all regular and indexed temps is
3022 * under the limit.
3023 */
3024 check_register_index(emit, VGPU10_OPCODE_DCL_TEMPS, total_temps - 1);
3025
3026 return TRUE;
3027 }
3028
3029
3030 static boolean
3031 emit_constant_declaration(struct svga_shader_emitter_v10 *emit)
3032 {
3033 VGPU10OpcodeToken0 opcode0;
3034 VGPU10OperandToken0 operand0;
3035 unsigned total_consts, i;
3036
3037 opcode0.value = 0;
3038 opcode0.opcodeType = VGPU10_OPCODE_DCL_CONSTANT_BUFFER;
3039 opcode0.accessPattern = VGPU10_CB_IMMEDIATE_INDEXED;
3040 /* XXX or, access pattern = VGPU10_CB_DYNAMIC_INDEXED */
3041
3042 operand0.value = 0;
3043 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
3044 operand0.indexDimension = VGPU10_OPERAND_INDEX_2D;
3045 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
3046 operand0.index1Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
3047 operand0.operandType = VGPU10_OPERAND_TYPE_CONSTANT_BUFFER;
3048 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
3049 operand0.swizzleX = 0;
3050 operand0.swizzleY = 1;
3051 operand0.swizzleZ = 2;
3052 operand0.swizzleW = 3;
3053
3054 /**
3055 * Emit declaration for constant buffer [0]. We also allocate
3056 * room for the extra constants here.
3057 */
3058 total_consts = emit->num_shader_consts[0];
3059
3060 /* Now, allocate constant slots for the "extra" constants.
3061 * Note: it's critical that these extra constant locations
3062 * exactly match what's emitted by the "extra" constants code
3063 * in svga_state_constants.c
3064 */
3065
3066 /* Vertex position scale/translation */
3067 if (emit->vposition.need_prescale) {
3068 emit->vposition.prescale_scale_index = total_consts++;
3069 emit->vposition.prescale_trans_index = total_consts++;
3070 }
3071
3072 if (emit->unit == PIPE_SHADER_VERTEX) {
3073 if (emit->key.vs.undo_viewport) {
3074 emit->vs.viewport_index = total_consts++;
3075 }
3076 }
3077
3078 /* user-defined clip planes */
3079 if (emit->key.clip_plane_enable) {
3080 unsigned n = util_bitcount(emit->key.clip_plane_enable);
3081 assert(emit->unit == PIPE_SHADER_VERTEX ||
3082 emit->unit == PIPE_SHADER_GEOMETRY);
3083 for (i = 0; i < n; i++) {
3084 emit->clip_plane_const[i] = total_consts++;
3085 }
3086 }
3087
3088 for (i = 0; i < emit->num_samplers; i++) {
3089
3090 if (emit->sampler_view[i]) {
3091
3092 /* Texcoord scale factors for RECT textures */
3093 if (emit->key.tex[i].unnormalized) {
3094 emit->texcoord_scale_index[i] = total_consts++;
3095 }
3096
3097 /* Texture buffer sizes */
3098 if (emit->sampler_target[i] == TGSI_TEXTURE_BUFFER) {
3099 emit->texture_buffer_size_index[i] = total_consts++;
3100 }
3101 }
3102 }
3103
3104 if (total_consts > 0) {
3105 begin_emit_instruction(emit);
3106 emit_dword(emit, opcode0.value);
3107 emit_dword(emit, operand0.value);
3108 emit_dword(emit, 0); /* which const buffer slot */
3109 emit_dword(emit, total_consts);
3110 end_emit_instruction(emit);
3111 }
3112
3113 /* Declare remaining constant buffers (UBOs) */
3114 for (i = 1; i < ARRAY_SIZE(emit->num_shader_consts); i++) {
3115 if (emit->num_shader_consts[i] > 0) {
3116 begin_emit_instruction(emit);
3117 emit_dword(emit, opcode0.value);
3118 emit_dword(emit, operand0.value);
3119 emit_dword(emit, i); /* which const buffer slot */
3120 emit_dword(emit, emit->num_shader_consts[i]);
3121 end_emit_instruction(emit);
3122 }
3123 }
3124
3125 return TRUE;
3126 }
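
/* Illustrative layout of constant buffer 0 (a sketch; the actual indexes
 * are whatever the code above assigns): for a vertex shader declaring
 * CONST[0..9] that needs prescale and one user clip plane,
 *
 *    c[0..9]  shader constants from the TGSI declaration
 *    c[10]    prescale scale     (vposition.prescale_scale_index)
 *    c[11]    prescale translate (vposition.prescale_trans_index)
 *    c[12]    clip plane 0       (clip_plane_const[0])
 *
 * and these slots must line up with the "extra" constants uploaded by
 * svga_state_constants.c, as noted above.
 */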
3127
3128
3129 /**
3130 * Emit declarations for samplers.
3131 */
3132 static boolean
3133 emit_sampler_declarations(struct svga_shader_emitter_v10 *emit)
3134 {
3135 unsigned i;
3136
3137 for (i = 0; i < emit->num_samplers; i++) {
3138 VGPU10OpcodeToken0 opcode0;
3139 VGPU10OperandToken0 operand0;
3140
3141 opcode0.value = 0;
3142 opcode0.opcodeType = VGPU10_OPCODE_DCL_SAMPLER;
3143 opcode0.samplerMode = VGPU10_SAMPLER_MODE_DEFAULT;
3144
3145 operand0.value = 0;
3146 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
3147 operand0.operandType = VGPU10_OPERAND_TYPE_SAMPLER;
3148 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
3149 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
3150
3151 begin_emit_instruction(emit);
3152 emit_dword(emit, opcode0.value);
3153 emit_dword(emit, operand0.value);
3154 emit_dword(emit, i);
3155 end_emit_instruction(emit);
3156 }
3157
3158 return TRUE;
3159 }
3160
3161
3162 /**
3163 * Translate TGSI_TEXTURE_x to VGPU10_RESOURCE_DIMENSION_x.
3164 */
3165 static unsigned
3166 tgsi_texture_to_resource_dimension(enum tgsi_texture_type target,
3167 unsigned num_samples,
3168 boolean is_array)
3169 {
3170 if (target == TGSI_TEXTURE_2D_MSAA && num_samples < 2) {
3171 target = TGSI_TEXTURE_2D;
3172 }
3173 else if (target == TGSI_TEXTURE_2D_ARRAY_MSAA && num_samples < 2) {
3174 target = TGSI_TEXTURE_2D_ARRAY;
3175 }
3176
3177 switch (target) {
3178 case TGSI_TEXTURE_BUFFER:
3179 return VGPU10_RESOURCE_DIMENSION_BUFFER;
3180 case TGSI_TEXTURE_1D:
3181 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D;
3182 case TGSI_TEXTURE_2D:
3183 case TGSI_TEXTURE_RECT:
3184 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
3185 case TGSI_TEXTURE_3D:
3186 return VGPU10_RESOURCE_DIMENSION_TEXTURE3D;
3187 case TGSI_TEXTURE_CUBE:
3188 case TGSI_TEXTURE_SHADOWCUBE:
3189 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBE;
3190 case TGSI_TEXTURE_SHADOW1D:
3191 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D;
3192 case TGSI_TEXTURE_SHADOW2D:
3193 case TGSI_TEXTURE_SHADOWRECT:
3194 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
3195 case TGSI_TEXTURE_1D_ARRAY:
3196 case TGSI_TEXTURE_SHADOW1D_ARRAY:
3197 return is_array ? VGPU10_RESOURCE_DIMENSION_TEXTURE1DARRAY
3198 : VGPU10_RESOURCE_DIMENSION_TEXTURE1D;
3199 case TGSI_TEXTURE_2D_ARRAY:
3200 case TGSI_TEXTURE_SHADOW2D_ARRAY:
3201 return is_array ? VGPU10_RESOURCE_DIMENSION_TEXTURE2DARRAY
3202 : VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
3203 case TGSI_TEXTURE_2D_MSAA:
3204 return VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS;
3205 case TGSI_TEXTURE_2D_ARRAY_MSAA:
3206 return is_array ? VGPU10_RESOURCE_DIMENSION_TEXTURE2DMSARRAY
3207 : VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS;
3208 case TGSI_TEXTURE_CUBE_ARRAY:
3209 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
3210 return is_array ? VGPU10_RESOURCE_DIMENSION_TEXTURECUBEARRAY
3211 : VGPU10_RESOURCE_DIMENSION_TEXTURECUBE;
3212 default:
3213 assert(!"Unexpected resource type");
3214 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
3215 }
3216 }
3217
3218
3219 /**
3220 * Given a tgsi_return_type, return true iff it is an integer type.
3221 */
3222 static boolean
3223 is_integer_type(enum tgsi_return_type type)
3224 {
3225 switch (type) {
3226 case TGSI_RETURN_TYPE_SINT:
3227 case TGSI_RETURN_TYPE_UINT:
3228 return TRUE;
3229 case TGSI_RETURN_TYPE_FLOAT:
3230 case TGSI_RETURN_TYPE_UNORM:
3231 case TGSI_RETURN_TYPE_SNORM:
3232 return FALSE;
3233 case TGSI_RETURN_TYPE_COUNT:
3234 default:
3235 assert(!"is_integer_type: Unknown tgsi_return_type");
3236 return FALSE;
3237 }
3238 }
3239
3240
3241 /**
3242 * Emit declarations for resources.
3243 * XXX When we're sure that all TGSI shaders will be generated with
3244 * sampler view declarations (Ex: DCL SVIEW[n], 2D, UINT) we may
3245 * rework this code.
3246 */
3247 static boolean
3248 emit_resource_declarations(struct svga_shader_emitter_v10 *emit)
3249 {
3250 unsigned i;
3251
3252 /* Emit resource decl for each sampler */
3253 for (i = 0; i < emit->num_samplers; i++) {
3254 VGPU10OpcodeToken0 opcode0;
3255 VGPU10OperandToken0 operand0;
3256 VGPU10ResourceReturnTypeToken return_type;
3257 VGPU10_RESOURCE_RETURN_TYPE rt;
3258
3259 opcode0.value = 0;
3260 opcode0.opcodeType = VGPU10_OPCODE_DCL_RESOURCE;
3261 opcode0.resourceDimension =
3262 tgsi_texture_to_resource_dimension(emit->sampler_target[i],
3263 emit->key.tex[i].num_samples,
3264 emit->key.tex[i].is_array);
3265 opcode0.sampleCount = emit->key.tex[i].num_samples;
3266 operand0.value = 0;
3267 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
3268 operand0.operandType = VGPU10_OPERAND_TYPE_RESOURCE;
3269 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
3270 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
3271
3272 #if 1
3273 /* convert TGSI_RETURN_TYPE_x to VGPU10_RETURN_TYPE_x */
3274 STATIC_ASSERT(VGPU10_RETURN_TYPE_UNORM == TGSI_RETURN_TYPE_UNORM + 1);
3275 STATIC_ASSERT(VGPU10_RETURN_TYPE_SNORM == TGSI_RETURN_TYPE_SNORM + 1);
3276 STATIC_ASSERT(VGPU10_RETURN_TYPE_SINT == TGSI_RETURN_TYPE_SINT + 1);
3277 STATIC_ASSERT(VGPU10_RETURN_TYPE_UINT == TGSI_RETURN_TYPE_UINT + 1);
3278 STATIC_ASSERT(VGPU10_RETURN_TYPE_FLOAT == TGSI_RETURN_TYPE_FLOAT + 1);
3279 assert(emit->sampler_return_type[i] <= TGSI_RETURN_TYPE_FLOAT);
3280 rt = emit->sampler_return_type[i] + 1;
3281 #else
3282 switch (emit->sampler_return_type[i]) {
3283 case TGSI_RETURN_TYPE_UNORM: rt = VGPU10_RETURN_TYPE_UNORM; break;
3284 case TGSI_RETURN_TYPE_SNORM: rt = VGPU10_RETURN_TYPE_SNORM; break;
3285 case TGSI_RETURN_TYPE_SINT: rt = VGPU10_RETURN_TYPE_SINT; break;
3286 case TGSI_RETURN_TYPE_UINT: rt = VGPU10_RETURN_TYPE_UINT; break;
3287 case TGSI_RETURN_TYPE_FLOAT: rt = VGPU10_RETURN_TYPE_FLOAT; break;
3288 case TGSI_RETURN_TYPE_COUNT:
3289 default:
3290 rt = VGPU10_RETURN_TYPE_FLOAT;
3291 assert(!"emit_resource_declarations: Unknown tgsi_return_type");
3292 }
3293 #endif
3294
3295 return_type.value = 0;
3296 return_type.component0 = rt;
3297 return_type.component1 = rt;
3298 return_type.component2 = rt;
3299 return_type.component3 = rt;
3300
3301 begin_emit_instruction(emit);
3302 emit_dword(emit, opcode0.value);
3303 emit_dword(emit, operand0.value);
3304 emit_dword(emit, i);
3305 emit_dword(emit, return_type.value);
3306 end_emit_instruction(emit);
3307 }
3308
3309 return TRUE;
3310 }
3311
3312 static void
3313 emit_instruction_op1(struct svga_shader_emitter_v10 *emit,
3314 VGPU10_OPCODE_TYPE opcode,
3315 const struct tgsi_full_dst_register *dst,
3316 const struct tgsi_full_src_register *src,
3317 boolean saturate)
3318 {
3319 begin_emit_instruction(emit);
3320 emit_opcode(emit, opcode, saturate);
3321 emit_dst_register(emit, dst);
3322 emit_src_register(emit, src);
3323 end_emit_instruction(emit);
3324 }
3325
3326 static void
3327 emit_instruction_op2(struct svga_shader_emitter_v10 *emit,
3328 VGPU10_OPCODE_TYPE opcode,
3329 const struct tgsi_full_dst_register *dst,
3330 const struct tgsi_full_src_register *src1,
3331 const struct tgsi_full_src_register *src2,
3332 boolean saturate)
3333 {
3334 begin_emit_instruction(emit);
3335 emit_opcode(emit, opcode, saturate);
3336 emit_dst_register(emit, dst);
3337 emit_src_register(emit, src1);
3338 emit_src_register(emit, src2);
3339 end_emit_instruction(emit);
3340 }
3341
3342 static void
3343 emit_instruction_op3(struct svga_shader_emitter_v10 *emit,
3344 VGPU10_OPCODE_TYPE opcode,
3345 const struct tgsi_full_dst_register *dst,
3346 const struct tgsi_full_src_register *src1,
3347 const struct tgsi_full_src_register *src2,
3348 const struct tgsi_full_src_register *src3,
3349 boolean saturate)
3350 {
3351 begin_emit_instruction(emit);
3352 emit_opcode(emit, opcode, saturate);
3353 emit_dst_register(emit, dst);
3354 emit_src_register(emit, src1);
3355 emit_src_register(emit, src2);
3356 emit_src_register(emit, src3);
3357 end_emit_instruction(emit);
3358 }
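
/* Usage sketch: these helpers wrap the common "opcode + dst + N srcs"
 * pattern, so emitting "MOV dst, src" elsewhere in this file is simply
 *
 *    emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst, &src, FALSE);
 *
 * with the final boolean forwarded to emit_opcode() as the saturate
 * modifier.
 */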
3359
3360 /**
3361 * Emit the actual clip distance instructions used for clipping
3362 * by copying the clip distances from the temporary registers to the
3363 * CLIPDIST registers, masked by the enabled clip planes.
3364 * Also copy the clip distances from the temporary to the clip distance
3365 * shadow copy register, which is read by the next shader stage.
3366 */
3367 static void
3368 emit_clip_distance_instructions(struct svga_shader_emitter_v10 *emit)
3369 {
3370 struct tgsi_full_src_register tmp_clip_dist_src;
3371 struct tgsi_full_dst_register clip_dist_dst;
3372
3373 unsigned i;
3374 unsigned clip_plane_enable = emit->key.clip_plane_enable;
3375 unsigned clip_dist_tmp_index = emit->clip_dist_tmp_index;
3376 int num_written_clipdist = emit->info.num_written_clipdistance;
3377
3378 assert(emit->clip_dist_out_index != INVALID_INDEX);
3379 assert(emit->clip_dist_tmp_index != INVALID_INDEX);
3380
3381 /**
3382 * Temporarily reset the temporary clip dist register index so
3383 * that the copy to the real clip dist register will not
3384 * attempt to copy to the temporary register again.
3385 */
3386 emit->clip_dist_tmp_index = INVALID_INDEX;
3387
3388 for (i = 0; i < 2 && num_written_clipdist > 0; i++, num_written_clipdist-=4) {
3389
3390 tmp_clip_dist_src = make_src_temp_reg(clip_dist_tmp_index + i);
3391
3392 /**
3393 * Copy to the shadow copy, which is used for the clip distance
3394 * varying variable and for stream output. All clip distances
3395 * are written regardless of which clip planes are enabled.
3396 */
3397 clip_dist_dst = make_dst_reg(TGSI_FILE_OUTPUT,
3398 emit->clip_dist_so_index + i);
3399
3400 /* MOV clip_dist_so, tmp_clip_dist */
3401 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &clip_dist_dst,
3402 &tmp_clip_dist_src, FALSE);
3403
3404 /**
3405 * Copy the clip distances for the enabled clip planes
3406 * to the CLIPDIST registers used for clipping.
3407 */
3408 if (clip_plane_enable & 0xf) {
3409 clip_dist_dst = make_dst_reg(TGSI_FILE_OUTPUT,
3410 emit->clip_dist_out_index + i);
3411 clip_dist_dst = writemask_dst(&clip_dist_dst, clip_plane_enable & 0xf);
3412
3413 /* MOV CLIPDIST, tmp_clip_dist */
3414 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &clip_dist_dst,
3415 &tmp_clip_dist_src, FALSE);
3416 }
3417 /* four clip planes per clip register */
3418 clip_plane_enable >>= 4;
3419 }
3420 /**
3421 * set the temporary clip dist register index back to the
3422 * temporary index for the next vertex
3423 */
3424 emit->clip_dist_tmp_index = clip_dist_tmp_index;
3425 }
3426
3427 /* Declare clip distance output registers for user-defined clip planes
3428 * or the TGSI_CLIPVERTEX output.
3429 */
3430 static void
3431 emit_clip_distance_declarations(struct svga_shader_emitter_v10 *emit)
3432 {
3433 unsigned num_clip_planes = util_bitcount(emit->key.clip_plane_enable);
3434 unsigned index = emit->num_outputs;
3435 unsigned plane_mask;
3436
3437 assert(emit->unit == PIPE_SHADER_VERTEX ||
3438 emit->unit == PIPE_SHADER_GEOMETRY);
3439 assert(num_clip_planes <= 8);
3440
3441 if (emit->clip_mode != CLIP_LEGACY &&
3442 emit->clip_mode != CLIP_VERTEX) {
3443 return;
3444 }
3445
3446 if (num_clip_planes == 0)
3447 return;
3448
3449 /* Declare one or two clip output registers. The number of components
3450 * in the mask reflects the number of clip planes. For example, if 5
3451 * clip planes are needed, we'll declare outputs similar to:
3452 * dcl_output_siv o2.xyzw, clip_distance
3453 * dcl_output_siv o3.x, clip_distance
3454 */
3455 emit->clip_dist_out_index = index; /* save the starting clip dist reg index */
3456
3457 plane_mask = (1 << num_clip_planes) - 1;
3458 if (plane_mask & 0xf) {
3459 unsigned cmask = plane_mask & VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
3460 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT_SIV, index,
3461 VGPU10_NAME_CLIP_DISTANCE, cmask);
3462 emit->num_outputs++;
3463 }
3464 if (plane_mask & 0xf0) {
3465 unsigned cmask = (plane_mask >> 4) & VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
3466 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT_SIV, index + 1,
3467 VGPU10_NAME_CLIP_DISTANCE, cmask);
3468 emit->num_outputs++;
3469 }
3470 }
3471
3472
3473 /**
3474 * Emit the instructions for writing to the clip distance registers
3475 * to handle legacy/automatic clip planes.
3476 * For each clip plane, the distance is the dot product of the vertex
3477 * position (found in TEMP[vpos_tmp_index]) and the clip plane coefficients.
3478 * This is not used when the shader has an explicit CLIPVERTEX or CLIPDISTANCE
3479 * output registers already declared.
3480 */
3481 static void
3482 emit_clip_distance_from_vpos(struct svga_shader_emitter_v10 *emit,
3483 unsigned vpos_tmp_index)
3484 {
3485 unsigned i, num_clip_planes = util_bitcount(emit->key.clip_plane_enable);
3486
3487 assert(emit->clip_mode == CLIP_LEGACY);
3488 assert(num_clip_planes <= 8);
3489
3490 assert(emit->unit == PIPE_SHADER_VERTEX ||
3491 emit->unit == PIPE_SHADER_GEOMETRY);
3492
3493 for (i = 0; i < num_clip_planes; i++) {
3494 struct tgsi_full_dst_register dst;
3495 struct tgsi_full_src_register plane_src, vpos_src;
3496 unsigned reg_index = emit->clip_dist_out_index + i / 4;
3497 unsigned comp = i % 4;
3498 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_X << comp;
3499
3500 /* create dst, src regs */
3501 dst = make_dst_reg(TGSI_FILE_OUTPUT, reg_index);
3502 dst = writemask_dst(&dst, writemask);
3503
3504 plane_src = make_src_const_reg(emit->clip_plane_const[i]);
3505 vpos_src = make_src_temp_reg(vpos_tmp_index);
3506
3507 /* DP4 clip_dist, plane, vpos */
3508 emit_instruction_op2(emit, VGPU10_OPCODE_DP4, &dst,
3509 &plane_src, &vpos_src, FALSE);
3510 }
3511 }
3512
3513
3514 /**
3515 * Emit the instructions for computing the clip distance results from
3516 * the clip vertex temporary.
3517 * For each clip plane, the distance is the dot product of the clip vertex
3518 * position (found in a temp reg) and the clip plane coefficients.
3519 */
3520 static void
3521 emit_clip_vertex_instructions(struct svga_shader_emitter_v10 *emit)
3522 {
3523 const unsigned num_clip = util_bitcount(emit->key.clip_plane_enable);
3524 unsigned i;
3525 struct tgsi_full_dst_register dst;
3526 struct tgsi_full_src_register clipvert_src;
3527 const unsigned clip_vertex_tmp = emit->clip_vertex_tmp_index;
3528
3529 assert(emit->unit == PIPE_SHADER_VERTEX ||
3530 emit->unit == PIPE_SHADER_GEOMETRY);
3531
3532 assert(emit->clip_mode == CLIP_VERTEX);
3533
3534 clipvert_src = make_src_temp_reg(clip_vertex_tmp);
3535
3536 for (i = 0; i < num_clip; i++) {
3537 struct tgsi_full_src_register plane_src;
3538 unsigned reg_index = emit->clip_dist_out_index + i / 4;
3539 unsigned comp = i % 4;
3540 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_X << comp;
3541
3542 /* create dst, src regs */
3543 dst = make_dst_reg(TGSI_FILE_OUTPUT, reg_index);
3544 dst = writemask_dst(&dst, writemask);
3545
3546 plane_src = make_src_const_reg(emit->clip_plane_const[i]);
3547
3548 /* DP4 clip_dist, plane, clipvertex */
3549 emit_instruction_op2(emit, VGPU10_OPCODE_DP4, &dst,
3550 &plane_src, &clipvert_src, FALSE);
3551 }
3552
3553 /* copy temporary clip vertex register to the clip vertex register */
3554
3555 assert(emit->clip_vertex_out_index != INVALID_INDEX);
3556
3557 /**
3558 * Temporarily reset the temporary clip vertex register index so
3559 * that the copy to the clip vertex register below does not get
3560 * redirected to the temporary register again.
3561 */
3562 emit->clip_vertex_tmp_index = INVALID_INDEX;
3563
3564 /* MOV clip_vertex, clip_vertex_tmp */
3565 dst = make_dst_reg(TGSI_FILE_OUTPUT, emit->clip_vertex_out_index);
3566 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
3567 &dst, &clipvert_src, FALSE);
3568
3569 /**
3570 * set the temporary clip vertex register index back to the
3571 * temporary index for the next vertex
3572 */
3573 emit->clip_vertex_tmp_index = clip_vertex_tmp;
3574 }
3575
3576 /**
3577 * Emit code to convert RGBA to BGRA
3578 */
3579 static void
3580 emit_swap_r_b(struct svga_shader_emitter_v10 *emit,
3581 const struct tgsi_full_dst_register *dst,
3582 const struct tgsi_full_src_register *src)
3583 {
3584 struct tgsi_full_src_register bgra_src =
3585 swizzle_src(src, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X, TGSI_SWIZZLE_W);
3586
3587 begin_emit_instruction(emit);
3588 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
3589 emit_dst_register(emit, dst);
3590 emit_src_register(emit, &bgra_src);
3591 end_emit_instruction(emit);
3592 }
3593
3594
3595 /** Convert from 10_10_10_2 normalized to 10_10_10_2_snorm */
3596 static void
3597 emit_puint_to_snorm(struct svga_shader_emitter_v10 *emit,
3598 const struct tgsi_full_dst_register *dst,
3599 const struct tgsi_full_src_register *src)
3600 {
3601 struct tgsi_full_src_register half = make_immediate_reg_float(emit, 0.5f);
3602 struct tgsi_full_src_register two =
3603 make_immediate_reg_float4(emit, 2.0f, 2.0f, 2.0f, 3.0f);
3604 struct tgsi_full_src_register neg_two =
3605 make_immediate_reg_float4(emit, -2.0f, -2.0f, -2.0f, -1.66666f);
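/* For the X/Y/Z components the sequence below computes
 *    dst = (src >= 0.5) ? 2.0 * src - 2.0 : 2.0 * src
 * i.e. the GE result (all 0s or all 1s per component) is ANDed with the
 * bit pattern of -2.0 to select the bias.  The W component uses the
 * different constants above (3.0 / -1.66666) to handle the 2-bit alpha field.
 */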
3606
3607 unsigned val_tmp = get_temp_index(emit);
3608 struct tgsi_full_dst_register val_dst = make_dst_temp_reg(val_tmp);
3609 struct tgsi_full_src_register val_src = make_src_temp_reg(val_tmp);
3610
3611 unsigned bias_tmp = get_temp_index(emit);
3612 struct tgsi_full_dst_register bias_dst = make_dst_temp_reg(bias_tmp);
3613 struct tgsi_full_src_register bias_src = make_src_temp_reg(bias_tmp);
3614
3615 /* val = src * 2.0 */
3616 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &val_dst,
3617 src, &two, FALSE);
3618
3619 /* bias = (src >= 0.5) ? 0xffffffff : 0 */
3620 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &bias_dst,
3621 src, &half, FALSE);
3622
3623 /* bias = bias & -2.0 */
3624 emit_instruction_op2(emit, VGPU10_OPCODE_AND, &bias_dst,
3625 &bias_src, &neg_two, FALSE);
3626
3627 /* dst = val + bias */
3628 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, dst,
3629 &val_src, &bias_src, FALSE);
3630
3631 free_temp_indexes(emit);
3632 }
3633
3634
3635 /** Convert from 10_10_10_2_unorm to 10_10_10_2_uscaled */
3636 static void
3637 emit_puint_to_uscaled(struct svga_shader_emitter_v10 *emit,
3638 const struct tgsi_full_dst_register *dst,
3639 const struct tgsi_full_src_register *src)
3640 {
3641 struct tgsi_full_src_register scale =
3642 make_immediate_reg_float4(emit, 1023.0f, 1023.0f, 1023.0f, 3.0f);
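/* 1023 = 2^10 - 1 and 3 = 2^2 - 1 are the maximum unsigned values of the
 * 10-bit RGB fields and the 2-bit alpha field, so this maps the normalized
 * [0,1] components back to their unscaled integer values.
 */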
3643
3644 /* dst = src * scale */
3645 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, dst, src, &scale, FALSE);
3646 }
3647
3648
3649 /** Convert from R32_UINT to 10_10_10_2_sscaled */
3650 static void
3651 emit_puint_to_sscaled(struct svga_shader_emitter_v10 *emit,
3652 const struct tgsi_full_dst_register *dst,
3653 const struct tgsi_full_src_register *src)
3654 {
3655 struct tgsi_full_src_register lshift =
3656 make_immediate_reg_int4(emit, 22, 12, 2, 0);
3657 struct tgsi_full_src_register rshift =
3658 make_immediate_reg_int4(emit, 22, 22, 22, 30);
3659
3660 struct tgsi_full_src_register src_xxxx = scalar_src(src, TGSI_SWIZZLE_X);
3661
3662 unsigned tmp = get_temp_index(emit);
3663 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3664 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3665
3666 /*
3667 * r = (pixel << 22) >> 22;   # signed int in [-512, 511]
3668 * g = (pixel << 12) >> 22;   # signed int in [-512, 511]
3669 * b = (pixel << 2) >> 22;    # signed int in [-512, 511]
3670 * a = (pixel << 0) >> 30;    # signed int in [-2, 1]
3671 * dst = i_to_f(r,g,b,a); # convert to float
3672 */
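/* Each ISHL/ISHR pair sign-extends a field: the left shift moves the
 * field's top bit into bit 31 and the arithmetic right shift brings the
 * field back down while replicating the sign bit.
 */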
3673 emit_instruction_op2(emit, VGPU10_OPCODE_ISHL, &tmp_dst,
3674 &src_xxxx, &lshift, FALSE);
3675 emit_instruction_op2(emit, VGPU10_OPCODE_ISHR, &tmp_dst,
3676 &tmp_src, &rshift, FALSE);
3677 emit_instruction_op1(emit, VGPU10_OPCODE_ITOF, dst, &tmp_src, FALSE);
3678
3679 free_temp_indexes(emit);
3680 }
3681
3682
3683 /**
3684 * Emit code for TGSI_OPCODE_ARL or TGSI_OPCODE_UARL instruction.
3685 */
3686 static boolean
3687 emit_arl_uarl(struct svga_shader_emitter_v10 *emit,
3688 const struct tgsi_full_instruction *inst)
3689 {
3690 unsigned index = inst->Dst[0].Register.Index;
3691 struct tgsi_full_dst_register dst;
3692 VGPU10_OPCODE_TYPE opcode;
3693
3694 assert(index < MAX_VGPU10_ADDR_REGS);
3695 dst = make_dst_temp_reg(emit->address_reg_index[index]);
3696
3697 /* ARL dst, s0
3698 * Translates into:
3699 * FTOI address_tmp, s0
3700 *
3701 * UARL dst, s0
3702 * Translates into:
3703 * MOV address_tmp, s0
3704 */
3705 if (inst->Instruction.Opcode == TGSI_OPCODE_ARL)
3706 opcode = VGPU10_OPCODE_FTOI;
3707 else
3708 opcode = VGPU10_OPCODE_MOV;
3709
3710 emit_instruction_op1(emit, opcode, &dst, &inst->Src[0], FALSE);
3711
3712 return TRUE;
3713 }
3714
3715
3716 /**
3717 * Emit code for TGSI_OPCODE_CAL instruction.
3718 */
3719 static boolean
3720 emit_cal(struct svga_shader_emitter_v10 *emit,
3721 const struct tgsi_full_instruction *inst)
3722 {
3723 unsigned label = inst->Label.Label;
3724 VGPU10OperandToken0 operand;
3725 operand.value = 0;
3726 operand.operandType = VGPU10_OPERAND_TYPE_LABEL;
3727
3728 begin_emit_instruction(emit);
emit_opcode(emit, VGPU10_OPCODE_CALL, FALSE);  /* the CALL opcode token */
3729 emit_dword(emit, operand.value);
3730 emit_dword(emit, label);
3731 end_emit_instruction(emit);
3732
3733 return TRUE;
3734 }
3735
3736
3737 /**
3738 * Emit code for TGSI_OPCODE_IABS instruction.
3739 */
3740 static boolean
3741 emit_iabs(struct svga_shader_emitter_v10 *emit,
3742 const struct tgsi_full_instruction *inst)
3743 {
3744 /* dst.x = (src0.x < 0) ? -src0.x : src0.x
3745 * dst.y = (src0.y < 0) ? -src0.y : src0.y
3746 * dst.z = (src0.z < 0) ? -src0.z : src0.z
3747 * dst.w = (src0.w < 0) ? -src0.w : src0.w
3748 *
3749 * Translates into
3750 * IMAX dst, src, neg(src)
3751 */
3752 struct tgsi_full_src_register neg_src = negate_src(&inst->Src[0]);
3753 emit_instruction_op2(emit, VGPU10_OPCODE_IMAX, &inst->Dst[0],
3754 &inst->Src[0], &neg_src, FALSE);
3755
3756 return TRUE;
3757 }
3758
3759
3760 /**
3761 * Emit code for TGSI_OPCODE_CMP instruction.
3762 */
3763 static boolean
3764 emit_cmp(struct svga_shader_emitter_v10 *emit,
3765 const struct tgsi_full_instruction *inst)
3766 {
3767 /* dst.x = (src0.x < 0) ? src1.x : src2.x
3768 * dst.y = (src0.y < 0) ? src1.y : src2.y
3769 * dst.z = (src0.z < 0) ? src1.z : src2.z
3770 * dst.w = (src0.w < 0) ? src1.w : src2.w
3771 *
3772 * Translates into
3773 * LT tmp, src0, 0.0
3774 * MOVC dst, tmp, src1, src2
3775 */
3776 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
3777 unsigned tmp = get_temp_index(emit);
3778 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3779 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3780
3781 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst,
3782 &inst->Src[0], &zero, FALSE);
3783 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0],
3784 &tmp_src, &inst->Src[1], &inst->Src[2],
3785 inst->Instruction.Saturate);
3786
3787 free_temp_indexes(emit);
3788
3789 return TRUE;
3790 }
3791
3792
3793 /**
3794 * Emit code for TGSI_OPCODE_DST instruction.
3795 */
3796 static boolean
3797 emit_dst(struct svga_shader_emitter_v10 *emit,
3798 const struct tgsi_full_instruction *inst)
3799 {
3800 /*
3801 * dst.x = 1
3802 * dst.y = src0.y * src1.y
3803 * dst.z = src0.z
3804 * dst.w = src1.w
3805 */
3806
3807 struct tgsi_full_src_register s0_yyyy =
3808 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
3809 struct tgsi_full_src_register s0_zzzz =
3810 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Z);
3811 struct tgsi_full_src_register s1_yyyy =
3812 scalar_src(&inst->Src[1], TGSI_SWIZZLE_Y);
3813 struct tgsi_full_src_register s1_wwww =
3814 scalar_src(&inst->Src[1], TGSI_SWIZZLE_W);
3815
3816 /*
3817 * If dst is the same register as either src0 or src1, we need
3818 * to create a temporary for the result and insert an extra move.
3819 */
3820 unsigned tmp_move = get_temp_index(emit);
3821 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
3822 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
3823
3824 /* MOV dst.x, 1.0 */
3825 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
3826 struct tgsi_full_dst_register dst_x =
3827 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
3828 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
3829
3830 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &one, FALSE);
3831 }
3832
3833 /* MUL dst.y, s0.y, s1.y */
3834 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
3835 struct tgsi_full_dst_register dst_y =
3836 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
3837
3838 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &dst_y, &s0_yyyy,
3839 &s1_yyyy, inst->Instruction.Saturate);
3840 }
3841
3842 /* MOV dst.z, s0.z */
3843 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
3844 struct tgsi_full_dst_register dst_z =
3845 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
3846
3847 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_z, &s0_zzzz,
3848 inst->Instruction.Saturate);
3849 }
3850
3851 /* MOV dst.w, s1.w */
3852 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
3853 struct tgsi_full_dst_register dst_w =
3854 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
3855
3856 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &s1_wwww,
3857 inst->Instruction.Saturate);
3858 }
3859
3860 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
3861 FALSE);
3862 free_temp_indexes(emit);
3863
3864 return TRUE;
3865 }
3866
3867
3868
3869 /**
3870 * Emit code for TGSI_OPCODE_ENDPRIM (GS only)
3871 */
3872 static boolean
3873 emit_endprim(struct svga_shader_emitter_v10 *emit,
3874 const struct tgsi_full_instruction *inst)
3875 {
3876 assert(emit->unit == PIPE_SHADER_GEOMETRY);
3877
3878 /* We can't use emit_simple() because the TGSI instruction has one
3879 * operand (vertex stream number) which we must ignore for VGPU10.
3880 */
3881 begin_emit_instruction(emit);
3882 emit_opcode(emit, VGPU10_OPCODE_CUT, FALSE);
3883 end_emit_instruction(emit);
3884 return TRUE;
3885 }
3886
3887
3888 /**
3889 * Emit code for TGSI_OPCODE_EX2 (2^x) instruction.
3890 */
3891 static boolean
3892 emit_ex2(struct svga_shader_emitter_v10 *emit,
3893 const struct tgsi_full_instruction *inst)
3894 {
3895 /* Note that TGSI_OPCODE_EX2 computes only one value from src.x
3896 * while VGPU10 computes four values.
3897 *
3898 * dst = EX2(src):
3899 * dst.xyzw = 2.0 ^ src.x
3900 */
3901
3902 struct tgsi_full_src_register src_xxxx =
3903 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
3904 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
3905
3906 /* EXP dst, s0.xxxx */
3907 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &inst->Dst[0], &src_xxxx,
3908 inst->Instruction.Saturate);
3909
3910 return TRUE;
3911 }
3912
3913
3914 /**
3915 * Emit code for TGSI_OPCODE_EXP instruction.
3916 */
3917 static boolean
3918 emit_exp(struct svga_shader_emitter_v10 *emit,
3919 const struct tgsi_full_instruction *inst)
3920 {
3921 /*
3922 * dst.x = 2 ^ floor(s0.x)
3923 * dst.y = s0.x - floor(s0.x)
3924 * dst.z = 2 ^ s0.x
3925 * dst.w = 1.0
3926 */
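/* Note that dst.y is the fractional part of s0.x, and floor() is
 * implemented with ROUND_NI (round toward negative infinity) below.
 */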
3927
3928 struct tgsi_full_src_register src_xxxx =
3929 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
3930 unsigned tmp = get_temp_index(emit);
3931 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3932 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3933
3934 /*
3935 * If dst and src are the same register, we need to create
3936 * a temporary for the result and insert an extra move.
3937 */
3938 unsigned tmp_move = get_temp_index(emit);
3939 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
3940 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
3941
3942 /* only use X component of temp reg */
3943 tmp_dst = writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
3944 tmp_src = scalar_src(&tmp_src, TGSI_SWIZZLE_X);
3945
3946 /* ROUND_NI tmp.x, s0.x */
3947 emit_instruction_op1(emit, VGPU10_OPCODE_ROUND_NI, &tmp_dst,
3948 &src_xxxx, FALSE); /* round to -infinity */
3949
3950 /* EXP dst.x, tmp.x */
3951 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
3952 struct tgsi_full_dst_register dst_x =
3953 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
3954
3955 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &dst_x, &tmp_src,
3956 inst->Instruction.Saturate);
3957 }
3958
3959 /* ADD dst.y, s0.x, -tmp */
3960 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
3961 struct tgsi_full_dst_register dst_y =
3962 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
3963 struct tgsi_full_src_register neg_tmp_src = negate_src(&tmp_src);
3964
3965 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &dst_y, &src_xxxx,
3966 &neg_tmp_src, inst->Instruction.Saturate);
3967 }
3968
3969 /* EXP dst.z, s0.x */
3970 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
3971 struct tgsi_full_dst_register dst_z =
3972 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
3973
3974 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &dst_z, &src_xxxx,
3975 inst->Instruction.Saturate);
3976 }
3977
3978 /* MOV dst.w, 1.0 */
3979 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
3980 struct tgsi_full_dst_register dst_w =
3981 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
3982 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
3983
3984 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one,
3985 FALSE);
3986 }
3987
3988 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
3989 FALSE);
3990
3991 free_temp_indexes(emit);
3992
3993 return TRUE;
3994 }
3995
3996
3997 /**
3998 * Emit code for TGSI_OPCODE_IF instruction.
3999 */
4000 static boolean
4001 emit_if(struct svga_shader_emitter_v10 *emit,
4002 const struct tgsi_full_instruction *inst)
4003 {
4004 VGPU10OpcodeToken0 opcode0;
4005
4006 /* The src register should be a scalar */
4007 assert(inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleY &&
4008 inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleZ &&
4009 inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleW);
4010
4011 /* The only special thing here is that we need to set the
4012 * VGPU10_INSTRUCTION_TEST_NONZERO flag since we want to test if
4013 * src.x is non-zero.
4014 */
4015 opcode0.value = 0;
4016 opcode0.opcodeType = VGPU10_OPCODE_IF;
4017 opcode0.testBoolean = VGPU10_INSTRUCTION_TEST_NONZERO;
4018
4019 begin_emit_instruction(emit);
4020 emit_dword(emit, opcode0.value);
4021 emit_src_register(emit, &inst->Src[0]);
4022 end_emit_instruction(emit);
4023
4024 return TRUE;
4025 }
4026
4027
4028 /**
4029 * Emit code for TGSI_OPCODE_KILL_IF instruction (kill fragment if any of
4030 * the register components are negative).
4031 */
4032 static boolean
4033 emit_kill_if(struct svga_shader_emitter_v10 *emit,
4034 const struct tgsi_full_instruction *inst)
4035 {
4036 unsigned tmp = get_temp_index(emit);
4037 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4038 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4039
4040 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4041
4042 struct tgsi_full_dst_register tmp_dst_x =
4043 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4044 struct tgsi_full_src_register tmp_src_xxxx =
4045 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4046
4047 /* tmp = src[0] < 0.0 */
4048 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[0],
4049 &zero, FALSE);
4050
4051 if (!same_swizzle_terms(&inst->Src[0])) {
4052 /* If the swizzle is not XXXX, YYYY, ZZZZ or WWWW we need to
4053 * logically OR the swizzle terms. Most uses of KILL_IF only
4054 * test one channel so it's good to avoid these extra steps.
4055 */
4056 struct tgsi_full_src_register tmp_src_yyyy =
4057 scalar_src(&tmp_src, TGSI_SWIZZLE_Y);
4058 struct tgsi_full_src_register tmp_src_zzzz =
4059 scalar_src(&tmp_src, TGSI_SWIZZLE_Z);
4060 struct tgsi_full_src_register tmp_src_wwww =
4061 scalar_src(&tmp_src, TGSI_SWIZZLE_W);
4062
4063 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
4064 &tmp_src_yyyy, FALSE);
4065 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
4066 &tmp_src_zzzz, FALSE);
4067 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
4068 &tmp_src_wwww, FALSE);
4069 }
4070
4071 begin_emit_instruction(emit);
4072 emit_discard_opcode(emit, TRUE); /* discard if tmp.x is non-zero */
4073 emit_src_register(emit, &tmp_src_xxxx);
4074 end_emit_instruction(emit);
4075
4076 free_temp_indexes(emit);
4077
4078 return TRUE;
4079 }
4080
4081
4082 /**
4083 * Emit code for TGSI_OPCODE_KILL instruction (unconditional discard).
4084 */
4085 static boolean
4086 emit_kill(struct svga_shader_emitter_v10 *emit,
4087 const struct tgsi_full_instruction *inst)
4088 {
4089 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4090
4091 /* DISCARD if 0.0 is zero (i.e. discard unconditionally) */
4092 begin_emit_instruction(emit);
4093 emit_discard_opcode(emit, FALSE);
4094 emit_src_register(emit, &zero);
4095 end_emit_instruction(emit);
4096
4097 return TRUE;
4098 }
4099
4100
4101 /**
4102 * Emit code for TGSI_OPCODE_LG2 instruction.
4103 */
4104 static boolean
4105 emit_lg2(struct svga_shader_emitter_v10 *emit,
4106 const struct tgsi_full_instruction *inst)
4107 {
4108 /* Note that TGSI_OPCODE_LG2 computes only one value from src.x
4109 * while VGPU10 computes four values.
4110 *
4111 * dst = LG2(src):
4112 * dst.xyzw = log2(src.x)
4113 */
4114
4115 struct tgsi_full_src_register src_xxxx =
4116 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4117 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4118
4119 /* LOG dst, s0.xxxx */
4120 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &inst->Dst[0], &src_xxxx,
4121 inst->Instruction.Saturate);
4122
4123 return TRUE;
4124 }
4125
4126
4127 /**
4128 * Emit code for TGSI_OPCODE_LIT instruction.
4129 */
4130 static boolean
4131 emit_lit(struct svga_shader_emitter_v10 *emit,
4132 const struct tgsi_full_instruction *inst)
4133 {
4134 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4135
4136 /*
4137 * If dst and src are the same register, we need to create
4138 * a temporary for the result and insert an extra move.
4139 */
4140 unsigned tmp_move = get_temp_index(emit);
4141 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
4142 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
4143
4144 /*
4145 * dst.x = 1
4146 * dst.y = max(src.x, 0)
4147 * dst.z = (src.x > 0) ? max(src.y, 0) ^ clamp(src.w, -128, 128) : 0
4148 * dst.w = 1
4149 */
4150
4151 /* MOV dst.x, 1.0 */
4152 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
4153 struct tgsi_full_dst_register dst_x =
4154 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
4155 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &one, FALSE);
4156 }
4157
4158 /* MOV dst.w, 1.0 */
4159 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
4160 struct tgsi_full_dst_register dst_w =
4161 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
4162 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one, FALSE);
4163 }
4164
4165 /* MAX dst.y, src.x, 0.0 */
4166 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
4167 struct tgsi_full_dst_register dst_y =
4168 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
4169 struct tgsi_full_src_register zero =
4170 make_immediate_reg_float(emit, 0.0f);
4171 struct tgsi_full_src_register src_xxxx =
4172 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4173 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4174
4175 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &dst_y, &src_xxxx,
4176 &zero, inst->Instruction.Saturate);
4177 }
4178
4179 /*
4180 * tmp1 = clamp(src.w, -128, 128);
4181 * MAX tmp1, src.w, -128
4182 * MIN tmp1, tmp1, 128
4183 *
4184 * tmp2 = max(src.y, 0);
4185 * MAX tmp2, src.y, 0
4186 *
4187 * tmp1 = pow(tmp2, tmp1);
4188 * LOG tmp2, tmp2
4189 * MUL tmp1, tmp2, tmp1
4190 * EXP tmp1, tmp1
4191 *
4192 * tmp1 = (src.w == 0) ? 1 : tmp1;
4193 * EQ tmp2, 0, src.w
4194 * MOVC tmp1, tmp2, 1.0, tmp1
4195 *
4196 * dst.z = (0 < src.x) ? tmp1 : 0;
4197 * LT tmp2, 0, src.x
4198 * MOVC dst.z, tmp2, tmp1, 0.0
4199 */
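/* The power function itself is computed as
 *    pow(tmp2, tmp1) = exp2(tmp1 * log2(tmp2))
 * via the LOG, MUL and EXP instructions below.
 */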
4200 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
4201 struct tgsi_full_dst_register dst_z =
4202 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
4203
4204 unsigned tmp1 = get_temp_index(emit);
4205 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4206 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4207 unsigned tmp2 = get_temp_index(emit);
4208 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4209 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4210
4211 struct tgsi_full_src_register src_xxxx =
4212 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
4213 struct tgsi_full_src_register src_yyyy =
4214 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
4215 struct tgsi_full_src_register src_wwww =
4216 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
4217
4218 struct tgsi_full_src_register zero =
4219 make_immediate_reg_float(emit, 0.0f);
4220 struct tgsi_full_src_register lowerbound =
4221 make_immediate_reg_float(emit, -128.0f);
4222 struct tgsi_full_src_register upperbound =
4223 make_immediate_reg_float(emit, 128.0f);
4224
4225 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &tmp1_dst, &src_wwww,
4226 &lowerbound, FALSE);
4227 emit_instruction_op2(emit, VGPU10_OPCODE_MIN, &tmp1_dst, &tmp1_src,
4228 &upperbound, FALSE);
4229 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &tmp2_dst, &src_yyyy,
4230 &zero, FALSE);
4231
4232 /* POW tmp1, tmp2, tmp1 */
4233 /* LOG tmp2, tmp2 */
4234 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp2_dst, &tmp2_src,
4235 FALSE);
4236
4237 /* MUL tmp1, tmp2, tmp1 */
4238 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp1_dst, &tmp2_src,
4239 &tmp1_src, FALSE);
4240
4241 /* EXP tmp1, tmp1 */
4242 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &tmp1_dst, &tmp1_src,
4243 FALSE);
4244
4245 /* EQ tmp2, 0, src.w */
4246 emit_instruction_op2(emit, VGPU10_OPCODE_EQ, &tmp2_dst, &zero,
4247 &src_wwww, FALSE);
4248 /* MOVC tmp1, tmp2, 1.0, tmp1 */
4249 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &tmp1_dst,
4250 &tmp2_src, &one, &tmp1_src, FALSE);
4251
4252 /* LT tmp2, 0, src.x */
4253 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp2_dst, &zero,
4254 &src_xxxx, FALSE);
4255 /* MOVC dst.z, tmp2, tmp1, 0.0 */
4256 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &dst_z,
4257 &tmp2_src, &tmp1_src, &zero, FALSE);
4258 }
4259
4260 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
4261 FALSE);
4262 free_temp_indexes(emit);
4263
4264 return TRUE;
4265 }
4266
4267
4268 /**
4269 * Emit Level Of Detail Query (LODQ) instruction.
4270 */
4271 static boolean
4272 emit_lodq(struct svga_shader_emitter_v10 *emit,
4273 const struct tgsi_full_instruction *inst)
4274 {
4275 const uint unit = inst->Src[1].Register.Index;
4276
4277 assert(emit->version >= 41);
4278
4279 /* LOD dst, coord, resource, sampler */
4280 begin_emit_instruction(emit);
4281 emit_opcode(emit, VGPU10_OPCODE_LOD, FALSE);
4282 emit_dst_register(emit, &inst->Dst[0]);
4283 emit_src_register(emit, &inst->Src[0]); /* coord */
4284 emit_resource_register(emit, unit);
4285 emit_sampler_register(emit, unit);
4286 end_emit_instruction(emit);
4287
4288 return TRUE;
4289 }
4290
4291
4292 /**
4293 * Emit code for TGSI_OPCODE_LOG instruction.
4294 */
4295 static boolean
4296 emit_log(struct svga_shader_emitter_v10 *emit,
4297 const struct tgsi_full_instruction *inst)
4298 {
4299 /*
4300 * dst.x = floor(lg2(abs(s0.x)))
4301 * dst.y = abs(s0.x) / (2 ^ floor(lg2(abs(s0.x))))
4302 * dst.z = lg2(abs(s0.x))
4303 * dst.w = 1.0
4304 */
4305
4306 struct tgsi_full_src_register src_xxxx =
4307 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
4308 unsigned tmp = get_temp_index(emit);
4309 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4310 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4311 struct tgsi_full_src_register abs_src_xxxx = absolute_src(&src_xxxx);
4312
4313 /* only use X component of temp reg */
4314 tmp_dst = writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4315 tmp_src = scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4316
4317 /* LOG tmp.x, abs(s0.x) */
4318 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XYZ) {
4319 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp_dst,
4320 &abs_src_xxxx, FALSE);
4321 }
4322
4323 /* MOV dst.z, tmp.x */
4324 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
4325 struct tgsi_full_dst_register dst_z =
4326 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_Z);
4327
4328 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_z,
4329 &tmp_src, inst->Instruction.Saturate);
4330 }
4331
4332 /* ROUND_NI tmp.x, tmp.x (floor) */
4333 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY) {
4334 emit_instruction_op1(emit, VGPU10_OPCODE_ROUND_NI, &tmp_dst,
4335 &tmp_src, FALSE);
4336 }
4337
4338 /* MOV dst.x, tmp.x */
4339 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
4340 struct tgsi_full_dst_register dst_x =
4341 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_X);
4342
4343 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &tmp_src,
4344 inst->Instruction.Saturate);
4345 }
4346
4347 /* EXP tmp.x, tmp.x */
4348 /* DIV dst.y, abs(s0.x), tmp.x */
4349 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
4350 struct tgsi_full_dst_register dst_y =
4351 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_Y);
4352
4353 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &tmp_dst, &tmp_src,
4354 FALSE);
4355 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &dst_y, &abs_src_xxxx,
4356 &tmp_src, inst->Instruction.Saturate);
4357 }
4358
4359 /* MOV dst.w, 1.0 */
4360 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
4361 struct tgsi_full_dst_register dst_w =
4362 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_W);
4363 struct tgsi_full_src_register one =
4364 make_immediate_reg_float(emit, 1.0f);
4365
4366 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one, FALSE);
4367 }
4368
4369 free_temp_indexes(emit);
4370
4371 return TRUE;
4372 }
4373
4374
4375 /**
4376 * Emit code for TGSI_OPCODE_LRP instruction.
4377 */
4378 static boolean
4379 emit_lrp(struct svga_shader_emitter_v10 *emit,
4380 const struct tgsi_full_instruction *inst)
4381 {
4382 /* dst = LRP(s0, s1, s2):
4383 * dst = s0 * (s1 - s2) + s2
4384 * Translates into:
4385 * ADD tmp, s1, -s2;       tmp = s1 - s2
4386 * MAD dst, s0, tmp, s2;   dst = s0 * tmp + s2
4387 */
4388 unsigned tmp = get_temp_index(emit);
4389 struct tgsi_full_src_register src_tmp = make_src_temp_reg(tmp);
4390 struct tgsi_full_dst_register dst_tmp = make_dst_temp_reg(tmp);
4391 struct tgsi_full_src_register neg_src2 = negate_src(&inst->Src[2]);
4392
4393 /* ADD tmp, s1, -s2 */
4394 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &dst_tmp,
4395 &inst->Src[1], &neg_src2, FALSE);
4396
4397 /* MAD dst, s0, tmp, s2 */
4398 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &inst->Dst[0],
4399 &inst->Src[0], &src_tmp, &inst->Src[2],
4400 inst->Instruction.Saturate);
4401
4402 free_temp_indexes(emit);
4403
4404 return TRUE;
4405 }
4406
4407
4408 /**
4409 * Emit code for TGSI_OPCODE_POW instruction.
4410 */
4411 static boolean
4412 emit_pow(struct svga_shader_emitter_v10 *emit,
4413 const struct tgsi_full_instruction *inst)
4414 {
4415 /* Note that TGSI_OPCODE_POW computes only one value from src0.x and
4416 * src1.x while VGPU10 computes four values.
4417 *
4418 * dst = POW(src0, src1):
4419 * dst.xyzw = src0.x ^ src1.x
4420 */
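/* The sequence below computes dst = exp2(src1.x * log2(src0.x)).  As with
 * the usual pow() definition, the result is undefined for src0.x < 0 since
 * log2 of a negative value is undefined.
 */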
4421 unsigned tmp = get_temp_index(emit);
4422 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4423 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4424 struct tgsi_full_src_register src0_xxxx =
4425 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4426 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4427 struct tgsi_full_src_register src1_xxxx =
4428 swizzle_src(&inst->Src[1], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4429 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4430
4431 /* LOG tmp, s0.xxxx */
4432 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp_dst, &src0_xxxx,
4433 FALSE);
4434
4435 /* MUL tmp, tmp, s1.xxxx */
4436 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst, &tmp_src,
4437 &src1_xxxx, FALSE);
4438
4439 /* EXP dst, tmp */
4440 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &inst->Dst[0],
4441 &tmp_src, inst->Instruction.Saturate);
4442
4443 /* free tmp */
4444 free_temp_indexes(emit);
4445
4446 return TRUE;
4447 }
4448
4449
4450 /**
4451 * Emit code for TGSI_OPCODE_RCP (reciprocal) instruction.
4452 */
4453 static boolean
4454 emit_rcp(struct svga_shader_emitter_v10 *emit,
4455 const struct tgsi_full_instruction *inst)
4456 {
4457 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4458
4459 unsigned tmp = get_temp_index(emit);
4460 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4461 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4462
4463 struct tgsi_full_dst_register tmp_dst_x =
4464 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4465 struct tgsi_full_src_register tmp_src_xxxx =
4466 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4467
4468 /* DIV tmp.x, 1.0, s0 */
4469 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &tmp_dst_x, &one,
4470 &inst->Src[0], FALSE);
4471
4472 /* MOV dst, tmp.xxxx */
4473 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4474 &tmp_src_xxxx, inst->Instruction.Saturate);
4475
4476 free_temp_indexes(emit);
4477
4478 return TRUE;
4479 }
4480
4481
4482 /**
4483 * Emit code for TGSI_OPCODE_RSQ instruction.
4484 */
4485 static boolean
4486 emit_rsq(struct svga_shader_emitter_v10 *emit,
4487 const struct tgsi_full_instruction *inst)
4488 {
4489 /* dst = RSQ(src):
4490 * dst.xyzw = 1 / sqrt(src.x)
4491 * Translates into:
4492 * RSQ tmp, src.x
4493 * MOV dst, tmp.xxxx
4494 */
4495
4496 unsigned tmp = get_temp_index(emit);
4497 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4498 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4499
4500 struct tgsi_full_dst_register tmp_dst_x =
4501 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4502 struct tgsi_full_src_register tmp_src_xxxx =
4503 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4504
4505 /* RSQ tmp, src.x */
4506 emit_instruction_op1(emit, VGPU10_OPCODE_RSQ, &tmp_dst_x,
4507 &inst->Src[0], FALSE);
4508
4509 /* MOV dst, tmp.xxxx */
4510 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4511 &tmp_src_xxxx, inst->Instruction.Saturate);
4512
4513 /* free tmp */
4514 free_temp_indexes(emit);
4515
4516 return TRUE;
4517 }
4518
4519
4520 /**
4521 * Emit code for TGSI_OPCODE_SEQ (Set Equal) instruction.
4522 */
4523 static boolean
4524 emit_seq(struct svga_shader_emitter_v10 *emit,
4525 const struct tgsi_full_instruction *inst)
4526 {
4527 /* dst = SEQ(s0, s1):
4528 * dst = s0 == s1 ? 1.0 : 0.0 (per component)
4529 * Translates into:
4530 * EQ tmp, s0, s1; tmp = s0 == s1 ? 0xffffffff : 0 (per comp)
4531 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4532 */
4533 unsigned tmp = get_temp_index(emit);
4534 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4535 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4536 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4537 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4538
4539 /* EQ tmp, s0, s1 */
4540 emit_instruction_op2(emit, VGPU10_OPCODE_EQ, &tmp_dst, &inst->Src[0],
4541 &inst->Src[1], FALSE);
4542
4543 /* MOVC dst, tmp, one, zero */
4544 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4545 &one, &zero, FALSE);
4546
4547 free_temp_indexes(emit);
4548
4549 return TRUE;
4550 }
4551
4552
4553 /**
4554 * Emit code for TGSI_OPCODE_SGE (Set Greater than or Equal) instruction.
4555 */
4556 static boolean
4557 emit_sge(struct svga_shader_emitter_v10 *emit,
4558 const struct tgsi_full_instruction *inst)
4559 {
4560 /* dst = SGE(s0, s1):
4561 * dst = s0 >= s1 ? 1.0 : 0.0 (per component)
4562 * Translates into:
4563 * GE tmp, s0, s1; tmp = s0 >= s1 ? 0xffffffff : 0 (per comp)
4564 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4565 */
4566 unsigned tmp = get_temp_index(emit);
4567 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4568 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4569 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4570 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4571
4572 /* GE tmp, s0, s1 */
4573 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &tmp_dst, &inst->Src[0],
4574 &inst->Src[1], FALSE);
4575
4576 /* MOVC dst, tmp, one, zero */
4577 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4578 &one, &zero, FALSE);
4579
4580 free_temp_indexes(emit);
4581
4582 return TRUE;
4583 }
4584
4585
4586 /**
4587 * Emit code for TGSI_OPCODE_SGT (Set Greater than) instruction.
4588 */
4589 static boolean
4590 emit_sgt(struct svga_shader_emitter_v10 *emit,
4591 const struct tgsi_full_instruction *inst)
4592 {
4593 /* dst = SGT(s0, s1):
4594 * dst = s0 > s1 ? 1.0 : 0.0 (per component)
4595 * Translates into:
4596 * LT tmp, s1, s0; tmp = s1 < s0 ? 0xffffffff : 0 (per comp)
4597 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4598 */
4599 unsigned tmp = get_temp_index(emit);
4600 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4601 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4602 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4603 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4604
4605 /* LT tmp, s1, s0 */
4606 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[1],
4607 &inst->Src[0], FALSE);
4608
4609 /* MOVC dst, tmp, one, zero */
4610 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4611 &one, &zero, FALSE);
4612
4613 free_temp_indexes(emit);
4614
4615 return TRUE;
4616 }
4617
4618
4619 /**
4620 * Emit code for TGSI_OPCODE_SIN and TGSI_OPCODE_COS instructions.
4621 */
4622 static boolean
4623 emit_sincos(struct svga_shader_emitter_v10 *emit,
4624 const struct tgsi_full_instruction *inst)
4625 {
4626 unsigned tmp = get_temp_index(emit);
4627 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4628 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4629
4630 struct tgsi_full_src_register tmp_src_xxxx =
4631 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4632 struct tgsi_full_dst_register tmp_dst_x =
4633 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4634
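/* The VGPU10 SINCOS instruction has two destination registers: the first
 * receives the sine and the second the cosine.  Write the result we need
 * to tmp.x and emit a null register for the other destination.
 */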
4635 begin_emit_instruction(emit);
4636 emit_opcode(emit, VGPU10_OPCODE_SINCOS, FALSE);
4637
4638 if (inst->Instruction.Opcode == TGSI_OPCODE_SIN) {
4640 emit_dst_register(emit, &tmp_dst_x); /* first destination register */
4641 emit_null_dst_register(emit); /* second destination register */
4642 }
4643 else {
4644 emit_null_dst_register(emit);
4645 emit_dst_register(emit, &tmp_dst_x);
4646 }
4647
4648 emit_src_register(emit, &inst->Src[0]);
4649 end_emit_instruction(emit);
4650
4651 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4652 &tmp_src_xxxx, inst->Instruction.Saturate);
4653
4654 free_temp_indexes(emit);
4655
4656 return TRUE;
4657 }
4658
4659
4660 /**
4661 * Emit code for TGSI_OPCODE_SLE (Set Less than or Equal) instruction.
4662 */
4663 static boolean
4664 emit_sle(struct svga_shader_emitter_v10 *emit,
4665 const struct tgsi_full_instruction *inst)
4666 {
4667 /* dst = SLE(s0, s1):
4668 * dst = s0 <= s1 ? 1.0 : 0.0 (per component)
4669 * Translates into:
4670 * GE tmp, s1, s0; tmp = s1 >= s0 ? 0xffffffff : 0 (per comp)
4671 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4672 */
4673 unsigned tmp = get_temp_index(emit);
4674 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4675 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4676 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4677 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4678
4679 /* GE tmp, s1, s0 */
4680 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &tmp_dst, &inst->Src[1],
4681 &inst->Src[0], FALSE);
4682
4683 /* MOVC dst, tmp, one, zero */
4684 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4685 &one, &zero, FALSE);
4686
4687 free_temp_indexes(emit);
4688
4689 return TRUE;
4690 }
4691
4692
4693 /**
4694 * Emit code for TGSI_OPCODE_SLT (Set Less than) instruction.
4695 */
4696 static boolean
4697 emit_slt(struct svga_shader_emitter_v10 *emit,
4698 const struct tgsi_full_instruction *inst)
4699 {
4700 /* dst = SLT(s0, s1):
4701 * dst = s0 < s1 ? 1.0 : 0.0 (per component)
4702 * Translates into:
4703 * LT tmp, s0, s1; tmp = s0 < s1 ? 0xffffffff : 0 (per comp)
4704 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4705 */
4706 unsigned tmp = get_temp_index(emit);
4707 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4708 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4709 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4710 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4711
4712 /* LT tmp, s0, s1 */
4713 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[0],
4714 &inst->Src[1], FALSE);
4715
4716 /* MOVC dst, tmp, one, zero */
4717 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4718 &one, &zero, FALSE);
4719
4720 free_temp_indexes(emit);
4721
4722 return TRUE;
4723 }
4724
4725
4726 /**
4727 * Emit code for TGSI_OPCODE_SNE (Set Not Equal) instruction.
4728 */
4729 static boolean
4730 emit_sne(struct svga_shader_emitter_v10 *emit,
4731 const struct tgsi_full_instruction *inst)
4732 {
4733 /* dst = SNE(s0, s1):
4734 * dst = s0 != s1 ? 1.0 : 0.0 (per component)
4735 * Translates into:
4736 * NE tmp, s0, s1; tmp = s0 != s1 ? 0xffffffff : 0 (per comp)
4737 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4738 */
4739 unsigned tmp = get_temp_index(emit);
4740 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4741 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4742 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4743 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4744
4745 /* NE tmp, s0, s1 */
4746 emit_instruction_op2(emit, VGPU10_OPCODE_NE, &tmp_dst, &inst->Src[0],
4747 &inst->Src[1], FALSE);
4748
4749 /* MOVC dst, tmp, one, zero */
4750 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4751 &one, &zero, FALSE);
4752
4753 free_temp_indexes(emit);
4754
4755 return TRUE;
4756 }
4757
4758
4759 /**
4760 * Emit code for TGSI_OPCODE_SSG (Set Sign) instruction.
4761 */
4762 static boolean
4763 emit_ssg(struct svga_shader_emitter_v10 *emit,
4764 const struct tgsi_full_instruction *inst)
4765 {
4766 /* dst.x = (src.x > 0.0) ? 1.0 : (src.x < 0.0) ? -1.0 : 0.0
4767 * dst.y = (src.y > 0.0) ? 1.0 : (src.y < 0.0) ? -1.0 : 0.0
4768 * dst.z = (src.z > 0.0) ? 1.0 : (src.z < 0.0) ? -1.0 : 0.0
4769 * dst.w = (src.w > 0.0) ? 1.0 : (src.w < 0.0) ? -1.0 : 0.0
4770 * Translates into:
4771 * LT tmp1, src, zero; tmp1 = src < zero ? 0xffffffff : 0 (per comp)
4772 * MOVC tmp2, tmp1, -1.0, 0.0; tmp2 = tmp1 ? -1.0 : 0.0 (per component)
4773 * LT tmp1, zero, src; tmp1 = zero < src ? 0xffffffff : 0 (per comp)
4774 * MOVC dst, tmp1, 1.0, tmp2; dst = tmp1 ? 1.0 : tmp2 (per component)
4775 */
4776 struct tgsi_full_src_register zero =
4777 make_immediate_reg_float(emit, 0.0f);
4778 struct tgsi_full_src_register one =
4779 make_immediate_reg_float(emit, 1.0f);
4780 struct tgsi_full_src_register neg_one =
4781 make_immediate_reg_float(emit, -1.0f);
4782
4783 unsigned tmp1 = get_temp_index(emit);
4784 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4785 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4786
4787 unsigned tmp2 = get_temp_index(emit);
4788 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4789 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4790
4791 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp1_dst, &inst->Src[0],
4792 &zero, FALSE);
4793 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &tmp2_dst, &tmp1_src,
4794 &neg_one, &zero, FALSE);
4795 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp1_dst, &zero,
4796 &inst->Src[0], FALSE);
4797 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp1_src,
4798 &one, &tmp2_src, FALSE);
4799
4800 free_temp_indexes(emit);
4801
4802 return TRUE;
4803 }
4804
4805
4806 /**
4807 * Emit code for TGSI_OPCODE_ISSG (Integer Set Sign) instruction.
4808 */
4809 static boolean
4810 emit_issg(struct svga_shader_emitter_v10 *emit,
4811 const struct tgsi_full_instruction *inst)
4812 {
4813 /* dst.x = (src.x > 0) ? 1 : (src.x < 0) ? -1 : 0
4814 * dst.y = (src.y > 0) ? 1 : (src.y < 0) ? -1 : 0
4815 * dst.z = (src.z > 0) ? 1 : (src.z < 0) ? -1 : 0
4816 * dst.w = (src.w > 0) ? 1 : (src.w < 0) ? -1 : 0
4817 * Translates into:
4818 * ILT tmp1, src, 0 tmp1 = src < 0 ? -1 : 0 (per component)
4819 * ILT tmp2, 0, src tmp2 = 0 < src ? -1 : 0 (per component)
4820 * IADD dst, tmp1, neg(tmp2) dst = tmp1 - tmp2 (per component)
4821 */
4822 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4823
4824 unsigned tmp1 = get_temp_index(emit);
4825 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4826 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4827
4828 unsigned tmp2 = get_temp_index(emit);
4829 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4830 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4831
4832 struct tgsi_full_src_register neg_tmp2 = negate_src(&tmp2_src);
4833
4834 emit_instruction_op2(emit, VGPU10_OPCODE_ILT, &tmp1_dst,
4835 &inst->Src[0], &zero, FALSE);
4836 emit_instruction_op2(emit, VGPU10_OPCODE_ILT, &tmp2_dst,
4837 &zero, &inst->Src[0], FALSE);
4838 emit_instruction_op2(emit, VGPU10_OPCODE_IADD, &inst->Dst[0],
4839 &tmp1_src, &neg_tmp2, FALSE);
4840
4841 free_temp_indexes(emit);
4842
4843 return TRUE;
4844 }
4845
4846
4847 /**
4848 * Emit a comparison instruction. The dest register will get
4849 * 0 or ~0 values depending on the outcome of comparing src0 to src1.
4850 */
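/* VGPU10 only provides LT, GE, EQ and NE comparison opcodes, so
 * SVGA3D_CMP_LESSEQUAL and SVGA3D_CMP_GREATER are emitted as GE and LT
 * with the source operands swapped (see swapSrc below).
 */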
4851 static void
4852 emit_comparison(struct svga_shader_emitter_v10 *emit,
4853 SVGA3dCmpFunc func,
4854 const struct tgsi_full_dst_register *dst,
4855 const struct tgsi_full_src_register *src0,
4856 const struct tgsi_full_src_register *src1)
4857 {
4858 struct tgsi_full_src_register immediate;
4859 VGPU10OpcodeToken0 opcode0;
4860 boolean swapSrc = FALSE;
4861
4862 /* Sanity checks for svga vs. gallium enums */
4863 STATIC_ASSERT(SVGA3D_CMP_LESS == (PIPE_FUNC_LESS + 1));
4864 STATIC_ASSERT(SVGA3D_CMP_GREATEREQUAL == (PIPE_FUNC_GEQUAL + 1));
4865
4866 opcode0.value = 0;
4867
4868 switch (func) {
4869 case SVGA3D_CMP_NEVER:
4870 immediate = make_immediate_reg_int(emit, 0);
4871 /* MOV dst, {0} */
4872 begin_emit_instruction(emit);
4873 emit_dword(emit, VGPU10_OPCODE_MOV);
4874 emit_dst_register(emit, dst);
4875 emit_src_register(emit, &immediate);
4876 end_emit_instruction(emit);
4877 return;
4878 case SVGA3D_CMP_ALWAYS:
4879 immediate = make_immediate_reg_int(emit, -1);
4880 /* MOV dst, {-1} */
4881 begin_emit_instruction(emit);
4882 emit_dword(emit, VGPU10_OPCODE_MOV);
4883 emit_dst_register(emit, dst);
4884 emit_src_register(emit, &immediate);
4885 end_emit_instruction(emit);
4886 return;
4887 case SVGA3D_CMP_LESS:
4888 opcode0.opcodeType = VGPU10_OPCODE_LT;
4889 break;
4890 case SVGA3D_CMP_EQUAL:
4891 opcode0.opcodeType = VGPU10_OPCODE_EQ;
4892 break;
4893 case SVGA3D_CMP_LESSEQUAL:
4894 opcode0.opcodeType = VGPU10_OPCODE_GE;
4895 swapSrc = TRUE;
4896 break;
4897 case SVGA3D_CMP_GREATER:
4898 opcode0.opcodeType = VGPU10_OPCODE_LT;
4899 swapSrc = TRUE;
4900 break;
4901 case SVGA3D_CMP_NOTEQUAL:
4902 opcode0.opcodeType = VGPU10_OPCODE_NE;
4903 break;
4904 case SVGA3D_CMP_GREATEREQUAL:
4905 opcode0.opcodeType = VGPU10_OPCODE_GE;
4906 break;
4907 default:
4908 assert(!"Unexpected comparison mode");
4909 opcode0.opcodeType = VGPU10_OPCODE_EQ;
4910 }
4911
4912 begin_emit_instruction(emit);
4913 emit_dword(emit, opcode0.value);
4914 emit_dst_register(emit, dst);
4915 if (swapSrc) {
4916 emit_src_register(emit, src1);
4917 emit_src_register(emit, src0);
4918 }
4919 else {
4920 emit_src_register(emit, src0);
4921 emit_src_register(emit, src1);
4922 }
4923 end_emit_instruction(emit);
4924 }
4925
4926
4927 /**
4928 * Get texel/address offsets for a texture instruction.
4929 */
4930 static void
4931 get_texel_offsets(const struct svga_shader_emitter_v10 *emit,
4932 const struct tgsi_full_instruction *inst, int offsets[3])
4933 {
4934 if (inst->Texture.NumOffsets == 1) {
4935 /* According to the OpenGL Shading Language spec, the offsets are only
4936 * fetched from a previously-declared immediate/literal.
4937 */
4938 const struct tgsi_texture_offset *off = inst->TexOffsets;
4939 const unsigned index = off[0].Index;
4940 const unsigned swizzleX = off[0].SwizzleX;
4941 const unsigned swizzleY = off[0].SwizzleY;
4942 const unsigned swizzleZ = off[0].SwizzleZ;
4943 const union tgsi_immediate_data *imm = emit->immediates[index];
4944
4945 assert(inst->TexOffsets[0].File == TGSI_FILE_IMMEDIATE);
4946
4947 offsets[0] = imm[swizzleX].Int;
4948 offsets[1] = imm[swizzleY].Int;
4949 offsets[2] = imm[swizzleZ].Int;
4950 }
4951 else {
4952 offsets[0] = offsets[1] = offsets[2] = 0;
4953 }
4954 }
4955
4956
4957 /**
4958 * Set up the coordinate register for texture sampling.
4959 * When we're sampling from a RECT texture we have to scale the
4960 * unnormalized coordinate to a normalized coordinate.
4961 * We do that by multiplying the coordinate by an "extra" constant.
4962 * An alternative would be to use the RESINFO instruction to query the
4963 * texture's size.
4964 */
4965 static struct tgsi_full_src_register
4966 setup_texcoord(struct svga_shader_emitter_v10 *emit,
4967 unsigned unit,
4968 const struct tgsi_full_src_register *coord)
4969 {
4970 if (emit->sampler_view[unit] && emit->key.tex[unit].unnormalized) {
4971 unsigned scale_index = emit->texcoord_scale_index[unit];
4972 unsigned tmp = get_temp_index(emit);
4973 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4974 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4975 struct tgsi_full_src_register scale_src = make_src_const_reg(scale_index);
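/* The scale constant at texcoord_scale_index[unit] is assumed to hold the
 * reciprocal texture dimensions (e.g. {1/width, 1/height, 1, 1}) provided
 * by the driver's constant buffer setup.
 */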
4976
4977 if (emit->key.tex[unit].texel_bias) {
4978 /* A 0.0001 offset is added to fix a texture coordinate rounding
4979 * issue.  This fixes the piglit test fbo-blit-scaled-linear. */
4980 struct tgsi_full_src_register offset =
4981 make_immediate_reg_float(emit, 0.0001f);
4982
4983 /* ADD tmp, coord, offset */
4984 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp_dst,
4985 coord, &offset, FALSE);
4986 /* MUL tmp, tmp, scale */
4987 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst,
4988 &tmp_src, &scale_src, FALSE);
4989 }
4990 else {
4991 /* MUL tmp, coord, const[] */
4992 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst,
4993 coord, &scale_src, FALSE);
4994 }
4995 return tmp_src;
4996 }
4997 else {
4998 /* use texcoord as-is */
4999 return *coord;
5000 }
5001 }
5002
5003
5004 /**
5005 * For SAMPLE_C instructions, emit the extra src register which indicates
5006 * the reference/comparison value.
5007 */
5008 static void
5009 emit_tex_compare_refcoord(struct svga_shader_emitter_v10 *emit,
5010 enum tgsi_texture_type target,
5011 const struct tgsi_full_src_register *coord)
5012 {
5013 struct tgsi_full_src_register coord_src_ref;
5014 int component;
5015
5016 assert(tgsi_is_shadow_target(target));
5017
5018 component = tgsi_util_get_shadow_ref_src_index(target) % 4;
5019 assert(component >= 0);
5020
5021 coord_src_ref = scalar_src(coord, component);
5022
5023 emit_src_register(emit, &coord_src_ref);
5024 }
5025
5026
5027 /**
5028 * Info for implementing texture swizzles.
5029 * The begin_tex_swizzle(), get_tex_swizzle_dst() and end_tex_swizzle()
5030 * functions use this to encapsulate the extra steps needed to perform
5031 * a texture swizzle, or shadow/depth comparisons.
5032 * The shadow/depth comparison is only done here if for the cases where
5033 * there's no VGPU10 opcode (like texture bias lookup w/ shadow compare).
5034 */
5035 struct tex_swizzle_info
5036 {
5037 boolean swizzled;
5038 boolean shadow_compare;
5039 unsigned unit;
5040 enum tgsi_texture_type texture_target; /**< TGSI_TEXTURE_x */
5041 struct tgsi_full_src_register tmp_src;
5042 struct tgsi_full_dst_register tmp_dst;
5043 const struct tgsi_full_dst_register *inst_dst;
5044 const struct tgsi_full_src_register *coord_src;
5045 };
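/* Typical usage, as in emit_sample() below:
 *
 *    struct tex_swizzle_info swz_info;
 *    begin_tex_swizzle(emit, sampler_unit, inst, FALSE, &swz_info);
 *    ... emit the SAMPLE instruction, writing its result to
 *        get_tex_swizzle_dst(&swz_info) ...
 *    end_tex_swizzle(emit, &swz_info);
 */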
5046
5047
5048 /**
5049 * Do setup for handling texture swizzles or shadow compares.
5050 * \param unit the texture unit
5051 * \param inst the TGSI texture instruction
5052 * \param shadow_compare do shadow/depth comparison?
5053 * \param swz returns the swizzle info
5054 */
5055 static void
5056 begin_tex_swizzle(struct svga_shader_emitter_v10 *emit,
5057 unsigned unit,
5058 const struct tgsi_full_instruction *inst,
5059 boolean shadow_compare,
5060 struct tex_swizzle_info *swz)
5061 {
5062 swz->swizzled = (emit->key.tex[unit].swizzle_r != TGSI_SWIZZLE_X ||
5063 emit->key.tex[unit].swizzle_g != TGSI_SWIZZLE_Y ||
5064 emit->key.tex[unit].swizzle_b != TGSI_SWIZZLE_Z ||
5065 emit->key.tex[unit].swizzle_a != TGSI_SWIZZLE_W);
5066
5067 swz->shadow_compare = shadow_compare;
5068 swz->texture_target = inst->Texture.Texture;
5069
5070 if (swz->swizzled || shadow_compare) {
5071 /* Allocate temp register for the result of the SAMPLE instruction
5072 * and the source of the MOV/compare/swizzle instructions.
5073 */
5074 unsigned tmp = get_temp_index(emit);
5075 swz->tmp_src = make_src_temp_reg(tmp);
5076 swz->tmp_dst = make_dst_temp_reg(tmp);
5077
5078 swz->unit = unit;
5079 }
5080 swz->inst_dst = &inst->Dst[0];
5081 swz->coord_src = &inst->Src[0];
5082
5083 emit->fs.shadow_compare_units |= shadow_compare << unit;
5084 }
5085
5086
5087 /**
5088 * Returns the register to put the SAMPLE instruction results into.
5089 * This will either be the original instruction dst reg (if no swizzle
5090 * and no shadow comparison) or a temporary reg otherwise.
5091 */
5092 static const struct tgsi_full_dst_register *
5093 get_tex_swizzle_dst(const struct tex_swizzle_info *swz)
5094 {
5095 return (swz->swizzled || swz->shadow_compare)
5096 ? &swz->tmp_dst : swz->inst_dst;
5097 }
5098
5099
5100 /**
5101 * This emits the MOV instruction that actually implements a texture swizzle
5102 * and/or shadow comparison.
5103 */
5104 static void
5105 end_tex_swizzle(struct svga_shader_emitter_v10 *emit,
5106 const struct tex_swizzle_info *swz)
5107 {
5108 if (swz->shadow_compare) {
5109 /* Emit extra instructions to compare the fetched texel value against
5110 * a texture coordinate component. The result of the comparison
5111 * is 0.0 or 1.0.
5112 */
5113 struct tgsi_full_src_register coord_src;
5114 struct tgsi_full_src_register texel_src =
5115 scalar_src(&swz->tmp_src, TGSI_SWIZZLE_X);
5116 struct tgsi_full_src_register one =
5117 make_immediate_reg_float(emit, 1.0f);
5118 /* convert gallium comparison func to SVGA comparison func */
5119 SVGA3dCmpFunc compare_func = emit->key.tex[swz->unit].compare_func + 1;
5120
5121 assert(emit->unit == PIPE_SHADER_FRAGMENT);
5122
5123 int component =
5124 tgsi_util_get_shadow_ref_src_index(swz->texture_target) % 4;
5125 assert(component >= 0);
5126 coord_src = scalar_src(swz->coord_src, component);
5127
5128 /* COMPARE tmp, coord, texel */
5129 emit_comparison(emit, compare_func,
5130 &swz->tmp_dst, &coord_src, &texel_src);
5131
5132 /* AND dest, tmp, {1.0} */
5133 begin_emit_instruction(emit);
5134 emit_opcode(emit, VGPU10_OPCODE_AND, FALSE);
5135 if (swz->swizzled) {
5136 emit_dst_register(emit, &swz->tmp_dst);
5137 }
5138 else {
5139 emit_dst_register(emit, swz->inst_dst);
5140 }
5141 emit_src_register(emit, &swz->tmp_src);
5142 emit_src_register(emit, &one);
5143 end_emit_instruction(emit);
5144 }
5145
5146 if (swz->swizzled) {
5147 unsigned swz_r = emit->key.tex[swz->unit].swizzle_r;
5148 unsigned swz_g = emit->key.tex[swz->unit].swizzle_g;
5149 unsigned swz_b = emit->key.tex[swz->unit].swizzle_b;
5150 unsigned swz_a = emit->key.tex[swz->unit].swizzle_a;
5151 unsigned writemask_0 = 0, writemask_1 = 0;
5152 boolean int_tex = is_integer_type(emit->sampler_return_type[swz->unit]);
5153
5154 /* Swizzle w/out zero/one terms */
5155 struct tgsi_full_src_register src_swizzled =
5156 swizzle_src(&swz->tmp_src,
5157 swz_r < PIPE_SWIZZLE_0 ? swz_r : PIPE_SWIZZLE_X,
5158 swz_g < PIPE_SWIZZLE_0 ? swz_g : PIPE_SWIZZLE_Y,
5159 swz_b < PIPE_SWIZZLE_0 ? swz_b : PIPE_SWIZZLE_Z,
5160 swz_a < PIPE_SWIZZLE_0 ? swz_a : PIPE_SWIZZLE_W);
5161
5162 /* MOV dst, color(tmp).<swizzle> */
5163 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
5164 swz->inst_dst, &src_swizzled, FALSE);
5165
5166 /* handle swizzle zero terms */
5167 writemask_0 = (((swz_r == PIPE_SWIZZLE_0) << 0) |
5168 ((swz_g == PIPE_SWIZZLE_0) << 1) |
5169 ((swz_b == PIPE_SWIZZLE_0) << 2) |
5170 ((swz_a == PIPE_SWIZZLE_0) << 3));
5171 writemask_0 &= swz->inst_dst->Register.WriteMask;
5172
5173 if (writemask_0) {
5174 struct tgsi_full_src_register zero = int_tex ?
5175 make_immediate_reg_int(emit, 0) :
5176 make_immediate_reg_float(emit, 0.0f);
5177 struct tgsi_full_dst_register dst =
5178 writemask_dst(swz->inst_dst, writemask_0);
5179
5180 /* MOV dst.writemask_0, {0,0,0,0} */
5181 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
5182 &dst, &zero, FALSE);
5183 }
5184
5185 /* handle swizzle one terms */
5186 writemask_1 = (((swz_r == PIPE_SWIZZLE_1) << 0) |
5187 ((swz_g == PIPE_SWIZZLE_1) << 1) |
5188 ((swz_b == PIPE_SWIZZLE_1) << 2) |
5189 ((swz_a == PIPE_SWIZZLE_1) << 3));
5190 writemask_1 &= swz->inst_dst->Register.WriteMask;
5191
5192 if (writemask_1) {
5193 struct tgsi_full_src_register one = int_tex ?
5194 make_immediate_reg_int(emit, 1) :
5195 make_immediate_reg_float(emit, 1.0f);
5196 struct tgsi_full_dst_register dst =
5197 writemask_dst(swz->inst_dst, writemask_1);
5198
5199 /* MOV dst.writemask_1, {1,1,1,1} */
5200 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst, &one, FALSE);
5201 }
5202 }
5203 }
5204
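/*
 * Illustrative sketch only (not emitted verbatim): for a sampler view swizzle
 * of (R, G, B, ONE) on a float texture, end_tex_swizzle() above expands the
 * sampled value held in the temp register into roughly
 *
 *   MOV dst, tmp.xyzw        (swizzle without the zero/one terms)
 *   MOV dst.w, {1,1,1,1}     (writemask_1 covers the ONE term)
 *
 * while a shadow-compare sampler additionally gets the COMPARE + AND sequence
 * emitted at the top of the function.
 */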
5205
5206 /**
5207 * Emit code for TGSI_OPCODE_SAMPLE instruction.
5208 */
5209 static boolean
5210 emit_sample(struct svga_shader_emitter_v10 *emit,
5211 const struct tgsi_full_instruction *inst)
5212 {
5213 const unsigned resource_unit = inst->Src[1].Register.Index;
5214 const unsigned sampler_unit = inst->Src[2].Register.Index;
5215 struct tgsi_full_src_register coord;
5216 int offsets[3];
5217 struct tex_swizzle_info swz_info;
5218
5219 begin_tex_swizzle(emit, sampler_unit, inst, FALSE, &swz_info);
5220
5221 get_texel_offsets(emit, inst, offsets);
5222
5223 coord = setup_texcoord(emit, resource_unit, &inst->Src[0]);
5224
5225 /* SAMPLE dst, coord(s0), resource, sampler */
5226 begin_emit_instruction(emit);
5227
5228 /* NOTE: for non-fragment shaders, we should use VGPU10_OPCODE_SAMPLE_L
5229 * with LOD=0. But our virtual GPU accepts this as-is.
5230 */
5231 emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE,
5232 inst->Instruction.Saturate, offsets);
5233 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5234 emit_src_register(emit, &coord);
5235 emit_resource_register(emit, resource_unit);
5236 emit_sampler_register(emit, sampler_unit);
5237 end_emit_instruction(emit);
5238
5239 end_tex_swizzle(emit, &swz_info);
5240
5241 free_temp_indexes(emit);
5242
5243 return TRUE;
5244 }
5245
5246
5247 /**
5248 * Check if a texture instruction is valid.
5249 * An example of an invalid texture instruction is doing shadow comparison
5250 * with an integer-valued texture.
5251 * If we detect an invalid texture instruction, we replace it with:
5252 * MOV dst, {1,1,1,1};
5253 * \return TRUE if valid, FALSE if invalid.
5254 */
5255 static boolean
5256 is_valid_tex_instruction(struct svga_shader_emitter_v10 *emit,
5257 const struct tgsi_full_instruction *inst)
5258 {
5259 const unsigned unit = inst->Src[1].Register.Index;
5260 const enum tgsi_texture_type target = inst->Texture.Texture;
5261 boolean valid = TRUE;
5262
5263 if (tgsi_is_shadow_target(target) &&
5264 is_integer_type(emit->sampler_return_type[unit])) {
5265 debug_printf("Invalid SAMPLE_C with an integer texture!\n");
5266 valid = FALSE;
5267 }
5268 /* XXX might check for other conditions in the future here */
5269
5270 if (!valid) {
5271 /* emit a MOV dst, {1,1,1,1} instruction. */
5272 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
5273 begin_emit_instruction(emit);
5274 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
5275 emit_dst_register(emit, &inst->Dst[0]);
5276 emit_src_register(emit, &one);
5277 end_emit_instruction(emit);
5278 }
5279
5280 return valid;
5281 }
5282
5283
5284 /**
5285 * Emit code for TGSI_OPCODE_TEX (simple texture lookup)
5286 */
5287 static boolean
5288 emit_tex(struct svga_shader_emitter_v10 *emit,
5289 const struct tgsi_full_instruction *inst)
5290 {
5291 const uint unit = inst->Src[1].Register.Index;
5292 const enum tgsi_texture_type target = inst->Texture.Texture;
5293 VGPU10_OPCODE_TYPE opcode;
5294 struct tgsi_full_src_register coord;
5295 int offsets[3];
5296 struct tex_swizzle_info swz_info;
5297
5298 /* check that the sampler returns a float */
5299 if (!is_valid_tex_instruction(emit, inst))
5300 return TRUE;
5301
5302 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5303
5304 get_texel_offsets(emit, inst, offsets);
5305
5306 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5307
5308 /* SAMPLE dst, coord(s0), resource, sampler */
5309 begin_emit_instruction(emit);
5310
5311 if (tgsi_is_shadow_target(target))
5312 opcode = VGPU10_OPCODE_SAMPLE_C;
5313 else
5314 opcode = VGPU10_OPCODE_SAMPLE;
5315
5316 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5317 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5318 emit_src_register(emit, &coord);
5319 emit_resource_register(emit, unit);
5320 emit_sampler_register(emit, unit);
5321 if (opcode == VGPU10_OPCODE_SAMPLE_C) {
5322 emit_tex_compare_refcoord(emit, target, &coord);
5323 }
5324 end_emit_instruction(emit);
5325
5326 end_tex_swizzle(emit, &swz_info);
5327
5328 free_temp_indexes(emit);
5329
5330 return TRUE;
5331 }
5332
5333 /**
5334 * Emit code for TGSI_OPCODE_TG4 (texture lookup for texture gather)
5335 */
5336 static boolean
5337 emit_tg4(struct svga_shader_emitter_v10 *emit,
5338 const struct tgsi_full_instruction *inst)
5339 {
5340 const uint unit = inst->Src[2].Register.Index;
5341 struct tgsi_full_src_register src;
5342 int offsets[3];
5343
5344 /* check that the sampler returns a float */
5345 if (!is_valid_tex_instruction(emit, inst))
5346 return TRUE;
5347
5348 /* Only a single channel is supported in SM4_1 and we report
5349 * PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS = 1.
5350 * Only the 0th component will be gathered.
5351 */
5352 switch (emit->key.tex[unit].swizzle_r) {
5353 case PIPE_SWIZZLE_X:
5354 get_texel_offsets(emit, inst, offsets);
5355 src = setup_texcoord(emit, unit, &inst->Src[0]);
5356
5357 /* Gather dst, coord, resource, sampler */
5358 begin_emit_instruction(emit);
5359 emit_sample_opcode(emit, VGPU10_OPCODE_GATHER4,
5360 inst->Instruction.Saturate, offsets);
5361 emit_dst_register(emit, &inst->Dst[0]);
5362 emit_src_register(emit, &src);
5363 emit_resource_register(emit, unit);
5364 emit_sampler_register(emit, unit);
5365 end_emit_instruction(emit);
5366 break;
5367 case PIPE_SWIZZLE_W:
5368 case PIPE_SWIZZLE_1:
5369 src = make_immediate_reg_float(emit, 1.0);
5370 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
5371 &inst->Dst[0], &src, FALSE);
5372 break;
5373 case PIPE_SWIZZLE_Y:
5374 case PIPE_SWIZZLE_Z:
5375 case PIPE_SWIZZLE_0:
5376 default:
5377 src = make_immediate_reg_float(emit, 0.0);
5378 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
5379 &inst->Dst[0], &src, FALSE);
5380 break;
5381 }
5382
5383 return TRUE;
5384 }
5385
5386
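/*
 * Illustrative sketch: since we report PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS
 * = 1, only the red channel can actually be gathered.  A TGSI
 * "TG4 dst, coord, RES, SAMP" with swizzle_r == PIPE_SWIZZLE_X becomes
 *
 *   GATHER4 dst, coord, resource, sampler
 *
 * while a swizzle of W or ONE degenerates to "MOV dst, {1,1,1,1}" and the
 * remaining swizzles to "MOV dst, {0,0,0,0}", as in the switch above.
 */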
5387
5388 /**
5389 * Emit code for TGSI_OPCODE_TEX2 (texture lookup for shadow cube map arrays)
5390 */
5391 static boolean
5392 emit_tex2(struct svga_shader_emitter_v10 *emit,
5393 const struct tgsi_full_instruction *inst)
5394 {
5395 const uint unit = inst->Src[2].Register.Index;
5396 unsigned target = inst->Texture.Texture;
5397 struct tgsi_full_src_register coord, ref;
5398 int offsets[3];
5399 struct tex_swizzle_info swz_info;
5400
5401 /* check that the sampler returns a float */
5402 if (!is_valid_tex_instruction(emit, inst))
5403 return TRUE;
5404
5405 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5406
5407 get_texel_offsets(emit, inst, offsets);
5408
5409 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5410 ref = scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
5411
5412 /* SAMPLE_C dst, coord, resource, sampler, ref */
5413 begin_emit_instruction(emit);
5414 emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE_C,
5415 inst->Instruction.Saturate, offsets);
5416 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5417 emit_src_register(emit, &coord);
5418 emit_resource_register(emit, unit);
5419 emit_sampler_register(emit, unit);
5420 emit_tex_compare_refcoord(emit, target, &ref);
5421 end_emit_instruction(emit);
5422
5423 end_tex_swizzle(emit, &swz_info);
5424
5425 free_temp_indexes(emit);
5426
5427 return TRUE;
5428 }
5429
5430
5431 /**
5432 * Emit code for TGSI_OPCODE_TXP (projective texture)
5433 */
5434 static boolean
5435 emit_txp(struct svga_shader_emitter_v10 *emit,
5436 const struct tgsi_full_instruction *inst)
5437 {
5438 const uint unit = inst->Src[1].Register.Index;
5439 const enum tgsi_texture_type target = inst->Texture.Texture;
5440 VGPU10_OPCODE_TYPE opcode;
5441 int offsets[3];
5442 unsigned tmp = get_temp_index(emit);
5443 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
5444 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
5445 struct tgsi_full_src_register src0_wwww =
5446 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5447 struct tgsi_full_src_register coord;
5448 struct tex_swizzle_info swz_info;
5449
5450 /* check that the sampler returns a float */
5451 if (!is_valid_tex_instruction(emit, inst))
5452 return TRUE;
5453
5454 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5455
5456 get_texel_offsets(emit, inst, offsets);
5457
5458 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5459
5460 /* DIV tmp, coord, coord.wwww */
5461 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &tmp_dst,
5462 &coord, &src0_wwww, FALSE);
5463
5464 /* SAMPLE dst, coord(tmp), resource, sampler */
5465 begin_emit_instruction(emit);
5466
5467 if (tgsi_is_shadow_target(target))
5468 /* NOTE: for non-fragment shaders, we should use
5469 * VGPU10_OPCODE_SAMPLE_C_LZ, but our virtual GPU accepts this as-is.
5470 */
5471 opcode = VGPU10_OPCODE_SAMPLE_C;
5472 else
5473 opcode = VGPU10_OPCODE_SAMPLE;
5474
5475 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5476 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5477 emit_src_register(emit, &tmp_src); /* projected coord */
5478 emit_resource_register(emit, unit);
5479 emit_sampler_register(emit, unit);
5480 if (opcode == VGPU10_OPCODE_SAMPLE_C) {
5481 emit_tex_compare_refcoord(emit, target, &tmp_src);
5482 }
5483 end_emit_instruction(emit);
5484
5485 end_tex_swizzle(emit, &swz_info);
5486
5487 free_temp_indexes(emit);
5488
5489 return TRUE;
5490 }
5491
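/*
 * Illustrative sketch: a projective lookup such as "TXP dst, coord, SAMP, 2D"
 * is expanded by emit_txp() above into roughly
 *
 *   DIV    tmp, coord, coord.wwww      (perspective divide)
 *   SAMPLE dst, tmp, resource, sampler
 *
 * with SAMPLE_C (plus the reference coordinate) substituted for shadow
 * targets.
 */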
5492
5493 /**
5494 * Emit code for TGSI_OPCODE_TXD (explicit derivatives)
5495 */
5496 static boolean
5497 emit_txd(struct svga_shader_emitter_v10 *emit,
5498 const struct tgsi_full_instruction *inst)
5499 {
5500 const uint unit = inst->Src[3].Register.Index;
5501 const enum tgsi_texture_type target = inst->Texture.Texture;
5502 int offsets[3];
5503 struct tgsi_full_src_register coord;
5504 struct tex_swizzle_info swz_info;
5505
5506 begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
5507 &swz_info);
5508
5509 get_texel_offsets(emit, inst, offsets);
5510
5511 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5512
5513 /* SAMPLE_D dst, coord(s0), resource, sampler, Xderiv(s1), Yderiv(s2) */
5514 begin_emit_instruction(emit);
5515 emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE_D,
5516 inst->Instruction.Saturate, offsets);
5517 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5518 emit_src_register(emit, &coord);
5519 emit_resource_register(emit, unit);
5520 emit_sampler_register(emit, unit);
5521 emit_src_register(emit, &inst->Src[1]); /* Xderiv */
5522 emit_src_register(emit, &inst->Src[2]); /* Yderiv */
5523 end_emit_instruction(emit);
5524
5525 end_tex_swizzle(emit, &swz_info);
5526
5527 free_temp_indexes(emit);
5528
5529 return TRUE;
5530 }
5531
5532
5533 /**
5534 * Emit code for TGSI_OPCODE_TXF (texel fetch)
5535 */
5536 static boolean
5537 emit_txf(struct svga_shader_emitter_v10 *emit,
5538 const struct tgsi_full_instruction *inst)
5539 {
5540 const uint unit = inst->Src[1].Register.Index;
5541 const boolean msaa = tgsi_is_msaa_target(inst->Texture.Texture)
5542 && emit->key.tex[unit].num_samples > 1;
5543 int offsets[3];
5544 struct tex_swizzle_info swz_info;
5545
5546 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5547
5548 get_texel_offsets(emit, inst, offsets);
5549
5550 if (msaa) {
5551 assert(emit->key.tex[unit].num_samples > 1);
5552
5553 /* Fetch one sample from an MSAA texture */
5554 struct tgsi_full_src_register sampleIndex =
5555 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5556 /* LD_MS dst, coord(s0), resource, sampleIndex */
5557 begin_emit_instruction(emit);
5558 emit_sample_opcode(emit, VGPU10_OPCODE_LD_MS,
5559 inst->Instruction.Saturate, offsets);
5560 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5561 emit_src_register(emit, &inst->Src[0]);
5562 emit_resource_register(emit, unit);
5563 emit_src_register(emit, &sampleIndex);
5564 end_emit_instruction(emit);
5565 }
5566 else {
5567 /* Fetch one texel specified by integer coordinate */
5568 /* LD dst, coord(s0), resource */
5569 begin_emit_instruction(emit);
5570 emit_sample_opcode(emit, VGPU10_OPCODE_LD,
5571 inst->Instruction.Saturate, offsets);
5572 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5573 emit_src_register(emit, &inst->Src[0]);
5574 emit_resource_register(emit, unit);
5575 end_emit_instruction(emit);
5576 }
5577
5578 end_tex_swizzle(emit, &swz_info);
5579
5580 free_temp_indexes(emit);
5581
5582 return TRUE;
5583 }
5584
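/*
 * Illustrative sketch: for a non-MSAA resource a texel fetch
 * "TXF dst, coord, SAMP, 2D" maps to a single
 *
 *   LD dst, coord, resource
 *
 * while a fetch from a multisampled surface uses LD_MS and passes the sample
 * index (taken from coord.w) as an extra operand, as emitted above.
 */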
5585
5586 /**
5587 * Emit code for TGSI_OPCODE_TXL (explicit LOD) or TGSI_OPCODE_TXB (LOD bias)
5588 * or TGSI_OPCODE_TXB2 (for cube shadow maps).
5589 */
5590 static boolean
5591 emit_txl_txb(struct svga_shader_emitter_v10 *emit,
5592 const struct tgsi_full_instruction *inst)
5593 {
5594 const enum tgsi_texture_type target = inst->Texture.Texture;
5595 VGPU10_OPCODE_TYPE opcode;
5596 unsigned unit;
5597 int offsets[3];
5598 struct tgsi_full_src_register coord, lod_bias;
5599 struct tex_swizzle_info swz_info;
5600
5601 assert(inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
5602 inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
5603 inst->Instruction.Opcode == TGSI_OPCODE_TXB2);
5604
5605 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2) {
5606 lod_bias = scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
5607 unit = inst->Src[2].Register.Index;
5608 }
5609 else {
5610 lod_bias = scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5611 unit = inst->Src[1].Register.Index;
5612 }
5613
5614 begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
5615 &swz_info);
5616
5617 get_texel_offsets(emit, inst, offsets);
5618
5619 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5620
5621 /* SAMPLE_L/B dst, coord(s0), resource, sampler, lod(s3) */
5622 begin_emit_instruction(emit);
5623 if (inst->Instruction.Opcode == TGSI_OPCODE_TXL) {
5624 opcode = VGPU10_OPCODE_SAMPLE_L;
5625 }
5626 else {
5627 opcode = VGPU10_OPCODE_SAMPLE_B;
5628 }
5629 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5630 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5631 emit_src_register(emit, &coord);
5632 emit_resource_register(emit, unit);
5633 emit_sampler_register(emit, unit);
5634 emit_src_register(emit, &lod_bias);
5635 end_emit_instruction(emit);
5636
5637 end_tex_swizzle(emit, &swz_info);
5638
5639 free_temp_indexes(emit);
5640
5641 return TRUE;
5642 }
5643
5644
5645 /**
5646 * Emit code for TGSI_OPCODE_TXL2 (explicit LOD) for cubemap array.
5647 */
5648 static boolean
5649 emit_txl2(struct svga_shader_emitter_v10 *emit,
5650 const struct tgsi_full_instruction *inst)
5651 {
5652 unsigned target = inst->Texture.Texture;
5653 unsigned opcode, unit;
5654 int offsets[3];
5655 struct tgsi_full_src_register coord, lod;
5656 struct tex_swizzle_info swz_info;
5657
5658 assert(inst->Instruction.Opcode == TGSI_OPCODE_TXL2);
5659
5660 lod = scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
5661 unit = inst->Src[2].Register.Index;
5662
5663 begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
5664 &swz_info);
5665
5666 get_texel_offsets(emit, inst, offsets);
5667
5668 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5669
5670 /* SAMPLE_L dst, coord(s0), resource, sampler, lod(s3) */
5671 begin_emit_instruction(emit);
5672 opcode = VGPU10_OPCODE_SAMPLE_L;
5673 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5674 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5675 emit_src_register(emit, &coord);
5676 emit_resource_register(emit, unit);
5677 emit_sampler_register(emit, unit);
5678 emit_src_register(emit, &lod);
5679 end_emit_instruction(emit);
5680
5681 end_tex_swizzle(emit, &swz_info);
5682
5683 free_temp_indexes(emit);
5684
5685 return TRUE;
5686 }
5687
5688
5689 /**
5690 * Emit code for TGSI_OPCODE_TXQ (texture query) instruction.
5691 */
5692 static boolean
5693 emit_txq(struct svga_shader_emitter_v10 *emit,
5694 const struct tgsi_full_instruction *inst)
5695 {
5696 const uint unit = inst->Src[1].Register.Index;
5697
5698 if (emit->sampler_target[unit] == TGSI_TEXTURE_BUFFER) {
5699 /* RESINFO does not support querying texture buffers, so we instead
5700 * store texture buffer sizes in shader constants and copy the relevant
5701 * constant to the destination to implement TXQ.
5702 * MOV dst, const[texture_buffer_size_index[unit]]
5703 */
5704 struct tgsi_full_src_register size_src =
5705 make_src_const_reg(emit->texture_buffer_size_index[unit]);
5706 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &size_src,
5707 FALSE);
5708 } else {
5709 /* RESINFO dst, srcMipLevel, resource */
5710 begin_emit_instruction(emit);
5711 emit_opcode_resinfo(emit, VGPU10_RESINFO_RETURN_UINT);
5712 emit_dst_register(emit, &inst->Dst[0]);
5713 emit_src_register(emit, &inst->Src[0]);
5714 emit_resource_register(emit, unit);
5715 end_emit_instruction(emit);
5716 }
5717
5718 free_temp_indexes(emit);
5719
5720 return TRUE;
5721 }
5722
5723
5724 /**
5725 * Emit a simple instruction (like ADD, MUL, MIN, etc).
5726 */
5727 static boolean
5728 emit_simple(struct svga_shader_emitter_v10 *emit,
5729 const struct tgsi_full_instruction *inst)
5730 {
5731 const enum tgsi_opcode opcode = inst->Instruction.Opcode;
5732 const struct tgsi_opcode_info *op = tgsi_get_opcode_info(opcode);
5733 unsigned i;
5734
5735 begin_emit_instruction(emit);
5736 emit_opcode(emit, translate_opcode(opcode), inst->Instruction.Saturate);
5737 for (i = 0; i < op->num_dst; i++) {
5738 emit_dst_register(emit, &inst->Dst[i]);
5739 }
5740 for (i = 0; i < op->num_src; i++) {
5741 emit_src_register(emit, &inst->Src[i]);
5742 }
5743 end_emit_instruction(emit);
5744
5745 return TRUE;
5746 }
5747
5748
5749 /**
5750 * We only special-case the MOV instruction to try to detect constant
5751 * color writes in the fragment shader.
5752 */
5753 static boolean
5754 emit_mov(struct svga_shader_emitter_v10 *emit,
5755 const struct tgsi_full_instruction *inst)
5756 {
5757 const struct tgsi_full_src_register *src = &inst->Src[0];
5758 const struct tgsi_full_dst_register *dst = &inst->Dst[0];
5759
5760 if (emit->unit == PIPE_SHADER_FRAGMENT &&
5761 dst->Register.File == TGSI_FILE_OUTPUT &&
5762 dst->Register.Index == 0 &&
5763 src->Register.File == TGSI_FILE_CONSTANT &&
5764 !src->Register.Indirect) {
5765 emit->constant_color_output = TRUE;
5766 }
5767
5768 return emit_simple(emit, inst);
5769 }
5770
5771
5772 /**
5773 * Emit a simple VGPU10 instruction which writes to multiple dest registers,
5774 * where TGSI only uses one dest register.
5775 */
5776 static boolean
5777 emit_simple_1dst(struct svga_shader_emitter_v10 *emit,
5778 const struct tgsi_full_instruction *inst,
5779 unsigned dst_count,
5780 unsigned dst_index)
5781 {
5782 const enum tgsi_opcode opcode = inst->Instruction.Opcode;
5783 const struct tgsi_opcode_info *op = tgsi_get_opcode_info(opcode);
5784 unsigned i;
5785
5786 begin_emit_instruction(emit);
5787 emit_opcode(emit, translate_opcode(opcode), inst->Instruction.Saturate);
5788
5789 for (i = 0; i < dst_count; i++) {
5790 if (i == dst_index) {
5791 emit_dst_register(emit, &inst->Dst[0]);
5792 } else {
5793 emit_null_dst_register(emit);
5794 }
5795 }
5796
5797 for (i = 0; i < op->num_src; i++) {
5798 emit_src_register(emit, &inst->Src[i]);
5799 }
5800 end_emit_instruction(emit);
5801
5802 return TRUE;
5803 }
5804
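/*
 * Illustrative sketch: VGPU10's UMUL writes the high and low halves of the
 * product to two destination registers, but TGSI's UMUL only wants the low
 * half.  Calling emit_simple_1dst(emit, inst, 2, 1) therefore emits something
 * like
 *
 *   UMUL null, dst, src0, src1
 *
 * while emit_simple_1dst(emit, inst, 2, 0), used for UMUL_HI, emits
 *
 *   UMUL dst, null, src0, src1
 *
 * matching how these opcodes are dispatched in emit_vgpu10_instruction() below.
 */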
5805
5806 /**
5807 * Translate a single TGSI instruction to VGPU10.
5808 */
5809 static boolean
5810 emit_vgpu10_instruction(struct svga_shader_emitter_v10 *emit,
5811 unsigned inst_number,
5812 const struct tgsi_full_instruction *inst)
5813 {
5814 const enum tgsi_opcode opcode = inst->Instruction.Opcode;
5815
5816 switch (opcode) {
5817 case TGSI_OPCODE_ADD:
5818 case TGSI_OPCODE_AND:
5819 case TGSI_OPCODE_BGNLOOP:
5820 case TGSI_OPCODE_BRK:
5821 case TGSI_OPCODE_CEIL:
5822 case TGSI_OPCODE_CONT:
5823 case TGSI_OPCODE_DDX:
5824 case TGSI_OPCODE_DDY:
5825 case TGSI_OPCODE_DIV:
5826 case TGSI_OPCODE_DP2:
5827 case TGSI_OPCODE_DP3:
5828 case TGSI_OPCODE_DP4:
5829 case TGSI_OPCODE_ELSE:
5830 case TGSI_OPCODE_ENDIF:
5831 case TGSI_OPCODE_ENDLOOP:
5832 case TGSI_OPCODE_ENDSUB:
5833 case TGSI_OPCODE_F2I:
5834 case TGSI_OPCODE_F2U:
5835 case TGSI_OPCODE_FLR:
5836 case TGSI_OPCODE_FRC:
5837 case TGSI_OPCODE_FSEQ:
5838 case TGSI_OPCODE_FSGE:
5839 case TGSI_OPCODE_FSLT:
5840 case TGSI_OPCODE_FSNE:
5841 case TGSI_OPCODE_I2F:
5842 case TGSI_OPCODE_IMAX:
5843 case TGSI_OPCODE_IMIN:
5844 case TGSI_OPCODE_INEG:
5845 case TGSI_OPCODE_ISGE:
5846 case TGSI_OPCODE_ISHR:
5847 case TGSI_OPCODE_ISLT:
5848 case TGSI_OPCODE_MAD:
5849 case TGSI_OPCODE_MAX:
5850 case TGSI_OPCODE_MIN:
5851 case TGSI_OPCODE_MUL:
5852 case TGSI_OPCODE_NOP:
5853 case TGSI_OPCODE_NOT:
5854 case TGSI_OPCODE_OR:
5855 case TGSI_OPCODE_RET:
5856 case TGSI_OPCODE_UADD:
5857 case TGSI_OPCODE_USEQ:
5858 case TGSI_OPCODE_USGE:
5859 case TGSI_OPCODE_USLT:
5860 case TGSI_OPCODE_UMIN:
5861 case TGSI_OPCODE_UMAD:
5862 case TGSI_OPCODE_UMAX:
5863 case TGSI_OPCODE_ROUND:
5864 case TGSI_OPCODE_SQRT:
5865 case TGSI_OPCODE_SHL:
5866 case TGSI_OPCODE_TRUNC:
5867 case TGSI_OPCODE_U2F:
5868 case TGSI_OPCODE_UCMP:
5869 case TGSI_OPCODE_USHR:
5870 case TGSI_OPCODE_USNE:
5871 case TGSI_OPCODE_XOR:
5872 /* simple instructions */
5873 return emit_simple(emit, inst);
5874
5875 case TGSI_OPCODE_MOV:
5876 return emit_mov(emit, inst);
5877 case TGSI_OPCODE_EMIT:
5878 return emit_vertex(emit, inst);
5879 case TGSI_OPCODE_ENDPRIM:
5880 return emit_endprim(emit, inst);
5881 case TGSI_OPCODE_IABS:
5882 return emit_iabs(emit, inst);
5883 case TGSI_OPCODE_ARL:
5884 /* fall-through */
5885 case TGSI_OPCODE_UARL:
5886 return emit_arl_uarl(emit, inst);
5887 case TGSI_OPCODE_BGNSUB:
5888 /* no-op */
5889 return TRUE;
5890 case TGSI_OPCODE_CAL:
5891 return emit_cal(emit, inst);
5892 case TGSI_OPCODE_CMP:
5893 return emit_cmp(emit, inst);
5894 case TGSI_OPCODE_COS:
5895 return emit_sincos(emit, inst);
5896 case TGSI_OPCODE_DST:
5897 return emit_dst(emit, inst);
5898 case TGSI_OPCODE_EX2:
5899 return emit_ex2(emit, inst);
5900 case TGSI_OPCODE_EXP:
5901 return emit_exp(emit, inst);
5902 case TGSI_OPCODE_IF:
5903 return emit_if(emit, inst);
5904 case TGSI_OPCODE_KILL:
5905 return emit_kill(emit, inst);
5906 case TGSI_OPCODE_KILL_IF:
5907 return emit_kill_if(emit, inst);
5908 case TGSI_OPCODE_LG2:
5909 return emit_lg2(emit, inst);
5910 case TGSI_OPCODE_LIT:
5911 return emit_lit(emit, inst);
5912 case TGSI_OPCODE_LODQ:
5913 return emit_lodq(emit, inst);
5914 case TGSI_OPCODE_LOG:
5915 return emit_log(emit, inst);
5916 case TGSI_OPCODE_LRP:
5917 return emit_lrp(emit, inst);
5918 case TGSI_OPCODE_POW:
5919 return emit_pow(emit, inst);
5920 case TGSI_OPCODE_RCP:
5921 return emit_rcp(emit, inst);
5922 case TGSI_OPCODE_RSQ:
5923 return emit_rsq(emit, inst);
5924 case TGSI_OPCODE_SAMPLE:
5925 return emit_sample(emit, inst);
5926 case TGSI_OPCODE_SEQ:
5927 return emit_seq(emit, inst);
5928 case TGSI_OPCODE_SGE:
5929 return emit_sge(emit, inst);
5930 case TGSI_OPCODE_SGT:
5931 return emit_sgt(emit, inst);
5932 case TGSI_OPCODE_SIN:
5933 return emit_sincos(emit, inst);
5934 case TGSI_OPCODE_SLE:
5935 return emit_sle(emit, inst);
5936 case TGSI_OPCODE_SLT:
5937 return emit_slt(emit, inst);
5938 case TGSI_OPCODE_SNE:
5939 return emit_sne(emit, inst);
5940 case TGSI_OPCODE_SSG:
5941 return emit_ssg(emit, inst);
5942 case TGSI_OPCODE_ISSG:
5943 return emit_issg(emit, inst);
5944 case TGSI_OPCODE_TEX:
5945 return emit_tex(emit, inst);
5946 case TGSI_OPCODE_TG4:
5947 return emit_tg4(emit, inst);
5948 case TGSI_OPCODE_TEX2:
5949 return emit_tex2(emit, inst);
5950 case TGSI_OPCODE_TXP:
5951 return emit_txp(emit, inst);
5952 case TGSI_OPCODE_TXB:
5953 case TGSI_OPCODE_TXB2:
5954 case TGSI_OPCODE_TXL:
5955 return emit_txl_txb(emit, inst);
5956 case TGSI_OPCODE_TXD:
5957 return emit_txd(emit, inst);
5958 case TGSI_OPCODE_TXF:
5959 return emit_txf(emit, inst);
5960 case TGSI_OPCODE_TXL2:
5961 return emit_txl2(emit, inst);
5962 case TGSI_OPCODE_TXQ:
5963 return emit_txq(emit, inst);
5964 case TGSI_OPCODE_UIF:
5965 return emit_if(emit, inst);
5966 case TGSI_OPCODE_UMUL_HI:
5967 case TGSI_OPCODE_IMUL_HI:
5968 case TGSI_OPCODE_UDIV:
5969 case TGSI_OPCODE_IDIV:
5970 /* These cases use only the FIRST of two destination registers */
5971 return emit_simple_1dst(emit, inst, 2, 0);
5972 case TGSI_OPCODE_UMUL:
5973 case TGSI_OPCODE_UMOD:
5974 case TGSI_OPCODE_MOD:
5975 /* These cases use only the SECOND of two destination registers */
5976 return emit_simple_1dst(emit, inst, 2, 1);
5977 case TGSI_OPCODE_END:
5978 if (!emit_post_helpers(emit))
5979 return FALSE;
5980 return emit_simple(emit, inst);
5981
5982 default:
5983 debug_printf("Unimplemented tgsi instruction %s\n",
5984 tgsi_get_opcode_name(opcode));
5985 return FALSE;
5986 }
5987
5988 return TRUE;
5989 }
5990
5991
5992 /**
5993 * Emit the extra instructions to adjust the vertex position.
5994 * There are two possible adjustments:
5995 * 1. Converting from Gallium to VGPU10 coordinate space by applying the
5996 * "prescale" and "pretranslate" values.
5997 * 2. Undoing the viewport transformation when we use the swtnl/draw path.
5998 * \param vs_pos_tmp_index which temporary register contains the vertex pos.
5999 */
6000 static void
6001 emit_vpos_instructions(struct svga_shader_emitter_v10 *emit,
6002 unsigned vs_pos_tmp_index)
6003 {
6004 struct tgsi_full_src_register tmp_pos_src;
6005 struct tgsi_full_dst_register pos_dst;
6006
6007 /* Don't bother to emit any extra vertex instructions if vertex position is
6008 * not written out
6009 */
6010 if (emit->vposition.out_index == INVALID_INDEX)
6011 return;
6012
6013 tmp_pos_src = make_src_temp_reg(vs_pos_tmp_index);
6014 pos_dst = make_dst_output_reg(emit->vposition.out_index);
6015
6016 /* If the non-adjusted vertex position register index
6017 * is valid, copy the vertex position from the temporary
6018 * vertex position register before it is modified by the
6019 * prescale computation.
6020 */
6021 if (emit->vposition.so_index != INVALID_INDEX) {
6022 struct tgsi_full_dst_register pos_so_dst =
6023 make_dst_output_reg(emit->vposition.so_index);
6024
6025 /* MOV pos_so, tmp_pos */
6026 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &pos_so_dst,
6027 &tmp_pos_src, FALSE);
6028 }
6029
6030 if (emit->vposition.need_prescale) {
6031 /* This code adjusts the vertex position to match the VGPU10 convention.
6032 * If p is the position computed by the shader (usually by applying the
6033 * modelview and projection matrices), the new position q is computed by:
6034 *
6035 * q.x = p.w * trans.x + p.x * scale.x
6036 * q.y = p.w * trans.y + p.y * scale.y
6037 * q.z = p.w * trans.z + p.z * scale.z
6038 * q.w = p.w * trans.w + p.w
6039 */
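/*
 * Worked example with made-up constants: if prescale.scale = (0.5, -0.5, 0.5)
 * and prescale.trans = (0.5, 0.5, 0.5, 0), then p = (0, 0, 0, 1) yields
 * q = (0.5, 0.5, 0.5, 1).  The MUL/MAD pair below implements these formulas
 * directly.
 */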
6040 struct tgsi_full_src_register tmp_pos_src_w =
6041 scalar_src(&tmp_pos_src, TGSI_SWIZZLE_W);
6042 struct tgsi_full_dst_register tmp_pos_dst =
6043 make_dst_temp_reg(vs_pos_tmp_index);
6044 struct tgsi_full_dst_register tmp_pos_dst_xyz =
6045 writemask_dst(&tmp_pos_dst, TGSI_WRITEMASK_XYZ);
6046
6047 struct tgsi_full_src_register prescale_scale =
6048 make_src_const_reg(emit->vposition.prescale_scale_index);
6049 struct tgsi_full_src_register prescale_trans =
6050 make_src_const_reg(emit->vposition.prescale_trans_index);
6051
6052 /* MUL tmp_pos.xyz, tmp_pos, prescale.scale */
6053 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_pos_dst_xyz,
6054 &tmp_pos_src, &prescale_scale, FALSE);
6055
6056 /* MAD pos, tmp_pos.wwww, prescale.trans, tmp_pos */
6057 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &pos_dst, &tmp_pos_src_w,
6058 &prescale_trans, &tmp_pos_src, FALSE);
6059 }
6060 else if (emit->key.vs.undo_viewport) {
6061 /* This code computes the final vertex position from the temporary
6062 * vertex position by undoing the viewport transformation and the
6063 * divide-by-W operation (we convert window coords back to clip coords).
6064 * This is needed when we use the 'draw' module for fallbacks.
6065 * If p is the temp pos in window coords, then the NDC coord q is:
6066 * q.x = (p.x - vp.x_trans) / vp.x_scale * p.w
6067 * q.y = (p.y - vp.y_trans) / vp.y_scale * p.w
6068 * q.z = p.z * p.w
6069 * q.w = p.w
6070 * CONST[vs_viewport_index] contains:
6071 * { 1/vp.x_scale, 1/vp.y_scale, -vp.x_trans, -vp.y_trans }
6072 */
6073 struct tgsi_full_dst_register tmp_pos_dst =
6074 make_dst_temp_reg(vs_pos_tmp_index);
6075 struct tgsi_full_dst_register tmp_pos_dst_xy =
6076 writemask_dst(&tmp_pos_dst, TGSI_WRITEMASK_XY);
6077 struct tgsi_full_src_register tmp_pos_src_wwww =
6078 scalar_src(&tmp_pos_src, TGSI_SWIZZLE_W);
6079
6080 struct tgsi_full_dst_register pos_dst_xyz =
6081 writemask_dst(&pos_dst, TGSI_WRITEMASK_XYZ);
6082 struct tgsi_full_dst_register pos_dst_w =
6083 writemask_dst(&pos_dst, TGSI_WRITEMASK_W);
6084
6085 struct tgsi_full_src_register vp_xyzw =
6086 make_src_const_reg(emit->vs.viewport_index);
6087 struct tgsi_full_src_register vp_zwww =
6088 swizzle_src(&vp_xyzw, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W,
6089 TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);
6090
6091 /* ADD tmp_pos.xy, tmp_pos.xy, viewport.zwww */
6092 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp_pos_dst_xy,
6093 &tmp_pos_src, &vp_zwww, FALSE);
6094
6095 /* MUL tmp_pos.xy, tmp_pos.xyzw, viewport.xyzw */
6096 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_pos_dst_xy,
6097 &tmp_pos_src, &vp_xyzw, FALSE);
6098
6099 /* MUL pos.xyz, tmp_pos.xyz, tmp_pos.www */
6100 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &pos_dst_xyz,
6101 &tmp_pos_src, &tmp_pos_src_wwww, FALSE);
6102
6103 /* MOV pos.w, tmp_pos.w */
6104 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &pos_dst_w,
6105 &tmp_pos_src, FALSE);
6106 }
6107 else if (vs_pos_tmp_index != INVALID_INDEX) {
6108 /* This code is to handle the case where the temporary vertex
6109 * position register is created when the vertex shader has stream
6110 * output and prescale is disabled because rasterization is to be
6111 * discarded.
6112 */
6113 struct tgsi_full_dst_register pos_dst =
6114 make_dst_output_reg(emit->vposition.out_index);
6115
6116 /* MOV pos, tmp_pos */
6117 begin_emit_instruction(emit);
6118 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
6119 emit_dst_register(emit, &pos_dst);
6120 emit_src_register(emit, &tmp_pos_src);
6121 end_emit_instruction(emit);
6122 }
6123 }
6124
6125 static void
6126 emit_clipping_instructions(struct svga_shader_emitter_v10 *emit)
6127 {
6128 if (emit->clip_mode == CLIP_DISTANCE) {
6129 /* Copy from copy distance temporary to CLIPDIST & the shadow copy */
6130 emit_clip_distance_instructions(emit);
6131
6132 } else if (emit->clip_mode == CLIP_VERTEX) {
6133 /* Convert TGSI CLIPVERTEX to CLIPDIST */
6134 emit_clip_vertex_instructions(emit);
6135 }
6136
6137 /**
6138 * Emit the vertex position and handle legacy user-defined clip planes
6139 * only if there is a valid vertex position register index.
6140 * This handles the case where the shader does not output a vertex
6141 * position; in that case there is no need to emit any further
6142 * vertex instructions.
6143 */
6144 if (emit->vposition.out_index == INVALID_INDEX)
6145 return;
6146
6147 /**
6148 * Emit per-vertex clipping instructions for legacy user defined clip planes.
6149 * NOTE: we must emit the clip distance instructions before the
6150 * emit_vpos_instructions() call since the latter function will change
6151 * the TEMP[vs_pos_tmp_index] value.
6152 */
6153 if (emit->clip_mode == CLIP_LEGACY) {
6154 /* Emit CLIPDIST for legacy user defined clip planes */
6155 emit_clip_distance_from_vpos(emit, emit->vposition.tmp_index);
6156 }
6157 }
6158
6159
6160 /**
6161 * Emit extra per-vertex instructions. This includes clip-coordinate
6162 * space conversion and computing clip distances. This is called for
6163 * each GS emit-vertex instruction and at the end of VS translation.
6164 */
6165 static void
6166 emit_vertex_instructions(struct svga_shader_emitter_v10 *emit)
6167 {
6168 const unsigned vs_pos_tmp_index = emit->vposition.tmp_index;
6169
6170 /* Emit clipping instructions based on clipping mode */
6171 emit_clipping_instructions(emit);
6172
6173 /**
6174 * Reset the temporary vertex position register index
6175 * so that emit_dst_register() will use the real vertex position output
6176 */
6177 emit->vposition.tmp_index = INVALID_INDEX;
6178
6179 /* Emit vertex position instructions */
6180 emit_vpos_instructions(emit, vs_pos_tmp_index);
6181
6182 /* Restore original vposition.tmp_index value for the next GS vertex.
6183 * It doesn't matter for VS.
6184 */
6185 emit->vposition.tmp_index = vs_pos_tmp_index;
6186 }
6187
6188 /**
6189 * Translate the TGSI_OPCODE_EMIT GS instruction.
6190 */
6191 static boolean
6192 emit_vertex(struct svga_shader_emitter_v10 *emit,
6193 const struct tgsi_full_instruction *inst)
6194 {
6195 unsigned ret = TRUE;
6196
6197 assert(emit->unit == PIPE_SHADER_GEOMETRY);
6198
6199 emit_vertex_instructions(emit);
6200
6201 /* We can't use emit_simple() because the TGSI instruction has one
6202 * operand (vertex stream number) which we must ignore for VGPU10.
6203 */
6204 begin_emit_instruction(emit);
6205 emit_opcode(emit, VGPU10_OPCODE_EMIT, FALSE);
6206 end_emit_instruction(emit);
6207
6208 return ret;
6209 }
6210
6211
6212 /**
6213 * Emit the extra code to convert from VGPU10's boolean front-face
6214 * register to TGSI's signed front-face register.
6215 *
6216 * TODO: Make temporary front-face register a scalar.
6217 */
6218 static void
6219 emit_frontface_instructions(struct svga_shader_emitter_v10 *emit)
6220 {
6221 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6222
6223 if (emit->fs.face_input_index != INVALID_INDEX) {
6224 /* convert vgpu10 boolean face register to gallium +/-1 value */
6225 struct tgsi_full_dst_register tmp_dst =
6226 make_dst_temp_reg(emit->fs.face_tmp_index);
6227 struct tgsi_full_src_register one =
6228 make_immediate_reg_float(emit, 1.0f);
6229 struct tgsi_full_src_register neg_one =
6230 make_immediate_reg_float(emit, -1.0f);
6231
6232 /* MOVC face_tmp, IS_FRONT_FACE.x, 1.0, -1.0 */
6233 begin_emit_instruction(emit);
6234 emit_opcode(emit, VGPU10_OPCODE_MOVC, FALSE);
6235 emit_dst_register(emit, &tmp_dst);
6236 emit_face_register(emit);
6237 emit_src_register(emit, &one);
6238 emit_src_register(emit, &neg_one);
6239 end_emit_instruction(emit);
6240 }
6241 }
6242
6243
6244 /**
6245 * Emit the extra code to convert from VGPU10's fragcoord.w value to 1/w.
6246 */
6247 static void
6248 emit_fragcoord_instructions(struct svga_shader_emitter_v10 *emit)
6249 {
6250 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6251
6252 if (emit->fs.fragcoord_input_index != INVALID_INDEX) {
6253 struct tgsi_full_dst_register tmp_dst =
6254 make_dst_temp_reg(emit->fs.fragcoord_tmp_index);
6255 struct tgsi_full_dst_register tmp_dst_xyz =
6256 writemask_dst(&tmp_dst, TGSI_WRITEMASK_XYZ);
6257 struct tgsi_full_dst_register tmp_dst_w =
6258 writemask_dst(&tmp_dst, TGSI_WRITEMASK_W);
6259 struct tgsi_full_src_register one =
6260 make_immediate_reg_float(emit, 1.0f);
6261 struct tgsi_full_src_register fragcoord =
6262 make_src_reg(TGSI_FILE_INPUT, emit->fs.fragcoord_input_index);
6263
6264 /* save the input index */
6265 unsigned fragcoord_input_index = emit->fs.fragcoord_input_index;
6266 /* set to invalid to prevent substitution in emit_src_register() */
6267 emit->fs.fragcoord_input_index = INVALID_INDEX;
6268
6269 /* MOV fragcoord_tmp.xyz, fragcoord.xyz */
6270 begin_emit_instruction(emit);
6271 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
6272 emit_dst_register(emit, &tmp_dst_xyz);
6273 emit_src_register(emit, &fragcoord);
6274 end_emit_instruction(emit);
6275
6276 /* DIV fragcoord_tmp.w, 1.0, fragcoord.w */
6277 begin_emit_instruction(emit);
6278 emit_opcode(emit, VGPU10_OPCODE_DIV, FALSE);
6279 emit_dst_register(emit, &tmp_dst_w);
6280 emit_src_register(emit, &one);
6281 emit_src_register(emit, &fragcoord);
6282 end_emit_instruction(emit);
6283
6284 /* restore saved value */
6285 emit->fs.fragcoord_input_index = fragcoord_input_index;
6286 }
6287 }
6288
6289
6290 /**
6291 * Emit the extra code to get the current sample position value and
6292 * put it into a temp register.
6293 */
6294 static void
6295 emit_sample_position_instructions(struct svga_shader_emitter_v10 *emit)
6296 {
6297 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6298
6299 if (emit->fs.sample_pos_sys_index != INVALID_INDEX) {
6300 assert(emit->version >= 41);
6301
6302 struct tgsi_full_dst_register tmp_dst =
6303 make_dst_temp_reg(emit->fs.sample_pos_tmp_index);
6304 struct tgsi_full_src_register half =
6305 make_immediate_reg_float4(emit, 0.5, 0.5, 0.0, 0.0);
6306
6307 struct tgsi_full_src_register tmp_src =
6308 make_src_temp_reg(emit->fs.sample_pos_tmp_index);
6309 struct tgsi_full_src_register sample_index_reg =
6310 make_src_scalar_reg(TGSI_FILE_SYSTEM_VALUE,
6311 emit->fs.sample_id_sys_index, TGSI_SWIZZLE_X);
6312
6313 /* The first src register is a shader resource (if we want a
6314 * multisampled resource sample position) or the rasterizer register
6315 * (if we want the current sample position in the color buffer). We
6316 * want the latter.
6317 */
6318
6319 /* SAMPLE_POS dst, RASTERIZER, sampleIndex */
6320 begin_emit_instruction(emit);
6321 emit_opcode(emit, VGPU10_OPCODE_SAMPLE_POS, FALSE);
6322 emit_dst_register(emit, &tmp_dst);
6323 emit_rasterizer_register(emit);
6324 emit_src_register(emit, &sample_index_reg);
6325 end_emit_instruction(emit);
6326
6327 /* Convert from D3D coords to GL coords by adding 0.5 bias */
6328 /* ADD dst, dst, half */
6329 begin_emit_instruction(emit);
6330 emit_opcode(emit, VGPU10_OPCODE_ADD, FALSE);
6331 emit_dst_register(emit, &tmp_dst);
6332 emit_src_register(emit, &tmp_src);
6333 emit_src_register(emit, &half);
6334 end_emit_instruction(emit);
6335 }
6336 }
6337
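/*
 * Worked example with illustrative values: SAMPLE_POS returns a position
 * relative to the pixel center in the D3D convention, e.g. (-0.25, 0.25).
 * Adding the 0.5 bias above converts it to the GL convention of a position
 * within the pixel, giving (0.25, 0.75).
 */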
6338
6339 /**
6340 * Emit extra instructions to adjust VS inputs/attributes. This can
6341 * mean casting a vertex attribute from int to float or setting the
6342 * W component to 1, or both.
6343 */
6344 static void
6345 emit_vertex_attrib_instructions(struct svga_shader_emitter_v10 *emit)
6346 {
6347 const unsigned save_w_1_mask = emit->key.vs.adjust_attrib_w_1;
6348 const unsigned save_itof_mask = emit->key.vs.adjust_attrib_itof;
6349 const unsigned save_utof_mask = emit->key.vs.adjust_attrib_utof;
6350 const unsigned save_is_bgra_mask = emit->key.vs.attrib_is_bgra;
6351 const unsigned save_puint_to_snorm_mask = emit->key.vs.attrib_puint_to_snorm;
6352 const unsigned save_puint_to_uscaled_mask = emit->key.vs.attrib_puint_to_uscaled;
6353 const unsigned save_puint_to_sscaled_mask = emit->key.vs.attrib_puint_to_sscaled;
6354
6355 unsigned adjust_mask = (save_w_1_mask |
6356 save_itof_mask |
6357 save_utof_mask |
6358 save_is_bgra_mask |
6359 save_puint_to_snorm_mask |
6360 save_puint_to_uscaled_mask |
6361 save_puint_to_sscaled_mask);
6362
6363 assert(emit->unit == PIPE_SHADER_VERTEX);
6364
6365 if (adjust_mask) {
6366 struct tgsi_full_src_register one =
6367 make_immediate_reg_float(emit, 1.0f);
6368
6369 struct tgsi_full_src_register one_int =
6370 make_immediate_reg_int(emit, 1);
6371
6372 /* We need to turn off these bitmasks while emitting the
6373 * instructions below, then restore them afterward.
6374 */
6375 emit->key.vs.adjust_attrib_w_1 = 0;
6376 emit->key.vs.adjust_attrib_itof = 0;
6377 emit->key.vs.adjust_attrib_utof = 0;
6378 emit->key.vs.attrib_is_bgra = 0;
6379 emit->key.vs.attrib_puint_to_snorm = 0;
6380 emit->key.vs.attrib_puint_to_uscaled = 0;
6381 emit->key.vs.attrib_puint_to_sscaled = 0;
6382
6383 while (adjust_mask) {
6384 unsigned index = u_bit_scan(&adjust_mask);
6385
6386 /* skip the instruction if this vertex attribute is not being used */
6387 if (emit->info.input_usage_mask[index] == 0)
6388 continue;
6389
6390 unsigned tmp = emit->vs.adjusted_input[index];
6391 struct tgsi_full_src_register input_src =
6392 make_src_reg(TGSI_FILE_INPUT, index);
6393
6394 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
6395 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
6396 struct tgsi_full_dst_register tmp_dst_w =
6397 writemask_dst(&tmp_dst, TGSI_WRITEMASK_W);
6398
6399 /* ITOF/UTOF/MOV tmp, input[index] */
6400 if (save_itof_mask & (1 << index)) {
6401 emit_instruction_op1(emit, VGPU10_OPCODE_ITOF,
6402 &tmp_dst, &input_src, FALSE);
6403 }
6404 else if (save_utof_mask & (1 << index)) {
6405 emit_instruction_op1(emit, VGPU10_OPCODE_UTOF,
6406 &tmp_dst, &input_src, FALSE);
6407 }
6408 else if (save_puint_to_snorm_mask & (1 << index)) {
6409 emit_puint_to_snorm(emit, &tmp_dst, &input_src);
6410 }
6411 else if (save_puint_to_uscaled_mask & (1 << index)) {
6412 emit_puint_to_uscaled(emit, &tmp_dst, &input_src);
6413 }
6414 else if (save_puint_to_sscaled_mask & (1 << index)) {
6415 emit_puint_to_sscaled(emit, &tmp_dst, &input_src);
6416 }
6417 else {
6418 assert((save_w_1_mask | save_is_bgra_mask) & (1 << index));
6419 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6420 &tmp_dst, &input_src, FALSE);
6421 }
6422
6423 if (save_is_bgra_mask & (1 << index)) {
6424 emit_swap_r_b(emit, &tmp_dst, &tmp_src);
6425 }
6426
6427 if (save_w_1_mask & (1 << index)) {
6428 /* MOV tmp.w, 1.0 */
6429 if (emit->key.vs.attrib_is_pure_int & (1 << index)) {
6430 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6431 &tmp_dst_w, &one_int, FALSE);
6432 }
6433 else {
6434 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6435 &tmp_dst_w, &one, FALSE);
6436 }
6437 }
6438 }
6439
6440 emit->key.vs.adjust_attrib_w_1 = save_w_1_mask;
6441 emit->key.vs.adjust_attrib_itof = save_itof_mask;
6442 emit->key.vs.adjust_attrib_utof = save_utof_mask;
6443 emit->key.vs.attrib_is_bgra = save_is_bgra_mask;
6444 emit->key.vs.attrib_puint_to_snorm = save_puint_to_snorm_mask;
6445 emit->key.vs.attrib_puint_to_uscaled = save_puint_to_uscaled_mask;
6446 emit->key.vs.attrib_puint_to_sscaled = save_puint_to_sscaled_mask;
6447 }
6448 }
6449
6450
6451 /**
6452 * Some common values like 0.0, 1.0, 0.5, etc. are frequently needed
6453 * to implement some instructions. We pre-allocate those values here
6454 * in the immediate constant buffer.
6455 */
6456 static void
6457 alloc_common_immediates(struct svga_shader_emitter_v10 *emit)
6458 {
6459 unsigned n = 0;
6460
6461 emit->common_immediate_pos[n++] =
6462 alloc_immediate_float4(emit, 0.0f, 1.0f, 0.5f, -1.0f);
6463
6464 if (emit->info.opcode_count[TGSI_OPCODE_LIT] > 0) {
6465 emit->common_immediate_pos[n++] =
6466 alloc_immediate_float4(emit, 128.0f, -128.0f, 0.0f, 0.0f);
6467 }
6468
6469 emit->common_immediate_pos[n++] =
6470 alloc_immediate_int4(emit, 0, 1, 0, -1);
6471
6472 if (emit->key.vs.attrib_puint_to_snorm) {
6473 emit->common_immediate_pos[n++] =
6474 alloc_immediate_float4(emit, -2.0f, 2.0f, 3.0f, -1.66666f);
6475 }
6476
6477 if (emit->key.vs.attrib_puint_to_uscaled) {
6478 emit->common_immediate_pos[n++] =
6479 alloc_immediate_float4(emit, 1023.0f, 3.0f, 0.0f, 0.0f);
6480 }
6481
6482 if (emit->key.vs.attrib_puint_to_sscaled) {
6483 emit->common_immediate_pos[n++] =
6484 alloc_immediate_int4(emit, 22, 12, 2, 0);
6485
6486 emit->common_immediate_pos[n++] =
6487 alloc_immediate_int4(emit, 22, 30, 0, 0);
6488 }
6489
6490 unsigned i;
6491
6492 for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
6493 if (emit->key.tex[i].texel_bias) {
6494 /* Replace the 0.0f components if more immediate float values are needed */
6495 emit->common_immediate_pos[n++] =
6496 alloc_immediate_float4(emit, 0.0001f, 0.0f, 0.0f, 0.0f);
6497 break;
6498 }
6499 }
6500
6501 assert(n <= ARRAY_SIZE(emit->common_immediate_pos));
6502 emit->num_common_immediates = n;
6503 }
6504
6505
6506 /**
6507 * Emit any extra/helper declarations/code that we might need between
6508 * the declaration section and code section.
6509 */
6510 static boolean
6511 emit_pre_helpers(struct svga_shader_emitter_v10 *emit)
6512 {
6513 /* Properties */
6514 if (emit->unit == PIPE_SHADER_GEOMETRY)
6515 emit_property_instructions(emit);
6516
6517 /* Declare inputs */
6518 if (!emit_input_declarations(emit))
6519 return FALSE;
6520
6521 /* Declare outputs */
6522 if (!emit_output_declarations(emit))
6523 return FALSE;
6524
6525 /* Declare temporary registers */
6526 emit_temporaries_declaration(emit);
6527
6528 /* Declare constant registers */
6529 emit_constant_declaration(emit);
6530
6531 /* Declare samplers and resources */
6532 emit_sampler_declarations(emit);
6533 emit_resource_declarations(emit);
6534
6535 /* Declare clip distance output registers */
6536 if (emit->unit == PIPE_SHADER_VERTEX ||
6537 emit->unit == PIPE_SHADER_GEOMETRY) {
6538 emit_clip_distance_declarations(emit);
6539 }
6540
6541 alloc_common_immediates(emit);
6542
6543 if (emit->unit == PIPE_SHADER_FRAGMENT &&
6544 emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS) {
6545 float alpha = emit->key.fs.alpha_ref;
6546 emit->fs.alpha_ref_index =
6547 alloc_immediate_float4(emit, alpha, alpha, alpha, alpha);
6548 }
6549
6550 /* Now, emit the constant block containing all the immediates
6551 * declared by the shader, as well as the extra ones seen above.
6552 */
6553 emit_vgpu10_immediates_block(emit);
6554
6555 if (emit->unit == PIPE_SHADER_FRAGMENT) {
6556 emit_frontface_instructions(emit);
6557 emit_fragcoord_instructions(emit);
6558 emit_sample_position_instructions(emit);
6559 }
6560 else if (emit->unit == PIPE_SHADER_VERTEX) {
6561 emit_vertex_attrib_instructions(emit);
6562 }
6563
6564 return TRUE;
6565 }
6566
6567
6568 /**
6569 * The device has no direct support for the pipe_blend_state::alpha_to_one
6570 * option so we implement it here with shader code.
6571 *
6572 * Note that this is kind of pointless, actually. Here we're clobbering
6573 * the alpha value with 1.0. So if alpha-to-coverage is enabled, we'll wind
6574 * up with 100% coverage. That's almost certainly not what the user wants.
6575 * The work-around is to add extra shader code to compute coverage from alpha
6576 * and write it to the coverage output register (if the user's shader doesn't
6577 * do so already). We'll probably do that in the future.
6578 */
6579 static void
6580 emit_alpha_to_one_instructions(struct svga_shader_emitter_v10 *emit,
6581 unsigned fs_color_tmp_index)
6582 {
6583 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
6584 unsigned i;
6585
6586 /* Note: it's not 100% clear from the spec if we're supposed to clobber
6587 * the alpha for all render targets. But that's what NVIDIA does and
6588 * that's what Piglit tests.
6589 */
6590 for (i = 0; i < emit->fs.num_color_outputs; i++) {
6591 struct tgsi_full_dst_register color_dst;
6592
6593 if (fs_color_tmp_index != INVALID_INDEX && i == 0) {
6594 /* write to the temp color register */
6595 color_dst = make_dst_temp_reg(fs_color_tmp_index);
6596 }
6597 else {
6598 /* write directly to the color[i] output */
6599 color_dst = make_dst_output_reg(emit->fs.color_out_index[i]);
6600 }
6601
6602 color_dst = writemask_dst(&color_dst, TGSI_WRITEMASK_W);
6603
6604 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst, &one, FALSE);
6605 }
6606 }
6607
6608
6609 /**
6610 * Emit alpha test code. This compares TEMP[fs_color_tmp_index].w
6611 * against the alpha reference value and discards the fragment if the
6612 * comparison fails.
6613 */
6614 static void
6615 emit_alpha_test_instructions(struct svga_shader_emitter_v10 *emit,
6616 unsigned fs_color_tmp_index)
6617 {
6618 /* compare output color's alpha to alpha ref and kill */
6619 unsigned tmp = get_temp_index(emit);
6620 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
6621 struct tgsi_full_src_register tmp_src_x =
6622 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
6623 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
6624 struct tgsi_full_src_register color_src =
6625 make_src_temp_reg(fs_color_tmp_index);
6626 struct tgsi_full_src_register color_src_w =
6627 scalar_src(&color_src, TGSI_SWIZZLE_W);
6628 struct tgsi_full_src_register ref_src =
6629 make_src_immediate_reg(emit->fs.alpha_ref_index);
6630 struct tgsi_full_dst_register color_dst =
6631 make_dst_output_reg(emit->fs.color_out_index[0]);
6632
6633 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6634
6635 /* dst = src0 'alpha_func' src1 */
6636 emit_comparison(emit, emit->key.fs.alpha_func, &tmp_dst,
6637 &color_src_w, &ref_src);
6638
6639 /* DISCARD if dst.x == 0 */
6640 begin_emit_instruction(emit);
6641 emit_discard_opcode(emit, FALSE); /* discard if src0.x is zero */
6642 emit_src_register(emit, &tmp_src_x);
6643 end_emit_instruction(emit);
6644
6645 /* If we don't need to broadcast the color below, emit the final color here.
6646 */
6647 if (emit->key.fs.write_color0_to_n_cbufs <= 1) {
6648 /* MOV output.color, tempcolor */
6649 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst,
6650 &color_src, FALSE); /* XXX saturate? */
6651 }
6652
6653 free_temp_indexes(emit);
6654 }
6655
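/*
 * Illustrative sketch (operand details depend on emit_comparison()): with
 * alpha_func = SVGA3D_CMP_GREATER the code above emits roughly
 *
 *   <compare> tmp, color.wwww, ref     (tmp.x = ~0 if the test passes, else 0)
 *   DISCARD   tmp.x                    (zero-test variant: kill when tmp.x == 0)
 *   MOV       output.color0, color     (only when no color broadcast follows)
 */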
6656
6657 /**
6658 * Emit instructions for writing a single color output to multiple
6659 * color buffers.
6660 * This is used when the TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS property
6661 * is set and the number of render targets is greater than one, or when
6662 * key.fs.white_fragments is true.
6663 * \param fs_color_tmp_index index of the temp register that holds the
6664 * color to broadcast.
6665 */
6666 static void
6667 emit_broadcast_color_instructions(struct svga_shader_emitter_v10 *emit,
6668 unsigned fs_color_tmp_index)
6669 {
6670 const unsigned n = emit->key.fs.write_color0_to_n_cbufs;
6671 unsigned i;
6672 struct tgsi_full_src_register color_src;
6673
6674 if (emit->key.fs.white_fragments) {
6675 /* set all color outputs to white */
6676 color_src = make_immediate_reg_float(emit, 1.0f);
6677 }
6678 else {
6679 /* set all color outputs to TEMP[fs_color_tmp_index] */
6680 assert(fs_color_tmp_index != INVALID_INDEX);
6681 color_src = make_src_temp_reg(fs_color_tmp_index);
6682 }
6683
6684 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6685
6686 for (i = 0; i < n; i++) {
6687 unsigned output_reg = emit->fs.color_out_index[i];
6688 struct tgsi_full_dst_register color_dst =
6689 make_dst_output_reg(output_reg);
6690
6691 /* Fill in this semantic here since we'll use it later in
6692 * emit_dst_register().
6693 */
6694 emit->info.output_semantic_name[output_reg] = TGSI_SEMANTIC_COLOR;
6695
6696 /* MOV output.color[i], tempcolor */
6697 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst,
6698 &color_src, FALSE); /* XXX saturate? */
6699 }
6700 }
6701
6702
6703 /**
6704 * Emit extra helper code after the original shader code, but before the
6705 * last END/RET instruction.
6706 * For vertex shaders this means emitting the extra code to apply the
6707 * prescale scale/translation.
6708 */
6709 static boolean
6710 emit_post_helpers(struct svga_shader_emitter_v10 *emit)
6711 {
6712 if (emit->unit == PIPE_SHADER_VERTEX) {
6713 emit_vertex_instructions(emit);
6714 }
6715 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
6716 const unsigned fs_color_tmp_index = emit->fs.color_tmp_index;
6717
6718 assert(!(emit->key.fs.white_fragments &&
6719 emit->key.fs.write_color0_to_n_cbufs == 0));
6720
6721 /* We no longer want emit_dst_register() to substitute the
6722 * temporary fragment color register for the real color output.
6723 */
6724 emit->fs.color_tmp_index = INVALID_INDEX;
6725
6726 if (emit->key.fs.alpha_to_one) {
6727 emit_alpha_to_one_instructions(emit, fs_color_tmp_index);
6728 }
6729 if (emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS) {
6730 emit_alpha_test_instructions(emit, fs_color_tmp_index);
6731 }
6732 if (emit->key.fs.write_color0_to_n_cbufs > 1 ||
6733 emit->key.fs.white_fragments) {
6734 emit_broadcast_color_instructions(emit, fs_color_tmp_index);
6735 }
6736 }
6737
6738 return TRUE;
6739 }
6740
6741
6742 /**
6743 * Translate the TGSI tokens into VGPU10 tokens.
6744 */
6745 static boolean
6746 emit_vgpu10_instructions(struct svga_shader_emitter_v10 *emit,
6747 const struct tgsi_token *tokens)
6748 {
6749 struct tgsi_parse_context parse;
6750 boolean ret = TRUE;
6751 boolean pre_helpers_emitted = FALSE;
6752 unsigned inst_number = 0;
6753
6754 tgsi_parse_init(&parse, tokens);
6755
6756 while (!tgsi_parse_end_of_tokens(&parse)) {
6757 tgsi_parse_token(&parse);
6758
6759 switch (parse.FullToken.Token.Type) {
6760 case TGSI_TOKEN_TYPE_IMMEDIATE:
6761 ret = emit_vgpu10_immediate(emit, &parse.FullToken.FullImmediate);
6762 if (!ret)
6763 goto done;
6764 break;
6765
6766 case TGSI_TOKEN_TYPE_DECLARATION:
6767 ret = emit_vgpu10_declaration(emit, &parse.FullToken.FullDeclaration);
6768 if (!ret)
6769 goto done;
6770 break;
6771
6772 case TGSI_TOKEN_TYPE_INSTRUCTION:
6773 if (!pre_helpers_emitted) {
6774 ret = emit_pre_helpers(emit);
6775 if (!ret)
6776 goto done;
6777 pre_helpers_emitted = TRUE;
6778 }
6779 ret = emit_vgpu10_instruction(emit, inst_number++,
6780 &parse.FullToken.FullInstruction);
6781 if (!ret)
6782 goto done;
6783 break;
6784
6785 case TGSI_TOKEN_TYPE_PROPERTY:
6786 ret = emit_vgpu10_property(emit, &parse.FullToken.FullProperty);
6787 if (!ret)
6788 goto done;
6789 break;
6790
6791 default:
6792 break;
6793 }
6794 }
6795
6796 done:
6797 tgsi_parse_free(&parse);
6798 return ret;
6799 }
6800
6801
6802 /**
6803 * Emit the first VGPU10 shader tokens.
6804 */
6805 static boolean
6806 emit_vgpu10_header(struct svga_shader_emitter_v10 *emit)
6807 {
6808 VGPU10ProgramToken ptoken;
6809
6810 /* First token: VGPU10ProgramToken (version info, program type (VS,GS,PS)) */
6811 ptoken.majorVersion = emit->version / 10;
6812 ptoken.minorVersion = emit->version % 10;
6813 ptoken.programType = translate_shader_type(emit->unit);
6814 if (!emit_dword(emit, ptoken.value))
6815 return FALSE;
6816
6817 /* Second token: total length of shader, in tokens. We can't fill this
6818 * in until we're all done. Emit zero for now.
6819 */
6820 return emit_dword(emit, 0);
6821 }
6822
6823
6824 static boolean
6825 emit_vgpu10_tail(struct svga_shader_emitter_v10 *emit)
6826 {
6827 VGPU10ProgramToken *tokens;
6828
6829 /* Replace the second token with total shader length */
6830 tokens = (VGPU10ProgramToken *) emit->buf;
6831 tokens[1].value = emit_get_num_tokens(emit);
6832
6833 return TRUE;
6834 }
6835
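/*
 * Illustrative layout of the two header tokens written by emit_vgpu10_header()
 * and patched by emit_vgpu10_tail() (a sketch, not a full description of the
 * VGPU10 binary format):
 *
 *   token[0]: majorVersion/minorVersion (4.0 or 4.1) and programType (VS/GS/PS)
 *   token[1]: total shader length in tokens, emitted as 0 up front and
 *             replaced with emit_get_num_tokens(emit) once emission is done.
 */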
6836
6837 /**
6838 * Modify the FS to read the BCOLORs and use the FACE register
6839 * to choose between the front/back colors.
6840 */
6841 static const struct tgsi_token *
6842 transform_fs_twoside(const struct tgsi_token *tokens)
6843 {
6844 if (0) {
6845 debug_printf("Before tgsi_add_two_side ------------------\n");
6846 tgsi_dump(tokens, 0);
6847 }
6848 tokens = tgsi_add_two_side(tokens);
6849 if (0) {
6850 debug_printf("After tgsi_add_two_side ------------------\n");
6851 tgsi_dump(tokens, 0);
6852 }
6853 return tokens;
6854 }
6855
6856
6857 /**
6858 * Modify the FS to do polygon stipple.
6859 */
6860 static const struct tgsi_token *
6861 transform_fs_pstipple(struct svga_shader_emitter_v10 *emit,
6862 const struct tgsi_token *tokens)
6863 {
6864 const struct tgsi_token *new_tokens;
6865 unsigned unit;
6866
6867 if (0) {
6868 debug_printf("Before pstipple ------------------\n");
6869 tgsi_dump(tokens, 0);
6870 }
6871
6872 new_tokens = util_pstipple_create_fragment_shader(tokens, &unit, 0,
6873 TGSI_FILE_INPUT);
6874
6875 emit->fs.pstipple_sampler_unit = unit;
6876
6877 /* Setup texture state for stipple */
6878 emit->sampler_target[unit] = TGSI_TEXTURE_2D;
6879 emit->key.tex[unit].swizzle_r = TGSI_SWIZZLE_X;
6880 emit->key.tex[unit].swizzle_g = TGSI_SWIZZLE_Y;
6881 emit->key.tex[unit].swizzle_b = TGSI_SWIZZLE_Z;
6882 emit->key.tex[unit].swizzle_a = TGSI_SWIZZLE_W;
6883
6884 if (0) {
6885 debug_printf("After pstipple ------------------\n");
6886 tgsi_dump(new_tokens, 0);
6887 }
6888
6889 return new_tokens;
6890 }
6891
6892 /**
6893 * Modify the FS to support anti-aliased points.
6894 */
6895 static const struct tgsi_token *
6896 transform_fs_aapoint(const struct tgsi_token *tokens,
6897 int aa_coord_index)
6898 {
6899 if (0) {
6900 debug_printf("Before tgsi_add_aa_point ------------------\n");
6901 tgsi_dump(tokens, 0);
6902 }
6903 tokens = tgsi_add_aa_point(tokens, aa_coord_index);
6904 if (0) {
6905 debug_printf("After tgsi_add_aa_point ------------------\n");
6906 tgsi_dump(tokens, 0);
6907 }
6908 return tokens;
6909 }
6910
6911 /**
6912 * This is the main entrypoint for the TGSI -> VGPU10 translator.
6913 */
6914 struct svga_shader_variant *
6915 svga_tgsi_vgpu10_translate(struct svga_context *svga,
6916 const struct svga_shader *shader,
6917 const struct svga_compile_key *key,
6918 enum pipe_shader_type unit)
6919 {
6920 struct svga_shader_variant *variant = NULL;
6921 struct svga_shader_emitter_v10 *emit;
6922 const struct tgsi_token *tokens = shader->tokens;
6923 struct svga_vertex_shader *vs = svga->curr.vs;
6924 struct svga_geometry_shader *gs = svga->curr.gs;
6925
6926 assert(unit == PIPE_SHADER_VERTEX ||
6927 unit == PIPE_SHADER_GEOMETRY ||
6928 unit == PIPE_SHADER_FRAGMENT);
6929
6930 /* These two flags cannot be used together */
6931 assert(key->vs.need_prescale + key->vs.undo_viewport <= 1);
6932
6933 SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_TGSIVGPU10TRANSLATE);
6934 /*
6935 * Setup the code emitter
6936 */
6937 emit = alloc_emitter();
6938 if (!emit)
6939 goto done;
6940
6941 emit->unit = unit;
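   /* Emit SM4.1 (version 41) tokens when the device supports it,
    * otherwise SM4.0 (version 40).
    */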
6942 emit->version = svga_have_sm4_1(svga) ? 41 : 40;
6943
6944 emit->key = *key;
6945
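   /* Initialize the helper register indexes to "unassigned"; they are
    * filled in later as the corresponding declarations and instructions
    * are emitted.
    */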
6946 emit->vposition.need_prescale = (emit->key.vs.need_prescale ||
6947 emit->key.gs.need_prescale);
6948 emit->vposition.tmp_index = INVALID_INDEX;
6949 emit->vposition.so_index = INVALID_INDEX;
6950 emit->vposition.out_index = INVALID_INDEX;
6951
6952 emit->fs.color_tmp_index = INVALID_INDEX;
6953 emit->fs.face_input_index = INVALID_INDEX;
6954 emit->fs.fragcoord_input_index = INVALID_INDEX;
6955 emit->fs.sample_id_sys_index = INVALID_INDEX;
6956 emit->fs.sample_pos_sys_index = INVALID_INDEX;
6957
6958 emit->gs.prim_id_index = INVALID_INDEX;
6959
6960 emit->clip_dist_out_index = INVALID_INDEX;
6961 emit->clip_dist_tmp_index = INVALID_INDEX;
6962 emit->clip_dist_so_index = INVALID_INDEX;
6963 emit->clip_vertex_out_index = INVALID_INDEX;
6964
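   /* Treat an unspecified alpha test function as "always pass" */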
6965 if (emit->key.fs.alpha_func == SVGA3D_CMP_INVALID) {
6966 emit->key.fs.alpha_func = SVGA3D_CMP_ALWAYS;
6967 }
6968
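   /* Apply any TGSI-to-TGSI transformations this variant needs
    * (two-sided color selection, polygon stipple, antialiased points)
    * before translating to VGPU10.
    */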
6969 if (unit == PIPE_SHADER_FRAGMENT) {
6970 if (key->fs.light_twoside) {
6971 tokens = transform_fs_twoside(tokens);
6972 }
6973 if (key->fs.pstipple) {
6974 const struct tgsi_token *new_tokens =
6975 transform_fs_pstipple(emit, tokens);
6976 if (tokens != shader->tokens) {
6977 /* free the two-sided shader tokens */
6978 tgsi_free_tokens(tokens);
6979 }
6980 tokens = new_tokens;
6981 }
6982 if (key->fs.aa_point) {
6983 tokens = transform_fs_aapoint(tokens, key->fs.aa_point_coord_index);
6984 }
6985 }
6986
6987 if (SVGA_DEBUG & DEBUG_TGSI) {
6988 debug_printf("#####################################\n");
6989 debug_printf("### TGSI Shader %u\n", shader->id);
6990 tgsi_dump(tokens, 0);
6991 }
6992
6993 /**
6994     * Rescan the shader info if the token string is different from the
6995     * one included in the shader; otherwise, the info is already up to date.
6996 */
6997 if (tokens != shader->tokens) {
6998 tgsi_scan_shader(tokens, &emit->info);
6999 } else {
7000 emit->info = shader->info;
7001 }
7002
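   /* Start with the TGSI-declared output count; extra outputs may be
    * reserved below for stream output and clip distance shadow copies.
    */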
7003 emit->num_outputs = emit->info.num_outputs;
7004
7005 if (unit == PIPE_SHADER_FRAGMENT) {
7006 /* Compute FS input remapping to match the output from VS/GS */
7007 if (gs) {
7008 svga_link_shaders(&gs->base.info, &emit->info, &emit->linkage);
7009 } else {
7010 assert(vs);
7011 svga_link_shaders(&vs->base.info, &emit->info, &emit->linkage);
7012 }
7013 } else if (unit == PIPE_SHADER_GEOMETRY) {
7014 assert(vs);
7015 svga_link_shaders(&vs->base.info, &emit->info, &emit->linkage);
7016 }
7017
7018    /* Since the vertex shader does not go through the linker to
7019     * establish the input map, we need to make sure the highest
7020     * input register index is set properly here.
7021 */
7022 emit->linkage.input_map_max = MAX2((int)emit->linkage.input_map_max,
7023 emit->info.file_max[TGSI_FILE_INPUT]);
7024
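   /* Decide how clipping will be handled for this shader; this sets
    * emit->clip_mode.
    */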
7025 determine_clipping_mode(emit);
7026
7027 if (unit == PIPE_SHADER_GEOMETRY || unit == PIPE_SHADER_VERTEX) {
7028 if (shader->stream_output != NULL || emit->clip_mode == CLIP_DISTANCE) {
7029       /* If there are stream output declarations associated
7030        * with this shader, or the shader writes to ClipDistance,
7031        * then reserve extra registers for the non-adjusted vertex
7032        * position and the ClipDistance shadow copy.
7033 */
7034 emit->vposition.so_index = emit->num_outputs++;
7035
7036 if (emit->clip_mode == CLIP_DISTANCE) {
7037 emit->clip_dist_so_index = emit->num_outputs++;
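            /* More than 4 clip distances occupy two vec4 output registers,
             * so reserve a second shadow-copy output.
             */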
7038 if (emit->info.num_written_clipdistance > 4)
7039 emit->num_outputs++;
7040 }
7041 }
7042 }
7043
7044 /*
7045 * Do actual shader translation.
7046 */
7047 if (!emit_vgpu10_header(emit)) {
7048 debug_printf("svga: emit VGPU10 header failed\n");
7049 goto cleanup;
7050 }
7051
7052 if (!emit_vgpu10_instructions(emit, tokens)) {
7053 debug_printf("svga: emit VGPU10 instructions failed\n");
7054 goto cleanup;
7055 }
7056
7057 if (!emit_vgpu10_tail(emit)) {
7058 debug_printf("svga: emit VGPU10 tail failed\n");
7059 goto cleanup;
7060 }
7061
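   /* Give up if a register index overflowed the supported range
    * during emission.
    */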
7062 if (emit->register_overflow) {
7063 goto cleanup;
7064 }
7065
7066 /*
7067     * Create and initialize the 'variant' object.
7068 */
7069 variant = svga_new_shader_variant(svga, unit);
7070 if (!variant)
7071 goto cleanup;
7072
7073 variant->shader = shader;
7074 variant->nr_tokens = emit_get_num_tokens(emit);
7075 variant->tokens = (const unsigned *)emit->buf;
7076    emit->buf = NULL;  /* buffer is no longer owned by the emitter context */
7077 memcpy(&variant->key, key, sizeof(*key));
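   /* No device shader id has been allocated for this variant yet */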
7078 variant->id = UTIL_BITMASK_INVALID_INDEX;
7079
7080    /* The extra constants start at an offset equal to the number of
7081     * shader constants declared in the shader.
7082 */
7083 variant->extra_const_start = emit->num_shader_consts[0];
7084 if (key->gs.wide_point) {
7085 /**
7086        * The extra constant added in the transformed shader for the
7087        * inverse viewport scale is supplied by the driver, so the
7088        * extra constant starting offset needs to be reduced by 1.
7089 */
7090 assert(variant->extra_const_start > 0);
7091 variant->extra_const_start--;
7092 }
7093
7094 variant->pstipple_sampler_unit = emit->fs.pstipple_sampler_unit;
7095
7096 /* If there was exactly one write to a fragment shader output register
7097 * and it came from a constant buffer, we know all fragments will have
7098 * the same color (except for blending).
7099 */
7100 variant->constant_color_output =
7101 emit->constant_color_output && emit->num_output_writes == 1;
7102
7103    /** Keep track in the variant of whether flat interpolation is used
7104     * for any of the varyings.
7105 */
7106 variant->uses_flat_interp = emit->uses_flat_interp;
7107
7108 variant->fs_shadow_compare_units = emit->fs.shadow_compare_units;
7111
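   /* Free the locally transformed token string, if we created one above */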
7112 if (tokens != shader->tokens) {
7113 tgsi_free_tokens(tokens);
7114 }
7115
7116 cleanup:
7117 free_emitter(emit);
7118
7119 done:
7120 SVGA_STATS_TIME_POP(svga_sws(svga));
7121 return variant;
7122 }