svga: update driver for version 10 GPU interface
[mesa.git] / src / gallium / drivers / svga / svga_tgsi_vgpu10.c
1 /**********************************************************
2 * Copyright 1998-2013 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 /**
27 * @file svga_tgsi_vgpu10.c
28 *
29 * TGSI -> VGPU10 shader translation.
30 *
31 * \author Mingcheng Chen
32 * \author Brian Paul
33 */
34
35 #include "pipe/p_compiler.h"
36 #include "pipe/p_shader_tokens.h"
37 #include "pipe/p_defines.h"
38 #include "tgsi/tgsi_build.h"
39 #include "tgsi/tgsi_dump.h"
40 #include "tgsi/tgsi_info.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_scan.h"
43 #include "tgsi/tgsi_two_side.h"
44 #include "tgsi/tgsi_aa_point.h"
45 #include "tgsi/tgsi_util.h"
46 #include "util/u_math.h"
47 #include "util/u_memory.h"
48 #include "util/u_bitmask.h"
49 #include "util/u_debug.h"
50 #include "util/u_pstipple.h"
51
52 #include "svga_context.h"
53 #include "svga_debug.h"
54 #include "svga_link.h"
55 #include "svga_shader.h"
56 #include "svga_tgsi.h"
57
58 #include "VGPU10ShaderTokens.h"
59
60
61 #define INVALID_INDEX 99999
62 #define MAX_INTERNAL_TEMPS 3
63 #define MAX_SYSTEM_VALUES 4
64 #define MAX_IMMEDIATE_COUNT \
65 (VGPU10_MAX_IMMEDIATE_CONSTANT_BUFFER_ELEMENT_COUNT/4)
66 #define MAX_TEMP_ARRAYS 64 /* Enough? */
67
68
69 /**
70  * Clipping is complicated.  There are four different cases which we
71 * handle during VS/GS shader translation:
72 */
73 enum clipping_mode
74 {
75 CLIP_NONE, /**< No clipping enabled */
76 CLIP_LEGACY, /**< The shader has no clipping declarations or code but
77 * one or more user-defined clip planes are enabled. We
78 * generate extra code to emit clip distances.
79 */
80 CLIP_DISTANCE, /**< The shader already declares clip distance output
81 * registers and has code to write to them.
82 */
83 CLIP_VERTEX /**< The shader declares a clip vertex output register and
84 * has code that writes to the register. We convert the
85 * clipvertex position into one or more clip distances.
86 */
87 };
88
89
90 struct svga_shader_emitter_v10
91 {
92 /* The token output buffer */
93 unsigned size;
94 char *buf;
95 char *ptr;
96
97 /* Information about the shader and state (does not change) */
98 struct svga_compile_key key;
99 struct tgsi_shader_info info;
100 unsigned unit;
101
102 unsigned inst_start_token;
103 boolean discard_instruction; /**< throw away current instruction? */
104
105 union tgsi_immediate_data immediates[MAX_IMMEDIATE_COUNT][4];
106 unsigned num_immediates; /**< Number of immediates emitted */
107 unsigned common_immediate_pos[8]; /**< literals for common immediates */
108 unsigned num_common_immediates;
109 boolean immediates_emitted;
110
111    unsigned num_outputs;      /**< includes any extra outputs */
112                   /** The first extra output is reserved for
113                     * the non-adjusted vertex position, for
114                     * stream output purposes
115                     */
116
117 /* Temporary Registers */
118 unsigned num_shader_temps; /**< num of temps used by original shader */
119 unsigned internal_temp_count; /**< currently allocated internal temps */
120 struct {
121 unsigned start, size;
122 } temp_arrays[MAX_TEMP_ARRAYS];
123 unsigned num_temp_arrays;
124
125 /** Map TGSI temp registers to VGPU10 temp array IDs and indexes */
126 struct {
127 unsigned arrayId, index;
128 } temp_map[VGPU10_MAX_TEMPS]; /**< arrayId, element */
129
130 /** Number of constants used by original shader for each constant buffer.
131     *  The size should probably always match that of svga_state.constbufs.
132 */
133 unsigned num_shader_consts[SVGA_MAX_CONST_BUFS];
134
135 /* Samplers */
136 unsigned num_samplers;
137
138 /* Address regs (really implemented with temps) */
139 unsigned num_address_regs;
140 unsigned address_reg_index[MAX_VGPU10_ADDR_REGS];
141
142 /* Output register usage masks */
143 ubyte output_usage_mask[PIPE_MAX_SHADER_OUTPUTS];
144
145 /* To map TGSI system value index to VGPU shader input indexes */
146 ubyte system_value_indexes[MAX_SYSTEM_VALUES];
147
148 struct {
149 /* vertex position scale/translation */
150 unsigned out_index; /**< the real position output reg */
151 unsigned tmp_index; /**< the fake/temp position output reg */
152 unsigned so_index; /**< the non-adjusted position output reg */
153 unsigned prescale_scale_index, prescale_trans_index;
154 boolean need_prescale;
155 } vposition;
156
157 /* For vertex shaders only */
158 struct {
159 /* viewport constant */
160 unsigned viewport_index;
161
162 /* temp index of adjusted vertex attributes */
163 unsigned adjusted_input[PIPE_MAX_SHADER_INPUTS];
164 } vs;
165
166 /* For fragment shaders only */
167 struct {
168       /* alpha test */
169 unsigned color_out_index[PIPE_MAX_COLOR_BUFS]; /**< the real color output regs */
170 unsigned color_tmp_index; /**< fake/temp color output reg */
171 unsigned alpha_ref_index; /**< immediate constant for alpha ref */
172
173 /* front-face */
174 unsigned face_input_index; /**< real fragment shader face reg (bool) */
175 unsigned face_tmp_index; /**< temp face reg converted to -1 / +1 */
176
177 unsigned pstipple_sampler_unit;
178
179 unsigned fragcoord_input_index; /**< real fragment position input reg */
180 unsigned fragcoord_tmp_index; /**< 1/w modified position temp reg */
181 } fs;
182
183 /* For geometry shaders only */
184 struct {
185 VGPU10_PRIMITIVE prim_type;/**< VGPU10 primitive type */
186 VGPU10_PRIMITIVE_TOPOLOGY prim_topology; /**< VGPU10 primitive topology */
187 unsigned input_size; /**< size of input arrays */
188 unsigned prim_id_index; /**< primitive id register index */
189 unsigned max_out_vertices; /**< maximum number of output vertices */
190 } gs;
191
192 /* For vertex or geometry shaders */
193 enum clipping_mode clip_mode;
194 unsigned clip_dist_out_index; /**< clip distance output register index */
195 unsigned clip_dist_tmp_index; /**< clip distance temporary register */
196 unsigned clip_dist_so_index; /**< clip distance shadow copy */
197
198 /** Index of temporary holding the clipvertex coordinate */
199 unsigned clip_vertex_out_index; /**< clip vertex output register index */
200 unsigned clip_vertex_tmp_index; /**< clip vertex temporary index */
201
202 /* user clip plane constant slot indexes */
203 unsigned clip_plane_const[PIPE_MAX_CLIP_PLANES];
204
205 boolean uses_flat_interp;
206
207 /* For all shaders: const reg index for RECT coord scaling */
208 unsigned texcoord_scale_index[PIPE_MAX_SAMPLERS];
209
210 /* For all shaders: const reg index for texture buffer size */
211 unsigned texture_buffer_size_index[PIPE_MAX_SAMPLERS];
212
213 /* VS/GS/FS Linkage info */
214 struct shader_linkage linkage;
215
216 bool register_overflow; /**< Set if we exceed a VGPU10 register limit */
217 };
218
219
220 static boolean
221 emit_post_helpers(struct svga_shader_emitter_v10 *emit);
222
223 static boolean
224 emit_vertex(struct svga_shader_emitter_v10 *emit,
225 const struct tgsi_full_instruction *inst);
226
227 static char err_buf[128];
228
229 static boolean
230 expand(struct svga_shader_emitter_v10 *emit)
231 {
232 char *new_buf;
233 unsigned newsize = emit->size * 2;
234
235 if (emit->buf != err_buf)
236 new_buf = REALLOC(emit->buf, emit->size, newsize);
237 else
238 new_buf = NULL;
239
240 if (new_buf == NULL) {
241 emit->ptr = err_buf;
242 emit->buf = err_buf;
243 emit->size = sizeof(err_buf);
244 return FALSE;
245 }
246
247 emit->size = newsize;
248 emit->ptr = new_buf + (emit->ptr - emit->buf);
249 emit->buf = new_buf;
250 return TRUE;
251 }
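
/*
 * If the reallocation fails, the emitter falls back to the static
 * err_buf so that subsequent emit_dword() calls still have somewhere
 * to write; expand() keeps returning FALSE in that state and the
 * translation ultimately fails cleanly.
 */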
252
253 /**
254 * Create and initialize a new svga_shader_emitter_v10 object.
255 */
256 static struct svga_shader_emitter_v10 *
257 alloc_emitter(void)
258 {
259 struct svga_shader_emitter_v10 *emit = CALLOC(1, sizeof(*emit));
260
261 if (!emit)
262 return NULL;
263
264 /* to initialize the output buffer */
265 emit->size = 512;
266 if (!expand(emit)) {
267 FREE(emit);
268 return NULL;
269 }
270 return emit;
271 }
272
273 /**
274 * Free an svga_shader_emitter_v10 object.
275 */
276 static void
277 free_emitter(struct svga_shader_emitter_v10 *emit)
278 {
279 assert(emit);
280 FREE(emit->buf); /* will be NULL if translation succeeded */
281 FREE(emit);
282 }
283
284 static inline boolean
285 reserve(struct svga_shader_emitter_v10 *emit,
286 unsigned nr_dwords)
287 {
288 while (emit->ptr - emit->buf + nr_dwords * sizeof(uint32) >= emit->size) {
289 if (!expand(emit))
290 return FALSE;
291 }
292
293 return TRUE;
294 }
295
296 static boolean
297 emit_dword(struct svga_shader_emitter_v10 *emit, uint32 dword)
298 {
299 if (!reserve(emit, 1))
300 return FALSE;
301
302 *(uint32 *)emit->ptr = dword;
303 emit->ptr += sizeof dword;
304 return TRUE;
305 }
306
307 static boolean
308 emit_dwords(struct svga_shader_emitter_v10 *emit,
309 const uint32 *dwords,
310 unsigned nr)
311 {
312 if (!reserve(emit, nr))
313 return FALSE;
314
315 memcpy(emit->ptr, dwords, nr * sizeof *dwords);
316 emit->ptr += nr * sizeof *dwords;
317 return TRUE;
318 }
319
320 /** Return the number of tokens in the emitter's buffer */
321 static unsigned
322 emit_get_num_tokens(const struct svga_shader_emitter_v10 *emit)
323 {
324 return (emit->ptr - emit->buf) / sizeof(unsigned);
325 }
326
327
328 /**
329 * Check for register overflow. If we overflow we'll set an
330 * error flag. This function can be called for register declarations
331  * or for uses of registers as src/dst instruction operands.
332  * \param operandType  the register type: one of VGPU10_OPERAND_TYPE_x
333  *                     or VGPU10_OPCODE_DCL_x
334 * \param index the register index
335 */
336 static void
337 check_register_index(struct svga_shader_emitter_v10 *emit,
338 unsigned operandType, unsigned index)
339 {
340 bool overflow_before = emit->register_overflow;
341
342 switch (operandType) {
343 case VGPU10_OPERAND_TYPE_TEMP:
344 case VGPU10_OPERAND_TYPE_INDEXABLE_TEMP:
345 case VGPU10_OPCODE_DCL_TEMPS:
346 if (index >= VGPU10_MAX_TEMPS) {
347 emit->register_overflow = TRUE;
348 }
349 break;
350 case VGPU10_OPERAND_TYPE_CONSTANT_BUFFER:
351 case VGPU10_OPCODE_DCL_CONSTANT_BUFFER:
352 if (index >= VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT) {
353 emit->register_overflow = TRUE;
354 }
355 break;
356 case VGPU10_OPERAND_TYPE_INPUT:
357 case VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID:
358 case VGPU10_OPCODE_DCL_INPUT:
359 case VGPU10_OPCODE_DCL_INPUT_SGV:
360 case VGPU10_OPCODE_DCL_INPUT_SIV:
361 case VGPU10_OPCODE_DCL_INPUT_PS:
362 case VGPU10_OPCODE_DCL_INPUT_PS_SGV:
363 case VGPU10_OPCODE_DCL_INPUT_PS_SIV:
364 if ((emit->unit == PIPE_SHADER_VERTEX &&
365 index >= VGPU10_MAX_VS_INPUTS) ||
366 (emit->unit == PIPE_SHADER_GEOMETRY &&
367 index >= VGPU10_MAX_GS_INPUTS) ||
368 (emit->unit == PIPE_SHADER_FRAGMENT &&
369 index >= VGPU10_MAX_FS_INPUTS)) {
370 emit->register_overflow = TRUE;
371 }
372 break;
373 case VGPU10_OPERAND_TYPE_OUTPUT:
374 case VGPU10_OPCODE_DCL_OUTPUT:
375 case VGPU10_OPCODE_DCL_OUTPUT_SGV:
376 case VGPU10_OPCODE_DCL_OUTPUT_SIV:
377 if ((emit->unit == PIPE_SHADER_VERTEX &&
378 index >= VGPU10_MAX_VS_OUTPUTS) ||
379 (emit->unit == PIPE_SHADER_GEOMETRY &&
380 index >= VGPU10_MAX_GS_OUTPUTS) ||
381 (emit->unit == PIPE_SHADER_FRAGMENT &&
382 index >= VGPU10_MAX_FS_OUTPUTS)) {
383 emit->register_overflow = TRUE;
384 }
385 break;
386 case VGPU10_OPERAND_TYPE_SAMPLER:
387 case VGPU10_OPCODE_DCL_SAMPLER:
388 if (index >= VGPU10_MAX_SAMPLERS) {
389 emit->register_overflow = TRUE;
390 }
391 break;
392 case VGPU10_OPERAND_TYPE_RESOURCE:
393 case VGPU10_OPCODE_DCL_RESOURCE:
394 if (index >= VGPU10_MAX_RESOURCES) {
395 emit->register_overflow = TRUE;
396 }
397 break;
398 case VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER:
399 if (index >= MAX_IMMEDIATE_COUNT) {
400 emit->register_overflow = TRUE;
401 }
402 break;
403 default:
404 assert(0);
405 ; /* nothing */
406 }
407
408 if (emit->register_overflow && !overflow_before) {
409 debug_printf("svga: vgpu10 register overflow (reg %u, index %u)\n",
410 operandType, index);
411 }
412 }
413
414
415 /**
416 * Examine misc state to determine the clipping mode.
417 */
418 static void
419 determine_clipping_mode(struct svga_shader_emitter_v10 *emit)
420 {
421 if (emit->info.num_written_clipdistance > 0) {
422 emit->clip_mode = CLIP_DISTANCE;
423 }
424 else if (emit->info.writes_clipvertex) {
425 emit->clip_mode = CLIP_VERTEX;
426 }
427 else if (emit->key.clip_plane_enable) {
428 emit->clip_mode = CLIP_LEGACY;
429 }
430 else {
431 emit->clip_mode = CLIP_NONE;
432 }
433 }
434
435
436 /**
437 * For clip distance register declarations and clip distance register
438 * writes we need to mask the declaration usage or instruction writemask
439 * (respectively) against the set of the really-enabled clipping planes.
440 *
441 * The piglit test spec/glsl-1.30/execution/clipping/vs-clip-distance-enables
442 * has a VS that writes to all 8 clip distance registers, but the plane enable
443 * flags are a subset of that.
444 *
445 * This function is used to apply the plane enable flags to the register
446 * declaration or instruction writemask.
447 *
448 * \param writemask the declaration usage mask or instruction writemask
449 * \param clip_reg_index which clip plane register is being declared/written.
450  *                       The legal values are 0 and 1 (four clip planes per
451 * register, for a total of 8 clip planes)
452 */
453 static unsigned
454 apply_clip_plane_mask(struct svga_shader_emitter_v10 *emit,
455 unsigned writemask, unsigned clip_reg_index)
456 {
457 unsigned shift;
458
459 assert(clip_reg_index < 2);
460
461 /* four clip planes per clip register: */
462 shift = clip_reg_index * 4;
463 writemask &= ((emit->key.clip_plane_enable >> shift) & 0xf);
464
465 return writemask;
466 }
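
/*
 * Worked example (illustrative): if key.clip_plane_enable == 0x33
 * (user planes 0, 1, 4 and 5 enabled) and clip_reg_index == 1, then
 * shift == 4 and the mask becomes (0x33 >> 4) & 0xf == 0x3, so only
 * the .xy components of the second clip-distance register get
 * declared/written.
 */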
467
468
469 /**
470 * Translate gallium shader type into VGPU10 type.
471 */
472 static VGPU10_PROGRAM_TYPE
473 translate_shader_type(unsigned type)
474 {
475 switch (type) {
476 case PIPE_SHADER_VERTEX:
477 return VGPU10_VERTEX_SHADER;
478 case PIPE_SHADER_GEOMETRY:
479 return VGPU10_GEOMETRY_SHADER;
480 case PIPE_SHADER_FRAGMENT:
481 return VGPU10_PIXEL_SHADER;
482 default:
483 assert(!"Unexpected shader type");
484 return VGPU10_VERTEX_SHADER;
485 }
486 }
487
488
489 /**
490 * Translate a TGSI_OPCODE_x into a VGPU10_OPCODE_x
491 * Note: we only need to translate the opcodes for "simple" instructions,
492 * as seen below. All other opcodes are handled/translated specially.
493 */
494 static VGPU10_OPCODE_TYPE
495 translate_opcode(unsigned opcode)
496 {
497 switch (opcode) {
498 case TGSI_OPCODE_MOV:
499 return VGPU10_OPCODE_MOV;
500 case TGSI_OPCODE_MUL:
501 return VGPU10_OPCODE_MUL;
502 case TGSI_OPCODE_ADD:
503 return VGPU10_OPCODE_ADD;
504 case TGSI_OPCODE_DP3:
505 return VGPU10_OPCODE_DP3;
506 case TGSI_OPCODE_DP4:
507 return VGPU10_OPCODE_DP4;
508 case TGSI_OPCODE_MIN:
509 return VGPU10_OPCODE_MIN;
510 case TGSI_OPCODE_MAX:
511 return VGPU10_OPCODE_MAX;
512 case TGSI_OPCODE_MAD:
513 return VGPU10_OPCODE_MAD;
514 case TGSI_OPCODE_SQRT:
515 return VGPU10_OPCODE_SQRT;
516 case TGSI_OPCODE_FRC:
517 return VGPU10_OPCODE_FRC;
518 case TGSI_OPCODE_FLR:
519 return VGPU10_OPCODE_ROUND_NI;
520 case TGSI_OPCODE_FSEQ:
521 return VGPU10_OPCODE_EQ;
522 case TGSI_OPCODE_FSGE:
523 return VGPU10_OPCODE_GE;
524 case TGSI_OPCODE_FSNE:
525 return VGPU10_OPCODE_NE;
526 case TGSI_OPCODE_DDX:
527 return VGPU10_OPCODE_DERIV_RTX;
528 case TGSI_OPCODE_DDY:
529 return VGPU10_OPCODE_DERIV_RTY;
530 case TGSI_OPCODE_RET:
531 return VGPU10_OPCODE_RET;
532 case TGSI_OPCODE_DIV:
533 return VGPU10_OPCODE_DIV;
534 case TGSI_OPCODE_IDIV:
535 return VGPU10_OPCODE_IDIV;
536 case TGSI_OPCODE_DP2:
537 return VGPU10_OPCODE_DP2;
538 case TGSI_OPCODE_BRK:
539 return VGPU10_OPCODE_BREAK;
540 case TGSI_OPCODE_IF:
541 return VGPU10_OPCODE_IF;
542 case TGSI_OPCODE_ELSE:
543 return VGPU10_OPCODE_ELSE;
544 case TGSI_OPCODE_ENDIF:
545 return VGPU10_OPCODE_ENDIF;
546 case TGSI_OPCODE_CEIL:
547 return VGPU10_OPCODE_ROUND_PI;
548 case TGSI_OPCODE_I2F:
549 return VGPU10_OPCODE_ITOF;
550 case TGSI_OPCODE_NOT:
551 return VGPU10_OPCODE_NOT;
552 case TGSI_OPCODE_TRUNC:
553 return VGPU10_OPCODE_ROUND_Z;
554 case TGSI_OPCODE_SHL:
555 return VGPU10_OPCODE_ISHL;
556 case TGSI_OPCODE_AND:
557 return VGPU10_OPCODE_AND;
558 case TGSI_OPCODE_OR:
559 return VGPU10_OPCODE_OR;
560 case TGSI_OPCODE_XOR:
561 return VGPU10_OPCODE_XOR;
562 case TGSI_OPCODE_CONT:
563 return VGPU10_OPCODE_CONTINUE;
564 case TGSI_OPCODE_EMIT:
565 return VGPU10_OPCODE_EMIT;
566 case TGSI_OPCODE_ENDPRIM:
567 return VGPU10_OPCODE_CUT;
568 case TGSI_OPCODE_BGNLOOP:
569 return VGPU10_OPCODE_LOOP;
570 case TGSI_OPCODE_ENDLOOP:
571 return VGPU10_OPCODE_ENDLOOP;
572 case TGSI_OPCODE_ENDSUB:
573 return VGPU10_OPCODE_RET;
574 case TGSI_OPCODE_NOP:
575 return VGPU10_OPCODE_NOP;
576 case TGSI_OPCODE_BREAKC:
577 return VGPU10_OPCODE_BREAKC;
578 case TGSI_OPCODE_END:
579 return VGPU10_OPCODE_RET;
580 case TGSI_OPCODE_F2I:
581 return VGPU10_OPCODE_FTOI;
582 case TGSI_OPCODE_IMAX:
583 return VGPU10_OPCODE_IMAX;
584 case TGSI_OPCODE_IMIN:
585 return VGPU10_OPCODE_IMIN;
586 case TGSI_OPCODE_UDIV:
587 case TGSI_OPCODE_UMOD:
588 case TGSI_OPCODE_MOD:
589 return VGPU10_OPCODE_UDIV;
590 case TGSI_OPCODE_IMUL_HI:
591 return VGPU10_OPCODE_IMUL;
592 case TGSI_OPCODE_INEG:
593 return VGPU10_OPCODE_INEG;
594 case TGSI_OPCODE_ISHR:
595 return VGPU10_OPCODE_ISHR;
596 case TGSI_OPCODE_ISGE:
597 return VGPU10_OPCODE_IGE;
598 case TGSI_OPCODE_ISLT:
599 return VGPU10_OPCODE_ILT;
600 case TGSI_OPCODE_F2U:
601 return VGPU10_OPCODE_FTOU;
602 case TGSI_OPCODE_UADD:
603 return VGPU10_OPCODE_IADD;
604 case TGSI_OPCODE_U2F:
605 return VGPU10_OPCODE_UTOF;
606 case TGSI_OPCODE_UCMP:
607 return VGPU10_OPCODE_MOVC;
608 case TGSI_OPCODE_UMAD:
609 return VGPU10_OPCODE_UMAD;
610 case TGSI_OPCODE_UMAX:
611 return VGPU10_OPCODE_UMAX;
612 case TGSI_OPCODE_UMIN:
613 return VGPU10_OPCODE_UMIN;
614 case TGSI_OPCODE_UMUL:
615 case TGSI_OPCODE_UMUL_HI:
616 return VGPU10_OPCODE_UMUL;
617 case TGSI_OPCODE_USEQ:
618 return VGPU10_OPCODE_IEQ;
619 case TGSI_OPCODE_USGE:
620 return VGPU10_OPCODE_UGE;
621 case TGSI_OPCODE_USHR:
622 return VGPU10_OPCODE_USHR;
623 case TGSI_OPCODE_USLT:
624 return VGPU10_OPCODE_ULT;
625 case TGSI_OPCODE_USNE:
626 return VGPU10_OPCODE_INE;
627 case TGSI_OPCODE_SWITCH:
628 return VGPU10_OPCODE_SWITCH;
629 case TGSI_OPCODE_CASE:
630 return VGPU10_OPCODE_CASE;
631 case TGSI_OPCODE_DEFAULT:
632 return VGPU10_OPCODE_DEFAULT;
633 case TGSI_OPCODE_ENDSWITCH:
634 return VGPU10_OPCODE_ENDSWITCH;
635 case TGSI_OPCODE_FSLT:
636 return VGPU10_OPCODE_LT;
637 case TGSI_OPCODE_ROUND:
638 return VGPU10_OPCODE_ROUND_NE;
639 default:
640 assert(!"Unexpected TGSI opcode in translate_opcode()");
641 return VGPU10_OPCODE_NOP;
642 }
643 }
644
645
646 /**
647 * Translate a TGSI register file type into a VGPU10 operand type.
648 * \param array is the TGSI_FILE_TEMPORARY register an array?
649 */
650 static VGPU10_OPERAND_TYPE
651 translate_register_file(enum tgsi_file_type file, boolean array)
652 {
653 switch (file) {
654 case TGSI_FILE_CONSTANT:
655 return VGPU10_OPERAND_TYPE_CONSTANT_BUFFER;
656 case TGSI_FILE_INPUT:
657 return VGPU10_OPERAND_TYPE_INPUT;
658 case TGSI_FILE_OUTPUT:
659 return VGPU10_OPERAND_TYPE_OUTPUT;
660 case TGSI_FILE_TEMPORARY:
661 return array ? VGPU10_OPERAND_TYPE_INDEXABLE_TEMP
662 : VGPU10_OPERAND_TYPE_TEMP;
663 case TGSI_FILE_IMMEDIATE:
664 /* all immediates are 32-bit values at this time so
665       * VGPU10_OPERAND_TYPE_IMMEDIATE64 is not needed.
666 */
667 return VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER;
668 case TGSI_FILE_SAMPLER:
669 return VGPU10_OPERAND_TYPE_SAMPLER;
670 case TGSI_FILE_SYSTEM_VALUE:
671 return VGPU10_OPERAND_TYPE_INPUT;
672
673 /* XXX TODO more cases to finish */
674
675 default:
676 assert(!"Bad tgsi register file!");
677 return VGPU10_OPERAND_TYPE_NULL;
678 }
679 }
680
681
682 /**
683 * Emit a null dst register
684 */
685 static void
686 emit_null_dst_register(struct svga_shader_emitter_v10 *emit)
687 {
688 VGPU10OperandToken0 operand;
689
690 operand.value = 0;
691 operand.operandType = VGPU10_OPERAND_TYPE_NULL;
692 operand.numComponents = VGPU10_OPERAND_0_COMPONENT;
693
694 emit_dword(emit, operand.value);
695 }
696
697
698 /**
699 * If the given register is a temporary, return the array ID.
700 * Else return zero.
701 */
702 static unsigned
703 get_temp_array_id(const struct svga_shader_emitter_v10 *emit,
704 unsigned file, unsigned index)
705 {
706 if (file == TGSI_FILE_TEMPORARY) {
707 return emit->temp_map[index].arrayId;
708 }
709 else {
710 return 0;
711 }
712 }
713
714
715 /**
716 * If the given register is a temporary, convert the index from a TGSI
717 * TEMPORARY index to a VGPU10 temp index.
718 */
719 static unsigned
720 remap_temp_index(const struct svga_shader_emitter_v10 *emit,
721 unsigned file, unsigned index)
722 {
723 if (file == TGSI_FILE_TEMPORARY) {
724 return emit->temp_map[index].index;
725 }
726 else {
727 return index;
728 }
729 }
730
731
732 /**
733 * Setup the operand0 fields related to indexing (1D, 2D, relative, etc).
734 * Note: the operandType field must already be initialized.
735 */
736 static VGPU10OperandToken0
737 setup_operand0_indexing(struct svga_shader_emitter_v10 *emit,
738 VGPU10OperandToken0 operand0,
739 unsigned file,
740 boolean indirect, boolean index2D,
741 unsigned tempArrayID)
742 {
743 unsigned indexDim, index0Rep, index1Rep = VGPU10_OPERAND_INDEX_0D;
744
745 /*
746 * Compute index dimensions
747 */
748 if (operand0.operandType == VGPU10_OPERAND_TYPE_IMMEDIATE32 ||
749 operand0.operandType == VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID) {
750 /* there's no swizzle for in-line immediates */
751 indexDim = VGPU10_OPERAND_INDEX_0D;
752 assert(operand0.selectionMode == 0);
753 }
754 else {
755 if (index2D ||
756 tempArrayID > 0 ||
757 operand0.operandType == VGPU10_OPERAND_TYPE_CONSTANT_BUFFER) {
758 indexDim = VGPU10_OPERAND_INDEX_2D;
759 }
760 else {
761 indexDim = VGPU10_OPERAND_INDEX_1D;
762 }
763 }
764
765 /*
766 * Compute index representations (immediate, relative, etc).
767 */
768 if (tempArrayID > 0) {
769 assert(file == TGSI_FILE_TEMPORARY);
770 /* First index is the array ID, second index is the array element */
771 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
772 if (indirect) {
773 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
774 }
775 else {
776 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
777 }
778 }
779 else if (indirect) {
780 if (file == TGSI_FILE_CONSTANT) {
781 /* index[0] indicates which constant buffer while index[1] indicates
782 * the position in the constant buffer.
783 */
784 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
785 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
786 }
787 else {
788 /* All other register files are 1-dimensional */
789 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE;
790 }
791 }
792 else {
793 index0Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
794 index1Rep = VGPU10_OPERAND_INDEX_IMMEDIATE32;
795 }
796
797 operand0.indexDimension = indexDim;
798 operand0.index0Representation = index0Rep;
799 operand0.index1Representation = index1Rep;
800
801 return operand0;
802 }
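
/*
 * For example, a constant-buffer source such as CONST[1][20] becomes a
 * 2D-indexed operand: index 0 is the constant buffer number (1) and
 * index 1 is the element within that buffer (20); the second index
 * switches to IMMEDIATE32_PLUS_RELATIVE when the element is addressed
 * indirectly.
 */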
803
804
805 /**
806 * Emit the operand for expressing an address register for indirect indexing.
807 * Note that the address register is really just a temp register.
808 * \param addr_reg_index which address register to use
809 */
810 static void
811 emit_indirect_register(struct svga_shader_emitter_v10 *emit,
812 unsigned addr_reg_index)
813 {
814 unsigned tmp_reg_index;
815 VGPU10OperandToken0 operand0;
816
817 assert(addr_reg_index < MAX_VGPU10_ADDR_REGS);
818
819 tmp_reg_index = emit->address_reg_index[addr_reg_index];
820
821 /* operand0 is a simple temporary register, selecting one component */
822 operand0.value = 0;
823 operand0.operandType = VGPU10_OPERAND_TYPE_TEMP;
824 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
825 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
826 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
827 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
828 operand0.swizzleX = 0;
829 operand0.swizzleY = 1;
830 operand0.swizzleZ = 2;
831 operand0.swizzleW = 3;
832
833 emit_dword(emit, operand0.value);
834 emit_dword(emit, remap_temp_index(emit, TGSI_FILE_TEMPORARY, tmp_reg_index));
835 }
836
837
838 /**
839 * Translate the dst register of a TGSI instruction and emit VGPU10 tokens.
840 * \param emit the emitter context
841 * \param reg the TGSI dst register to translate
842 */
843 static void
844 emit_dst_register(struct svga_shader_emitter_v10 *emit,
845 const struct tgsi_full_dst_register *reg)
846 {
847 unsigned file = reg->Register.File;
848 unsigned index = reg->Register.Index;
849 const unsigned sem_name = emit->info.output_semantic_name[index];
850 const unsigned sem_index = emit->info.output_semantic_index[index];
851 unsigned writemask = reg->Register.WriteMask;
852 const unsigned indirect = reg->Register.Indirect;
853 const unsigned tempArrayId = get_temp_array_id(emit, file, index);
854 const unsigned index2d = reg->Register.Dimension;
855 VGPU10OperandToken0 operand0;
856
857 if (file == TGSI_FILE_OUTPUT) {
858 if (emit->unit == PIPE_SHADER_VERTEX ||
859 emit->unit == PIPE_SHADER_GEOMETRY) {
860 if (index == emit->vposition.out_index &&
861 emit->vposition.tmp_index != INVALID_INDEX) {
862 /* replace OUTPUT[POS] with TEMP[POS]. We need to store the
863 * vertex position result in a temporary so that we can modify
864 * it in the post_helper() code.
865 */
866 file = TGSI_FILE_TEMPORARY;
867 index = emit->vposition.tmp_index;
868 }
869 else if (sem_name == TGSI_SEMANTIC_CLIPDIST &&
870 emit->clip_dist_tmp_index != INVALID_INDEX) {
871 /* replace OUTPUT[CLIPDIST] with TEMP[CLIPDIST].
872 * We store the clip distance in a temporary first, then
873 * we'll copy it to the shadow copy and to CLIPDIST with the
874 * enabled planes mask in emit_clip_distance_instructions().
875 */
876 file = TGSI_FILE_TEMPORARY;
877 index = emit->clip_dist_tmp_index + sem_index;
878 }
879 else if (sem_name == TGSI_SEMANTIC_CLIPVERTEX &&
880 emit->clip_vertex_tmp_index != INVALID_INDEX) {
881 /* replace the CLIPVERTEX output register with a temporary */
882 assert(emit->clip_mode == CLIP_VERTEX);
883 assert(sem_index == 0);
884 file = TGSI_FILE_TEMPORARY;
885 index = emit->clip_vertex_tmp_index;
886 }
887 }
888 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
889 if (sem_name == TGSI_SEMANTIC_POSITION) {
890 /* Fragment depth output register */
891 operand0.value = 0;
892 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_DEPTH;
893 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
894 operand0.numComponents = VGPU10_OPERAND_1_COMPONENT;
895 emit_dword(emit, operand0.value);
896 return;
897 }
898 else if (index == emit->fs.color_out_index[0] &&
899 emit->fs.color_tmp_index != INVALID_INDEX) {
900 /* replace OUTPUT[COLOR] with TEMP[COLOR]. We need to store the
901 * fragment color result in a temporary so that we can read it
902           * in the post_helper() code.
903 */
904 file = TGSI_FILE_TEMPORARY;
905 index = emit->fs.color_tmp_index;
906 }
907 else {
908 /* Typically, for fragment shaders, the output register index
909 * matches the color semantic index. But not when we write to
910 * the fragment depth register. In that case, OUT[0] will be
911 * fragdepth and OUT[1] will be the 0th color output. We need
912 * to use the semantic index for color outputs.
913 */
914 assert(sem_name == TGSI_SEMANTIC_COLOR);
915 index = emit->info.output_semantic_index[index];
916 }
917 }
918 }
919
920 /* init operand tokens to all zero */
921 operand0.value = 0;
922
923 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
924
925 /* the operand has a writemask */
926 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
927
928 /* Which of the four dest components to write to. Note that we can use a
929 * simple assignment here since TGSI writemasks match VGPU10 writemasks.
930 */
931 STATIC_ASSERT(TGSI_WRITEMASK_X == VGPU10_OPERAND_4_COMPONENT_MASK_X);
932 operand0.mask = writemask;
933
934 /* translate TGSI register file type to VGPU10 operand type */
935 operand0.operandType = translate_register_file(file, tempArrayId > 0);
936
937 check_register_index(emit, operand0.operandType, index);
938
939 operand0 = setup_operand0_indexing(emit, operand0, file, indirect,
940 index2d, tempArrayId);
941
942 /* Emit tokens */
943 emit_dword(emit, operand0.value);
944 if (tempArrayId > 0) {
945 emit_dword(emit, tempArrayId);
946 }
947
948 emit_dword(emit, remap_temp_index(emit, file, index));
949
950 if (indirect) {
951 emit_indirect_register(emit, reg->Indirect.Index);
952 }
953 }
954
955
956 /**
957 * Translate a src register of a TGSI instruction and emit VGPU10 tokens.
958 */
959 static void
960 emit_src_register(struct svga_shader_emitter_v10 *emit,
961 const struct tgsi_full_src_register *reg)
962 {
963 unsigned file = reg->Register.File;
964 unsigned index = reg->Register.Index;
965 const unsigned indirect = reg->Register.Indirect;
966 const unsigned tempArrayId = get_temp_array_id(emit, file, index);
967 const unsigned index2d = reg->Register.Dimension;
968 const unsigned swizzleX = reg->Register.SwizzleX;
969 const unsigned swizzleY = reg->Register.SwizzleY;
970 const unsigned swizzleZ = reg->Register.SwizzleZ;
971 const unsigned swizzleW = reg->Register.SwizzleW;
972 const unsigned absolute = reg->Register.Absolute;
973 const unsigned negate = reg->Register.Negate;
974 bool is_prim_id = FALSE;
975
976 VGPU10OperandToken0 operand0;
977 VGPU10OperandToken1 operand1;
978
979 if (emit->unit == PIPE_SHADER_FRAGMENT &&
980 file == TGSI_FILE_INPUT) {
981 if (index == emit->fs.face_input_index) {
982 /* Replace INPUT[FACE] with TEMP[FACE] */
983 file = TGSI_FILE_TEMPORARY;
984 index = emit->fs.face_tmp_index;
985 }
986 else if (index == emit->fs.fragcoord_input_index) {
987 /* Replace INPUT[POSITION] with TEMP[POSITION] */
988 file = TGSI_FILE_TEMPORARY;
989 index = emit->fs.fragcoord_tmp_index;
990 }
991 else {
992       /* We remap fragment shader inputs so that FS input indexes
993 * match up with VS/GS output indexes.
994 */
995 index = emit->linkage.input_map[index];
996 }
997 }
998 else if (emit->unit == PIPE_SHADER_GEOMETRY &&
999 file == TGSI_FILE_INPUT) {
1000 is_prim_id = (index == emit->gs.prim_id_index);
1001 index = emit->linkage.input_map[index];
1002 }
1003 else if (emit->unit == PIPE_SHADER_VERTEX) {
1004 if (file == TGSI_FILE_INPUT) {
1005 /* if input is adjusted... */
1006 if ((emit->key.vs.adjust_attrib_w_1 |
1007 emit->key.vs.adjust_attrib_itof |
1008 emit->key.vs.adjust_attrib_utof |
1009 emit->key.vs.attrib_is_bgra |
1010 emit->key.vs.attrib_puint_to_snorm |
1011 emit->key.vs.attrib_puint_to_uscaled |
1012 emit->key.vs.attrib_puint_to_sscaled) & (1 << index)) {
1013 file = TGSI_FILE_TEMPORARY;
1014 index = emit->vs.adjusted_input[index];
1015 }
1016 }
1017 else if (file == TGSI_FILE_SYSTEM_VALUE) {
1018 assert(index < Elements(emit->system_value_indexes));
1019 index = emit->system_value_indexes[index];
1020 }
1021 }
1022
1023 operand0.value = operand1.value = 0;
1024
1025 if (is_prim_id) {
1026 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
1027 operand0.operandType = VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID;
1028 }
1029 else {
1030 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1031 operand0.operandType = translate_register_file(file, tempArrayId > 0);
1032 }
1033
1034 operand0 = setup_operand0_indexing(emit, operand0, file, indirect,
1035 index2d, tempArrayId);
1036
1037 if (operand0.operandType != VGPU10_OPERAND_TYPE_IMMEDIATE32 &&
1038 operand0.operandType != VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID) {
1039 /* there's no swizzle for in-line immediates */
1040 if (swizzleX == swizzleY &&
1041 swizzleX == swizzleZ &&
1042 swizzleX == swizzleW) {
1043 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
1044 }
1045 else {
1046 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
1047 }
1048
1049 operand0.swizzleX = swizzleX;
1050 operand0.swizzleY = swizzleY;
1051 operand0.swizzleZ = swizzleZ;
1052 operand0.swizzleW = swizzleW;
1053
1054 if (absolute || negate) {
1055 operand0.extended = 1;
1056 operand1.extendedOperandType = VGPU10_EXTENDED_OPERAND_MODIFIER;
1057 if (absolute && !negate)
1058 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_ABS;
1059 if (!absolute && negate)
1060 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_NEG;
1061 if (absolute && negate)
1062 operand1.operandModifier = VGPU10_OPERAND_MODIFIER_ABSNEG;
1063 }
1064 }
1065
1066 /* Emit the operand tokens */
1067 emit_dword(emit, operand0.value);
1068 if (operand0.extended)
1069 emit_dword(emit, operand1.value);
1070
1071 if (operand0.operandType == VGPU10_OPERAND_TYPE_IMMEDIATE32) {
1072 /* Emit the four float/int in-line immediate values */
1073 unsigned *c;
1074 assert(index < Elements(emit->immediates));
1075 assert(file == TGSI_FILE_IMMEDIATE);
1076 assert(swizzleX < 4);
1077 assert(swizzleY < 4);
1078 assert(swizzleZ < 4);
1079 assert(swizzleW < 4);
1080 c = (unsigned *) emit->immediates[index];
1081 emit_dword(emit, c[swizzleX]);
1082 emit_dword(emit, c[swizzleY]);
1083 emit_dword(emit, c[swizzleZ]);
1084 emit_dword(emit, c[swizzleW]);
1085 }
1086 else if (operand0.indexDimension >= VGPU10_OPERAND_INDEX_1D) {
1087 /* Emit the register index(es) */
1088 if (index2d ||
1089 operand0.operandType == VGPU10_OPERAND_TYPE_CONSTANT_BUFFER) {
1090 emit_dword(emit, reg->Dimension.Index);
1091 }
1092
1093 if (tempArrayId > 0) {
1094 emit_dword(emit, tempArrayId);
1095 }
1096
1097 emit_dword(emit, remap_temp_index(emit, file, index));
1098
1099 if (indirect) {
1100 emit_indirect_register(emit, reg->Indirect.Index);
1101 }
1102 }
1103 }
1104
1105
1106 /**
1107 * Emit a resource operand (for use with a SAMPLE instruction).
1108 */
1109 static void
1110 emit_resource_register(struct svga_shader_emitter_v10 *emit,
1111 unsigned resource_number)
1112 {
1113 VGPU10OperandToken0 operand0;
1114
1115 check_register_index(emit, VGPU10_OPERAND_TYPE_RESOURCE, resource_number);
1116
1117 /* init */
1118 operand0.value = 0;
1119
1120 operand0.operandType = VGPU10_OPERAND_TYPE_RESOURCE;
1121 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1122 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1123 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
1124 operand0.swizzleX = VGPU10_COMPONENT_X;
1125 operand0.swizzleY = VGPU10_COMPONENT_Y;
1126 operand0.swizzleZ = VGPU10_COMPONENT_Z;
1127 operand0.swizzleW = VGPU10_COMPONENT_W;
1128
1129 emit_dword(emit, operand0.value);
1130 emit_dword(emit, resource_number);
1131 }
1132
1133
1134 /**
1135 * Emit a sampler operand (for use with a SAMPLE instruction).
1136 */
1137 static void
1138 emit_sampler_register(struct svga_shader_emitter_v10 *emit,
1139 unsigned sampler_number)
1140 {
1141 VGPU10OperandToken0 operand0;
1142
1143 check_register_index(emit, VGPU10_OPERAND_TYPE_SAMPLER, sampler_number);
1144
1145 /* init */
1146 operand0.value = 0;
1147
1148 operand0.operandType = VGPU10_OPERAND_TYPE_SAMPLER;
1149 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1150
1151 emit_dword(emit, operand0.value);
1152 emit_dword(emit, sampler_number);
1153 }
1154
1155
1156 /**
1157 * Emit an operand which reads the IS_FRONT_FACING register.
1158 */
1159 static void
1160 emit_face_register(struct svga_shader_emitter_v10 *emit)
1161 {
1162 VGPU10OperandToken0 operand0;
1163 unsigned index = emit->linkage.input_map[emit->fs.face_input_index];
1164
1165 /* init */
1166 operand0.value = 0;
1167
1168 operand0.operandType = VGPU10_OPERAND_TYPE_INPUT;
1169 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
1170 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
1171 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
1172
1173 operand0.swizzleX = VGPU10_COMPONENT_X;
1174 operand0.swizzleY = VGPU10_COMPONENT_X;
1175 operand0.swizzleZ = VGPU10_COMPONENT_X;
1176 operand0.swizzleW = VGPU10_COMPONENT_X;
1177
1178 emit_dword(emit, operand0.value);
1179 emit_dword(emit, index);
1180 }
1181
1182
1183 /**
1184 * Emit the token for a VGPU10 opcode.
1185 * \param saturate clamp result to [0,1]?
1186 */
1187 static void
1188 emit_opcode(struct svga_shader_emitter_v10 *emit,
1189 unsigned vgpu10_opcode, boolean saturate)
1190 {
1191 VGPU10OpcodeToken0 token0;
1192
1193 token0.value = 0; /* init all fields to zero */
1194 token0.opcodeType = vgpu10_opcode;
1195 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1196 token0.saturate = saturate;
1197
1198 emit_dword(emit, token0.value);
1199 }
1200
1201
1202 /**
1203 * Emit the token for a VGPU10 resinfo instruction.
1204 * \param modifier return type modifier, _uint or _rcpFloat.
1205 * TODO: We may want to remove this parameter if it will
1206 * only ever be used as _uint.
1207 */
1208 static void
1209 emit_opcode_resinfo(struct svga_shader_emitter_v10 *emit,
1210 VGPU10_RESINFO_RETURN_TYPE modifier)
1211 {
1212 VGPU10OpcodeToken0 token0;
1213
1214 token0.value = 0; /* init all fields to zero */
1215 token0.opcodeType = VGPU10_OPCODE_RESINFO;
1216 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1217 token0.resinfoReturnType = modifier;
1218
1219 emit_dword(emit, token0.value);
1220 }
1221
1222
1223 /**
1224 * Emit opcode tokens for a texture sample instruction. Texture instructions
1225 * can be rather complicated (texel offsets, etc) so we have this specialized
1226 * function.
1227 */
1228 static void
1229 emit_sample_opcode(struct svga_shader_emitter_v10 *emit,
1230 unsigned vgpu10_opcode, boolean saturate,
1231 const int offsets[3])
1232 {
1233 VGPU10OpcodeToken0 token0;
1234 VGPU10OpcodeToken1 token1;
1235
1236 token0.value = 0; /* init all fields to zero */
1237 token0.opcodeType = vgpu10_opcode;
1238 token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
1239 token0.saturate = saturate;
1240
1241 if (offsets[0] || offsets[1] || offsets[2]) {
1242 assert(offsets[0] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1243 assert(offsets[1] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1244 assert(offsets[2] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
1245 assert(offsets[0] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1246 assert(offsets[1] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1247 assert(offsets[2] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
1248
1249 token0.extended = 1;
1250 token1.value = 0;
1251 token1.opcodeType = VGPU10_EXTENDED_OPCODE_SAMPLE_CONTROLS;
1252 token1.offsetU = offsets[0];
1253 token1.offsetV = offsets[1];
1254 token1.offsetW = offsets[2];
1255 }
1256
1257 emit_dword(emit, token0.value);
1258 if (token0.extended) {
1259 emit_dword(emit, token1.value);
1260 }
1261 }
1262
1263
1264 /**
1265 * Emit a DISCARD opcode token.
1266 * If nonzero is set, we'll discard the fragment if the X component is not 0.
1267 * Otherwise, we'll discard the fragment if the X component is 0.
1268 */
1269 static void
1270 emit_discard_opcode(struct svga_shader_emitter_v10 *emit, boolean nonzero)
1271 {
1272 VGPU10OpcodeToken0 opcode0;
1273
1274 opcode0.value = 0;
1275 opcode0.opcodeType = VGPU10_OPCODE_DISCARD;
1276 if (nonzero)
1277 opcode0.testBoolean = VGPU10_INSTRUCTION_TEST_NONZERO;
1278
1279 emit_dword(emit, opcode0.value);
1280 }
1281
1282
1283 /**
1284 * We need to call this before we begin emitting a VGPU10 instruction.
1285 */
1286 static void
1287 begin_emit_instruction(struct svga_shader_emitter_v10 *emit)
1288 {
1289 assert(emit->inst_start_token == 0);
1290 /* Save location of the instruction's VGPU10OpcodeToken0 token.
1291 * Note, we can't save a pointer because it would become invalid if
1292 * we have to realloc the output buffer.
1293 */
1294 emit->inst_start_token = emit_get_num_tokens(emit);
1295 }
1296
1297
1298 /**
1299 * We need to call this after we emit the last token of a VGPU10 instruction.
1300 * This function patches in the opcode token's instructionLength field.
1301 */
1302 static void
1303 end_emit_instruction(struct svga_shader_emitter_v10 *emit)
1304 {
1305 VGPU10OpcodeToken0 *tokens = (VGPU10OpcodeToken0 *) emit->buf;
1306 unsigned inst_length;
1307
1308 assert(emit->inst_start_token > 0);
1309
1310 if (emit->discard_instruction) {
1311 /* Back up the emit->ptr to where this instruction started so
1312 * that we discard the current instruction.
1313 */
1314 emit->ptr = (char *) (tokens + emit->inst_start_token);
1315 }
1316 else {
1317 /* Compute instruction length and patch that into the start of
1318 * the instruction.
1319 */
1320 inst_length = emit_get_num_tokens(emit) - emit->inst_start_token;
1321
1322 assert(inst_length > 0);
1323
1324 tokens[emit->inst_start_token].instructionLength = inst_length;
1325 }
1326
1327 emit->inst_start_token = 0; /* reset to zero for error checking */
1328 emit->discard_instruction = FALSE;
1329 }
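
/*
 * Sketch of how these helpers fit together when translating a simple
 * instruction (illustrative only; the per-opcode emit code builds on
 * this pattern):
 *
 *    begin_emit_instruction(emit);
 *    emit_opcode(emit, VGPU10_OPCODE_ADD, FALSE);
 *    emit_dst_register(emit, &dst);
 *    emit_src_register(emit, &src0);
 *    emit_src_register(emit, &src1);
 *    end_emit_instruction(emit);
 */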
1330
1331
1332 /**
1333 * Return index for a free temporary register.
1334 */
1335 static unsigned
1336 get_temp_index(struct svga_shader_emitter_v10 *emit)
1337 {
1338 assert(emit->internal_temp_count < MAX_INTERNAL_TEMPS);
1339 return emit->num_shader_temps + emit->internal_temp_count++;
1340 }
1341
1342
1343 /**
1344 * Release the temporaries which were generated by get_temp_index().
1345 */
1346 static void
1347 free_temp_indexes(struct svga_shader_emitter_v10 *emit)
1348 {
1349 emit->internal_temp_count = 0;
1350 }
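
/*
 * Typical pattern for the two helpers above: grab a scratch register
 * with get_temp_index(), reference it through make_src_temp_reg() /
 * make_dst_temp_reg() (defined below), then call free_temp_indexes()
 * once the current TGSI instruction has been fully translated.
 */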
1351
1352
1353 /**
1354 * Create a tgsi_full_src_register.
1355 */
1356 static struct tgsi_full_src_register
1357 make_src_reg(unsigned file, unsigned index)
1358 {
1359 struct tgsi_full_src_register reg;
1360
1361 memset(&reg, 0, sizeof(reg));
1362 reg.Register.File = file;
1363 reg.Register.Index = index;
1364 reg.Register.SwizzleX = TGSI_SWIZZLE_X;
1365 reg.Register.SwizzleY = TGSI_SWIZZLE_Y;
1366 reg.Register.SwizzleZ = TGSI_SWIZZLE_Z;
1367 reg.Register.SwizzleW = TGSI_SWIZZLE_W;
1368 return reg;
1369 }
1370
1371
1372 /**
1373 * Create a tgsi_full_src_register for a temporary.
1374 */
1375 static struct tgsi_full_src_register
1376 make_src_temp_reg(unsigned index)
1377 {
1378 return make_src_reg(TGSI_FILE_TEMPORARY, index);
1379 }
1380
1381
1382 /**
1383 * Create a tgsi_full_src_register for a constant.
1384 */
1385 static struct tgsi_full_src_register
1386 make_src_const_reg(unsigned index)
1387 {
1388 return make_src_reg(TGSI_FILE_CONSTANT, index);
1389 }
1390
1391
1392 /**
1393 * Create a tgsi_full_src_register for an immediate constant.
1394 */
1395 static struct tgsi_full_src_register
1396 make_src_immediate_reg(unsigned index)
1397 {
1398 return make_src_reg(TGSI_FILE_IMMEDIATE, index);
1399 }
1400
1401
1402 /**
1403 * Create a tgsi_full_dst_register.
1404 */
1405 static struct tgsi_full_dst_register
1406 make_dst_reg(unsigned file, unsigned index)
1407 {
1408 struct tgsi_full_dst_register reg;
1409
1410 memset(&reg, 0, sizeof(reg));
1411 reg.Register.File = file;
1412 reg.Register.Index = index;
1413 reg.Register.WriteMask = TGSI_WRITEMASK_XYZW;
1414 return reg;
1415 }
1416
1417
1418 /**
1419 * Create a tgsi_full_dst_register for a temporary.
1420 */
1421 static struct tgsi_full_dst_register
1422 make_dst_temp_reg(unsigned index)
1423 {
1424 return make_dst_reg(TGSI_FILE_TEMPORARY, index);
1425 }
1426
1427
1428 /**
1429 * Create a tgsi_full_dst_register for an output.
1430 */
1431 static struct tgsi_full_dst_register
1432 make_dst_output_reg(unsigned index)
1433 {
1434 return make_dst_reg(TGSI_FILE_OUTPUT, index);
1435 }
1436
1437
1438 /**
1439 * Create negated tgsi_full_src_register.
1440 */
1441 static struct tgsi_full_src_register
1442 negate_src(const struct tgsi_full_src_register *reg)
1443 {
1444 struct tgsi_full_src_register neg = *reg;
1445 neg.Register.Negate = !reg->Register.Negate;
1446 return neg;
1447 }
1448
1449 /**
1450 * Create absolute value of a tgsi_full_src_register.
1451 */
1452 static struct tgsi_full_src_register
1453 absolute_src(const struct tgsi_full_src_register *reg)
1454 {
1455 struct tgsi_full_src_register absolute = *reg;
1456 absolute.Register.Absolute = 1;
1457 return absolute;
1458 }
1459
1460
1461 /** Return the named swizzle term from the src register */
1462 static inline unsigned
1463 get_swizzle(const struct tgsi_full_src_register *reg, unsigned term)
1464 {
1465 switch (term) {
1466 case TGSI_SWIZZLE_X:
1467 return reg->Register.SwizzleX;
1468 case TGSI_SWIZZLE_Y:
1469 return reg->Register.SwizzleY;
1470 case TGSI_SWIZZLE_Z:
1471 return reg->Register.SwizzleZ;
1472 case TGSI_SWIZZLE_W:
1473 return reg->Register.SwizzleW;
1474 default:
1475 assert(!"Bad swizzle");
1476 return TGSI_SWIZZLE_X;
1477 }
1478 }
1479
1480
1481 /**
1482 * Create swizzled tgsi_full_src_register.
1483 */
1484 static struct tgsi_full_src_register
1485 swizzle_src(const struct tgsi_full_src_register *reg,
1486 unsigned swizzleX, unsigned swizzleY,
1487 unsigned swizzleZ, unsigned swizzleW)
1488 {
1489 struct tgsi_full_src_register swizzled = *reg;
1490 /* Note: we swizzle the current swizzle */
1491 swizzled.Register.SwizzleX = get_swizzle(reg, swizzleX);
1492 swizzled.Register.SwizzleY = get_swizzle(reg, swizzleY);
1493 swizzled.Register.SwizzleZ = get_swizzle(reg, swizzleZ);
1494 swizzled.Register.SwizzleW = get_swizzle(reg, swizzleW);
1495 return swizzled;
1496 }
1497
1498
1499 /**
1500 * Create swizzled tgsi_full_src_register where all the swizzle
1501 * terms are the same.
1502 */
1503 static struct tgsi_full_src_register
1504 scalar_src(const struct tgsi_full_src_register *reg, unsigned swizzle)
1505 {
1506 struct tgsi_full_src_register swizzled = *reg;
1507 /* Note: we swizzle the current swizzle */
1508 swizzled.Register.SwizzleX =
1509 swizzled.Register.SwizzleY =
1510 swizzled.Register.SwizzleZ =
1511 swizzled.Register.SwizzleW = get_swizzle(reg, swizzle);
1512 return swizzled;
1513 }
1514
1515
1516 /**
1517 * Create new tgsi_full_dst_register with writemask.
1518 * \param mask bitmask of TGSI_WRITEMASK_[XYZW]
1519 */
1520 static struct tgsi_full_dst_register
1521 writemask_dst(const struct tgsi_full_dst_register *reg, unsigned mask)
1522 {
1523 struct tgsi_full_dst_register masked = *reg;
1524 masked.Register.WriteMask = mask;
1525 return masked;
1526 }
1527
1528
1529 /**
1530 * Check if the register's swizzle is XXXX, YYYY, ZZZZ, or WWWW.
1531 */
1532 static boolean
1533 same_swizzle_terms(const struct tgsi_full_src_register *reg)
1534 {
1535 return (reg->Register.SwizzleX == reg->Register.SwizzleY &&
1536 reg->Register.SwizzleY == reg->Register.SwizzleZ &&
1537 reg->Register.SwizzleZ == reg->Register.SwizzleW);
1538 }
1539
1540
1541 /**
1542 * Search the vector for the value 'x' and return its position.
1543 */
1544 static int
1545 find_imm_in_vec4(const union tgsi_immediate_data vec[4],
1546 union tgsi_immediate_data x)
1547 {
1548 unsigned i;
1549 for (i = 0; i < 4; i++) {
1550 if (vec[i].Int == x.Int)
1551 return i;
1552 }
1553 return -1;
1554 }
1555
1556
1557 /**
1558 * Helper used by make_immediate_reg(), make_immediate_reg_4().
1559 */
1560 static int
1561 find_immediate(struct svga_shader_emitter_v10 *emit,
1562 union tgsi_immediate_data x, unsigned startIndex)
1563 {
1564 const unsigned endIndex = emit->num_immediates;
1565 unsigned i;
1566
1567 assert(emit->immediates_emitted);
1568
1569 /* Search immediates for x, y, z, w */
1570 for (i = startIndex; i < endIndex; i++) {
1571 if (x.Int == emit->immediates[i][0].Int ||
1572 x.Int == emit->immediates[i][1].Int ||
1573 x.Int == emit->immediates[i][2].Int ||
1574 x.Int == emit->immediates[i][3].Int) {
1575 return i;
1576 }
1577 }
1578 /* Should never try to use an immediate value that wasn't pre-declared */
1579 assert(!"find_immediate() failed!");
1580 return -1;
1581 }
1582
1583
1584 /**
1585 * Return a tgsi_full_src_register for an immediate/literal
1586 * union tgsi_immediate_data[4] value.
1587 * Note: the values must have been previously declared/allocated in
1588 * emit_pre_helpers(). And, all of x,y,z,w must be located in the same
1589 * vec4 immediate.
1590 */
1591 static struct tgsi_full_src_register
1592 make_immediate_reg_4(struct svga_shader_emitter_v10 *emit,
1593 const union tgsi_immediate_data imm[4])
1594 {
1595 struct tgsi_full_src_register reg;
1596 unsigned i;
1597
1598 for (i = 0; i < emit->num_common_immediates; i++) {
1599 /* search for first component value */
1600 int immpos = find_immediate(emit, imm[0], i);
1601 int x, y, z, w;
1602
1603 assert(immpos >= 0);
1604
1605 /* find remaining components within the immediate vector */
1606 x = find_imm_in_vec4(emit->immediates[immpos], imm[0]);
1607 y = find_imm_in_vec4(emit->immediates[immpos], imm[1]);
1608 z = find_imm_in_vec4(emit->immediates[immpos], imm[2]);
1609 w = find_imm_in_vec4(emit->immediates[immpos], imm[3]);
1610
1611 if (x >=0 && y >= 0 && z >= 0 && w >= 0) {
1612 /* found them all */
1613 memset(&reg, 0, sizeof(reg));
1614 reg.Register.File = TGSI_FILE_IMMEDIATE;
1615 reg.Register.Index = immpos;
1616 reg.Register.SwizzleX = x;
1617 reg.Register.SwizzleY = y;
1618 reg.Register.SwizzleZ = z;
1619 reg.Register.SwizzleW = w;
1620 return reg;
1621 }
1622 /* else, keep searching */
1623 }
1624
1625 assert(!"Failed to find immediate register!");
1626
1627 /* Just return IMM[0].xxxx */
1628 memset(&reg, 0, sizeof(reg));
1629 reg.Register.File = TGSI_FILE_IMMEDIATE;
1630 return reg;
1631 }
1632
1633
1634 /**
1635 * Return a tgsi_full_src_register for an immediate/literal
1636 * union tgsi_immediate_data value of the form {value, value, value, value}.
1637 * \sa make_immediate_reg_4() regarding allowed values.
1638 */
1639 static struct tgsi_full_src_register
1640 make_immediate_reg(struct svga_shader_emitter_v10 *emit,
1641 union tgsi_immediate_data value)
1642 {
1643 struct tgsi_full_src_register reg;
1644 int immpos = find_immediate(emit, value, 0);
1645
1646 assert(immpos >= 0);
1647
1648 memset(&reg, 0, sizeof(reg));
1649 reg.Register.File = TGSI_FILE_IMMEDIATE;
1650 reg.Register.Index = immpos;
1651 reg.Register.SwizzleX =
1652 reg.Register.SwizzleY =
1653 reg.Register.SwizzleZ =
1654 reg.Register.SwizzleW = find_imm_in_vec4(emit->immediates[immpos], value);
1655
1656 return reg;
1657 }
1658
1659
1660 /**
1661 * Return a tgsi_full_src_register for an immediate/literal float[4] value.
1662 * \sa make_immediate_reg_4() regarding allowed values.
1663 */
1664 static struct tgsi_full_src_register
1665 make_immediate_reg_float4(struct svga_shader_emitter_v10 *emit,
1666 float x, float y, float z, float w)
1667 {
1668 union tgsi_immediate_data imm[4];
1669 imm[0].Float = x;
1670 imm[1].Float = y;
1671 imm[2].Float = z;
1672 imm[3].Float = w;
1673 return make_immediate_reg_4(emit, imm);
1674 }
1675
1676
1677 /**
1678 * Return a tgsi_full_src_register for an immediate/literal float value
1679 * of the form {value, value, value, value}.
1680 * \sa make_immediate_reg_4() regarding allowed values.
1681 */
1682 static struct tgsi_full_src_register
1683 make_immediate_reg_float(struct svga_shader_emitter_v10 *emit, float value)
1684 {
1685 union tgsi_immediate_data imm;
1686 imm.Float = value;
1687 return make_immediate_reg(emit, imm);
1688 }
1689
1690
1691 /**
1692 * Return a tgsi_full_src_register for an immediate/literal int[4] vector.
1693 */
1694 static struct tgsi_full_src_register
1695 make_immediate_reg_int4(struct svga_shader_emitter_v10 *emit,
1696 int x, int y, int z, int w)
1697 {
1698 union tgsi_immediate_data imm[4];
1699 imm[0].Int = x;
1700 imm[1].Int = y;
1701 imm[2].Int = z;
1702 imm[3].Int = w;
1703 return make_immediate_reg_4(emit, imm);
1704 }
1705
1706
1707 /**
1708 * Return a tgsi_full_src_register for an immediate/literal int value
1709 * of the form {value, value, value, value}.
1710 * \sa make_immediate_reg_4() regarding allowed values.
1711 */
1712 static struct tgsi_full_src_register
1713 make_immediate_reg_int(struct svga_shader_emitter_v10 *emit, int value)
1714 {
1715 union tgsi_immediate_data imm;
1716 imm.Int = value;
1717 return make_immediate_reg(emit, imm);
1718 }
1719
1720
1721 /**
1722 * Allocate space for a union tgsi_immediate_data[4] immediate.
1723 * \return the index/position of the immediate.
1724 */
1725 static unsigned
1726 alloc_immediate_4(struct svga_shader_emitter_v10 *emit,
1727 const union tgsi_immediate_data imm[4])
1728 {
1729 unsigned n = emit->num_immediates++;
1730 assert(!emit->immediates_emitted);
1731 assert(n < Elements(emit->immediates));
1732 emit->immediates[n][0] = imm[0];
1733 emit->immediates[n][1] = imm[1];
1734 emit->immediates[n][2] = imm[2];
1735 emit->immediates[n][3] = imm[3];
1736 return n;
1737 }
1738
1739
1740 /**
1741 * Allocate space for a float[4] immediate.
1742 * \return the index/position of the immediate.
1743 */
1744 static unsigned
1745 alloc_immediate_float4(struct svga_shader_emitter_v10 *emit,
1746 float x, float y, float z, float w)
1747 {
1748 union tgsi_immediate_data imm[4];
1749 imm[0].Float = x;
1750 imm[1].Float = y;
1751 imm[2].Float = z;
1752 imm[3].Float = w;
1753 return alloc_immediate_4(emit, imm);
1754 }
1755
1756
1757 /**
1758 * Allocate space for a int[4] immediate.
1759 * \return the index/position of the immediate.
1760 */
1761 static unsigned
1762 alloc_immediate_int4(struct svga_shader_emitter_v10 *emit,
1763 int x, int y, int z, int w)
1764 {
1765 union tgsi_immediate_data imm[4];
1766 imm[0].Int = x;
1767 imm[1].Int = y;
1768 imm[2].Int = z;
1769 imm[3].Int = w;
1770 return alloc_immediate_4(emit, imm);
1771 }
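
/*
 * Usage sketch (illustrative): common literals are allocated before
 * emit_vgpu10_immediates_block() runs, e.g.
 *
 *    alloc_immediate_float4(emit, 0.0f, 1.0f, 0.5f, -1.0f);
 *
 * and are later referenced with make_immediate_reg_float(emit, 1.0f),
 * which locates the value in the immediate constant buffer and returns
 * an IMM src register swizzled to .yyyy.
 */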
1772
1773
1774 /**
1775 * Allocate a shader input to store a system value.
1776 */
1777 static unsigned
1778 alloc_system_value_index(struct svga_shader_emitter_v10 *emit, unsigned index)
1779 {
1780 const unsigned n = emit->info.num_inputs + index;
1781 assert(index < Elements(emit->system_value_indexes));
1782 emit->system_value_indexes[index] = n;
1783 return n;
1784 }
1785
1786
1787 /**
1788 * Translate a TGSI immediate value (union tgsi_immediate_data[4]) to VGPU10.
1789 */
1790 static boolean
1791 emit_vgpu10_immediate(struct svga_shader_emitter_v10 *emit,
1792 const struct tgsi_full_immediate *imm)
1793 {
1794 /* We don't actually emit any code here. We just save the
1795 * immediate values and emit them later.
1796 */
1797 alloc_immediate_4(emit, imm->u);
1798 return TRUE;
1799 }
1800
1801
1802 /**
1803 * Emit a VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER block
1804 * containing all the immediate values previously allocated
1805 * with alloc_immediate_4().
1806 */
1807 static boolean
1808 emit_vgpu10_immediates_block(struct svga_shader_emitter_v10 *emit)
1809 {
1810 VGPU10OpcodeToken0 token;
1811
1812 assert(!emit->immediates_emitted);
1813
1814 token.value = 0;
1815 token.opcodeType = VGPU10_OPCODE_CUSTOMDATA;
1816 token.customDataClass = VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER;
1817
1818 /* Note: no begin/end_emit_instruction() calls */
1819 emit_dword(emit, token.value);
1820 emit_dword(emit, 2 + 4 * emit->num_immediates);
1821 emit_dwords(emit, (unsigned *) emit->immediates, 4 * emit->num_immediates);
1822
1823 emit->immediates_emitted = TRUE;
1824
1825 return TRUE;
1826 }
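
/*
 * For example, with two vec4 immediates the block emitted above is ten
 * dwords: the CUSTOMDATA opcode token, the length dword (2 + 4*2 = 10),
 * and the eight component dwords.
 */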
1827
1828
1829 /**
1830 * Translate a fragment shader's TGSI_INTERPOLATE_x mode to a vgpu10
1831 * interpolation mode.
1832 * \return a VGPU10_INTERPOLATION_x value
1833 */
1834 static unsigned
1835 translate_interpolation(const struct svga_shader_emitter_v10 *emit,
1836 unsigned interp, unsigned interpolate_loc)
1837 {
1838 if (interp == TGSI_INTERPOLATE_COLOR) {
1839 interp = emit->key.fs.flatshade ?
1840 TGSI_INTERPOLATE_CONSTANT : TGSI_INTERPOLATE_PERSPECTIVE;
1841 }
1842
1843 switch (interp) {
1844 case TGSI_INTERPOLATE_CONSTANT:
1845 return VGPU10_INTERPOLATION_CONSTANT;
1846 case TGSI_INTERPOLATE_LINEAR:
1847 return interpolate_loc == TGSI_INTERPOLATE_LOC_CENTROID ?
1848 VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID :
1849 VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE;
1850 case TGSI_INTERPOLATE_PERSPECTIVE:
1851 return interpolate_loc == TGSI_INTERPOLATE_LOC_CENTROID ?
1852 VGPU10_INTERPOLATION_LINEAR_CENTROID :
1853 VGPU10_INTERPOLATION_LINEAR;
1854 default:
1855 assert(!"Unexpected interpolation mode");
1856 return VGPU10_INTERPOLATION_CONSTANT;
1857 }
1858 }
1859
1860
1861 /**
1862 * Translate a TGSI property to VGPU10.
1863 * Don't emit any instructions yet; just gather the primitive property information.
1864 * The output primitive topology might be changed later. The final property
1865 * instructions will be emitted as part of the pre-helper code.
1866 */
1867 static boolean
1868 emit_vgpu10_property(struct svga_shader_emitter_v10 *emit,
1869 const struct tgsi_full_property *prop)
1870 {
1871 static const VGPU10_PRIMITIVE primType[] = {
1872 VGPU10_PRIMITIVE_POINT, /* PIPE_PRIM_POINTS */
1873 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINES */
1874 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINE_LOOP */
1875 VGPU10_PRIMITIVE_LINE, /* PIPE_PRIM_LINE_STRIP */
1876 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLES */
1877 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLE_STRIP */
1878 VGPU10_PRIMITIVE_TRIANGLE, /* PIPE_PRIM_TRIANGLE_FAN */
1879 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_QUADS */
1880 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_QUAD_STRIP */
1881 VGPU10_PRIMITIVE_UNDEFINED, /* PIPE_PRIM_POLYGON */
1882 VGPU10_PRIMITIVE_LINE_ADJ, /* PIPE_PRIM_LINES_ADJACENCY */
1883 VGPU10_PRIMITIVE_LINE_ADJ, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
1884 VGPU10_PRIMITIVE_TRIANGLE_ADJ, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
1885 VGPU10_PRIMITIVE_TRIANGLE_ADJ /* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
1886 };
1887
1888 static const VGPU10_PRIMITIVE_TOPOLOGY primTopology[] = {
1889 VGPU10_PRIMITIVE_TOPOLOGY_POINTLIST, /* PIPE_PRIM_POINTS */
1890 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST, /* PIPE_PRIM_LINES */
1891 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST, /* PIPE_PRIM_LINE_LOOP */
1892 VGPU10_PRIMITIVE_TOPOLOGY_LINESTRIP, /* PIPE_PRIM_LINE_STRIP */
1893 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST, /* PIPE_PRIM_TRIANGLES */
1894 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, /* PIPE_PRIM_TRIANGLE_STRIP */
1895 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, /* PIPE_PRIM_TRIANGLE_FAN */
1896 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_QUADS */
1897 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_QUAD_STRIP */
1898 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED, /* PIPE_PRIM_POLYGON */
1899 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ, /* PIPE_PRIM_LINES_ADJACENCY */
1900 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
1901 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
1902 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ /* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
1903 };
1904
1905 static const unsigned inputArraySize[] = {
1906 0, /* VGPU10_PRIMITIVE_UNDEFINED */
1907 1, /* VGPU10_PRIMITIVE_POINT */
1908 2, /* VGPU10_PRIMITIVE_LINE */
1909 3, /* VGPU10_PRIMITIVE_TRIANGLE */
1910 0,
1911 0,
1912 4, /* VGPU10_PRIMITIVE_LINE_ADJ */
1913 6 /* VGPU10_PRIMITIVE_TRIANGLE_ADJ */
1914 };
1915
1916 switch (prop->Property.PropertyName) {
1917 case TGSI_PROPERTY_GS_INPUT_PRIM:
1918 assert(prop->u[0].Data < Elements(primType));
1919 emit->gs.prim_type = primType[prop->u[0].Data];
1920 assert(emit->gs.prim_type != VGPU10_PRIMITIVE_UNDEFINED);
1921 emit->gs.input_size = inputArraySize[emit->gs.prim_type];
1922 break;
1923
1924 case TGSI_PROPERTY_GS_OUTPUT_PRIM:
1925 assert(prop->u[0].Data < Elements(primTopology));
1926 emit->gs.prim_topology = primTopology[prop->u[0].Data];
1927 assert(emit->gs.prim_topology != VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED);
1928 break;
1929
1930 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
1931 emit->gs.max_out_vertices = prop->u[0].Data;
1932 break;
1933
1934 default:
1935 break;
1936 }
1937
1938 return TRUE;
1939 }
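/* Illustrative example (derived from the tables above): a geometry shader
 * declaring PIPE_PRIM_TRIANGLES_ADJACENCY as its input primitive gets
 * gs.prim_type = VGPU10_PRIMITIVE_TRIANGLE_ADJ and gs.input_size = 6,
 * while an output primitive of PIPE_PRIM_TRIANGLE_STRIP maps to
 * VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP.
 */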
1940
1941
1942 static void
1943 emit_property_instruction(struct svga_shader_emitter_v10 *emit,
1944 VGPU10OpcodeToken0 opcode0, unsigned nData,
1945 unsigned data)
1946 {
1947 begin_emit_instruction(emit);
1948 emit_dword(emit, opcode0.value);
1949 if (nData)
1950 emit_dword(emit, data);
1951 end_emit_instruction(emit);
1952 }
1953
1954
1955 /**
1956 * Emit property instructions
1957 */
1958 static void
1959 emit_property_instructions(struct svga_shader_emitter_v10 *emit)
1960 {
1961 VGPU10OpcodeToken0 opcode0;
1962
1963 assert(emit->unit == PIPE_SHADER_GEOMETRY);
1964
1965 /* emit input primitive type declaration */
1966 opcode0.value = 0;
1967 opcode0.opcodeType = VGPU10_OPCODE_DCL_GS_INPUT_PRIMITIVE;
1968 opcode0.primitive = emit->gs.prim_type;
1969 emit_property_instruction(emit, opcode0, 0, 0);
1970
1971 /* emit output primitive topology declaration */
1972 opcode0.value = 0;
1973 opcode0.opcodeType = VGPU10_OPCODE_DCL_GS_OUTPUT_PRIMITIVE_TOPOLOGY;
1974 opcode0.primitiveTopology = emit->gs.prim_topology;
1975 emit_property_instruction(emit, opcode0, 0, 0);
1976
1977 /* emit max output vertices */
1978 opcode0.value = 0;
1979 opcode0.opcodeType = VGPU10_OPCODE_DCL_MAX_OUTPUT_VERTEX_COUNT;
1980 emit_property_instruction(emit, opcode0, 1, emit->gs.max_out_vertices);
1981 }
1982
1983
1984 /**
1985 * Emit a vgpu10 declaration "instruction".
1986 * \param index the register index
1987 * \param size array size of the operand. In most cases, it is 1,
1988 * but for geometry shader inputs, the array size varies
1989 * depending on the primitive type.
1990 */
1991 static void
1992 emit_decl_instruction(struct svga_shader_emitter_v10 *emit,
1993 VGPU10OpcodeToken0 opcode0,
1994 VGPU10OperandToken0 operand0,
1995 VGPU10NameToken name_token,
1996 unsigned index, unsigned size)
1997 {
1998 assert(opcode0.opcodeType);
1999 assert(operand0.mask);
2000
2001 begin_emit_instruction(emit);
2002 emit_dword(emit, opcode0.value);
2003
2004 emit_dword(emit, operand0.value);
2005
2006 if (operand0.indexDimension == VGPU10_OPERAND_INDEX_1D) {
2007 /* Next token is the index of the register to declare */
2008 emit_dword(emit, index);
2009 }
2010 else if (operand0.indexDimension >= VGPU10_OPERAND_INDEX_2D) {
2011 /* Next token is the size of the register */
2012 emit_dword(emit, size);
2013
2014 /* Followed by the index of the register */
2015 emit_dword(emit, index);
2016 }
2017
2018 if (name_token.value) {
2019 emit_dword(emit, name_token.value);
2020 }
2021
2022 end_emit_instruction(emit);
2023 }
2024
2025
2026 /**
2027 * Emit the declaration for a shader input.
2028 * \param opcodeType opcode type, one of VGPU10_OPCODE_DCL_INPUTx
2029 * \param operandType operand type, one of VGPU10_OPERAND_TYPE_INPUT_x
2030 * \param dim index dimension
2031 * \param index the input register index
2032 * \param size array size of the operand. In most cases, it is 1,
2033 * but for geometry shader inputs, the array size varies
2034 * depending on the primitive type.
2035 * \param name one of VGPU10_NAME_x
2036 * \param numComp number of components
2037 * \param selMode component selection mode
2038 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2039 * \param interpMode interpolation mode
2040 */
2041 static void
2042 emit_input_declaration(struct svga_shader_emitter_v10 *emit,
2043 unsigned opcodeType, unsigned operandType,
2044 unsigned dim, unsigned index, unsigned size,
2045 unsigned name, unsigned numComp,
2046 unsigned selMode, unsigned usageMask,
2047 unsigned interpMode)
2048 {
2049 VGPU10OpcodeToken0 opcode0;
2050 VGPU10OperandToken0 operand0;
2051 VGPU10NameToken name_token;
2052
2053 assert(usageMask <= VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2054 assert(opcodeType == VGPU10_OPCODE_DCL_INPUT ||
2055 opcodeType == VGPU10_OPCODE_DCL_INPUT_SIV ||
2056 opcodeType == VGPU10_OPCODE_DCL_INPUT_PS ||
2057 opcodeType == VGPU10_OPCODE_DCL_INPUT_PS_SGV);
2058 assert(operandType == VGPU10_OPERAND_TYPE_INPUT ||
2059 operandType == VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID);
2060 assert(numComp <= VGPU10_OPERAND_4_COMPONENT);
2061 assert(selMode <= VGPU10_OPERAND_4_COMPONENT_MASK_MODE);
2062 assert(dim <= VGPU10_OPERAND_INDEX_3D);
2063 assert(name == VGPU10_NAME_UNDEFINED ||
2064 name == VGPU10_NAME_POSITION ||
2065 name == VGPU10_NAME_INSTANCE_ID ||
2066 name == VGPU10_NAME_VERTEX_ID ||
2067 name == VGPU10_NAME_PRIMITIVE_ID ||
2068 name == VGPU10_NAME_IS_FRONT_FACE);
2069 assert(interpMode == VGPU10_INTERPOLATION_UNDEFINED ||
2070 interpMode == VGPU10_INTERPOLATION_CONSTANT ||
2071 interpMode == VGPU10_INTERPOLATION_LINEAR ||
2072 interpMode == VGPU10_INTERPOLATION_LINEAR_CENTROID ||
2073 interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE ||
2074 interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID);
2075
2076 check_register_index(emit, opcodeType, index);
2077
2078 opcode0.value = operand0.value = name_token.value = 0;
2079
2080 opcode0.opcodeType = opcodeType;
2081 opcode0.interpolationMode = interpMode;
2082
2083 operand0.operandType = operandType;
2084 operand0.numComponents = numComp;
2085 operand0.selectionMode = selMode;
2086 operand0.mask = usageMask;
2087 operand0.indexDimension = dim;
2088 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2089 if (dim == VGPU10_OPERAND_INDEX_2D)
2090 operand0.index1Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2091
2092 name_token.name = name;
2093
2094 emit_decl_instruction(emit, opcode0, operand0, name_token, index, size);
2095 }
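/* Typical usage (see emit_input_declarations() below): a fragment-shader
 * position input, for example, is declared roughly as
 *   emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_PS_SGV,
 *                          VGPU10_OPERAND_TYPE_INPUT,
 *                          VGPU10_OPERAND_INDEX_1D, index, 1,
 *                          VGPU10_NAME_POSITION,
 *                          VGPU10_OPERAND_4_COMPONENT,
 *                          VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
 *                          VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
 *                          VGPU10_INTERPOLATION_LINEAR);
 */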
2096
2097
2098 /**
2099 * Emit the declaration for a shader output.
2100 * \param type one of VGPU10_OPCODE_DCL_OUTPUTx
2101 * \param index the output register index
2102 * \param name one of VGPU10_NAME_x
2103 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2104 */
2105 static void
2106 emit_output_declaration(struct svga_shader_emitter_v10 *emit,
2107 unsigned type, unsigned index,
2108 unsigned name, unsigned usageMask)
2109 {
2110 VGPU10OpcodeToken0 opcode0;
2111 VGPU10OperandToken0 operand0;
2112 VGPU10NameToken name_token;
2113
2114 assert(usageMask <= VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2115 assert(type == VGPU10_OPCODE_DCL_OUTPUT ||
2116 type == VGPU10_OPCODE_DCL_OUTPUT_SGV ||
2117 type == VGPU10_OPCODE_DCL_OUTPUT_SIV);
2118 assert(name == VGPU10_NAME_UNDEFINED ||
2119 name == VGPU10_NAME_POSITION ||
2120 name == VGPU10_NAME_PRIMITIVE_ID ||
2121 name == VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX ||
2122 name == VGPU10_NAME_CLIP_DISTANCE);
2123
2124 check_register_index(emit, type, index);
2125
2126 opcode0.value = operand0.value = name_token.value = 0;
2127
2128 opcode0.opcodeType = type;
2129 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT;
2130 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
2131 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
2132 operand0.mask = usageMask;
2133 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
2134 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2135
2136 name_token.name = name;
2137
2138 emit_decl_instruction(emit, opcode0, operand0, name_token, index, 1);
2139 }
2140
2141
2142 /**
2143 * Emit the declaration for the fragment depth output.
2144 */
2145 static void
2146 emit_fragdepth_output_declaration(struct svga_shader_emitter_v10 *emit)
2147 {
2148 VGPU10OpcodeToken0 opcode0;
2149 VGPU10OperandToken0 operand0;
2150 VGPU10NameToken name_token;
2151
2152 assert(emit->unit == PIPE_SHADER_FRAGMENT);
2153
2154 opcode0.value = operand0.value = name_token.value = 0;
2155
2156 opcode0.opcodeType = VGPU10_OPCODE_DCL_OUTPUT;
2157 operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_DEPTH;
2158 operand0.numComponents = VGPU10_OPERAND_1_COMPONENT;
2159 operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
2160 operand0.mask = VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
2161
2162 emit_decl_instruction(emit, opcode0, operand0, name_token, 0, 1);
2163 }
2164
2165
2166 /**
2167 * Emit the declaration for a system value input/output.
2168 */
2169 static void
2170 emit_system_value_declaration(struct svga_shader_emitter_v10 *emit,
2171 unsigned semantic_name, unsigned index)
2172 {
2173 switch (semantic_name) {
2174 case TGSI_SEMANTIC_INSTANCEID:
2175 index = alloc_system_value_index(emit, index);
2176 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_SIV,
2177 VGPU10_OPERAND_TYPE_INPUT,
2178 VGPU10_OPERAND_INDEX_1D,
2179 index, 1,
2180 VGPU10_NAME_INSTANCE_ID,
2181 VGPU10_OPERAND_4_COMPONENT,
2182 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2183 VGPU10_OPERAND_4_COMPONENT_MASK_X,
2184 VGPU10_INTERPOLATION_UNDEFINED);
2185 break;
2186 case TGSI_SEMANTIC_VERTEXID:
2187 index = alloc_system_value_index(emit, index);
2188 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_SIV,
2189 VGPU10_OPERAND_TYPE_INPUT,
2190 VGPU10_OPERAND_INDEX_1D,
2191 index, 1,
2192 VGPU10_NAME_VERTEX_ID,
2193 VGPU10_OPERAND_4_COMPONENT,
2194 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2195 VGPU10_OPERAND_4_COMPONENT_MASK_X,
2196 VGPU10_INTERPOLATION_UNDEFINED);
2197 break;
2198 default:
2199 ; /* XXX */
2200 }
2201 }
2202
2203 /**
2204 * Translate a TGSI declaration to VGPU10.
2205 */
2206 static boolean
2207 emit_vgpu10_declaration(struct svga_shader_emitter_v10 *emit,
2208 const struct tgsi_full_declaration *decl)
2209 {
2210 switch (decl->Declaration.File) {
2211 case TGSI_FILE_INPUT:
2212 /* do nothing - see emit_input_declarations() */
2213 return TRUE;
2214
2215 case TGSI_FILE_OUTPUT:
2216 assert(decl->Range.First == decl->Range.Last);
2217 emit->output_usage_mask[decl->Range.First] = decl->Declaration.UsageMask;
2218 return TRUE;
2219
2220 case TGSI_FILE_TEMPORARY:
2221 /* Don't declare the temps here. Just keep track of how many
2222 * and emit the declaration later.
2223 */
2224 if (decl->Declaration.Array) {
2225 /* Indexed temporary array. Save the start index of the array
2226 * and the size of the array.
2227 */
2228 const unsigned arrayID = MIN2(decl->Array.ArrayID, MAX_TEMP_ARRAYS);
2229 unsigned i;
2230
2231 assert(arrayID < ARRAY_SIZE(emit->temp_arrays));
2232
2233 /* Save this array so we can emit the declaration for it later */
2234 emit->temp_arrays[arrayID].start = decl->Range.First;
2235 emit->temp_arrays[arrayID].size =
2236 decl->Range.Last - decl->Range.First + 1;
2237
2238 emit->num_temp_arrays = MAX2(emit->num_temp_arrays, arrayID + 1);
2239 assert(emit->num_temp_arrays <= MAX_TEMP_ARRAYS);
2240 emit->num_temp_arrays = MIN2(emit->num_temp_arrays, MAX_TEMP_ARRAYS);
2241
2242 /* Fill in the temp_map entries for this array */
2243 for (i = decl->Range.First; i <= decl->Range.Last; i++) {
2244 emit->temp_map[i].arrayId = arrayID;
2245 emit->temp_map[i].index = i - decl->Range.First;
2246 }
2247 }
2248
2249 /* for all temps, indexed or not, keep track of highest index */
2250 emit->num_shader_temps = MAX2(emit->num_shader_temps,
2251 decl->Range.Last + 1);
2252 return TRUE;
2253
2254 case TGSI_FILE_CONSTANT:
2255 /* Don't declare constants here. Just keep track and emit later. */
2256 {
2257 unsigned constbuf = 0, num_consts;
2258 if (decl->Declaration.Dimension) {
2259 constbuf = decl->Dim.Index2D;
2260 }
2261 /* If the constbuf index were out of bounds, the shader should never
2262 * have linked, so we should not be able to reach this point.  The
2263 * assertion is just a sanity check.
2264 */
2265 assert(constbuf < Elements(emit->num_shader_consts));
2266
2267 num_consts = MAX2(emit->num_shader_consts[constbuf],
2268 decl->Range.Last + 1);
2269
2270 if (num_consts > VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT) {
2271 debug_printf("Warning: constant buffer is declared to size [%u]"
2272 " but [%u] is the limit.\n",
2273 num_consts,
2274 VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT);
2275 }
2276 /* The linker doesn't enforce the max UBO size so we clamp here */
2277 emit->num_shader_consts[constbuf] =
2278 MIN2(num_consts, VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT);
2279 }
2280 return TRUE;
2281
2282 case TGSI_FILE_IMMEDIATE:
2283 assert(!"TGSI_FILE_IMMEDIATE not handled yet!");
2284 return FALSE;
2285
2286 case TGSI_FILE_SYSTEM_VALUE:
2287 emit_system_value_declaration(emit, decl->Semantic.Name,
2288 decl->Range.First);
2289 return TRUE;
2290
2291 case TGSI_FILE_SAMPLER:
2292 /* Don't declare samplers here. Just keep track and emit later. */
2293 emit->num_samplers = MAX2(emit->num_samplers, decl->Range.Last + 1);
2294 return TRUE;
2295
2296 case TGSI_FILE_RESOURCE:
2297 /*opcode0.opcodeType = VGPU10_OPCODE_DCL_RESOURCE;*/
2298 /* XXX more, VGPU10_RETURN_TYPE_FLOAT */
2299 assert(!"TGSI_FILE_RESOURCE not handled yet");
2300 return FALSE;
2301
2302 case TGSI_FILE_ADDRESS:
2303 emit->num_address_regs = MAX2(emit->num_address_regs,
2304 decl->Range.Last + 1);
2305 return TRUE;
2306
2307 case TGSI_FILE_SAMPLER_VIEW:
2308 /* Not used at this time, but maybe in the future.
2309 * See emit_resource_declarations().
2310 */
2311 return TRUE;
2312
2313 default:
2314 assert(!"Unexpected type of declaration");
2315 return FALSE;
2316 }
2317 }
2318
2319
2320
2321 /**
2322 * Emit all input declarations.
2323 */
2324 static boolean
2325 emit_input_declarations(struct svga_shader_emitter_v10 *emit)
2326 {
2327 unsigned i;
2328
2329 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2330
2331 for (i = 0; i < emit->linkage.num_inputs; i++) {
2332 unsigned semantic_name = emit->info.input_semantic_name[i];
2333 unsigned usage_mask = emit->info.input_usage_mask[i];
2334 unsigned index = emit->linkage.input_map[i];
2335 unsigned type, interpolationMode, name;
2336
2337 if (usage_mask == 0)
2338 continue; /* register is not actually used */
2339
2340 if (semantic_name == TGSI_SEMANTIC_POSITION) {
2341 /* fragment position input */
2342 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2343 interpolationMode = VGPU10_INTERPOLATION_LINEAR;
2344 name = VGPU10_NAME_POSITION;
2345 if (usage_mask & TGSI_WRITEMASK_W) {
2346 /* we need to replace use of 'w' with '1/w' */
2347 emit->fs.fragcoord_input_index = i;
2348 }
2349 }
2350 else if (semantic_name == TGSI_SEMANTIC_FACE) {
2351 /* fragment front-facing input */
2352 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2353 interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
2354 name = VGPU10_NAME_IS_FRONT_FACE;
2355 emit->fs.face_input_index = i;
2356 }
2357 else if (semantic_name == TGSI_SEMANTIC_PRIMID) {
2358 /* primitive ID */
2359 type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
2360 interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
2361 name = VGPU10_NAME_PRIMITIVE_ID;
2362 }
2363 else {
2364 /* general fragment input */
2365 type = VGPU10_OPCODE_DCL_INPUT_PS;
2366 interpolationMode =
2367 translate_interpolation(emit,
2368 emit->info.input_interpolate[i],
2369 emit->info.input_interpolate_loc[i]);
2370
2371 /* keep track of whether flat interpolation mode is used */
2372 emit->uses_flat_interp = emit->uses_flat_interp ||
2373 (interpolationMode == VGPU10_INTERPOLATION_CONSTANT);
2374
2375 name = VGPU10_NAME_UNDEFINED;
2376 }
2377
2378 emit_input_declaration(emit, type,
2379 VGPU10_OPERAND_TYPE_INPUT,
2380 VGPU10_OPERAND_INDEX_1D, index, 1,
2381 name,
2382 VGPU10_OPERAND_4_COMPONENT,
2383 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2384 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2385 interpolationMode);
2386 }
2387 }
2388 else if (emit->unit == PIPE_SHADER_GEOMETRY) {
2389
2390 for (i = 0; i < emit->info.num_inputs; i++) {
2391 unsigned semantic_name = emit->info.input_semantic_name[i];
2392 unsigned usage_mask = emit->info.input_usage_mask[i];
2393 unsigned index = emit->linkage.input_map[i];
2394 unsigned opcodeType, operandType;
2395 unsigned numComp, selMode;
2396 unsigned name;
2397 unsigned dim;
2398
2399 if (usage_mask == 0)
2400 continue; /* register is not actually used */
2401
2402 opcodeType = VGPU10_OPCODE_DCL_INPUT;
2403 operandType = VGPU10_OPERAND_TYPE_INPUT;
2404 numComp = VGPU10_OPERAND_4_COMPONENT;
2405 selMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
2406 name = VGPU10_NAME_UNDEFINED;
2407
2408 /* all geometry shader inputs are two dimensional except gl_PrimitiveID */
2409 dim = VGPU10_OPERAND_INDEX_2D;
2410
2411 if (semantic_name == TGSI_SEMANTIC_PRIMID) {
2412 /* Primitive ID */
2413 operandType = VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID;
2414 dim = VGPU10_OPERAND_INDEX_0D;
2415 numComp = VGPU10_OPERAND_0_COMPONENT;
2416 selMode = 0;
2417
2418 /* Also save the register index so we can check for the primitive id
2419 * when emitting the src register.  We need to modify the operand type
2420 * and index dimension when emitting the primitive id src register.
2421 */
2422 emit->gs.prim_id_index = i;
2423 }
2424 else if (semantic_name == TGSI_SEMANTIC_POSITION) {
2425 /* vertex position input */
2426 opcodeType = VGPU10_OPCODE_DCL_INPUT_SIV;
2427 name = VGPU10_NAME_POSITION;
2428 }
2429
2430 emit_input_declaration(emit, opcodeType, operandType,
2431 dim, index,
2432 emit->gs.input_size,
2433 name,
2434 numComp, selMode,
2435 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2436 VGPU10_INTERPOLATION_UNDEFINED);
2437 }
2438 }
2439 else {
2440 assert(emit->unit == PIPE_SHADER_VERTEX);
2441
2442 for (i = 0; i < emit->info.num_inputs; i++) {
2443 unsigned usage_mask = emit->info.input_usage_mask[i];
2444 unsigned index = i;
2445
2446 if (usage_mask == 0)
2447 continue; /* register is not actually used */
2448
2449 emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT,
2450 VGPU10_OPERAND_TYPE_INPUT,
2451 VGPU10_OPERAND_INDEX_1D, index, 1,
2452 VGPU10_NAME_UNDEFINED,
2453 VGPU10_OPERAND_4_COMPONENT,
2454 VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
2455 VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
2456 VGPU10_INTERPOLATION_UNDEFINED);
2457 }
2458 }
2459
2460 return TRUE;
2461 }
2462
2463
2464 /**
2465 * Emit all output declarations.
2466 */
2467 static boolean
2468 emit_output_declarations(struct svga_shader_emitter_v10 *emit)
2469 {
2470 unsigned i;
2471
2472 for (i = 0; i < emit->info.num_outputs; i++) {
2473 /*const unsigned usage_mask = emit->info.output_usage_mask[i];*/
2474 const unsigned semantic_name = emit->info.output_semantic_name[i];
2475 const unsigned semantic_index = emit->info.output_semantic_index[i];
2476 unsigned index = i;
2477
2478 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2479 if (semantic_name == TGSI_SEMANTIC_COLOR) {
2480 assert(semantic_index < Elements(emit->fs.color_out_index));
2481
2482 emit->fs.color_out_index[semantic_index] = index;
2483
2484 /* The semantic index is the shader's color output/buffer index */
2485 emit_output_declaration(emit,
2486 VGPU10_OPCODE_DCL_OUTPUT, semantic_index,
2487 VGPU10_NAME_UNDEFINED,
2488 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2489
2490 if (semantic_index == 0) {
2491 if (emit->key.fs.write_color0_to_n_cbufs > 1) {
2492 /* Emit declarations for the additional color outputs
2493 * for broadcasting.
2494 */
2495 unsigned j;
2496 for (j = 1; j < emit->key.fs.write_color0_to_n_cbufs; j++) {
2497 /* Allocate a new output index */
2498 unsigned idx = emit->info.num_outputs + j - 1;
2499 emit->fs.color_out_index[j] = idx;
2500 emit_output_declaration(emit,
2501 VGPU10_OPCODE_DCL_OUTPUT, idx,
2502 VGPU10_NAME_UNDEFINED,
2503 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2504 emit->info.output_semantic_index[idx] = j;
2505 }
2506 }
2507 }
2508 else {
2509 assert(!emit->key.fs.write_color0_to_n_cbufs);
2510 }
2511 }
2512 else if (semantic_name == TGSI_SEMANTIC_POSITION) {
2513 /* Fragment depth output */
2514 emit_fragdepth_output_declaration(emit);
2515 }
2516 else {
2517 assert(!"Bad output semantic name");
2518 }
2519 }
2520 else {
2521 /* VS or GS */
2522 unsigned name, type;
2523 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
2524
2525 switch (semantic_name) {
2526 case TGSI_SEMANTIC_POSITION:
2527 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2528 type = VGPU10_OPCODE_DCL_OUTPUT_SIV;
2529 name = VGPU10_NAME_POSITION;
2530 /* Save the index of the vertex position output register */
2531 emit->vposition.out_index = index;
2532 break;
2533 case TGSI_SEMANTIC_CLIPDIST:
2534 type = VGPU10_OPCODE_DCL_OUTPUT_SIV;
2535 name = VGPU10_NAME_CLIP_DISTANCE;
2536 /* save the starting index of the clip distance output register */
2537 if (semantic_index == 0)
2538 emit->clip_dist_out_index = index;
2539 writemask = emit->output_usage_mask[index];
2540 writemask = apply_clip_plane_mask(emit, writemask, semantic_index);
2541 if (writemask == 0x0) {
2542 continue; /* discard this do-nothing declaration */
2543 }
2544 break;
2545 case TGSI_SEMANTIC_PRIMID:
2546 assert(emit->unit == PIPE_SHADER_GEOMETRY);
2547 type = VGPU10_OPCODE_DCL_OUTPUT_SGV;
2548 name = VGPU10_NAME_PRIMITIVE_ID;
2549 break;
2550 case TGSI_SEMANTIC_LAYER:
2551 assert(emit->unit == PIPE_SHADER_GEOMETRY);
2552 type = VGPU10_OPCODE_DCL_OUTPUT_SGV;
2553 name = VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX;
2554 break;
2555 case TGSI_SEMANTIC_CLIPVERTEX:
2556 type = VGPU10_OPCODE_DCL_OUTPUT;
2557 name = VGPU10_NAME_UNDEFINED;
2558 emit->clip_vertex_out_index = index;
2559 break;
2560 default:
2561 /* generic output */
2562 type = VGPU10_OPCODE_DCL_OUTPUT;
2563 name = VGPU10_NAME_UNDEFINED;
2564 }
2565
2566 emit_output_declaration(emit, type, index, name, writemask);
2567 }
2568 }
2569
2570 if (emit->vposition.so_index != INVALID_INDEX &&
2571 emit->vposition.out_index != INVALID_INDEX) {
2572
2573 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2574
2575 /* Emit the declaration for the non-adjusted vertex position
2576 * for stream output purposes.
2577 */
2578 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2579 emit->vposition.so_index,
2580 VGPU10_NAME_UNDEFINED,
2581 VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
2582 }
2583
2584 if (emit->clip_dist_so_index != INVALID_INDEX &&
2585 emit->clip_dist_out_index != INVALID_INDEX) {
2586
2587 assert(emit->unit != PIPE_SHADER_FRAGMENT);
2588
2589 /* Emit the declaration for the clip distance shadow copy which
2590 * will be used for stream output purposes and for the clip
2591 * distance varying variable.
2592 */
2593 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2594 emit->clip_dist_so_index,
2595 VGPU10_NAME_UNDEFINED,
2596 emit->output_usage_mask[emit->clip_dist_out_index]);
2597
2598 if (emit->info.num_written_clipdistance > 4) {
2599 /* for the second clip distance register, each handles 4 planes */
2600 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
2601 emit->clip_dist_so_index + 1,
2602 VGPU10_NAME_UNDEFINED,
2603 emit->output_usage_mask[emit->clip_dist_out_index+1]);
2604 }
2605 }
2606
2607 return TRUE;
2608 }
2609
2610
2611 /**
2612 * Emit the declaration for the temporary registers.
2613 */
2614 static boolean
2615 emit_temporaries_declaration(struct svga_shader_emitter_v10 *emit)
2616 {
2617 unsigned total_temps, reg, i;
2618
2619 total_temps = emit->num_shader_temps;
2620
2621 /* Allocate extra temps for specially-implemented instructions,
2622 * such as LIT.
2623 */
2624 total_temps += MAX_INTERNAL_TEMPS;
2625
2626 if (emit->unit == PIPE_SHADER_VERTEX || emit->unit == PIPE_SHADER_GEOMETRY) {
2627 if (emit->vposition.need_prescale || emit->key.vs.undo_viewport ||
2628 emit->key.clip_plane_enable ||
2629 emit->vposition.so_index != INVALID_INDEX) {
2630 emit->vposition.tmp_index = total_temps;
2631 total_temps += 1;
2632 }
2633
2634 if (emit->unit == PIPE_SHADER_VERTEX) {
2635 unsigned attrib_mask = (emit->key.vs.adjust_attrib_w_1 |
2636 emit->key.vs.adjust_attrib_itof |
2637 emit->key.vs.adjust_attrib_utof |
2638 emit->key.vs.attrib_is_bgra |
2639 emit->key.vs.attrib_puint_to_snorm |
2640 emit->key.vs.attrib_puint_to_uscaled |
2641 emit->key.vs.attrib_puint_to_sscaled);
2642 while (attrib_mask) {
2643 unsigned index = u_bit_scan(&attrib_mask);
2644 emit->vs.adjusted_input[index] = total_temps++;
2645 }
2646 }
2647
2648 if (emit->clip_mode == CLIP_DISTANCE) {
2649 /* We need to write the clip distance to a temporary register
2650 * first.  Then it will be copied to the shadow copy used by the
2651 * clip distance varying variable and by stream output.  It will
2652 * also be copied to the actual CLIPDIST registers according to
2653 * the enabled clip planes.
2654 */
2655 emit->clip_dist_tmp_index = total_temps++;
2656 if (emit->info.num_written_clipdistance > 4)
2657 total_temps++; /* second clip register */
2658 }
2659 else if (emit->clip_mode == CLIP_VERTEX) {
2660 /* We need to convert the TGSI CLIPVERTEX output to one or more
2661 * clip distances. Allocate a temp reg for the clipvertex here.
2662 */
2663 assert(emit->info.writes_clipvertex > 0);
2664 emit->clip_vertex_tmp_index = total_temps;
2665 total_temps++;
2666 }
2667 }
2668 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
2669 if (emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS ||
2670 emit->key.fs.write_color0_to_n_cbufs > 1) {
2671 /* Allocate a temp to hold the output color */
2672 emit->fs.color_tmp_index = total_temps;
2673 total_temps += 1;
2674 }
2675
2676 if (emit->fs.face_input_index != INVALID_INDEX) {
2677 /* Allocate a temp for the +/-1 face register */
2678 emit->fs.face_tmp_index = total_temps;
2679 total_temps += 1;
2680 }
2681
2682 if (emit->fs.fragcoord_input_index != INVALID_INDEX) {
2683 /* Allocate a temp for modified fragment position register */
2684 emit->fs.fragcoord_tmp_index = total_temps;
2685 total_temps += 1;
2686 }
2687 }
2688
2689 for (i = 0; i < emit->num_address_regs; i++) {
2690 emit->address_reg_index[i] = total_temps++;
2691 }
2692
2693 /* Initialize the temp_map array which maps TGSI temp indexes to VGPU10
2694 * temp indexes. Basically, we compact all the non-array temp register
2695 * indexes into a consecutive series.
2696 *
2697 * Before, we may have some TGSI declarations like:
2698 * DCL TEMP[0..1], LOCAL
2699 * DCL TEMP[2..4], ARRAY(1), LOCAL
2700 * DCL TEMP[5..7], ARRAY(2), LOCAL
2701 * plus, some extra temps, like TEMP[8], TEMP[9] for misc things
2702 *
2703 * After, we'll have a map like this:
2704 * temp_map[0] = { array 0, index 0 }
2705 * temp_map[1] = { array 0, index 1 }
2706 * temp_map[2] = { array 1, index 0 }
2707 * temp_map[3] = { array 1, index 1 }
2708 * temp_map[4] = { array 1, index 2 }
2709 * temp_map[5] = { array 2, index 0 }
2710 * temp_map[6] = { array 2, index 1 }
2711 * temp_map[7] = { array 2, index 2 }
2712 * temp_map[8] = { array 0, index 2 }
2713 * temp_map[9] = { array 0, index 3 }
2714 *
2715 * We'll declare two arrays of 3 elements, plus a set of four non-indexed
2716 * temps numbered 0..3
2717 *
2718 * Any time we emit a temporary register index, we'll have to use the
2719 * temp_map[] table to convert the TGSI index to the VGPU10 index.
2720 *
2721 * Finally, we recompute the total_temps value here.
2722 */
2723 reg = 0;
2724 for (i = 0; i < total_temps; i++) {
2725 if (emit->temp_map[i].arrayId == 0) {
2726 emit->temp_map[i].index = reg++;
2727 }
2728 }
2729 total_temps = reg;
2730
2731 if (0) {
2732 debug_printf("total_temps %u\n", total_temps);
2733 for (i = 0; i < 30; i++) {
2734 debug_printf("temp %u -> array %u index %u\n",
2735 i, emit->temp_map[i].arrayId, emit->temp_map[i].index);
2736 }
2737 }
2738
2739 /* Emit declaration of ordinary temp registers */
2740 if (total_temps > 0) {
2741 VGPU10OpcodeToken0 opcode0;
2742
2743 opcode0.value = 0;
2744 opcode0.opcodeType = VGPU10_OPCODE_DCL_TEMPS;
2745
2746 begin_emit_instruction(emit);
2747 emit_dword(emit, opcode0.value);
2748 emit_dword(emit, total_temps);
2749 end_emit_instruction(emit);
2750 }
2751
2752 /* Emit declarations for indexable temp arrays. Skip 0th entry since
2753 * it's unused.
2754 */
2755 for (i = 1; i < emit->num_temp_arrays; i++) {
2756 unsigned num_temps = emit->temp_arrays[i].size;
2757
2758 if (num_temps > 0) {
2759 VGPU10OpcodeToken0 opcode0;
2760
2761 opcode0.value = 0;
2762 opcode0.opcodeType = VGPU10_OPCODE_DCL_INDEXABLE_TEMP;
2763
2764 begin_emit_instruction(emit);
2765 emit_dword(emit, opcode0.value);
2766 emit_dword(emit, i); /* which array */
2767 emit_dword(emit, num_temps);
2768 emit_dword(emit, 4); /* num components */
2769 end_emit_instruction(emit);
2770
2771 total_temps += num_temps;
2772 }
2773 }
2774
2775 /* Check that the grand total of all regular and indexed temps is
2776 * under the limit.
2777 */
2778 check_register_index(emit, VGPU10_OPCODE_DCL_TEMPS, total_temps - 1);
2779
2780 return TRUE;
2781 }
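/* Token layout sketch (illustrative): an indexable temp array with id 1
 * and three elements would be declared by the loop above as four dwords:
 * the DCL_INDEXABLE_TEMP opcode token, 1 (array id), 3 (element count)
 * and 4 (components per element).
 */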
2782
2783
2784 static boolean
2785 emit_constant_declaration(struct svga_shader_emitter_v10 *emit)
2786 {
2787 VGPU10OpcodeToken0 opcode0;
2788 VGPU10OperandToken0 operand0;
2789 unsigned total_consts, i;
2790
2791 opcode0.value = 0;
2792 opcode0.opcodeType = VGPU10_OPCODE_DCL_CONSTANT_BUFFER;
2793 opcode0.accessPattern = VGPU10_CB_IMMEDIATE_INDEXED;
2794 /* XXX or, access pattern = VGPU10_CB_DYNAMIC_INDEXED */
2795
2796 operand0.value = 0;
2797 operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
2798 operand0.indexDimension = VGPU10_OPERAND_INDEX_2D;
2799 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2800 operand0.index1Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2801 operand0.operandType = VGPU10_OPERAND_TYPE_CONSTANT_BUFFER;
2802 operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
2803 operand0.swizzleX = 0;
2804 operand0.swizzleY = 1;
2805 operand0.swizzleZ = 2;
2806 operand0.swizzleW = 3;
2807
2808 /**
2809 * Emit declaration for constant buffer [0]. We also allocate
2810 * room for the extra constants here.
2811 */
2812 total_consts = emit->num_shader_consts[0];
2813
2814 /* Now, allocate constant slots for the "extra" constants */
2815
2816 /* Vertex position scale/translation */
2817 if (emit->vposition.need_prescale) {
2818 emit->vposition.prescale_scale_index = total_consts++;
2819 emit->vposition.prescale_trans_index = total_consts++;
2820 }
2821
2822 if (emit->unit == PIPE_SHADER_VERTEX) {
2823 if (emit->key.vs.undo_viewport) {
2824 emit->vs.viewport_index = total_consts++;
2825 }
2826 }
2827
2828 /* user-defined clip planes */
2829 if (emit->key.clip_plane_enable) {
2830 unsigned n = util_bitcount(emit->key.clip_plane_enable);
2831 assert(emit->unit == PIPE_SHADER_VERTEX ||
2832 emit->unit == PIPE_SHADER_GEOMETRY);
2833 for (i = 0; i < n; i++) {
2834 emit->clip_plane_const[i] = total_consts++;
2835 }
2836 }
2837
2838 /* Texcoord scale factors for RECT textures */
2839 {
2840 for (i = 0; i < emit->num_samplers; i++) {
2841 if (emit->key.tex[i].unnormalized) {
2842 emit->texcoord_scale_index[i] = total_consts++;
2843 }
2844 }
2845 }
2846
2847 /* Texture buffer sizes */
2848 for (i = 0; i < emit->num_samplers; i++) {
2849 if (emit->key.tex[i].texture_target == PIPE_BUFFER) {
2850 emit->texture_buffer_size_index[i] = total_consts++;
2851 }
2852 }
2853
2854 if (total_consts > 0) {
2855 begin_emit_instruction(emit);
2856 emit_dword(emit, opcode0.value);
2857 emit_dword(emit, operand0.value);
2858 emit_dword(emit, 0); /* which const buffer slot */
2859 emit_dword(emit, total_consts);
2860 end_emit_instruction(emit);
2861 }
2862
2863 /* Declare remaining constant buffers (UBOs) */
2864 for (i = 1; i < Elements(emit->num_shader_consts); i++) {
2865 if (emit->num_shader_consts[i] > 0) {
2866 begin_emit_instruction(emit);
2867 emit_dword(emit, opcode0.value);
2868 emit_dword(emit, operand0.value);
2869 emit_dword(emit, i); /* which const buffer slot */
2870 emit_dword(emit, emit->num_shader_consts[i]);
2871 end_emit_instruction(emit);
2872 }
2873 }
2874
2875 return TRUE;
2876 }
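/* Illustrative layout of constant buffer 0 (hypothetical shader): with
 * four user constants and vertex-position prescaling enabled, slots 0..3
 * hold the user constants, slot 4 the prescale scale factors and slot 5
 * the prescale translation.  Any undo_viewport, clip-plane, texcoord-scale
 * and texture-buffer-size constants would follow, in the order allocated
 * above.
 */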
2877
2878
2879 /**
2880 * Emit declarations for samplers.
2881 */
2882 static boolean
2883 emit_sampler_declarations(struct svga_shader_emitter_v10 *emit)
2884 {
2885 unsigned i;
2886
2887 for (i = 0; i < emit->num_samplers; i++) {
2888 VGPU10OpcodeToken0 opcode0;
2889 VGPU10OperandToken0 operand0;
2890
2891 opcode0.value = 0;
2892 opcode0.opcodeType = VGPU10_OPCODE_DCL_SAMPLER;
2893 opcode0.samplerMode = VGPU10_SAMPLER_MODE_DEFAULT;
2894
2895 operand0.value = 0;
2896 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
2897 operand0.operandType = VGPU10_OPERAND_TYPE_SAMPLER;
2898 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
2899 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2900
2901 begin_emit_instruction(emit);
2902 emit_dword(emit, opcode0.value);
2903 emit_dword(emit, operand0.value);
2904 emit_dword(emit, i);
2905 end_emit_instruction(emit);
2906 }
2907
2908 return TRUE;
2909 }
2910
2911
2912 /**
2913 * Translate PIPE_TEXTURE_x to VGPU10_RESOURCE_DIMENSION_x.
2914 */
2915 static unsigned
2916 pipe_texture_to_resource_dimension(unsigned target, bool msaa)
2917 {
2918 switch (target) {
2919 case PIPE_BUFFER:
2920 return VGPU10_RESOURCE_DIMENSION_BUFFER;
2921 case PIPE_TEXTURE_1D:
2922 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D;
2923 case PIPE_TEXTURE_2D:
2924 case PIPE_TEXTURE_RECT:
2925 return msaa ? VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS
2926 : VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
2927 case PIPE_TEXTURE_3D:
2928 return VGPU10_RESOURCE_DIMENSION_TEXTURE3D;
2929 case PIPE_TEXTURE_CUBE:
2930 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBE;
2931 case PIPE_TEXTURE_1D_ARRAY:
2932 return VGPU10_RESOURCE_DIMENSION_TEXTURE1DARRAY;
2933 case PIPE_TEXTURE_2D_ARRAY:
2934 return msaa ? VGPU10_RESOURCE_DIMENSION_TEXTURE2DMSARRAY
2935 : VGPU10_RESOURCE_DIMENSION_TEXTURE2DARRAY;
2936 case PIPE_TEXTURE_CUBE_ARRAY:
2937 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBEARRAY;
2938 default:
2939 assert(!"Unexpected resource type");
2940 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D;
2941 }
2942 }
2943
2944
2945 /**
2946 * Given a tgsi_return_type, return true iff it is an integer type.
2947 */
2948 static boolean
2949 is_integer_type(enum tgsi_return_type type)
2950 {
2951 switch (type) {
2952 case TGSI_RETURN_TYPE_SINT:
2953 case TGSI_RETURN_TYPE_UINT:
2954 return TRUE;
2955 case TGSI_RETURN_TYPE_FLOAT:
2956 case TGSI_RETURN_TYPE_UNORM:
2957 case TGSI_RETURN_TYPE_SNORM:
2958 return FALSE;
2959 case TGSI_RETURN_TYPE_COUNT:
2960 default:
2961 assert(!"is_integer_type: Unknown tgsi_return_type");
2962 return FALSE;
2963 }
2964 }
2965
2966
2967 /**
2968 * Emit declarations for resources.
2969 * XXX When we're sure that all TGSI shaders will be generated with
2970 * sampler view declarations (Ex: DCL SVIEW[n], 2D, UINT) we may
2971 * rework this code.
2972 */
2973 static boolean
2974 emit_resource_declarations(struct svga_shader_emitter_v10 *emit)
2975 {
2976 unsigned i;
2977
2978 /* Emit resource decl for each sampler */
2979 for (i = 0; i < emit->num_samplers; i++) {
2980 VGPU10OpcodeToken0 opcode0;
2981 VGPU10OperandToken0 operand0;
2982 VGPU10ResourceReturnTypeToken return_type;
2983 VGPU10_RESOURCE_RETURN_TYPE rt;
2984
2985 opcode0.value = 0;
2986 opcode0.opcodeType = VGPU10_OPCODE_DCL_RESOURCE;
2987 opcode0.resourceDimension =
2988 pipe_texture_to_resource_dimension(emit->key.tex[i].texture_target,
2989 emit->key.tex[i].texture_msaa);
2990 operand0.value = 0;
2991 operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
2992 operand0.operandType = VGPU10_OPERAND_TYPE_RESOURCE;
2993 operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
2994 operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
2995
2996 #if 1
2997 /* convert TGSI_RETURN_TYPE_x to VGPU10_RETURN_TYPE_x */
2998 STATIC_ASSERT(VGPU10_RETURN_TYPE_UNORM == TGSI_RETURN_TYPE_UNORM + 1);
2999 STATIC_ASSERT(VGPU10_RETURN_TYPE_SNORM == TGSI_RETURN_TYPE_SNORM + 1);
3000 STATIC_ASSERT(VGPU10_RETURN_TYPE_SINT == TGSI_RETURN_TYPE_SINT + 1);
3001 STATIC_ASSERT(VGPU10_RETURN_TYPE_UINT == TGSI_RETURN_TYPE_UINT + 1);
3002 STATIC_ASSERT(VGPU10_RETURN_TYPE_FLOAT == TGSI_RETURN_TYPE_FLOAT + 1);
3003 assert(emit->key.tex[i].return_type <= TGSI_RETURN_TYPE_FLOAT);
3004 rt = emit->key.tex[i].return_type + 1;
3005 #else
3006 switch (emit->key.tex[i].return_type) {
3007 case TGSI_RETURN_TYPE_UNORM: rt = VGPU10_RETURN_TYPE_UNORM; break;
3008 case TGSI_RETURN_TYPE_SNORM: rt = VGPU10_RETURN_TYPE_SNORM; break;
3009 case TGSI_RETURN_TYPE_SINT: rt = VGPU10_RETURN_TYPE_SINT; break;
3010 case TGSI_RETURN_TYPE_UINT: rt = VGPU10_RETURN_TYPE_UINT; break;
3011 case TGSI_RETURN_TYPE_FLOAT: rt = VGPU10_RETURN_TYPE_FLOAT; break;
3012 case TGSI_RETURN_TYPE_COUNT:
3013 default:
3014 rt = VGPU10_RETURN_TYPE_FLOAT;
3015 assert(!"emit_resource_declarations: Unknown tgsi_return_type");
3016 }
3017 #endif
3018
3019 return_type.value = 0;
3020 return_type.component0 = rt;
3021 return_type.component1 = rt;
3022 return_type.component2 = rt;
3023 return_type.component3 = rt;
3024
3025 begin_emit_instruction(emit);
3026 emit_dword(emit, opcode0.value);
3027 emit_dword(emit, operand0.value);
3028 emit_dword(emit, i);
3029 emit_dword(emit, return_type.value);
3030 end_emit_instruction(emit);
3031 }
3032
3033 return TRUE;
3034 }
3035
3036 static void
3037 emit_instruction_op1(struct svga_shader_emitter_v10 *emit,
3038 unsigned opcode,
3039 const struct tgsi_full_dst_register *dst,
3040 const struct tgsi_full_src_register *src,
3041 boolean saturate)
3042 {
3043 begin_emit_instruction(emit);
3044 emit_opcode(emit, opcode, saturate);
3045 emit_dst_register(emit, dst);
3046 emit_src_register(emit, src);
3047 end_emit_instruction(emit);
3048 }
3049
3050 static void
3051 emit_instruction_op2(struct svga_shader_emitter_v10 *emit,
3052 unsigned opcode,
3053 const struct tgsi_full_dst_register *dst,
3054 const struct tgsi_full_src_register *src1,
3055 const struct tgsi_full_src_register *src2,
3056 boolean saturate)
3057 {
3058 begin_emit_instruction(emit);
3059 emit_opcode(emit, opcode, saturate);
3060 emit_dst_register(emit, dst);
3061 emit_src_register(emit, src1);
3062 emit_src_register(emit, src2);
3063 end_emit_instruction(emit);
3064 }
3065
3066 static void
3067 emit_instruction_op3(struct svga_shader_emitter_v10 *emit,
3068 unsigned opcode,
3069 const struct tgsi_full_dst_register *dst,
3070 const struct tgsi_full_src_register *src1,
3071 const struct tgsi_full_src_register *src2,
3072 const struct tgsi_full_src_register *src3,
3073 boolean saturate)
3074 {
3075 begin_emit_instruction(emit);
3076 emit_opcode(emit, opcode, saturate);
3077 emit_dst_register(emit, dst);
3078 emit_src_register(emit, src1);
3079 emit_src_register(emit, src2);
3080 emit_src_register(emit, src3);
3081 end_emit_instruction(emit);
3082 }
3083
3084 /**
3085 * Emit the clip distance instructions used for clipping: copy the clip
3086 * distances from the temporary registers to the CLIPDIST registers,
3087 * using the enabled planes mask as the writemask.  Also copy the clip
3088 * distances from the temporary to the clip distance shadow copy
3089 * register, which will be read as an input by the next shader stage.
3090 */
3091 static void
3092 emit_clip_distance_instructions(struct svga_shader_emitter_v10 *emit)
3093 {
3094 struct tgsi_full_src_register tmp_clip_dist_src;
3095 struct tgsi_full_dst_register clip_dist_dst;
3096
3097 unsigned i;
3098 unsigned clip_plane_enable = emit->key.clip_plane_enable;
3099 unsigned clip_dist_tmp_index = emit->clip_dist_tmp_index;
3100 unsigned num_written_clipdist = emit->info.num_written_clipdistance;
3101
3102 assert(emit->clip_dist_out_index != INVALID_INDEX);
3103 assert(emit->clip_dist_tmp_index != INVALID_INDEX);
3104
3105 /**
3106 * Temporarily reset the temporary clip dist register index so
3107 * that the copy to the real clip dist register will not
3108 * attempt to copy to the temporary register again.
3109 */
3110 emit->clip_dist_tmp_index = INVALID_INDEX;
3111
3112 for (i = 0; i < 2 && num_written_clipdist; i++, num_written_clipdist-=4) {
3113
3114 tmp_clip_dist_src = make_src_temp_reg(clip_dist_tmp_index + i);
3115
3116 /**
3117 * Copy to the shadow copy for use by the varying variable and by
3118 * stream output.  All clip distances are written regardless of
3119 * which clipping planes are enabled.
3120 */
3121 clip_dist_dst = make_dst_reg(TGSI_FILE_OUTPUT,
3122 emit->clip_dist_so_index + i);
3123
3124 /* MOV clip_dist_so, tmp_clip_dist */
3125 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &clip_dist_dst,
3126 &tmp_clip_dist_src, FALSE);
3127
3128 /**
3129 * Copy the clip distances for the enabled clipping planes
3130 * to the CLIPDIST registers used for clipping.
3131 */
3132 if (clip_plane_enable & 0xf) {
3133 clip_dist_dst = make_dst_reg(TGSI_FILE_OUTPUT,
3134 emit->clip_dist_out_index + i);
3135 clip_dist_dst = writemask_dst(&clip_dist_dst, clip_plane_enable & 0xf);
3136
3137 /* MOV CLIPDIST, tmp_clip_dist */
3138 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &clip_dist_dst,
3139 &tmp_clip_dist_src, FALSE);
3140 }
3141 /* four clip planes per clip register */
3142 clip_plane_enable >>= 4;
3143 }
3144 /**
3145 * set the temporary clip dist register index back to the
3146 * temporary index for the next vertex
3147 */
3148 emit->clip_dist_tmp_index = clip_dist_tmp_index;
3149 }
3150
3151 /* Declare clip distance output registers for user-defined clip planes
3152 * or the TGSI_CLIPVERTEX output.
3153 */
3154 static void
3155 emit_clip_distance_declarations(struct svga_shader_emitter_v10 *emit)
3156 {
3157 unsigned num_clip_planes = util_bitcount(emit->key.clip_plane_enable);
3158 unsigned index = emit->num_outputs;
3159 unsigned plane_mask;
3160
3161 assert(emit->unit == PIPE_SHADER_VERTEX ||
3162 emit->unit == PIPE_SHADER_GEOMETRY);
3163 assert(num_clip_planes <= 8);
3164
3165 if (emit->clip_mode != CLIP_LEGACY &&
3166 emit->clip_mode != CLIP_VERTEX) {
3167 return;
3168 }
3169
3170 if (num_clip_planes == 0)
3171 return;
3172
3173 /* Declare one or two clip output registers. The number of components
3174 * in the mask reflects the number of clip planes. For example, if 5
3175 * clip planes are needed, we'll declare outputs similar to:
3176 * dcl_output_siv o2.xyzw, clip_distance
3177 * dcl_output_siv o3.x, clip_distance
3178 */
3179 emit->clip_dist_out_index = index; /* save the starting clip dist reg index */
3180
3181 plane_mask = (1 << num_clip_planes) - 1;
3182 if (plane_mask & 0xf) {
3183 unsigned cmask = plane_mask & VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
3184 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT_SIV, index,
3185 VGPU10_NAME_CLIP_DISTANCE, cmask);
3186 emit->num_outputs++;
3187 }
3188 if (plane_mask & 0xf0) {
3189 unsigned cmask = (plane_mask >> 4) & VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
3190 emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT_SIV, index + 1,
3191 VGPU10_NAME_CLIP_DISTANCE, cmask);
3192 emit->num_outputs++;
3193 }
3194 }
3195
3196
3197 /**
3198 * Emit the instructions for writing to the clip distance registers
3199 * to handle legacy/automatic clip planes.
3200 * For each clip plane, the distance is the dot product of the vertex
3201 * position (found in TEMP[vpos_tmp_index]) and the clip plane coefficients.
3202 * This is not used when the shader already declares explicit CLIPVERTEX
3203 * or CLIPDISTANCE output registers.
3204 */
3205 static void
3206 emit_clip_distance_from_vpos(struct svga_shader_emitter_v10 *emit,
3207 unsigned vpos_tmp_index)
3208 {
3209 unsigned i, num_clip_planes = util_bitcount(emit->key.clip_plane_enable);
3210
3211 assert(emit->clip_mode == CLIP_LEGACY);
3212 assert(num_clip_planes <= 8);
3213
3214 assert(emit->unit == PIPE_SHADER_VERTEX ||
3215 emit->unit == PIPE_SHADER_GEOMETRY);
3216
3217 for (i = 0; i < num_clip_planes; i++) {
3218 struct tgsi_full_dst_register dst;
3219 struct tgsi_full_src_register plane_src, vpos_src;
3220 unsigned reg_index = emit->clip_dist_out_index + i / 4;
3221 unsigned comp = i % 4;
3222 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_X << comp;
3223
3224 /* create dst, src regs */
3225 dst = make_dst_reg(TGSI_FILE_OUTPUT, reg_index);
3226 dst = writemask_dst(&dst, writemask);
3227
3228 plane_src = make_src_const_reg(emit->clip_plane_const[i]);
3229 vpos_src = make_src_temp_reg(vpos_tmp_index);
3230
3231 /* DP4 clip_dist, plane, vpos */
3232 emit_instruction_op2(emit, VGPU10_OPCODE_DP4, &dst,
3233 &plane_src, &vpos_src, FALSE);
3234 }
3235 }
3236
3237
3238 /**
3239 * Emit the instructions for computing the clip distance results from
3240 * the clip vertex temporary.
3241 * For each clip plane, the distance is the dot product of the clip vertex
3242 * position (found in a temp reg) and the clip plane coefficients.
3243 */
3244 static void
3245 emit_clip_vertex_instructions(struct svga_shader_emitter_v10 *emit)
3246 {
3247 const unsigned num_clip = util_bitcount(emit->key.clip_plane_enable);
3248 unsigned i;
3249 struct tgsi_full_dst_register dst;
3250 struct tgsi_full_src_register clipvert_src;
3251 const unsigned clip_vertex_tmp = emit->clip_vertex_tmp_index;
3252
3253 assert(emit->unit == PIPE_SHADER_VERTEX ||
3254 emit->unit == PIPE_SHADER_GEOMETRY);
3255
3256 assert(emit->clip_mode == CLIP_VERTEX);
3257
3258 clipvert_src = make_src_temp_reg(clip_vertex_tmp);
3259
3260 for (i = 0; i < num_clip; i++) {
3261 struct tgsi_full_src_register plane_src;
3262 unsigned reg_index = emit->clip_dist_out_index + i / 4;
3263 unsigned comp = i % 4;
3264 unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_X << comp;
3265
3266 /* create dst, src regs */
3267 dst = make_dst_reg(TGSI_FILE_OUTPUT, reg_index);
3268 dst = writemask_dst(&dst, writemask);
3269
3270 plane_src = make_src_const_reg(emit->clip_plane_const[i]);
3271
3272 /* DP4 clip_dist, plane, clipvert */
3273 emit_instruction_op2(emit, VGPU10_OPCODE_DP4, &dst,
3274 &plane_src, &clipvert_src, FALSE);
3275 }
3276
3277 /* copy temporary clip vertex register to the clip vertex register */
3278
3279 assert(emit->clip_vertex_out_index != INVALID_INDEX);
3280
3281 /**
3282 * Temporarily reset the temporary clip vertex register index so
3283 * that the copy to the clip vertex register will not attempt
3284 * to copy to the temporary register again.
3285 */
3286 emit->clip_vertex_tmp_index = INVALID_INDEX;
3287
3288 /* MOV clip_vertex, clip_vertex_tmp */
3289 dst = make_dst_reg(TGSI_FILE_OUTPUT, emit->clip_vertex_out_index);
3290 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
3291 &dst, &clipvert_src, FALSE);
3292
3293 /**
3294 * set the temporary clip vertex register index back to the
3295 * temporary index for the next vertex
3296 */
3297 emit->clip_vertex_tmp_index = clip_vertex_tmp;
3298 }
3299
3300 /**
3301 * Emit code to convert RGBA to BGRA
3302 */
3303 static void
3304 emit_swap_r_b(struct svga_shader_emitter_v10 *emit,
3305 const struct tgsi_full_dst_register *dst,
3306 const struct tgsi_full_src_register *src)
3307 {
3308 struct tgsi_full_src_register bgra_src =
3309 swizzle_src(src, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X, TGSI_SWIZZLE_W);
3310
3311 begin_emit_instruction(emit);
3312 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
3313 emit_dst_register(emit, dst);
3314 emit_src_register(emit, &bgra_src);
3315 end_emit_instruction(emit);
3316 }
3317
3318
3319 /** Convert from 10_10_10_2 normalized to 10_10_10_2_snorm */
3320 static void
3321 emit_puint_to_snorm(struct svga_shader_emitter_v10 *emit,
3322 const struct tgsi_full_dst_register *dst,
3323 const struct tgsi_full_src_register *src)
3324 {
3325 struct tgsi_full_src_register half = make_immediate_reg_float(emit, 0.5f);
3326 struct tgsi_full_src_register two =
3327 make_immediate_reg_float4(emit, 2.0f, 2.0f, 2.0f, 3.0f);
3328 struct tgsi_full_src_register neg_two =
3329 make_immediate_reg_float4(emit, -2.0f, -2.0f, -2.0f, -1.66666f);
3330
3331 unsigned val_tmp = get_temp_index(emit);
3332 struct tgsi_full_dst_register val_dst = make_dst_temp_reg(val_tmp);
3333 struct tgsi_full_src_register val_src = make_src_temp_reg(val_tmp);
3334
3335 unsigned bias_tmp = get_temp_index(emit);
3336 struct tgsi_full_dst_register bias_dst = make_dst_temp_reg(bias_tmp);
3337 struct tgsi_full_src_register bias_src = make_src_temp_reg(bias_tmp);
3338
3339 /* val = src * 2.0 */
3340 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &val_dst,
3341 src, &two, FALSE);
3342
3343 /* bias = (src >= 0.5) */
3344 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &bias_dst,
3345 src, &half, FALSE);
3346
3347 /* bias = bias & -2.0 */
3348 emit_instruction_op2(emit, VGPU10_OPCODE_AND, &bias_dst,
3349 &bias_src, &neg_two, FALSE);
3350
3351 /* dst = val + bias */
3352 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, dst,
3353 &val_src, &bias_src, FALSE);
3354
3355 free_temp_indexes(emit);
3356 }
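/* Worked example (illustrative): for a component read back as the unorm
 * value 0.75 (i.e. a raw value in the upper half of the 10-bit range),
 * MUL gives val = 1.5, GE produces an all-ones mask since 0.75 >= 0.5,
 * the AND leaves bias = -2.0, and the final ADD yields -0.5, the expected
 * snorm interpretation.  For 0.25 the mask is zero, so the result is 0.5.
 */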
3357
3358
3359 /** Convert from 10_10_10_2_unorm to 10_10_10_2_uscaled */
3360 static void
3361 emit_puint_to_uscaled(struct svga_shader_emitter_v10 *emit,
3362 const struct tgsi_full_dst_register *dst,
3363 const struct tgsi_full_src_register *src)
3364 {
3365 struct tgsi_full_src_register scale =
3366 make_immediate_reg_float4(emit, 1023.0f, 1023.0f, 1023.0f, 3.0f);
3367
3368 /* dst = src * scale */
3369 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, dst, src, &scale, FALSE);
3370 }
3371
3372
3373 /** Convert from R32_UINT to 10_10_10_2_sscaled */
3374 static void
3375 emit_puint_to_sscaled(struct svga_shader_emitter_v10 *emit,
3376 const struct tgsi_full_dst_register *dst,
3377 const struct tgsi_full_src_register *src)
3378 {
3379 struct tgsi_full_src_register lshift =
3380 make_immediate_reg_int4(emit, 22, 12, 2, 0);
3381 struct tgsi_full_src_register rshift =
3382 make_immediate_reg_int4(emit, 22, 22, 22, 30);
3383
3384 struct tgsi_full_src_register src_xxxx = scalar_src(src, TGSI_SWIZZLE_X);
3385
3386 unsigned tmp = get_temp_index(emit);
3387 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3388 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3389
3390 /*
3391 * r = (pixel << 22) >> 22; # signed int in [-512, 511]
3392 * g = (pixel << 12) >> 22; # signed int in [-512, 511]
3393 * b = (pixel << 2) >> 22; # signed int in [-512, 511]
3394 * a = (pixel << 0) >> 30; # signed int in [-2, 1]
3395 * dst = i_to_f(r,g,b,a); # convert to float
3396 */
3397 emit_instruction_op2(emit, VGPU10_OPCODE_ISHL, &tmp_dst,
3398 &src_xxxx, &lshift, FALSE);
3399 emit_instruction_op2(emit, VGPU10_OPCODE_ISHR, &tmp_dst,
3400 &tmp_src, &rshift, FALSE);
3401 emit_instruction_op1(emit, VGPU10_OPCODE_ITOF, dst, &tmp_src, FALSE);
3402
3403 free_temp_indexes(emit);
3404 }
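/* Worked example (illustrative, for the red/x channel): if the red field
 * of the packed pixel holds the 10-bit pattern 0x3FF, then (pixel << 22)
 * leaves 0xFFC00000 in the temp, the arithmetic shift right by 22
 * sign-extends that to -1, and ITOF converts it to -1.0f.
 */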
3405
3406
3407 /**
3408 * Emit code for TGSI_OPCODE_ABS instruction.
3409 */
3410 static boolean
3411 emit_abs(struct svga_shader_emitter_v10 *emit,
3412 const struct tgsi_full_instruction *inst)
3413 {
3414 /* dst = ABS(s0):
3415 * dst = abs(s0)
3416 * Translates into:
3417 * MOV dst, abs(s0)
3418 */
3419 struct tgsi_full_src_register abs_src0 = absolute_src(&inst->Src[0]);
3420
3421 /* MOV dst, abs(s0) */
3422 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
3423 &abs_src0, inst->Instruction.Saturate);
3424
3425 return TRUE;
3426 }
3427
3428
3429 /**
3430 * Emit code for TGSI_OPCODE_ARL or TGSI_OPCODE_UARL instruction.
3431 */
3432 static boolean
3433 emit_arl_uarl(struct svga_shader_emitter_v10 *emit,
3434 const struct tgsi_full_instruction *inst)
3435 {
3436 unsigned index = inst->Dst[0].Register.Index;
3437 struct tgsi_full_dst_register dst;
3438 unsigned opcode;
3439
3440 assert(index < MAX_VGPU10_ADDR_REGS);
3441 dst = make_dst_temp_reg(emit->address_reg_index[index]);
3442
3443 /* ARL dst, s0
3444 * Translates into:
3445 * FTOI address_tmp, s0
3446 *
3447 * UARL dst, s0
3448 * Translates into:
3449 * MOV address_tmp, s0
3450 */
3451 if (inst->Instruction.Opcode == TGSI_OPCODE_ARL)
3452 opcode = VGPU10_OPCODE_FTOI;
3453 else
3454 opcode = VGPU10_OPCODE_MOV;
3455
3456 emit_instruction_op1(emit, opcode, &dst, &inst->Src[0], FALSE);
3457
3458 return TRUE;
3459 }
3460
3461
3462 /**
3463 * Emit code for TGSI_OPCODE_CAL instruction.
3464 */
3465 static boolean
3466 emit_cal(struct svga_shader_emitter_v10 *emit,
3467 const struct tgsi_full_instruction *inst)
3468 {
3469 unsigned label = inst->Label.Label;
3470 VGPU10OperandToken0 operand;
3471 operand.value = 0;
3472 operand.operandType = VGPU10_OPERAND_TYPE_LABEL;
3473
3474 begin_emit_instruction(emit);
3475 emit_dword(emit, operand.value);
3476 emit_dword(emit, label);
3477 end_emit_instruction(emit);
3478
3479 return TRUE;
3480 }
3481
3482
3483 /**
3484 * Emit code for TGSI_OPCODE_IABS instruction.
3485 */
3486 static boolean
3487 emit_iabs(struct svga_shader_emitter_v10 *emit,
3488 const struct tgsi_full_instruction *inst)
3489 {
3490 /* dst.x = (src0.x < 0) ? -src0.x : src0.x
3491 * dst.y = (src0.y < 0) ? -src0.y : src0.y
3492 * dst.z = (src0.z < 0) ? -src0.z : src0.z
3493 * dst.w = (src0.w < 0) ? -src0.w : src0.w
3494 *
3495 * Translates into
3496 * IMAX dst, src, neg(src)
3497 */
3498 struct tgsi_full_src_register neg_src = negate_src(&inst->Src[0]);
3499 emit_instruction_op2(emit, VGPU10_OPCODE_IMAX, &inst->Dst[0],
3500 &inst->Src[0], &neg_src, FALSE);
3501
3502 return TRUE;
3503 }
3504
3505
3506 /**
3507 * Emit code for TGSI_OPCODE_CMP instruction.
3508 */
3509 static boolean
3510 emit_cmp(struct svga_shader_emitter_v10 *emit,
3511 const struct tgsi_full_instruction *inst)
3512 {
3513 /* dst.x = (src0.x < 0) ? src1.x : src2.x
3514 * dst.y = (src0.y < 0) ? src1.y : src2.y
3515 * dst.z = (src0.z < 0) ? src1.z : src2.z
3516 * dst.w = (src0.w < 0) ? src1.w : src2.w
3517 *
3518 * Translates into
3519 * LT tmp, src0, 0.0
3520 * MOVC dst, tmp, src1, src2
3521 */
3522 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
3523 unsigned tmp = get_temp_index(emit);
3524 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3525 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3526
3527 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst,
3528 &inst->Src[0], &zero, FALSE);
3529 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0],
3530 &tmp_src, &inst->Src[1], &inst->Src[2],
3531 inst->Instruction.Saturate);
3532
3533 free_temp_indexes(emit);
3534
3535 return TRUE;
3536 }
3537
3538
3539 /**
3540 * Emit code for TGSI_OPCODE_DP2A instruction.
3541 */
3542 static boolean
3543 emit_dp2a(struct svga_shader_emitter_v10 *emit,
3544 const struct tgsi_full_instruction *inst)
3545 {
3546 /* dst.x = src0.x * src1.x + src0.y * src1.y + src2.x
3547 * dst.y = src0.x * src1.x + src0.y * src1.y + src2.x
3548 * dst.z = src0.x * src1.x + src0.y * src1.y + src2.x
3549 * dst.w = src0.x * src1.x + src0.y * src1.y + src2.x
3550 * Translate into
3551 * MAD tmp.x, s0.y, s1.y, s2.x
3552 * MAD tmp.x, s0.x, s1.x, tmp.x
3553 * MOV dst.xyzw, tmp.xxxx
3554 */
3555 unsigned tmp = get_temp_index(emit);
3556 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3557 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3558
3559 struct tgsi_full_src_register tmp_src_xxxx =
3560 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
3561 struct tgsi_full_dst_register tmp_dst_x =
3562 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
3563
3564 struct tgsi_full_src_register src0_xxxx =
3565 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
3566 struct tgsi_full_src_register src0_yyyy =
3567 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
3568 struct tgsi_full_src_register src1_xxxx =
3569 scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
3570 struct tgsi_full_src_register src1_yyyy =
3571 scalar_src(&inst->Src[1], TGSI_SWIZZLE_Y);
3572 struct tgsi_full_src_register src2_xxxx =
3573 scalar_src(&inst->Src[2], TGSI_SWIZZLE_X);
3574
3575 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &tmp_dst_x, &src0_yyyy,
3576 &src1_yyyy, &src2_xxxx, FALSE);
3577 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &tmp_dst_x, &src0_xxxx,
3578 &src1_xxxx, &tmp_src_xxxx, FALSE);
3579 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
3580 &tmp_src_xxxx, inst->Instruction.Saturate);
3581
3582 free_temp_indexes(emit);
3583
3584 return TRUE;
3585 }
3586
3587
3588 /**
3589 * Emit code for TGSI_OPCODE_DPH instruction.
3590 */
3591 static boolean
3592 emit_dph(struct svga_shader_emitter_v10 *emit,
3593 const struct tgsi_full_instruction *inst)
3594 {
3595 /*
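* dst = DPH(s0, s1):
*   dst.xyzw = s0.x * s1.x + s0.y * s1.y + s0.z * s1.z + s1.w
* Translates into: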
3596 * DP3 tmp, s0, s1
3597 * ADD dst, tmp, s1.wwww
3598 */
3599
3600 struct tgsi_full_src_register s1_wwww =
3601 swizzle_src(&inst->Src[1], TGSI_SWIZZLE_W, TGSI_SWIZZLE_W,
3602 TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);
3603
3604 unsigned tmp = get_temp_index(emit);
3605 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3606 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3607
3608 /* DP3 tmp, s0, s1 */
3609 emit_instruction_op2(emit, VGPU10_OPCODE_DP3, &tmp_dst, &inst->Src[0],
3610 &inst->Src[1], FALSE);
3611
3612 /* ADD dst, tmp, s1.wwww */
3613 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &inst->Dst[0], &tmp_src,
3614 &s1_wwww, inst->Instruction.Saturate);
3615
3616 free_temp_indexes(emit);
3617
3618 return TRUE;
3619 }
3620
3621
3622 /**
3623 * Emit code for TGSI_OPCODE_DST instruction.
3624 */
3625 static boolean
3626 emit_dst(struct svga_shader_emitter_v10 *emit,
3627 const struct tgsi_full_instruction *inst)
3628 {
3629 /*
3630 * dst.x = 1
3631 * dst.y = src0.y * src1.y
3632 * dst.z = src0.z
3633 * dst.w = src1.w
3634 */
3635
3636 struct tgsi_full_src_register s0_yyyy =
3637 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
3638 struct tgsi_full_src_register s0_zzzz =
3639 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Z);
3640 struct tgsi_full_src_register s1_yyyy =
3641 scalar_src(&inst->Src[1], TGSI_SWIZZLE_Y);
3642 struct tgsi_full_src_register s1_wwww =
3643 scalar_src(&inst->Src[1], TGSI_SWIZZLE_W);
3644
3645 /*
3646 * If dst is the same as either src0 or src1, we need
3647 * to create a temporary for it and insert an extra move.
3648 */
3649 unsigned tmp_move = get_temp_index(emit);
3650 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
3651 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
3652
3653 /* MOV dst.x, 1.0 */
3654 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
3655 struct tgsi_full_dst_register dst_x =
3656 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
3657 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
3658
3659 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &one, FALSE);
3660 }
3661
3662 /* MUL dst.y, s0.y, s1.y */
3663 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
3664 struct tgsi_full_dst_register dst_y =
3665 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
3666
3667 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &dst_y, &s0_yyyy,
3668 &s1_yyyy, inst->Instruction.Saturate);
3669 }
3670
3671 /* MOV dst.z, s0.z */
3672 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
3673 struct tgsi_full_dst_register dst_z =
3674 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
3675
3676 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_z, &s0_zzzz,
3677 inst->Instruction.Saturate);
3678 }
3679
3680 /* MOV dst.w, s1.w */
3681 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
3682 struct tgsi_full_dst_register dst_w =
3683 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
3684
3685 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &s1_wwww,
3686 inst->Instruction.Saturate);
3687 }
3688
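/* MOV dst, tmp  (copy the accumulated result to the real destination) */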
3689 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
3690 FALSE);
3691 free_temp_indexes(emit);
3692
3693 return TRUE;
3694 }
3695
3696
3697
3698 /**
3699 * Emit code for TGSI_OPCODE_ENDPRIM (GS only)
3700 */
3701 static boolean
3702 emit_endprim(struct svga_shader_emitter_v10 *emit,
3703 const struct tgsi_full_instruction *inst)
3704 {
3705 assert(emit->unit == PIPE_SHADER_GEOMETRY);
3706
3707 /* We can't use emit_simple() because the TGSI instruction has one
3708 * operand (vertex stream number) which we must ignore for VGPU10.
3709 */
3710 begin_emit_instruction(emit);
3711 emit_opcode(emit, VGPU10_OPCODE_CUT, FALSE);
3712 end_emit_instruction(emit);
3713 return TRUE;
3714 }
3715
3716
3717 /**
3718 * Emit code for TGSI_OPCODE_EX2 (2^x) instruction.
3719 */
3720 static boolean
3721 emit_ex2(struct svga_shader_emitter_v10 *emit,
3722 const struct tgsi_full_instruction *inst)
3723 {
3724 /* Note that TGSI_OPCODE_EX2 computes only one value from src.x
3725 * while VGPU10 computes four values.
3726 *
3727 * dst = EX2(src):
3728 * dst.xyzw = 2.0 ^ src.x
3729 */
3730
3731 struct tgsi_full_src_register src_xxxx =
3732 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
3733 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
3734
3735 /* EXP dst, s0.xxxx */
3736 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &inst->Dst[0], &src_xxxx,
3737 inst->Instruction.Saturate);
3738
3739 return TRUE;
3740 }
3741
3742
3743 /**
3744 * Emit code for TGSI_OPCODE_EXP instruction.
3745 */
3746 static boolean
3747 emit_exp(struct svga_shader_emitter_v10 *emit,
3748 const struct tgsi_full_instruction *inst)
3749 {
3750 /*
3751 * dst.x = 2 ^ floor(s0.x)
3752 * dst.y = s0.x - floor(s0.x)
3753 * dst.z = 2 ^ s0.x
3754 * dst.w = 1.0
3755 */
3756
3757 struct tgsi_full_src_register src_xxxx =
3758 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
3759 unsigned tmp = get_temp_index(emit);
3760 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3761 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3762
3763 /*
3764 * If dst and src are the same we need to create
3765 * a temporary for it and insert an extra move.
3766 */
3767 unsigned tmp_move = get_temp_index(emit);
3768 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
3769 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
3770
3771 /* only use X component of temp reg */
3772 tmp_dst = writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
3773 tmp_src = scalar_src(&tmp_src, TGSI_SWIZZLE_X);
3774
3775 /* ROUND_NI tmp.x, s0.x */
3776 emit_instruction_op1(emit, VGPU10_OPCODE_ROUND_NI, &tmp_dst,
3777 &src_xxxx, FALSE); /* round to -infinity */
3778
3779 /* EXP dst.x, tmp.x */
3780 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
3781 struct tgsi_full_dst_register dst_x =
3782 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
3783
3784 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &dst_x, &tmp_src,
3785 inst->Instruction.Saturate);
3786 }
3787
3788 /* ADD dst.y, s0.x, -tmp */
3789 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
3790 struct tgsi_full_dst_register dst_y =
3791 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
3792 struct tgsi_full_src_register neg_tmp_src = negate_src(&tmp_src);
3793
3794 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &dst_y, &src_xxxx,
3795 &neg_tmp_src, inst->Instruction.Saturate);
3796 }
3797
3798 /* EXP dst.z, s0.x */
3799 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
3800 struct tgsi_full_dst_register dst_z =
3801 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
3802
3803 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &dst_z, &src_xxxx,
3804 inst->Instruction.Saturate);
3805 }
3806
3807 /* MOV dst.w, 1.0 */
3808 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
3809 struct tgsi_full_dst_register dst_w =
3810 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
3811 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
3812
3813 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one,
3814 FALSE);
3815 }
3816
3817 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
3818 FALSE);
3819
3820 free_temp_indexes(emit);
3821
3822 return TRUE;
3823 }
3824
3825
3826 /**
3827 * Emit code for TGSI_OPCODE_IF instruction.
3828 */
3829 static boolean
3830 emit_if(struct svga_shader_emitter_v10 *emit,
3831 const struct tgsi_full_instruction *inst)
3832 {
3833 VGPU10OpcodeToken0 opcode0;
3834
3835 /* The src register should be a scalar */
3836 assert(inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleY &&
3837 inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleZ &&
3838 inst->Src[0].Register.SwizzleX == inst->Src[0].Register.SwizzleW);
3839
3840 /* The only special thing here is that we need to set the
3841 * VGPU10_INSTRUCTION_TEST_NONZERO flag since we want to test if
3842 * src.x is non-zero.
3843 */
3844 opcode0.value = 0;
3845 opcode0.opcodeType = VGPU10_OPCODE_IF;
3846 opcode0.testBoolean = VGPU10_INSTRUCTION_TEST_NONZERO;
3847
3848 begin_emit_instruction(emit);
3849 emit_dword(emit, opcode0.value);
3850 emit_src_register(emit, &inst->Src[0]);
3851 end_emit_instruction(emit);
3852
3853 return TRUE;
3854 }
3855
3856
3857 /**
3858 * Emit code for TGSI_OPCODE_KILL_IF instruction (kill fragment if any of
3859 * the register components are negative).
3860 */
3861 static boolean
3862 emit_kill_if(struct svga_shader_emitter_v10 *emit,
3863 const struct tgsi_full_instruction *inst)
3864 {
3865 unsigned tmp = get_temp_index(emit);
3866 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
3867 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
3868
3869 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
3870
3871 struct tgsi_full_dst_register tmp_dst_x =
3872 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
3873 struct tgsi_full_src_register tmp_src_xxxx =
3874 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
3875
3876 /* tmp = src[0] < 0.0 */
3877 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[0],
3878 &zero, FALSE);
3879
3880 if (!same_swizzle_terms(&inst->Src[0])) {
3881 /* If the swizzle is not XXXX, YYYY, ZZZZ or WWWW we need to
3882 * logically OR the swizzle terms. Most uses of KILL_IF only
3883 * test one channel so it's good to avoid these extra steps.
3884 */
3885 struct tgsi_full_src_register tmp_src_yyyy =
3886 scalar_src(&tmp_src, TGSI_SWIZZLE_Y);
3887 struct tgsi_full_src_register tmp_src_zzzz =
3888 scalar_src(&tmp_src, TGSI_SWIZZLE_Z);
3889 struct tgsi_full_src_register tmp_src_wwww =
3890 scalar_src(&tmp_src, TGSI_SWIZZLE_W);
3891
3892 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
3893 &tmp_src_yyyy, FALSE);
3894 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
3895 &tmp_src_zzzz, FALSE);
3896 emit_instruction_op2(emit, VGPU10_OPCODE_OR, &tmp_dst_x, &tmp_src_xxxx,
3897 &tmp_src_wwww, FALSE);
3898 }
3899
3900 begin_emit_instruction(emit);
3901 emit_discard_opcode(emit, TRUE); /* discard if tmp.x is non-zero */
3902 emit_src_register(emit, &tmp_src_xxxx);
3903 end_emit_instruction(emit);
3904
3905 free_temp_indexes(emit);
3906
3907 return TRUE;
3908 }
3909
3910
3911 /**
3912 * Emit code for TGSI_OPCODE_KILL instruction (unconditional discard).
3913 */
3914 static boolean
3915 emit_kill(struct svga_shader_emitter_v10 *emit,
3916 const struct tgsi_full_instruction *inst)
3917 {
3918 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
3919
3920 /* DISCARD if 0.0 is zero (always true, so this discards unconditionally) */
3921 begin_emit_instruction(emit);
3922 emit_discard_opcode(emit, FALSE);
3923 emit_src_register(emit, &zero);
3924 end_emit_instruction(emit);
3925
3926 return TRUE;
3927 }
3928
3929
3930 /**
3931 * Emit code for TGSI_OPCODE_LG2 instruction.
3932 */
3933 static boolean
3934 emit_lg2(struct svga_shader_emitter_v10 *emit,
3935 const struct tgsi_full_instruction *inst)
3936 {
3937 /* Note that TGSI_OPCODE_LG2 computes only one value from src.x
3938 * while VGPU10 computes four values.
3939 *
3940 * dst = LG2(src):
3941 * dst.xyzw = log2(src.x)
3942 */
3943
3944 struct tgsi_full_src_register src_xxxx =
3945 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
3946 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
3947
3948 /* LOG dst, s0.xxxx */
3949 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &inst->Dst[0], &src_xxxx,
3950 inst->Instruction.Saturate);
3951
3952 return TRUE;
3953 }
3954
3955
3956 /**
3957 * Emit code for TGSI_OPCODE_LIT instruction.
3958 */
3959 static boolean
3960 emit_lit(struct svga_shader_emitter_v10 *emit,
3961 const struct tgsi_full_instruction *inst)
3962 {
3963 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
3964
3965 /*
3966 * If dst and src are the same we need to create
3967 * a temporary for it and insert an extra move.
3968 */
3969 unsigned tmp_move = get_temp_index(emit);
3970 struct tgsi_full_src_register move_src = make_src_temp_reg(tmp_move);
3971 struct tgsi_full_dst_register move_dst = make_dst_temp_reg(tmp_move);
3972
3973 /*
3974 * dst.x = 1
3975 * dst.y = max(src.x, 0)
3976 * dst.z = (src.x > 0) ? max(src.y, 0)^{clamp(src.w, -128, 128)} : 0
3977 * dst.w = 1
3978 */
3979
3980 /* MOV dst.x, 1.0 */
3981 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
3982 struct tgsi_full_dst_register dst_x =
3983 writemask_dst(&move_dst, TGSI_WRITEMASK_X);
3984 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &one, FALSE);
3985 }
3986
3987 /* MOV dst.w, 1.0 */
3988 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
3989 struct tgsi_full_dst_register dst_w =
3990 writemask_dst(&move_dst, TGSI_WRITEMASK_W);
3991 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one, FALSE);
3992 }
3993
3994 /* MAX dst.y, src.x, 0.0 */
3995 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
3996 struct tgsi_full_dst_register dst_y =
3997 writemask_dst(&move_dst, TGSI_WRITEMASK_Y);
3998 struct tgsi_full_src_register zero =
3999 make_immediate_reg_float(emit, 0.0f);
4000 struct tgsi_full_src_register src_xxxx =
4001 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4002 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4003
4004 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &dst_y, &src_xxxx,
4005 &zero, inst->Instruction.Saturate);
4006 }
4007
4008 /*
4009 * tmp1 = clamp(src.w, -128, 128);
4010 * MAX tmp1, src.w, -128
4011 * MIN tmp1, tmp1, 128
4012 *
4013 * tmp2 = max(src.y, 0);
4014 * MAX tmp2, src.y, 0
4015 *
4016 * tmp1 = pow(tmp2, tmp1);
4017 * LOG tmp2, tmp2
4018 * MUL tmp1, tmp2, tmp1
4019 * EXP tmp1, tmp1
4020 *
4021 * tmp1 = (src.w == 0) ? 1 : tmp1;
4022 * EQ tmp2, 0, src.w
4023 * MOVC tmp1, tmp2, 1.0, tmp1
4024 *
4025 * dst.z = (0 < src.x) ? tmp1 : 0;
4026 * LT tmp2, 0, src.x
4027 * MOVC dst.z, tmp2, tmp1, 0.0
4028 */
4029 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
4030 struct tgsi_full_dst_register dst_z =
4031 writemask_dst(&move_dst, TGSI_WRITEMASK_Z);
4032
4033 unsigned tmp1 = get_temp_index(emit);
4034 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4035 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4036 unsigned tmp2 = get_temp_index(emit);
4037 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4038 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4039
4040 struct tgsi_full_src_register src_xxxx =
4041 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
4042 struct tgsi_full_src_register src_yyyy =
4043 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
4044 struct tgsi_full_src_register src_wwww =
4045 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
4046
4047 struct tgsi_full_src_register zero =
4048 make_immediate_reg_float(emit, 0.0f);
4049 struct tgsi_full_src_register lowerbound =
4050 make_immediate_reg_float(emit, -128.0f);
4051 struct tgsi_full_src_register upperbound =
4052 make_immediate_reg_float(emit, 128.0f);
4053
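/* tmp1 = clamp(src.w, -128, 128);  tmp2 = max(src.y, 0) */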
4054 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &tmp1_dst, &src_wwww,
4055 &lowerbound, FALSE);
4056 emit_instruction_op2(emit, VGPU10_OPCODE_MIN, &tmp1_dst, &tmp1_src,
4057 &upperbound, FALSE);
4058 emit_instruction_op2(emit, VGPU10_OPCODE_MAX, &tmp2_dst, &src_yyyy,
4059 &zero, FALSE);
4060
4061 /* POW tmp1, tmp2, tmp1 */
4062 /* LOG tmp2, tmp2 */
4063 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp2_dst, &tmp2_src,
4064 FALSE);
4065
4066 /* MUL tmp1, tmp2, tmp1 */
4067 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp1_dst, &tmp2_src,
4068 &tmp1_src, FALSE);
4069
4070 /* EXP tmp1, tmp1 */
4071 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &tmp1_dst, &tmp1_src,
4072 FALSE);
4073
4074 /* EQ tmp2, 0, src.w */
4075 emit_instruction_op2(emit, VGPU10_OPCODE_EQ, &tmp2_dst, &zero,
4076 &src_wwww, FALSE);
4077 /* MOVC tmp1, tmp2, 1.0, tmp1 */
4078 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &tmp1_dst,
4079 &tmp2_src, &one, &tmp1_src, FALSE);
4080
4081 /* LT tmp2, 0, src.x */
4082 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp2_dst, &zero,
4083 &src_xxxx, FALSE);
4084 /* MOVC dst.z, tmp2, tmp1, 0.0 */
4085 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &dst_z,
4086 &tmp2_src, &tmp1_src, &zero, FALSE);
4087 }
4088
4089 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &move_src,
4090 FALSE);
4091 free_temp_indexes(emit);
4092
4093 return TRUE;
4094 }
4095
4096
4097 /**
4098 * Emit code for TGSI_OPCODE_LOG instruction.
4099 */
4100 static boolean
4101 emit_log(struct svga_shader_emitter_v10 *emit,
4102 const struct tgsi_full_instruction *inst)
4103 {
4104 /*
4105 * dst.x = floor(lg2(abs(s0.x)))
4106 * dst.y = abs(s0.x) / (2 ^ floor(lg2(abs(s0.x))))
4107 * dst.z = lg2(abs(s0.x))
4108 * dst.w = 1.0
4109 */
4110
4111 struct tgsi_full_src_register src_xxxx =
4112 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
4113 unsigned tmp = get_temp_index(emit);
4114 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4115 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4116 struct tgsi_full_src_register abs_src_xxxx = absolute_src(&src_xxxx);
4117
4118 /* only use X component of temp reg */
4119 tmp_dst = writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4120 tmp_src = scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4121
4122 /* LOG tmp.x, abs(s0.x) */
4123 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XYZ) {
4124 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp_dst,
4125 &abs_src_xxxx, FALSE);
4126 }
4127
4128 /* MOV dst.z, tmp.x */
4129 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
4130 struct tgsi_full_dst_register dst_z =
4131 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_Z);
4132
4133 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_z,
4134 &tmp_src, inst->Instruction.Saturate);
4135 }
4136
4137 /* FLR tmp.x, tmp.x */
4138 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY) {
4139 emit_instruction_op1(emit, VGPU10_OPCODE_ROUND_NI, &tmp_dst,
4140 &tmp_src, FALSE);
4141 }
4142
4143 /* MOV dst.x, tmp.x */
4144 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
4145 struct tgsi_full_dst_register dst_x =
4146 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_X);
4147
4148 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_x, &tmp_src,
4149 inst->Instruction.Saturate);
4150 }
4151
4152 /* EXP tmp.x, tmp.x */
4153 /* DIV dst.y, abs(s0.x), tmp.x */
4154 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
4155 struct tgsi_full_dst_register dst_y =
4156 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_Y);
4157
4158 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &tmp_dst, &tmp_src,
4159 FALSE);
4160 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &dst_y, &abs_src_xxxx,
4161 &tmp_src, inst->Instruction.Saturate);
4162 }
4163
4164 /* MOV dst.w, 1.0 */
4165 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
4166 struct tgsi_full_dst_register dst_w =
4167 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_W);
4168 struct tgsi_full_src_register one =
4169 make_immediate_reg_float(emit, 1.0f);
4170
4171 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst_w, &one, FALSE);
4172 }
4173
4174 free_temp_indexes(emit);
4175
4176 return TRUE;
4177 }
4178
4179
4180 /**
4181 * Emit code for TGSI_OPCODE_LRP instruction.
4182 */
4183 static boolean
4184 emit_lrp(struct svga_shader_emitter_v10 *emit,
4185 const struct tgsi_full_instruction *inst)
4186 {
4187 /* dst = LRP(s0, s1, s2):
4188 * dst = s0 * (s1 - s2) + s2
4189 * Translates into:
4190 * ADD tmp, s1, -s2; tmp = s1 - s2
4191 * MAD dst, s0, tmp, s2; dst = s0 * tmp + s2
4192 */
4193 unsigned tmp = get_temp_index(emit);
4194 struct tgsi_full_src_register src_tmp = make_src_temp_reg(tmp);
4195 struct tgsi_full_dst_register dst_tmp = make_dst_temp_reg(tmp);
4196 struct tgsi_full_src_register neg_src2 = negate_src(&inst->Src[2]);
4197
4198 /* ADD tmp, s1, -s2 */
4199 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &dst_tmp,
4200 &inst->Src[1], &neg_src2, FALSE);
4201
4202 /* MAD dst, s0, tmp, s2 */
4203 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &inst->Dst[0],
4204 &inst->Src[0], &src_tmp, &inst->Src[2],
4205 inst->Instruction.Saturate);
4206
4207 free_temp_indexes(emit);
4208
4209 return TRUE;
4210 }
4211
4212
4213 /**
4214 * Emit code for TGSI_OPCODE_POW instruction.
4215 */
4216 static boolean
4217 emit_pow(struct svga_shader_emitter_v10 *emit,
4218 const struct tgsi_full_instruction *inst)
4219 {
4220 /* Note that TGSI_OPCODE_POW computes only one value from src0.x and
4221 * src1.x while VGPU10 computes four values.
4222 *
4223 * dst = POW(src0, src1):
4224 * dst.xyzw = src0.x ^ src1.x
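*   (computed as 2^(src1.x * log2(src0.x)) via the LOG, MUL, EXP sequence below)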
4225 */
4226 unsigned tmp = get_temp_index(emit);
4227 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4228 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4229 struct tgsi_full_src_register src0_xxxx =
4230 swizzle_src(&inst->Src[0], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4231 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4232 struct tgsi_full_src_register src1_xxxx =
4233 swizzle_src(&inst->Src[1], TGSI_SWIZZLE_X, TGSI_SWIZZLE_X,
4234 TGSI_SWIZZLE_X, TGSI_SWIZZLE_X);
4235
4236 /* LOG tmp, s0.xxxx */
4237 emit_instruction_op1(emit, VGPU10_OPCODE_LOG, &tmp_dst, &src0_xxxx,
4238 FALSE);
4239
4240 /* MUL tmp, tmp, s1.xxxx */
4241 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst, &tmp_src,
4242 &src1_xxxx, FALSE);
4243
4244 /* EXP dst, tmp */
4245 emit_instruction_op1(emit, VGPU10_OPCODE_EXP, &inst->Dst[0],
4246 &tmp_src, inst->Instruction.Saturate);
4247
4248 /* free tmp */
4249 free_temp_indexes(emit);
4250
4251 return TRUE;
4252 }
4253
4254
4255 /**
4256 * Emit code for TGSI_OPCODE_RCP (reciprocal) instruction.
4257 */
4258 static boolean
4259 emit_rcp(struct svga_shader_emitter_v10 *emit,
4260 const struct tgsi_full_instruction *inst)
4261 {
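/* dst = RCP(src):
 *   dst.xyzw = 1.0 / src.x
 * Translates into:
 *   DIV tmp.x, 1.0, src.x
 *   MOV dst, tmp.xxxx
 */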
4262 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4263
4264 unsigned tmp = get_temp_index(emit);
4265 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4266 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4267
4268 struct tgsi_full_dst_register tmp_dst_x =
4269 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4270 struct tgsi_full_src_register tmp_src_xxxx =
4271 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4272
4273 /* DIV tmp.x, 1.0, s0 */
4274 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &tmp_dst_x, &one,
4275 &inst->Src[0], FALSE);
4276
4277 /* MOV dst, tmp.xxxx */
4278 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4279 &tmp_src_xxxx, inst->Instruction.Saturate);
4280
4281 free_temp_indexes(emit);
4282
4283 return TRUE;
4284 }
4285
4286
4287 /**
4288 * Emit code for TGSI_OPCODE_RSQ instruction.
4289 */
4290 static boolean
4291 emit_rsq(struct svga_shader_emitter_v10 *emit,
4292 const struct tgsi_full_instruction *inst)
4293 {
4294 /* dst = RSQ(src):
4295 * dst.xyzw = 1 / sqrt(src.x)
4296 * Translates into:
4297 * RSQ tmp, src.x
4298 * MOV dst, tmp.xxxx
4299 */
4300
4301 unsigned tmp = get_temp_index(emit);
4302 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4303 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4304
4305 struct tgsi_full_dst_register tmp_dst_x =
4306 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4307 struct tgsi_full_src_register tmp_src_xxxx =
4308 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4309
4310 /* RSQ tmp, src.x */
4311 emit_instruction_op1(emit, VGPU10_OPCODE_RSQ, &tmp_dst_x,
4312 &inst->Src[0], FALSE);
4313
4314 /* MOV dst, tmp.xxxx */
4315 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4316 &tmp_src_xxxx, inst->Instruction.Saturate);
4317
4318 /* free tmp */
4319 free_temp_indexes(emit);
4320
4321 return TRUE;
4322 }
4323
4324
4325 /**
4326 * Emit code for TGSI_OPCODE_SCS instruction.
4327 */
4328 static boolean
4329 emit_scs(struct svga_shader_emitter_v10 *emit,
4330 const struct tgsi_full_instruction *inst)
4331 {
4332 /* dst.x = cos(src.x)
4333 * dst.y = sin(src.x)
4334 * dst.z = 0.0
4335 * dst.w = 1.0
4336 */
4337 struct tgsi_full_dst_register dst_x =
4338 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_X);
4339 struct tgsi_full_dst_register dst_y =
4340 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_Y);
4341 struct tgsi_full_dst_register dst_zw =
4342 writemask_dst(&inst->Dst[0], TGSI_WRITEMASK_ZW);
4343
4344 struct tgsi_full_src_register zero_one =
4345 make_immediate_reg_float4(emit, 0.0f, 0.0f, 0.0f, 1.0f);
4346
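/* The VGPU10 SINCOS instruction writes sine to its first dest register and
 * cosine to its second, so route them to dst.y and dst.x respectively.
 */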
4347 begin_emit_instruction(emit);
4348 emit_opcode(emit, VGPU10_OPCODE_SINCOS, inst->Instruction.Saturate);
4349 emit_dst_register(emit, &dst_y);
4350 emit_dst_register(emit, &dst_x);
4351 emit_src_register(emit, &inst->Src[0]);
4352 end_emit_instruction(emit);
4353
4354 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
4355 &dst_zw, &zero_one, inst->Instruction.Saturate);
4356
4357 return TRUE;
4358 }
4359
4360
4361 /**
4362 * Emit code for TGSI_OPCODE_SEQ (Set Equal) instruction.
4363 */
4364 static boolean
4365 emit_seq(struct svga_shader_emitter_v10 *emit,
4366 const struct tgsi_full_instruction *inst)
4367 {
4368 /* dst = SEQ(s0, s1):
4369 * dst = s0 == s1 ? 1.0 : 0.0 (per component)
4370 * Translates into:
4371 * EQ tmp, s0, s1; tmp = s0 == s1 ? 0xffffffff : 0 (per comp)
4372 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4373 */
4374 unsigned tmp = get_temp_index(emit);
4375 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4376 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4377 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4378 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4379
4380 /* EQ tmp, s0, s1 */
4381 emit_instruction_op2(emit, VGPU10_OPCODE_EQ, &tmp_dst, &inst->Src[0],
4382 &inst->Src[1], FALSE);
4383
4384 /* MOVC dst, tmp, one, zero */
4385 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4386 &one, &zero, FALSE);
4387
4388 free_temp_indexes(emit);
4389
4390 return TRUE;
4391 }
4392
4393
4394 /**
4395 * Emit code for TGSI_OPCODE_SGE (Set Greater than or Equal) instruction.
4396 */
4397 static boolean
4398 emit_sge(struct svga_shader_emitter_v10 *emit,
4399 const struct tgsi_full_instruction *inst)
4400 {
4401 /* dst = SGE(s0, s1):
4402 * dst = s0 >= s1 ? 1.0 : 0.0 (per component)
4403 * Translates into:
4404 * GE tmp, s0, s1; tmp = s0 >= s1 ? 0xffffffff : 0 (per comp)
4405 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4406 */
4407 unsigned tmp = get_temp_index(emit);
4408 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4409 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4410 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4411 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4412
4413 /* GE tmp, s0, s1 */
4414 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &tmp_dst, &inst->Src[0],
4415 &inst->Src[1], FALSE);
4416
4417 /* MOVC dst, tmp, one, zero */
4418 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4419 &one, &zero, FALSE);
4420
4421 free_temp_indexes(emit);
4422
4423 return TRUE;
4424 }
4425
4426
4427 /**
4428 * Emit code for TGSI_OPCODE_SGT (Set Greater than) instruction.
4429 */
4430 static boolean
4431 emit_sgt(struct svga_shader_emitter_v10 *emit,
4432 const struct tgsi_full_instruction *inst)
4433 {
4434 /* dst = SGT(s0, s1):
4435 * dst = s0 > s1 ? 1.0 : 0.0 (per component)
4436 * Translates into:
4437 * LT tmp, s1, s0; tmp = s1 < s0 ? 0xffffffff : 0 (per comp)
4438 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4439 */
4440 unsigned tmp = get_temp_index(emit);
4441 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4442 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4443 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4444 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4445
4446 /* LT tmp, s1, s0 */
4447 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[1],
4448 &inst->Src[0], FALSE);
4449
4450 /* MOVC dst, tmp, one, zero */
4451 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4452 &one, &zero, FALSE);
4453
4454 free_temp_indexes(emit);
4455
4456 return TRUE;
4457 }
4458
4459
4460 /**
4461 * Emit code for TGSI_OPCODE_SIN and TGSI_OPCODE_COS instructions.
4462 */
4463 static boolean
4464 emit_sincos(struct svga_shader_emitter_v10 *emit,
4465 const struct tgsi_full_instruction *inst)
4466 {
4467 unsigned tmp = get_temp_index(emit);
4468 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4469 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4470
4471 struct tgsi_full_src_register tmp_src_xxxx =
4472 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
4473 struct tgsi_full_dst_register tmp_dst_x =
4474 writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);
4475
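/* VGPU10 SINCOS writes sine to the first dest register and cosine to the
 * second; write the one we need to tmp.x and use a null register for the other.
 */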
4476 begin_emit_instruction(emit);
4477 emit_opcode(emit, VGPU10_OPCODE_SINCOS, FALSE);
4478
4479 if (inst->Instruction.Opcode == TGSI_OPCODE_SIN)
4480 {
4481 emit_dst_register(emit, &tmp_dst_x); /* first destination register */
4482 emit_null_dst_register(emit); /* second destination register */
4483 }
4484 else {
4485 emit_null_dst_register(emit);
4486 emit_dst_register(emit, &tmp_dst_x);
4487 }
4488
4489 emit_src_register(emit, &inst->Src[0]);
4490 end_emit_instruction(emit);
4491
4492 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
4493 &tmp_src_xxxx, inst->Instruction.Saturate);
4494
4495 free_temp_indexes(emit);
4496
4497 return TRUE;
4498 }
4499
4500
4501 /**
4502 * Emit code for TGSI_OPCODE_SLE (Set Less than or Equal) instruction.
4503 */
4504 static boolean
4505 emit_sle(struct svga_shader_emitter_v10 *emit,
4506 const struct tgsi_full_instruction *inst)
4507 {
4508 /* dst = SLE(s0, s1):
4509 * dst = s0 <= s1 ? 1.0 : 0.0 (per component)
4510 * Translates into:
4511 * GE tmp, s1, s0; tmp = s1 >= s0 ? 0xffffffff : 0 (per comp)
4512 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4513 */
4514 unsigned tmp = get_temp_index(emit);
4515 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4516 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4517 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4518 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4519
4520 /* GE tmp, s1, s0 */
4521 emit_instruction_op2(emit, VGPU10_OPCODE_GE, &tmp_dst, &inst->Src[1],
4522 &inst->Src[0], FALSE);
4523
4524 /* MOVC dst, tmp, one, zero */
4525 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4526 &one, &zero, FALSE);
4527
4528 free_temp_indexes(emit);
4529
4530 return TRUE;
4531 }
4532
4533
4534 /**
4535 * Emit code for TGSI_OPCODE_SLT (Set Less than) instruction.
4536 */
4537 static boolean
4538 emit_slt(struct svga_shader_emitter_v10 *emit,
4539 const struct tgsi_full_instruction *inst)
4540 {
4541 /* dst = SLT(s0, s1):
4542 * dst = s0 < s1 ? 1.0 : 0.0 (per component)
4543 * Translates into:
4544 * LT tmp, s0, s1; tmp = s0 < s1 ? 0xffffffff : 0 (per comp)
4545 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4546 */
4547 unsigned tmp = get_temp_index(emit);
4548 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4549 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4550 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4551 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4552
4553 /* LT tmp, s0, s1 */
4554 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp_dst, &inst->Src[0],
4555 &inst->Src[1], FALSE);
4556
4557 /* MOVC dst, tmp, one, zero */
4558 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4559 &one, &zero, FALSE);
4560
4561 free_temp_indexes(emit);
4562
4563 return TRUE;
4564 }
4565
4566
4567 /**
4568 * Emit code for TGSI_OPCODE_SNE (Set Not Equal) instruction.
4569 */
4570 static boolean
4571 emit_sne(struct svga_shader_emitter_v10 *emit,
4572 const struct tgsi_full_instruction *inst)
4573 {
4574 /* dst = SNE(s0, s1):
4575 * dst = s0 != s1 ? 1.0 : 0.0 (per component)
4576 * Translates into:
4577 * NE tmp, s0, s1; tmp = s0 != s1 ? 0xffffffff : 0 (per comp)
4578 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4579 */
4580 unsigned tmp = get_temp_index(emit);
4581 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4582 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4583 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4584 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
4585
4586 /* NE tmp, s0, s1 */
4587 emit_instruction_op2(emit, VGPU10_OPCODE_NE, &tmp_dst, &inst->Src[0],
4588 &inst->Src[1], FALSE);
4589
4590 /* MOVC dst, tmp, one, zero */
4591 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
4592 &one, &zero, FALSE);
4593
4594 free_temp_indexes(emit);
4595
4596 return TRUE;
4597 }
4598
4599
4600 /**
4601 * Emit code for TGSI_OPCODE_SSG (Set Sign) instruction.
4602 */
4603 static boolean
4604 emit_ssg(struct svga_shader_emitter_v10 *emit,
4605 const struct tgsi_full_instruction *inst)
4606 {
4607 /* dst.x = (src.x > 0.0) ? 1.0 : (src.x < 0.0) ? -1.0 : 0.0
4608 * dst.y = (src.y > 0.0) ? 1.0 : (src.y < 0.0) ? -1.0 : 0.0
4609 * dst.z = (src.z > 0.0) ? 1.0 : (src.z < 0.0) ? -1.0 : 0.0
4610 * dst.w = (src.w > 0.0) ? 1.0 : (src.w < 0.0) ? -1.0 : 0.0
4611 * Translates into:
4612 * LT tmp1, src, zero; tmp1 = src < zero ? 0xffffffff : 0 (per comp)
4613 * MOVC tmp2, tmp1, -1.0, 0.0; tmp2 = tmp1 ? -1.0 : 0.0 (per component)
4614 * LT tmp1, zero, src; tmp1 = zero < src ? 0xffffffff : 0 (per comp)
4615 * MOVC dst, tmp1, 1.0, tmp2; dst = tmp1 ? 1.0 : tmp2 (per component)
4616 */
4617 struct tgsi_full_src_register zero =
4618 make_immediate_reg_float(emit, 0.0f);
4619 struct tgsi_full_src_register one =
4620 make_immediate_reg_float(emit, 1.0f);
4621 struct tgsi_full_src_register neg_one =
4622 make_immediate_reg_float(emit, -1.0f);
4623
4624 unsigned tmp1 = get_temp_index(emit);
4625 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4626 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4627
4628 unsigned tmp2 = get_temp_index(emit);
4629 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4630 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4631
4632 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp1_dst, &inst->Src[0],
4633 &zero, FALSE);
4634 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &tmp2_dst, &tmp1_src,
4635 &neg_one, &zero, FALSE);
4636 emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp1_dst, &zero,
4637 &inst->Src[0], FALSE);
4638 emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp1_src,
4639 &one, &tmp2_src, FALSE);
4640
4641 free_temp_indexes(emit);
4642
4643 return TRUE;
4644 }
4645
4646
4647 /**
4648 * Emit code for TGSI_OPCODE_ISSG (Integer Set Sign) instruction.
4649 */
4650 static boolean
4651 emit_issg(struct svga_shader_emitter_v10 *emit,
4652 const struct tgsi_full_instruction *inst)
4653 {
4654 /* dst.x = (src.x > 0) ? 1 : (src.x < 0) ? -1 : 0
4655 * dst.y = (src.y > 0) ? 1 : (src.y < 0) ? -1 : 0
4656 * dst.z = (src.z > 0) ? 1 : (src.z < 0) ? -1 : 0
4657 * dst.w = (src.w > 0) ? 1 : (src.w < 0) ? -1 : 0
4658 * Translates into:
4659 * ILT tmp1, src, 0 tmp1 = src < 0 ? -1 : 0 (per component)
4660 * ILT tmp2, 0, src tmp2 = 0 < src ? -1 : 0 (per component)
4661 * IADD dst, tmp1, neg(tmp2) dst = tmp1 - tmp2 (per component)
4662 */
4663 struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
4664
4665 unsigned tmp1 = get_temp_index(emit);
4666 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
4667 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
4668
4669 unsigned tmp2 = get_temp_index(emit);
4670 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
4671 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
4672
4673 struct tgsi_full_src_register neg_tmp2 = negate_src(&tmp2_src);
4674
4675 emit_instruction_op2(emit, VGPU10_OPCODE_ILT, &tmp1_dst,
4676 &inst->Src[0], &zero, FALSE);
4677 emit_instruction_op2(emit, VGPU10_OPCODE_ILT, &tmp2_dst,
4678 &zero, &inst->Src[0], FALSE);
4679 emit_instruction_op2(emit, VGPU10_OPCODE_IADD, &inst->Dst[0],
4680 &tmp1_src, &neg_tmp2, FALSE);
4681
4682 free_temp_indexes(emit);
4683
4684 return TRUE;
4685 }
4686
4687
4688 /**
4689 * Emit code for TGSI_OPCODE_SUB instruction.
4690 */
4691 static boolean
4692 emit_sub(struct svga_shader_emitter_v10 *emit,
4693 const struct tgsi_full_instruction *inst)
4694 {
4695 /* dst = SUB(s0, s1):
4696 * dst = s0 - s1
4697 * Translates into:
4698 * ADD dst, s0, neg(s1)
4699 */
4700 struct tgsi_full_src_register neg_src1 = negate_src(&inst->Src[1]);
4701
4702 /* ADD dst, s0, neg(s1) */
4703 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &inst->Dst[0],
4704 &inst->Src[0], &neg_src1,
4705 inst->Instruction.Saturate);
4706
4707 return TRUE;
4708 }
4709
4710
4711 /**
4712 * Emit a comparison instruction. The dest register will get
4713 * 0 or ~0 values depending on the outcome of comparing src0 to src1.
4714 */
4715 static void
4716 emit_comparison(struct svga_shader_emitter_v10 *emit,
4717 SVGA3dCmpFunc func,
4718 const struct tgsi_full_dst_register *dst,
4719 const struct tgsi_full_src_register *src0,
4720 const struct tgsi_full_src_register *src1)
4721 {
4722 struct tgsi_full_src_register immediate;
4723 VGPU10OpcodeToken0 opcode0;
4724 boolean swapSrc = FALSE;
4725
4726 /* Sanity checks for svga vs. gallium enums */
4727 STATIC_ASSERT(SVGA3D_CMP_LESS == (PIPE_FUNC_LESS + 1));
4728 STATIC_ASSERT(SVGA3D_CMP_GREATEREQUAL == (PIPE_FUNC_GEQUAL + 1));
4729
4730 opcode0.value = 0;
4731
4732 switch (func) {
4733 case SVGA3D_CMP_NEVER:
4734 immediate = make_immediate_reg_int(emit, 0);
4735 /* MOV dst, {0} */
4736 begin_emit_instruction(emit);
4737 emit_dword(emit, VGPU10_OPCODE_MOV);
4738 emit_dst_register(emit, dst);
4739 emit_src_register(emit, &immediate);
4740 end_emit_instruction(emit);
4741 return;
4742 case SVGA3D_CMP_ALWAYS:
4743 immediate = make_immediate_reg_int(emit, -1);
4744 /* MOV dst, {-1} */
4745 begin_emit_instruction(emit);
4746 emit_dword(emit, VGPU10_OPCODE_MOV);
4747 emit_dst_register(emit, dst);
4748 emit_src_register(emit, &immediate);
4749 end_emit_instruction(emit);
4750 return;
4751 case SVGA3D_CMP_LESS:
4752 opcode0.opcodeType = VGPU10_OPCODE_LT;
4753 break;
4754 case SVGA3D_CMP_EQUAL:
4755 opcode0.opcodeType = VGPU10_OPCODE_EQ;
4756 break;
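/* Less-equal and greater-than are handled below by using GE/LT with the
 * source operands swapped (swapSrc).
 */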
4757 case SVGA3D_CMP_LESSEQUAL:
4758 opcode0.opcodeType = VGPU10_OPCODE_GE;
4759 swapSrc = TRUE;
4760 break;
4761 case SVGA3D_CMP_GREATER:
4762 opcode0.opcodeType = VGPU10_OPCODE_LT;
4763 swapSrc = TRUE;
4764 break;
4765 case SVGA3D_CMP_NOTEQUAL:
4766 opcode0.opcodeType = VGPU10_OPCODE_NE;
4767 break;
4768 case SVGA3D_CMP_GREATEREQUAL:
4769 opcode0.opcodeType = VGPU10_OPCODE_GE;
4770 break;
4771 default:
4772 assert(!"Unexpected comparison mode");
4773 opcode0.opcodeType = VGPU10_OPCODE_EQ;
4774 }
4775
4776 begin_emit_instruction(emit);
4777 emit_dword(emit, opcode0.value);
4778 emit_dst_register(emit, dst);
4779 if (swapSrc) {
4780 emit_src_register(emit, src1);
4781 emit_src_register(emit, src0);
4782 }
4783 else {
4784 emit_src_register(emit, src0);
4785 emit_src_register(emit, src1);
4786 }
4787 end_emit_instruction(emit);
4788 }
4789
4790
4791 /**
4792 * Get texel/address offsets for a texture instruction.
4793 */
4794 static void
4795 get_texel_offsets(const struct svga_shader_emitter_v10 *emit,
4796 const struct tgsi_full_instruction *inst, int offsets[3])
4797 {
4798 if (inst->Texture.NumOffsets == 1) {
4799 /* According to the OpenGL Shading Language spec, the offsets are only
4800 * fetched from a previously-declared immediate/literal.
4801 */
4802 const struct tgsi_texture_offset *off = inst->TexOffsets;
4803 const unsigned index = off[0].Index;
4804 const unsigned swizzleX = off[0].SwizzleX;
4805 const unsigned swizzleY = off[0].SwizzleY;
4806 const unsigned swizzleZ = off[0].SwizzleZ;
4807 const union tgsi_immediate_data *imm = emit->immediates[index];
4808
4809 assert(inst->TexOffsets[0].File == TGSI_FILE_IMMEDIATE);
4810
4811 offsets[0] = imm[swizzleX].Int;
4812 offsets[1] = imm[swizzleY].Int;
4813 offsets[2] = imm[swizzleZ].Int;
4814 }
4815 else {
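/* no texel offsets were specified */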
4816 offsets[0] = offsets[1] = offsets[2] = 0;
4817 }
4818 }
4819
4820
4821 /**
4822 * Set up the coordinate register for texture sampling.
4823 * When we're sampling from a RECT texture we have to scale the
4824 * unnormalized coordinate to a normalized coordinate.
4825 * We do that by multiplying the coordinate by an "extra" constant.
4826 * An alternative would be to use the RESINFO instruction to query the
4827 * texture's size.
4828 */
4829 static struct tgsi_full_src_register
4830 setup_texcoord(struct svga_shader_emitter_v10 *emit,
4831 unsigned unit,
4832 const struct tgsi_full_src_register *coord)
4833 {
4834 if (emit->key.tex[unit].unnormalized) {
4835 unsigned scale_index = emit->texcoord_scale_index[unit];
4836 unsigned tmp = get_temp_index(emit);
4837 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
4838 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
4839 struct tgsi_full_src_register scale_src = make_src_const_reg(scale_index);
4840
4841 /* MUL tmp, coord, const[] */
4842 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst,
4843 coord, &scale_src, FALSE);
4844 return tmp_src;
4845 }
4846 else {
4847 /* use texcoord as-is */
4848 return *coord;
4849 }
4850 }
4851
4852
4853 /**
4854 * For SAMPLE_C instructions, emit the extra src register which indicates
4855 * the reference/comparison value.
4856 */
4857 static void
4858 emit_tex_compare_refcoord(struct svga_shader_emitter_v10 *emit,
4859 unsigned target,
4860 const struct tgsi_full_src_register *coord)
4861 {
4862 struct tgsi_full_src_register coord_src_ref;
4863 unsigned component;
4864
4865 assert(tgsi_is_shadow_target(target));
4866
4867 assert(target != TGSI_TEXTURE_SHADOWCUBE_ARRAY); /* XXX not implemented */
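/* 2D-array and cube shadow targets carry the reference value in the
 * texcoord's W component; the other shadow targets keep it in Z.
 */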
4868 if (target == TGSI_TEXTURE_SHADOW2D_ARRAY ||
4869 target == TGSI_TEXTURE_SHADOWCUBE)
4870 component = TGSI_SWIZZLE_W;
4871 else
4872 component = TGSI_SWIZZLE_Z;
4873
4874 coord_src_ref = scalar_src(coord, component);
4875
4876 emit_src_register(emit, &coord_src_ref);
4877 }
4878
4879
4880 /**
4881 * Info for implementing texture swizzles.
4882 * The begin_tex_swizzle(), get_tex_swizzle_dst() and end_tex_swizzle()
4883 * functions use this to encapsulate the extra steps needed to perform
4884 * a texture swizzle, or shadow/depth comparisons.
4885 * The shadow/depth comparison is only done here for the cases where
4886 * there's no VGPU10 opcode (like texture bias lookup w/ shadow compare).
4887 */
4888 struct tex_swizzle_info
4889 {
4890 boolean swizzled;
4891 boolean shadow_compare;
4892 unsigned unit;
4893 unsigned texture_target; /**< TGSI_TEXTURE_x */
4894 struct tgsi_full_src_register tmp_src;
4895 struct tgsi_full_dst_register tmp_dst;
4896 const struct tgsi_full_dst_register *inst_dst;
4897 const struct tgsi_full_src_register *coord_src;
4898 };
4899
4900
4901 /**
4902 * Do setup for handling texture swizzles or shadow compares.
4903 * \param unit the texture unit
4904 * \param inst the TGSI texture instruction
4905 * \param shadow_compare do shadow/depth comparison?
4906 * \param swz returns the swizzle info
4907 */
4908 static void
4909 begin_tex_swizzle(struct svga_shader_emitter_v10 *emit,
4910 unsigned unit,
4911 const struct tgsi_full_instruction *inst,
4912 boolean shadow_compare,
4913 struct tex_swizzle_info *swz)
4914 {
4915 swz->swizzled = (emit->key.tex[unit].swizzle_r != TGSI_SWIZZLE_X ||
4916 emit->key.tex[unit].swizzle_g != TGSI_SWIZZLE_Y ||
4917 emit->key.tex[unit].swizzle_b != TGSI_SWIZZLE_Z ||
4918 emit->key.tex[unit].swizzle_a != TGSI_SWIZZLE_W);
4919
4920 swz->shadow_compare = shadow_compare;
4921 swz->texture_target = inst->Texture.Texture;
4922
4923 if (swz->swizzled || shadow_compare) {
4924 /* Allocate temp register for the result of the SAMPLE instruction
4925 * and the source of the MOV/compare/swizzle instructions.
4926 */
4927 unsigned tmp = get_temp_index(emit);
4928 swz->tmp_src = make_src_temp_reg(tmp);
4929 swz->tmp_dst = make_dst_temp_reg(tmp);
4930
4931 swz->unit = unit;
4932 }
4933 swz->inst_dst = &inst->Dst[0];
4934 swz->coord_src = &inst->Src[0];
4935 }
4936
4937
4938 /**
4939 * Returns the register to put the SAMPLE instruction results into.
4940 * This will either be the original instruction dst reg (if no swizzle
4941 * and no shadow comparison) or a temporary reg otherwise.
4942 */
4943 static const struct tgsi_full_dst_register *
4944 get_tex_swizzle_dst(const struct tex_swizzle_info *swz)
4945 {
4946 return (swz->swizzled || swz->shadow_compare)
4947 ? &swz->tmp_dst : swz->inst_dst;
4948 }
4949
4950
4951 /**
4952 * This emits the MOV instruction that actually implements a texture swizzle
4953 * and/or shadow comparison.
4954 */
4955 static void
4956 end_tex_swizzle(struct svga_shader_emitter_v10 *emit,
4957 const struct tex_swizzle_info *swz)
4958 {
4959 if (swz->shadow_compare) {
4960 /* Emit extra instructions to compare the fetched texel value against
4961 * a texture coordinate component. The result of the comparison
4962 * is 0.0 or 1.0.
4963 */
4964 struct tgsi_full_src_register coord_src;
4965 struct tgsi_full_src_register texel_src =
4966 scalar_src(&swz->tmp_src, TGSI_SWIZZLE_X);
4967 struct tgsi_full_src_register one =
4968 make_immediate_reg_float(emit, 1.0f);
4969 /* convert gallium comparison func to SVGA comparison func */
4970 SVGA3dCmpFunc compare_func = emit->key.tex[swz->unit].compare_func + 1;
4971
4972 assert(emit->unit == PIPE_SHADER_FRAGMENT);
4973
4974 switch (swz->texture_target) {
4975 case TGSI_TEXTURE_SHADOW2D:
4976 case TGSI_TEXTURE_SHADOWRECT:
4977 case TGSI_TEXTURE_SHADOW1D_ARRAY:
4978 coord_src = scalar_src(swz->coord_src, TGSI_SWIZZLE_Z);
4979 break;
4980 case TGSI_TEXTURE_SHADOW1D:
4981 coord_src = scalar_src(swz->coord_src, TGSI_SWIZZLE_Y);
4982 break;
4983 case TGSI_TEXTURE_SHADOWCUBE:
4984 case TGSI_TEXTURE_SHADOW2D_ARRAY:
4985 coord_src = scalar_src(swz->coord_src, TGSI_SWIZZLE_W);
4986 break;
4987 default:
4988 assert(!"Unexpected texture target in end_tex_swizzle()");
4989 coord_src = scalar_src(swz->coord_src, TGSI_SWIZZLE_Z);
4990 }
4991
4992 /* COMPARE tmp, coord, texel */
4993 /* XXX it would seem that the texel and coord arguments should
4994 * be transposed here, but piglit tests indicate otherwise.
4995 */
4996 emit_comparison(emit, compare_func,
4997 &swz->tmp_dst, &texel_src, &coord_src);
4998
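/* The comparison result is ~0 or 0 per component; ANDing it with the bit
 * pattern of 1.0f produces 1.0 or 0.0.
 */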
4999 /* AND dest, tmp, {1.0} */
5000 begin_emit_instruction(emit);
5001 emit_opcode(emit, VGPU10_OPCODE_AND, FALSE);
5002 if (swz->swizzled) {
5003 emit_dst_register(emit, &swz->tmp_dst);
5004 }
5005 else {
5006 emit_dst_register(emit, swz->inst_dst);
5007 }
5008 emit_src_register(emit, &swz->tmp_src);
5009 emit_src_register(emit, &one);
5010 end_emit_instruction(emit);
5011 }
5012
5013 if (swz->swizzled) {
5014 unsigned swz_r = emit->key.tex[swz->unit].swizzle_r;
5015 unsigned swz_g = emit->key.tex[swz->unit].swizzle_g;
5016 unsigned swz_b = emit->key.tex[swz->unit].swizzle_b;
5017 unsigned swz_a = emit->key.tex[swz->unit].swizzle_a;
5018 unsigned writemask_0 = 0, writemask_1 = 0;
5019 boolean int_tex = is_integer_type(emit->key.tex[swz->unit].return_type);
5020
5021 /* Swizzle w/out zero/one terms */
5022 struct tgsi_full_src_register src_swizzled =
5023 swizzle_src(&swz->tmp_src,
5024 swz_r < PIPE_SWIZZLE_ZERO ? swz_r : PIPE_SWIZZLE_RED,
5025 swz_g < PIPE_SWIZZLE_ZERO ? swz_g : PIPE_SWIZZLE_GREEN,
5026 swz_b < PIPE_SWIZZLE_ZERO ? swz_b : PIPE_SWIZZLE_BLUE,
5027 swz_a < PIPE_SWIZZLE_ZERO ? swz_a : PIPE_SWIZZLE_ALPHA);
5028
5029 /* MOV dst, color(tmp).<swizzle> */
5030 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
5031 swz->inst_dst, &src_swizzled, FALSE);
5032
5033 /* handle swizzle zero terms */
5034 writemask_0 = (((swz_r == PIPE_SWIZZLE_ZERO) << 0) |
5035 ((swz_g == PIPE_SWIZZLE_ZERO) << 1) |
5036 ((swz_b == PIPE_SWIZZLE_ZERO) << 2) |
5037 ((swz_a == PIPE_SWIZZLE_ZERO) << 3));
5038
5039 if (writemask_0) {
5040 struct tgsi_full_src_register zero = int_tex ?
5041 make_immediate_reg_int(emit, 0) :
5042 make_immediate_reg_float(emit, 0.0f);
5043 struct tgsi_full_dst_register dst =
5044 writemask_dst(swz->inst_dst, writemask_0);
5045
5046 /* MOV dst.writemask_0, {0,0,0,0} */
5047 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
5048 &dst, &zero, FALSE);
5049 }
5050
5051 /* handle swizzle one terms */
5052 writemask_1 = (((swz_r == PIPE_SWIZZLE_ONE) << 0) |
5053 ((swz_g == PIPE_SWIZZLE_ONE) << 1) |
5054 ((swz_b == PIPE_SWIZZLE_ONE) << 2) |
5055 ((swz_a == PIPE_SWIZZLE_ONE) << 3));
5056
5057 if (writemask_1) {
5058 struct tgsi_full_src_register one = int_tex ?
5059 make_immediate_reg_int(emit, 1) :
5060 make_immediate_reg_float(emit, 1.0f);
5061 struct tgsi_full_dst_register dst =
5062 writemask_dst(swz->inst_dst, writemask_1);
5063
5064 /* MOV dst.writemask_1, {1,1,1,1} */
5065 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst, &one, FALSE);
5066 }
5067 }
5068 }
5069
5070
5071 /**
5072 * Emit code for TGSI_OPCODE_SAMPLE instruction.
5073 */
5074 static boolean
5075 emit_sample(struct svga_shader_emitter_v10 *emit,
5076 const struct tgsi_full_instruction *inst)
5077 {
5078 const unsigned resource_unit = inst->Src[1].Register.Index;
5079 const unsigned sampler_unit = inst->Src[2].Register.Index;
5080 struct tgsi_full_src_register coord;
5081 int offsets[3];
5082 struct tex_swizzle_info swz_info;
5083
5084 begin_tex_swizzle(emit, sampler_unit, inst, FALSE, &swz_info);
5085
5086 get_texel_offsets(emit, inst, offsets);
5087
5088 coord = setup_texcoord(emit, resource_unit, &inst->Src[0]);
5089
5090 /* SAMPLE dst, coord(s0), resource, sampler */
5091 begin_emit_instruction(emit);
5092
5093 emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE,
5094 inst->Instruction.Saturate, offsets);
5095 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5096 emit_src_register(emit, &coord);
5097 emit_resource_register(emit, resource_unit);
5098 emit_sampler_register(emit, sampler_unit);
5099 end_emit_instruction(emit);
5100
5101 end_tex_swizzle(emit, &swz_info);
5102
5103 free_temp_indexes(emit);
5104
5105 return TRUE;
5106 }
5107
5108
5109 /**
5110 * Check if a texture instruction is valid.
5111 * An example of an invalid texture instruction is doing shadow comparison
5112 * with an integer-valued texture.
5113 * If we detect an invalid texture instruction, we replace it with:
5114 * MOV dst, {1,1,1,1};
5115 * \return TRUE if valid, FALSE if invalid.
5116 */
5117 static boolean
5118 is_valid_tex_instruction(struct svga_shader_emitter_v10 *emit,
5119 const struct tgsi_full_instruction *inst)
5120 {
5121 const unsigned unit = inst->Src[1].Register.Index;
5122 const unsigned target = inst->Texture.Texture;
5123 boolean valid = TRUE;
5124
5125 if (tgsi_is_shadow_target(target) &&
5126 is_integer_type(emit->key.tex[unit].return_type)) {
5127 debug_printf("Invalid SAMPLE_C with an integer texture!\n");
5128 valid = FALSE;
5129 }
5130 /* XXX might check for other conditions in the future here */
5131
5132 if (!valid) {
5133 /* emit a MOV dst, {1,1,1,1} instruction. */
5134 struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
5135 begin_emit_instruction(emit);
5136 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
5137 emit_dst_register(emit, &inst->Dst[0]);
5138 emit_src_register(emit, &one);
5139 end_emit_instruction(emit);
5140 }
5141
5142 return valid;
5143 }
5144
5145
5146 /**
5147 * Emit code for TGSI_OPCODE_TEX (simple texture lookup)
5148 */
5149 static boolean
5150 emit_tex(struct svga_shader_emitter_v10 *emit,
5151 const struct tgsi_full_instruction *inst)
5152 {
5153 const uint unit = inst->Src[1].Register.Index;
5154 unsigned target = inst->Texture.Texture;
5155 unsigned opcode;
5156 struct tgsi_full_src_register coord;
5157 int offsets[3];
5158 struct tex_swizzle_info swz_info;
5159
5160 /* check that the sampler returns a float */
5161 if (!is_valid_tex_instruction(emit, inst))
5162 return TRUE;
5163
5164 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5165
5166 get_texel_offsets(emit, inst, offsets);
5167
5168 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5169
5170 /* SAMPLE dst, coord(s0), resource, sampler */
5171 begin_emit_instruction(emit);
5172
5173 if (tgsi_is_shadow_target(target))
5174 opcode = VGPU10_OPCODE_SAMPLE_C;
5175 else
5176 opcode = VGPU10_OPCODE_SAMPLE;
5177
5178 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5179 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5180 emit_src_register(emit, &coord);
5181 emit_resource_register(emit, unit);
5182 emit_sampler_register(emit, unit);
5183 if (opcode == VGPU10_OPCODE_SAMPLE_C) {
5184 emit_tex_compare_refcoord(emit, target, &coord);
5185 }
5186 end_emit_instruction(emit);
5187
5188 end_tex_swizzle(emit, &swz_info);
5189
5190 free_temp_indexes(emit);
5191
5192 return TRUE;
5193 }
5194
5195
5196 /**
5197 * Emit code for TGSI_OPCODE_TXP (projective texture)
5198 */
5199 static boolean
5200 emit_txp(struct svga_shader_emitter_v10 *emit,
5201 const struct tgsi_full_instruction *inst)
5202 {
5203 const uint unit = inst->Src[1].Register.Index;
5204 unsigned target = inst->Texture.Texture;
5205 unsigned opcode;
5206 int offsets[3];
5207 unsigned tmp = get_temp_index(emit);
5208 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
5209 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
5210 struct tgsi_full_src_register src0_wwww =
5211 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5212 struct tgsi_full_src_register coord;
5213 struct tex_swizzle_info swz_info;
5214
5215 /* check that the sampler returns a float */
5216 if (!is_valid_tex_instruction(emit, inst))
5217 return TRUE;
5218
5219 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5220
5221 get_texel_offsets(emit, inst, offsets);
5222
5223 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5224
5225 /* DIV tmp, coord, src0.wwww */
5226 emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &tmp_dst,
5227 &coord, &src0_wwww, FALSE);
5228
5229 /* SAMPLE dst, coord(tmp), resource, sampler */
5230 begin_emit_instruction(emit);
5231
5232 if (tgsi_is_shadow_target(target))
5233 opcode = VGPU10_OPCODE_SAMPLE_C;
5234 else
5235 opcode = VGPU10_OPCODE_SAMPLE;
5236
5237 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5238 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5239 emit_src_register(emit, &tmp_src); /* projected coord */
5240 emit_resource_register(emit, unit);
5241 emit_sampler_register(emit, unit);
5242 if (opcode == VGPU10_OPCODE_SAMPLE_C) {
5243 emit_tex_compare_refcoord(emit, target, &tmp_src);
5244 }
5245 end_emit_instruction(emit);
5246
5247 end_tex_swizzle(emit, &swz_info);
5248
5249 free_temp_indexes(emit);
5250
5251 return TRUE;
5252 }
5253
5254
5255 /*
5256 * Emit code for TGSI_OPCODE_XPD instruction.
5257 */
5258 static boolean
5259 emit_xpd(struct svga_shader_emitter_v10 *emit,
5260 const struct tgsi_full_instruction *inst)
5261 {
5262 /* dst.x = src0.y * src1.z - src1.y * src0.z
5263 * dst.y = src0.z * src1.x - src1.z * src0.x
5264 * dst.z = src0.x * src1.y - src1.x * src0.y
5265 * dst.w = 1
5266 */
5267 struct tgsi_full_src_register s0_xxxx =
5268 scalar_src(&inst->Src[0], TGSI_SWIZZLE_X);
5269 struct tgsi_full_src_register s0_yyyy =
5270 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Y);
5271 struct tgsi_full_src_register s0_zzzz =
5272 scalar_src(&inst->Src[0], TGSI_SWIZZLE_Z);
5273
5274 struct tgsi_full_src_register s1_xxxx =
5275 scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
5276 struct tgsi_full_src_register s1_yyyy =
5277 scalar_src(&inst->Src[1], TGSI_SWIZZLE_Y);
5278 struct tgsi_full_src_register s1_zzzz =
5279 scalar_src(&inst->Src[1], TGSI_SWIZZLE_Z);
5280
5281 unsigned tmp1 = get_temp_index(emit);
5282 struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
5283 struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);
5284
5285 unsigned tmp2 = get_temp_index(emit);
5286 struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
5287 struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);
5288 struct tgsi_full_src_register neg_tmp2_src = negate_src(&tmp2_src);
5289
5290 unsigned tmp3 = get_temp_index(emit);
5291 struct tgsi_full_src_register tmp3_src = make_src_temp_reg(tmp3);
5292 struct tgsi_full_dst_register tmp3_dst = make_dst_temp_reg(tmp3);
5293 struct tgsi_full_dst_register tmp3_dst_x =
5294 writemask_dst(&tmp3_dst, TGSI_WRITEMASK_X);
5295 struct tgsi_full_dst_register tmp3_dst_y =
5296 writemask_dst(&tmp3_dst, TGSI_WRITEMASK_Y);
5297 struct tgsi_full_dst_register tmp3_dst_z =
5298 writemask_dst(&tmp3_dst, TGSI_WRITEMASK_Z);
5299 struct tgsi_full_dst_register tmp3_dst_w =
5300 writemask_dst(&tmp3_dst, TGSI_WRITEMASK_W);
5301
5302 /* Note: we put all the intermediate computations into tmp3 in case
5303 * the XPD dest register is the same as one of the src regs (in which
5304 * case we could clobber a src reg before we're done with it).
5305 *
5306 * Note: we could get by with just one temp register instead of three
5307 * since we're doing scalar operations and there's enough room in one
5308 * temp for everything.
5309 */
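   /* For illustration (hypothetical inputs): with src0 = (1, 0, 0, *) and
    * src1 = (0, 1, 0, *), the formulas above give dst = (0, 0, 1, 1),
    * i.e. the right-handed cross product with w forced to 1.
    */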
5310
5311 /* MUL tmp1, src0.y, src1.z */
5312 /* MUL tmp2, src1.y, src0.z */
5313 /* ADD tmp3.x, tmp1, -tmp2 */
5314 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
5315 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp1_dst,
5316 &s0_yyyy, &s1_zzzz, FALSE);
5317 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp2_dst,
5318 &s1_yyyy, &s0_zzzz, FALSE);
5319 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp3_dst_x,
5320 &tmp1_src, &neg_tmp2_src, FALSE);
5321 }
5322
5323 /* MUL tmp1, src0.z, src1.x */
5324 /* MUL tmp2, src1.z, src0.x */
5325 /* ADD tmp3.y, tmp1, -tmp2 */
5326 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
5327 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp1_dst, &s0_zzzz,
5328 &s1_xxxx, FALSE);
5329 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp2_dst, &s1_zzzz,
5330 &s0_xxxx, FALSE);
5331 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp3_dst_y,
5332 &tmp1_src, &neg_tmp2_src, FALSE);
5333 }
5334
5335 /* MUL tmp1, src0.x, src1.y */
5336 /* MUL tmp2, src1.x, src0.y */
5337 /* ADD tmp3.z, tmp1, -tmp2 */
5338 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
5339 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp1_dst, &s0_xxxx,
5340 &s1_yyyy, FALSE);
5341 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp2_dst, &s1_xxxx,
5342 &s0_yyyy, FALSE);
5343 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp3_dst_z,
5344 &tmp1_src, &neg_tmp2_src, FALSE);
5345 }
5346
5347 /* MOV tmp3.w, 1.0 */
5348 if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
5349 struct tgsi_full_src_register one =
5350 make_immediate_reg_float(emit, 1.0f);
5351
5352 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &tmp3_dst_w, &one, FALSE);
5353 }
5354
5355 /* MOV dst, tmp3 */
5356 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &tmp3_src,
5357 inst->Instruction.Saturate);
5358
5359
5360 free_temp_indexes(emit);
5361
5362 return TRUE;
5363 }
5364
5365
5366 /**
5367 * Emit code for TGSI_OPCODE_TXD (explicit derivatives)
5368 */
5369 static boolean
5370 emit_txd(struct svga_shader_emitter_v10 *emit,
5371 const struct tgsi_full_instruction *inst)
5372 {
5373 const uint unit = inst->Src[3].Register.Index;
5374 unsigned target = inst->Texture.Texture;
5375 int offsets[3];
5376 struct tgsi_full_src_register coord;
5377 struct tex_swizzle_info swz_info;
5378
5379 begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
5380 &swz_info);
5381
5382 get_texel_offsets(emit, inst, offsets);
5383
5384 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5385
5386 /* SAMPLE_D dst, coord(s0), resource, sampler, Xderiv(s1), Yderiv(s2) */
5387 begin_emit_instruction(emit);
5388 emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE_D,
5389 inst->Instruction.Saturate, offsets);
5390 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5391 emit_src_register(emit, &coord);
5392 emit_resource_register(emit, unit);
5393 emit_sampler_register(emit, unit);
5394 emit_src_register(emit, &inst->Src[1]); /* Xderiv */
5395 emit_src_register(emit, &inst->Src[2]); /* Yderiv */
5396 end_emit_instruction(emit);
5397
5398 end_tex_swizzle(emit, &swz_info);
5399
5400 free_temp_indexes(emit);
5401
5402 return TRUE;
5403 }
5404
5405
5406 /**
5407 * Emit code for TGSI_OPCODE_TXF (texel fetch)
5408 */
5409 static boolean
5410 emit_txf(struct svga_shader_emitter_v10 *emit,
5411 const struct tgsi_full_instruction *inst)
5412 {
5413 const uint unit = inst->Src[1].Register.Index;
5414 const unsigned msaa = emit->key.tex[unit].texture_msaa;
5415 int offsets[3];
5416 struct tex_swizzle_info swz_info;
5417
5418 begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);
5419
5420 get_texel_offsets(emit, inst, offsets);
5421
5422 if (msaa) {
5423 /* Fetch one sample from an MSAA texture */
5424 struct tgsi_full_src_register sampleIndex =
5425 scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5426 /* LD_MS dst, coord(s0), resource, sampleIndex */
5427 begin_emit_instruction(emit);
5428 emit_sample_opcode(emit, VGPU10_OPCODE_LD_MS,
5429 inst->Instruction.Saturate, offsets);
5430 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5431 emit_src_register(emit, &inst->Src[0]);
5432 emit_resource_register(emit, unit);
5433 emit_src_register(emit, &sampleIndex);
5434 end_emit_instruction(emit);
5435 }
5436 else {
5437 /* Fetch one texel specified by integer coordinate */
5438 /* LD dst, coord(s0), resource */
5439 begin_emit_instruction(emit);
5440 emit_sample_opcode(emit, VGPU10_OPCODE_LD,
5441 inst->Instruction.Saturate, offsets);
5442 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5443 emit_src_register(emit, &inst->Src[0]);
5444 emit_resource_register(emit, unit);
5445 end_emit_instruction(emit);
5446 }
5447
5448 end_tex_swizzle(emit, &swz_info);
5449
5450 free_temp_indexes(emit);
5451
5452 return TRUE;
5453 }
5454
5455
5456 /**
5457 * Emit code for TGSI_OPCODE_TXL (explicit LOD) or TGSI_OPCODE_TXB (LOD bias)
5458 * or TGSI_OPCODE_TXB2 (for cube shadow maps).
5459 */
5460 static boolean
5461 emit_txl_txb(struct svga_shader_emitter_v10 *emit,
5462 const struct tgsi_full_instruction *inst)
5463 {
5464 unsigned target = inst->Texture.Texture;
5465 unsigned opcode, unit;
5466 int offsets[3];
5467 struct tgsi_full_src_register coord, lod_bias;
5468 struct tex_swizzle_info swz_info;
5469
5470 assert(inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
5471 inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
5472 inst->Instruction.Opcode == TGSI_OPCODE_TXB2);
5473
5474 if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2) {
5475 lod_bias = scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
5476 unit = inst->Src[2].Register.Index;
5477 }
5478 else {
5479 lod_bias = scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
5480 unit = inst->Src[1].Register.Index;
5481 }
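   /* Note: for TXL/TXB the LOD or bias rides in the coordinate's .w
    * component (Src[0]), while TXB2 carries it in Src[1].x because the
    * shadow cube-map coordinate already needs all four components of
    * Src[0].
    */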
5482
5483 begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
5484 &swz_info);
5485
5486 get_texel_offsets(emit, inst, offsets);
5487
5488 coord = setup_texcoord(emit, unit, &inst->Src[0]);
5489
5490 /* SAMPLE_L/B dst, coord(s0), resource, sampler, lod(s3) */
5491 begin_emit_instruction(emit);
5492 if (inst->Instruction.Opcode == TGSI_OPCODE_TXL) {
5493 opcode = VGPU10_OPCODE_SAMPLE_L;
5494 }
5495 else {
5496 opcode = VGPU10_OPCODE_SAMPLE_B;
5497 }
5498 emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
5499 emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
5500 emit_src_register(emit, &coord);
5501 emit_resource_register(emit, unit);
5502 emit_sampler_register(emit, unit);
5503 emit_src_register(emit, &lod_bias);
5504 end_emit_instruction(emit);
5505
5506 end_tex_swizzle(emit, &swz_info);
5507
5508 free_temp_indexes(emit);
5509
5510 return TRUE;
5511 }
5512
5513
5514 /**
5515 * Emit code for TGSI_OPCODE_TXQ (texture query) instruction.
5516 */
5517 static boolean
5518 emit_txq(struct svga_shader_emitter_v10 *emit,
5519 const struct tgsi_full_instruction *inst)
5520 {
5521 const uint unit = inst->Src[1].Register.Index;
5522
5523 if (emit->key.tex[unit].texture_target == PIPE_BUFFER) {
5524 /* RESINFO does not support querying texture buffers, so we instead
5525 * store texture buffer sizes in shader constants and copy the constant
5526 * to the destination register to implement TXQ.
5527 * MOV dst, const[texture_buffer_size_index[unit]]
5528 */
5529 struct tgsi_full_src_register size_src =
5530 make_src_const_reg(emit->texture_buffer_size_index[unit]);
5531 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &size_src,
5532 FALSE);
5533 } else {
5534 /* RESINFO dst, srcMipLevel, resource */
5535 begin_emit_instruction(emit);
5536 emit_opcode_resinfo(emit, VGPU10_RESINFO_RETURN_UINT);
5537 emit_dst_register(emit, &inst->Dst[0]);
5538 emit_src_register(emit, &inst->Src[0]);
5539 emit_resource_register(emit, unit);
5540 end_emit_instruction(emit);
5541 }
5542
5543 free_temp_indexes(emit);
5544
5545 return TRUE;
5546 }
5547
5548
5549 /**
5550 * Emit a simple instruction (like ADD, MUL, MIN, etc).
5551 */
5552 static boolean
5553 emit_simple(struct svga_shader_emitter_v10 *emit,
5554 const struct tgsi_full_instruction *inst)
5555 {
5556 const unsigned opcode = inst->Instruction.Opcode;
5557 const struct tgsi_opcode_info *op = tgsi_get_opcode_info(opcode);
5558 unsigned i;
5559
5560 begin_emit_instruction(emit);
5561 emit_opcode(emit, translate_opcode(inst->Instruction.Opcode),
5562 inst->Instruction.Saturate);
5563 for (i = 0; i < op->num_dst; i++) {
5564 emit_dst_register(emit, &inst->Dst[i]);
5565 }
5566 for (i = 0; i < op->num_src; i++) {
5567 emit_src_register(emit, &inst->Src[i]);
5568 }
5569 end_emit_instruction(emit);
5570
5571 return TRUE;
5572 }
5573
5574
5575 /**
5576 * Emit a simple VGPU10 instruction which writes to multiple dest registers,
5577 * where TGSI only uses one dest register.
5578 */
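/* For example (per the opcode dispatch below), VGPU10's UMUL writes a
 * {hi, lo} destination pair while TGSI's UMUL wants only the low half,
 * so the unused hi slot is filled with a null destination register.
 */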
5579 static boolean
5580 emit_simple_1dst(struct svga_shader_emitter_v10 *emit,
5581 const struct tgsi_full_instruction *inst,
5582 unsigned dst_count,
5583 unsigned dst_index)
5584 {
5585 const unsigned opcode = inst->Instruction.Opcode;
5586 const struct tgsi_opcode_info *op = tgsi_get_opcode_info(opcode);
5587 unsigned i;
5588
5589 begin_emit_instruction(emit);
5590 emit_opcode(emit, translate_opcode(inst->Instruction.Opcode),
5591 inst->Instruction.Saturate);
5592
5593 for (i = 0; i < dst_count; i++) {
5594 if (i == dst_index) {
5595 emit_dst_register(emit, &inst->Dst[0]);
5596 } else {
5597 emit_null_dst_register(emit);
5598 }
5599 }
5600
5601 for (i = 0; i < op->num_src; i++) {
5602 emit_src_register(emit, &inst->Src[i]);
5603 }
5604 end_emit_instruction(emit);
5605
5606 return TRUE;
5607 }
5608
5609
5610 /**
5611 * Translate a single TGSI instruction to VGPU10.
5612 */
5613 static boolean
5614 emit_vgpu10_instruction(struct svga_shader_emitter_v10 *emit,
5615 unsigned inst_number,
5616 const struct tgsi_full_instruction *inst)
5617 {
5618 const unsigned opcode = inst->Instruction.Opcode;
5619
5620 switch (opcode) {
5621 case TGSI_OPCODE_ADD:
5622 case TGSI_OPCODE_AND:
5623 case TGSI_OPCODE_BGNLOOP:
5624 case TGSI_OPCODE_BRK:
5625 case TGSI_OPCODE_CEIL:
5626 case TGSI_OPCODE_CONT:
5627 case TGSI_OPCODE_DDX:
5628 case TGSI_OPCODE_DDY:
5629 case TGSI_OPCODE_DIV:
5630 case TGSI_OPCODE_DP2:
5631 case TGSI_OPCODE_DP3:
5632 case TGSI_OPCODE_DP4:
5633 case TGSI_OPCODE_ELSE:
5634 case TGSI_OPCODE_ENDIF:
5635 case TGSI_OPCODE_ENDLOOP:
5636 case TGSI_OPCODE_ENDSUB:
5637 case TGSI_OPCODE_F2I:
5638 case TGSI_OPCODE_F2U:
5639 case TGSI_OPCODE_FLR:
5640 case TGSI_OPCODE_FRC:
5641 case TGSI_OPCODE_FSEQ:
5642 case TGSI_OPCODE_FSGE:
5643 case TGSI_OPCODE_FSLT:
5644 case TGSI_OPCODE_FSNE:
5645 case TGSI_OPCODE_I2F:
5646 case TGSI_OPCODE_IMAX:
5647 case TGSI_OPCODE_IMIN:
5648 case TGSI_OPCODE_INEG:
5649 case TGSI_OPCODE_ISGE:
5650 case TGSI_OPCODE_ISHR:
5651 case TGSI_OPCODE_ISLT:
5652 case TGSI_OPCODE_MAD:
5653 case TGSI_OPCODE_MAX:
5654 case TGSI_OPCODE_MIN:
5655 case TGSI_OPCODE_MOV:
5656 case TGSI_OPCODE_MUL:
5657 case TGSI_OPCODE_NOP:
5658 case TGSI_OPCODE_NOT:
5659 case TGSI_OPCODE_OR:
5660 case TGSI_OPCODE_RET:
5661 case TGSI_OPCODE_UADD:
5662 case TGSI_OPCODE_USEQ:
5663 case TGSI_OPCODE_USGE:
5664 case TGSI_OPCODE_USLT:
5665 case TGSI_OPCODE_UMIN:
5666 case TGSI_OPCODE_UMAD:
5667 case TGSI_OPCODE_UMAX:
5668 case TGSI_OPCODE_ROUND:
5669 case TGSI_OPCODE_SQRT:
5670 case TGSI_OPCODE_SHL:
5671 case TGSI_OPCODE_TRUNC:
5672 case TGSI_OPCODE_U2F:
5673 case TGSI_OPCODE_UCMP:
5674 case TGSI_OPCODE_USHR:
5675 case TGSI_OPCODE_USNE:
5676 case TGSI_OPCODE_XOR:
5677 /* simple instructions */
5678 return emit_simple(emit, inst);
5679
5680
5681 case TGSI_OPCODE_EMIT:
5682 return emit_vertex(emit, inst);
5683 case TGSI_OPCODE_ENDPRIM:
5684 return emit_endprim(emit, inst);
5685 case TGSI_OPCODE_ABS:
5686 return emit_abs(emit, inst);
5687 case TGSI_OPCODE_IABS:
5688 return emit_iabs(emit, inst);
5689 case TGSI_OPCODE_ARL:
5690 /* fall-through */
5691 case TGSI_OPCODE_UARL:
5692 return emit_arl_uarl(emit, inst);
5693 case TGSI_OPCODE_BGNSUB:
5694 /* no-op */
5695 return TRUE;
5696 case TGSI_OPCODE_CAL:
5697 return emit_cal(emit, inst);
5698 case TGSI_OPCODE_CMP:
5699 return emit_cmp(emit, inst);
5700 case TGSI_OPCODE_COS:
5701 return emit_sincos(emit, inst);
5702 case TGSI_OPCODE_DP2A:
5703 return emit_dp2a(emit, inst);
5704 case TGSI_OPCODE_DPH:
5705 return emit_dph(emit, inst);
5706 case TGSI_OPCODE_DST:
5707 return emit_dst(emit, inst);
5708 case TGSI_OPCODE_EX2:
5709 return emit_ex2(emit, inst);
5710 case TGSI_OPCODE_EXP:
5711 return emit_exp(emit, inst);
5712 case TGSI_OPCODE_IF:
5713 return emit_if(emit, inst);
5714 case TGSI_OPCODE_KILL:
5715 return emit_kill(emit, inst);
5716 case TGSI_OPCODE_KILL_IF:
5717 return emit_kill_if(emit, inst);
5718 case TGSI_OPCODE_LG2:
5719 return emit_lg2(emit, inst);
5720 case TGSI_OPCODE_LIT:
5721 return emit_lit(emit, inst);
5722 case TGSI_OPCODE_LOG:
5723 return emit_log(emit, inst);
5724 case TGSI_OPCODE_LRP:
5725 return emit_lrp(emit, inst);
5726 case TGSI_OPCODE_POW:
5727 return emit_pow(emit, inst);
5728 case TGSI_OPCODE_RCP:
5729 return emit_rcp(emit, inst);
5730 case TGSI_OPCODE_RSQ:
5731 return emit_rsq(emit, inst);
5732 case TGSI_OPCODE_SAMPLE:
5733 return emit_sample(emit, inst);
5734 case TGSI_OPCODE_SCS:
5735 return emit_scs(emit, inst);
5736 case TGSI_OPCODE_SEQ:
5737 return emit_seq(emit, inst);
5738 case TGSI_OPCODE_SGE:
5739 return emit_sge(emit, inst);
5740 case TGSI_OPCODE_SGT:
5741 return emit_sgt(emit, inst);
5742 case TGSI_OPCODE_SIN:
5743 return emit_sincos(emit, inst);
5744 case TGSI_OPCODE_SLE:
5745 return emit_sle(emit, inst);
5746 case TGSI_OPCODE_SLT:
5747 return emit_slt(emit, inst);
5748 case TGSI_OPCODE_SNE:
5749 return emit_sne(emit, inst);
5750 case TGSI_OPCODE_SSG:
5751 return emit_ssg(emit, inst);
5752 case TGSI_OPCODE_ISSG:
5753 return emit_issg(emit, inst);
5754 case TGSI_OPCODE_SUB:
5755 return emit_sub(emit, inst);
5756 case TGSI_OPCODE_TEX:
5757 return emit_tex(emit, inst);
5758 case TGSI_OPCODE_TXP:
5759 return emit_txp(emit, inst);
5760 case TGSI_OPCODE_TXB:
5761 case TGSI_OPCODE_TXB2:
5762 case TGSI_OPCODE_TXL:
5763 return emit_txl_txb(emit, inst);
5764 case TGSI_OPCODE_TXD:
5765 return emit_txd(emit, inst);
5766 case TGSI_OPCODE_TXF:
5767 return emit_txf(emit, inst);
5768 case TGSI_OPCODE_TXQ:
5769 return emit_txq(emit, inst);
5770 case TGSI_OPCODE_UIF:
5771 return emit_if(emit, inst);
5772 case TGSI_OPCODE_XPD:
5773 return emit_xpd(emit, inst);
5774 case TGSI_OPCODE_UMUL_HI:
5775 case TGSI_OPCODE_IMUL_HI:
5776 case TGSI_OPCODE_UDIV:
5777 case TGSI_OPCODE_IDIV:
5778 /* These cases use only the FIRST of two destination registers */
5779 return emit_simple_1dst(emit, inst, 2, 0);
5780 case TGSI_OPCODE_UMUL:
5781 case TGSI_OPCODE_UMOD:
5782 case TGSI_OPCODE_MOD:
5783 /* These cases use only the SECOND of two destination registers */
5784 return emit_simple_1dst(emit, inst, 2, 1);
5785 case TGSI_OPCODE_END:
5786 if (!emit_post_helpers(emit))
5787 return FALSE;
5788 return emit_simple(emit, inst);
5789
5790 default:
5791 debug_printf("Unimplemented tgsi instruction %s\n",
5792 tgsi_get_opcode_name(opcode));
5793 return FALSE;
5794 }
5795
5796 return TRUE;
5797 }
5798
5799
5800 /**
5801 * Emit the extra instructions to adjust the vertex position.
5802 * There are two possible adjustments:
5803 * 1. Converting from Gallium to VGPU10 coordinate space by applying the
5804 * "prescale" and "pretranslate" values.
5805 * 2. Undoing the viewport transformation when we use the swtnl/draw path.
5806 * \param vs_pos_tmp_index which temporary register contains the vertex pos.
5807 */
5808 static void
5809 emit_vpos_instructions(struct svga_shader_emitter_v10 *emit,
5810 unsigned vs_pos_tmp_index)
5811 {
5812 struct tgsi_full_src_register tmp_pos_src;
5813 struct tgsi_full_dst_register pos_dst;
5814
5815 /* Don't bother to emit any extra vertex instructions if vertex position is
5816 * not written out
5817 */
5818 if (emit->vposition.out_index == INVALID_INDEX)
5819 return;
5820
5821 tmp_pos_src = make_src_temp_reg(vs_pos_tmp_index);
5822 pos_dst = make_dst_output_reg(emit->vposition.out_index);
5823
5824 /* If non-adjusted vertex position register index
5825 * is valid, copy the vertex position from the temporary
5826 * vertex position register before it is modified by the
5827 * prescale computation.
5828 */
5829 if (emit->vposition.so_index != INVALID_INDEX) {
5830 struct tgsi_full_dst_register pos_so_dst =
5831 make_dst_output_reg(emit->vposition.so_index);
5832
5833 /* MOV pos_so, tmp_pos */
5834 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &pos_so_dst,
5835 &tmp_pos_src, FALSE);
5836 }
5837
5838 if (emit->vposition.need_prescale) {
5839 /* This code adjusts the vertex position to match the VGPU10 convention.
5840 * If p is the position computed by the shader (usually by applying the
5841 * modelview and projection matrices), the new position q is computed by:
5842 *
5843 * q.x = p.w * trans.x + p.x * scale.x
5844 * q.y = p.w * trans.y + p.y * scale.y
5845 * q.z = p.w * trans.z + p.z * scale.z
5846 * q.w = p.w * trans.w + p.w
5847 */
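      /* For illustration (hypothetical prescale values): a setup that only
       * flips Y and remaps Z from [-1,1] to [0,1] might supply
       * scale = (1, -1, 0.5, 0) and trans = (0, 0, 0.5, 0), turning
       * p = (x, y, z, w) into q = (x, -y, 0.5*z + 0.5*w, w).
       */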
5848 struct tgsi_full_src_register tmp_pos_src_w =
5849 scalar_src(&tmp_pos_src, TGSI_SWIZZLE_W);
5850 struct tgsi_full_dst_register tmp_pos_dst =
5851 make_dst_temp_reg(vs_pos_tmp_index);
5852 struct tgsi_full_dst_register tmp_pos_dst_xyz =
5853 writemask_dst(&tmp_pos_dst, TGSI_WRITEMASK_XYZ);
5854
5855 struct tgsi_full_src_register prescale_scale =
5856 make_src_const_reg(emit->vposition.prescale_scale_index);
5857 struct tgsi_full_src_register prescale_trans =
5858 make_src_const_reg(emit->vposition.prescale_trans_index);
5859
5860 /* MUL tmp_pos.xyz, tmp_pos, prescale.scale */
5861 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_pos_dst_xyz,
5862 &tmp_pos_src, &prescale_scale, FALSE);
5863
5864 /* MAD pos, tmp_pos.wwww, prescale.trans, tmp_pos */
5865 emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &pos_dst, &tmp_pos_src_w,
5866 &prescale_trans, &tmp_pos_src, FALSE);
5867 }
5868 else if (emit->key.vs.undo_viewport) {
5869 /* This code computes the final vertex position from the temporary
5870 * vertex position by undoing the viewport transformation and the
5871 * divide-by-W operation (we convert window coords back to clip coords).
5872 * This is needed when we use the 'draw' module for fallbacks.
5873 * If p is the temp pos in window coords, then the NDC coord q is:
5874 * q.x = (p.x - vp.x_trans) / vp.x_scale * p.w
5875 * q.y = (p.y - vp.y_trans) / vp.y_scale * p.w
5876 * q.z = p.z * p.w
5877 * q.w = p.w
5878 * CONST[vs_viewport_index] contains:
5879 * { 1/vp.x_scale, 1/vp.y_scale, -vp.x_trans, -vp.y_trans }
5880 */
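      /* For illustration (hypothetical viewport): with vp.x_scale = 320 and
       * vp.x_trans = 320 (a 640-pixel-wide viewport), a window-space
       * p.x = 480 with p.w = 2 maps back to q.x = (480 - 320) / 320 * 2 = 1.
       */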
5881 struct tgsi_full_dst_register tmp_pos_dst =
5882 make_dst_temp_reg(vs_pos_tmp_index);
5883 struct tgsi_full_dst_register tmp_pos_dst_xy =
5884 writemask_dst(&tmp_pos_dst, TGSI_WRITEMASK_XY);
5885 struct tgsi_full_src_register tmp_pos_src_wwww =
5886 scalar_src(&tmp_pos_src, TGSI_SWIZZLE_W);
5887
5888 struct tgsi_full_dst_register pos_dst_xyz =
5889 writemask_dst(&pos_dst, TGSI_WRITEMASK_XYZ);
5890 struct tgsi_full_dst_register pos_dst_w =
5891 writemask_dst(&pos_dst, TGSI_WRITEMASK_W);
5892
5893 struct tgsi_full_src_register vp_xyzw =
5894 make_src_const_reg(emit->vs.viewport_index);
5895 struct tgsi_full_src_register vp_zwww =
5896 swizzle_src(&vp_xyzw, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W,
5897 TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);
5898
5899 /* ADD tmp_pos.xy, tmp_pos.xy, viewport.zwww */
5900 emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp_pos_dst_xy,
5901 &tmp_pos_src, &vp_zwww, FALSE);
5902
5903 /* MUL tmp_pos.xy, tmp_pos.xyzw, viewport.xyzw */
5904 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_pos_dst_xy,
5905 &tmp_pos_src, &vp_xyzw, FALSE);
5906
5907 /* MUL pos.xyz, tmp_pos.xyz, tmp_pos.www */
5908 emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &pos_dst_xyz,
5909 &tmp_pos_src, &tmp_pos_src_wwww, FALSE);
5910
5911 /* MOV pos.w, tmp_pos.w */
5912 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &pos_dst_w,
5913 &tmp_pos_src, FALSE);
5914 }
5915 else if (vs_pos_tmp_index != INVALID_INDEX) {
5916 /* This code is to handle the case where the temporary vertex
5917 * position register is created when the vertex shader has stream
5918 * output and prescale is disabled because rasterization is to be
5919 * discarded.
5920 */
5921 struct tgsi_full_dst_register pos_dst =
5922 make_dst_output_reg(emit->vposition.out_index);
5923
5924 /* MOV pos, tmp_pos */
5925 begin_emit_instruction(emit);
5926 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
5927 emit_dst_register(emit, &pos_dst);
5928 emit_src_register(emit, &tmp_pos_src);
5929 end_emit_instruction(emit);
5930 }
5931 }
5932
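/**
 * Emit the instructions which compute clip distances, according to the
 * clipping mode (CLIP_DISTANCE, CLIP_VERTEX or CLIP_LEGACY) determined
 * for this shader.
 */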
5933 static void
5934 emit_clipping_instructions(struct svga_shader_emitter_v10 *emit)
5935 {
5936 if (emit->clip_mode == CLIP_DISTANCE) {
5937 /* Copy from the clip distance temporary to CLIPDIST & the shadow copy */
5938 emit_clip_distance_instructions(emit);
5939
5940 } else if (emit->clip_mode == CLIP_VERTEX) {
5941 /* Convert TGSI CLIPVERTEX to CLIPDIST */
5942 emit_clip_vertex_instructions(emit);
5943 }
5944
5945 /**
5946 * Emit the vertex position and handle legacy user-defined clip planes
5947 * only if there is a valid vertex position register index.
5948 * This covers the case where the shader doesn't output a vertex
5949 * position; in that case, don't bother emitting any more vertex
5950 * instructions.
5951 */
5952 if (emit->vposition.out_index == INVALID_INDEX)
5953 return;
5954
5955 /**
5956 * Emit per-vertex clipping instructions for legacy user defined clip planes.
5957 * NOTE: we must emit the clip distance instructions before the
5958 * emit_vpos_instructions() call since the latter function will change
5959 * the TEMP[vs_pos_tmp_index] value.
5960 */
5961 if (emit->clip_mode == CLIP_LEGACY) {
5962 /* Emit CLIPDIST for legacy user defined clip planes */
5963 emit_clip_distance_from_vpos(emit, emit->vposition.tmp_index);
5964 }
5965 }
5966
5967
5968 /**
5969 * Emit extra per-vertex instructions. This includes clip-coordinate
5970 * space conversion and computing clip distances. This is called for
5971 * each GS emit-vertex instruction and at the end of VS translation.
5972 */
5973 static void
5974 emit_vertex_instructions(struct svga_shader_emitter_v10 *emit)
5975 {
5976 const unsigned vs_pos_tmp_index = emit->vposition.tmp_index;
5977
5978 /* Emit clipping instructions based on clipping mode */
5979 emit_clipping_instructions(emit);
5980
5981 /**
5982 * Reset the temporary vertex position register index
5983 * so that emit_dst_register() will use the real vertex position output
5984 */
5985 emit->vposition.tmp_index = INVALID_INDEX;
5986
5987 /* Emit vertex position instructions */
5988 emit_vpos_instructions(emit, vs_pos_tmp_index);
5989
5990 /* Restore original vposition.tmp_index value for the next GS vertex.
5991 * It doesn't matter for VS.
5992 */
5993 emit->vposition.tmp_index = vs_pos_tmp_index;
5994 }
5995
5996 /**
5997 * Translate the TGSI_OPCODE_EMIT GS instruction.
5998 */
5999 static boolean
6000 emit_vertex(struct svga_shader_emitter_v10 *emit,
6001 const struct tgsi_full_instruction *inst)
6002 {
6003 boolean ret = TRUE;
6004
6005 assert(emit->unit == PIPE_SHADER_GEOMETRY);
6006
6007 emit_vertex_instructions(emit);
6008
6009 /* We can't use emit_simple() because the TGSI instruction has one
6010 * operand (vertex stream number) which we must ignore for VGPU10.
6011 */
6012 begin_emit_instruction(emit);
6013 emit_opcode(emit, VGPU10_OPCODE_EMIT, FALSE);
6014 end_emit_instruction(emit);
6015
6016 return ret;
6017 }
6018
6019
6020 /**
6021 * Emit the extra code to convert from VGPU10's boolean front-face
6022 * register to TGSI's signed front-face register.
6023 *
6024 * TODO: Make temporary front-face register a scalar.
6025 */
6026 static void
6027 emit_frontface_instructions(struct svga_shader_emitter_v10 *emit)
6028 {
6029 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6030
6031 if (emit->fs.face_input_index != INVALID_INDEX) {
6032 /* convert vgpu10 boolean face register to gallium +/-1 value */
6033 struct tgsi_full_dst_register tmp_dst =
6034 make_dst_temp_reg(emit->fs.face_tmp_index);
6035 struct tgsi_full_src_register one =
6036 make_immediate_reg_float(emit, 1.0f);
6037 struct tgsi_full_src_register neg_one =
6038 make_immediate_reg_float(emit, -1.0f);
6039
6040 /* MOVC face_tmp, IS_FRONT_FACE.x, 1.0, -1.0 */
6041 begin_emit_instruction(emit);
6042 emit_opcode(emit, VGPU10_OPCODE_MOVC, FALSE);
6043 emit_dst_register(emit, &tmp_dst);
6044 emit_face_register(emit);
6045 emit_src_register(emit, &one);
6046 emit_src_register(emit, &neg_one);
6047 end_emit_instruction(emit);
6048 }
6049 }
6050
6051
6052 /**
6053 * Emit the extra code to convert from VGPU10's fragcoord.w value to 1/w.
6054 */
6055 static void
6056 emit_fragcoord_instructions(struct svga_shader_emitter_v10 *emit)
6057 {
6058 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6059
6060 if (emit->fs.fragcoord_input_index != INVALID_INDEX) {
6061 struct tgsi_full_dst_register tmp_dst =
6062 make_dst_temp_reg(emit->fs.fragcoord_tmp_index);
6063 struct tgsi_full_dst_register tmp_dst_xyz =
6064 writemask_dst(&tmp_dst, TGSI_WRITEMASK_XYZ);
6065 struct tgsi_full_dst_register tmp_dst_w =
6066 writemask_dst(&tmp_dst, TGSI_WRITEMASK_W);
6067 struct tgsi_full_src_register one =
6068 make_immediate_reg_float(emit, 1.0f);
6069 struct tgsi_full_src_register fragcoord =
6070 make_src_reg(TGSI_FILE_INPUT, emit->fs.fragcoord_input_index);
6071
6072 /* save the input index */
6073 unsigned fragcoord_input_index = emit->fs.fragcoord_input_index;
6074 /* set to invalid to prevent substitution in emit_src_register() */
6075 emit->fs.fragcoord_input_index = INVALID_INDEX;
6076
6077 /* MOV fragcoord_tmp.xyz, fragcoord.xyz */
6078 begin_emit_instruction(emit);
6079 emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
6080 emit_dst_register(emit, &tmp_dst_xyz);
6081 emit_src_register(emit, &fragcoord);
6082 end_emit_instruction(emit);
6083
6084 /* DIV fragcoord_tmp.w, 1.0, fragcoord.w */
6085 begin_emit_instruction(emit);
6086 emit_opcode(emit, VGPU10_OPCODE_DIV, FALSE);
6087 emit_dst_register(emit, &tmp_dst_w);
6088 emit_src_register(emit, &one);
6089 emit_src_register(emit, &fragcoord);
6090 end_emit_instruction(emit);
6091
6092 /* restore saved value */
6093 emit->fs.fragcoord_input_index = fragcoord_input_index;
6094 }
6095 }
6096
6097
6098 /**
6099 * Emit extra instructions to adjust VS inputs/attributes. This can
6100 * mean casting a vertex attribute from int to float or setting the
6101 * W component to 1, or both.
6102 */
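/* A hypothetical example: an attribute supplied as signed integers but read
 * as floats by the shader would have its bit set in adjust_attrib_itof; if
 * its W component must also be forced to 1, the same index's bit would be
 * set in adjust_attrib_w_1, and both fixups are applied below.
 */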
6103 static void
6104 emit_vertex_attrib_instructions(struct svga_shader_emitter_v10 *emit)
6105 {
6106 const unsigned save_w_1_mask = emit->key.vs.adjust_attrib_w_1;
6107 const unsigned save_itof_mask = emit->key.vs.adjust_attrib_itof;
6108 const unsigned save_utof_mask = emit->key.vs.adjust_attrib_utof;
6109 const unsigned save_is_bgra_mask = emit->key.vs.attrib_is_bgra;
6110 const unsigned save_puint_to_snorm_mask = emit->key.vs.attrib_puint_to_snorm;
6111 const unsigned save_puint_to_uscaled_mask = emit->key.vs.attrib_puint_to_uscaled;
6112 const unsigned save_puint_to_sscaled_mask = emit->key.vs.attrib_puint_to_sscaled;
6113
6114 unsigned adjust_mask = (save_w_1_mask |
6115 save_itof_mask |
6116 save_utof_mask |
6117 save_is_bgra_mask |
6118 save_puint_to_snorm_mask |
6119 save_puint_to_uscaled_mask |
6120 save_puint_to_sscaled_mask);
6121
6122 assert(emit->unit == PIPE_SHADER_VERTEX);
6123
6124 if (adjust_mask) {
6125 struct tgsi_full_src_register one =
6126 make_immediate_reg_float(emit, 1.0f);
6127
6128 struct tgsi_full_src_register one_int =
6129 make_immediate_reg_int(emit, 1);
6130
6131 /* We need to turn off these bitmasks while emitting the
6132 * instructions below, then restore them afterward.
6133 */
6134 emit->key.vs.adjust_attrib_w_1 = 0;
6135 emit->key.vs.adjust_attrib_itof = 0;
6136 emit->key.vs.adjust_attrib_utof = 0;
6137 emit->key.vs.attrib_is_bgra = 0;
6138 emit->key.vs.attrib_puint_to_snorm = 0;
6139 emit->key.vs.attrib_puint_to_uscaled = 0;
6140 emit->key.vs.attrib_puint_to_sscaled = 0;
6141
6142 while (adjust_mask) {
6143 unsigned index = u_bit_scan(&adjust_mask);
6144 unsigned tmp = emit->vs.adjusted_input[index];
6145 struct tgsi_full_src_register input_src =
6146 make_src_reg(TGSI_FILE_INPUT, index);
6147
6148 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
6149 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
6150 struct tgsi_full_dst_register tmp_dst_w =
6151 writemask_dst(&tmp_dst, TGSI_WRITEMASK_W);
6152
6153 /* ITOF/UTOF/MOV tmp, input[index] */
6154 if (save_itof_mask & (1 << index)) {
6155 emit_instruction_op1(emit, VGPU10_OPCODE_ITOF,
6156 &tmp_dst, &input_src, FALSE);
6157 }
6158 else if (save_utof_mask & (1 << index)) {
6159 emit_instruction_op1(emit, VGPU10_OPCODE_UTOF,
6160 &tmp_dst, &input_src, FALSE);
6161 }
6162 else if (save_puint_to_snorm_mask & (1 << index)) {
6163 emit_puint_to_snorm(emit, &tmp_dst, &input_src);
6164 }
6165 else if (save_puint_to_uscaled_mask & (1 << index)) {
6166 emit_puint_to_uscaled(emit, &tmp_dst, &input_src);
6167 }
6168 else if (save_puint_to_sscaled_mask & (1 << index)) {
6169 emit_puint_to_sscaled(emit, &tmp_dst, &input_src);
6170 }
6171 else {
6172 assert((save_w_1_mask | save_is_bgra_mask) & (1 << index));
6173 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6174 &tmp_dst, &input_src, FALSE);
6175 }
6176
6177 if (save_is_bgra_mask & (1 << index)) {
6178 emit_swap_r_b(emit, &tmp_dst, &tmp_src);
6179 }
6180
6181 if (save_w_1_mask & (1 << index)) {
6182 /* MOV tmp.w, 1.0 */
6183 if (emit->key.vs.attrib_is_pure_int & (1 << index)) {
6184 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6185 &tmp_dst_w, &one_int, FALSE);
6186 }
6187 else {
6188 emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
6189 &tmp_dst_w, &one, FALSE);
6190 }
6191 }
6192 }
6193
6194 emit->key.vs.adjust_attrib_w_1 = save_w_1_mask;
6195 emit->key.vs.adjust_attrib_itof = save_itof_mask;
6196 emit->key.vs.adjust_attrib_utof = save_utof_mask;
6197 emit->key.vs.attrib_is_bgra = save_is_bgra_mask;
6198 emit->key.vs.attrib_puint_to_snorm = save_puint_to_snorm_mask;
6199 emit->key.vs.attrib_puint_to_uscaled = save_puint_to_uscaled_mask;
6200 emit->key.vs.attrib_puint_to_sscaled = save_puint_to_sscaled_mask;
6201 }
6202 }
6203
6204
6205 /**
6206 * Some common values like 0.0, 1.0, 0.5, etc. are frequently needed
6207 * to implement some instructions. We pre-allocate those values here
6208 * in the immediate constant buffer.
6209 */
6210 static void
6211 alloc_common_immediates(struct svga_shader_emitter_v10 *emit)
6212 {
6213 unsigned n = 0;
6214
6215 emit->common_immediate_pos[n++] =
6216 alloc_immediate_float4(emit, 0.0f, 1.0f, 0.5f, -1.0f);
6217
6218 emit->common_immediate_pos[n++] =
6219 alloc_immediate_float4(emit, 128.0f, -128.0f, 2.0f, 3.0f);
6220
6221 emit->common_immediate_pos[n++] =
6222 alloc_immediate_int4(emit, 0, 1, 0, -1);
6223
6224 if (emit->key.vs.attrib_puint_to_snorm) {
6225 emit->common_immediate_pos[n++] =
6226 alloc_immediate_float4(emit, -2.0f, -2.0f, -2.0f, -1.66666f);
6227 }
6228
6229 if (emit->key.vs.attrib_puint_to_uscaled) {
6230 emit->common_immediate_pos[n++] =
6231 alloc_immediate_float4(emit, 1023.0f, 3.0f, 0.0f, 0.0f);
6232 }
6233
6234 if (emit->key.vs.attrib_puint_to_sscaled) {
6235 emit->common_immediate_pos[n++] =
6236 alloc_immediate_int4(emit, 22, 12, 2, 0);
6237
6238 emit->common_immediate_pos[n++] =
6239 alloc_immediate_int4(emit, 22, 30, 0, 0);
6240 }
6241
6242 assert(n <= Elements(emit->common_immediate_pos));
6243 emit->num_common_immediates = n;
6244 }
6245
6246
6247 /**
6248 * Emit any extra/helper declarations/code that we might need between
6249 * the declaration section and code section.
6250 */
6251 static boolean
6252 emit_pre_helpers(struct svga_shader_emitter_v10 *emit)
6253 {
6254 /* Properties */
6255 if (emit->unit == PIPE_SHADER_GEOMETRY)
6256 emit_property_instructions(emit);
6257
6258 /* Declare inputs */
6259 if (!emit_input_declarations(emit))
6260 return FALSE;
6261
6262 /* Declare outputs */
6263 if (!emit_output_declarations(emit))
6264 return FALSE;
6265
6266 /* Declare temporary registers */
6267 emit_temporaries_declaration(emit);
6268
6269 /* Declare constant registers */
6270 emit_constant_declaration(emit);
6271
6272 /* Declare samplers and resources */
6273 emit_sampler_declarations(emit);
6274 emit_resource_declarations(emit);
6275
6276 /* Declare clip distance output registers */
6277 if (emit->unit == PIPE_SHADER_VERTEX ||
6278 emit->unit == PIPE_SHADER_GEOMETRY) {
6279 emit_clip_distance_declarations(emit);
6280 }
6281
6282 alloc_common_immediates(emit);
6283
6284 if (emit->unit == PIPE_SHADER_FRAGMENT &&
6285 emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS) {
6286 float alpha = emit->key.fs.alpha_ref;
6287 emit->fs.alpha_ref_index =
6288 alloc_immediate_float4(emit, alpha, alpha, alpha, alpha);
6289 }
6290
6291 /* Now, emit the constant block containing all the immediates
6292 * declared by the shader, as well as the extra ones seen above.
6293 */
6294 emit_vgpu10_immediates_block(emit);
6295
6296 if (emit->unit == PIPE_SHADER_FRAGMENT) {
6297 emit_frontface_instructions(emit);
6298 emit_fragcoord_instructions(emit);
6299 }
6300 else if (emit->unit == PIPE_SHADER_VERTEX) {
6301 emit_vertex_attrib_instructions(emit);
6302 }
6303
6304 return TRUE;
6305 }
6306
6307
6308 /**
6309 * Emit alpha test code. This compares TEMP[fs_color_tmp_index].w
6310 * against the alpha reference value and discards the fragment if the
6311 * comparison fails.
6312 */
6313 static void
6314 emit_alpha_test_instructions(struct svga_shader_emitter_v10 *emit,
6315 unsigned fs_color_tmp_index)
6316 {
6317 /* compare output color's alpha to alpha ref and kill */
6318 unsigned tmp = get_temp_index(emit);
6319 struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
6320 struct tgsi_full_src_register tmp_src_x =
6321 scalar_src(&tmp_src, TGSI_SWIZZLE_X);
6322 struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
6323 struct tgsi_full_src_register color_src =
6324 make_src_temp_reg(fs_color_tmp_index);
6325 struct tgsi_full_src_register color_src_w =
6326 scalar_src(&color_src, TGSI_SWIZZLE_W);
6327 struct tgsi_full_src_register ref_src =
6328 make_src_immediate_reg(emit->fs.alpha_ref_index);
6329 struct tgsi_full_dst_register color_dst =
6330 make_dst_output_reg(emit->fs.color_out_index[0]);
6331
6332 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6333
6334 /* dst = src0 'alpha_func' src1 */
6335 emit_comparison(emit, emit->key.fs.alpha_func, &tmp_dst,
6336 &color_src_w, &ref_src);
6337
6338 /* DISCARD if dst.x == 0 */
6339 begin_emit_instruction(emit);
6340 emit_discard_opcode(emit, FALSE); /* discard if src0.x is zero */
6341 emit_src_register(emit, &tmp_src_x);
6342 end_emit_instruction(emit);
6343
6344 /* If we don't need to broadcast the color below, emit final color here */
6345 if (emit->key.fs.write_color0_to_n_cbufs <= 1) {
6346 /* MOV output.color, tempcolor */
6347 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst,
6348 &color_src, FALSE); /* XXX saturate? */
6349 }
6350
6351 free_temp_indexes(emit);
6352 }
6353
6354
6355 /**
6356 * Emit instructions for writing a single color output to multiple
6357 * color buffers.
6358 * This is used when the TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS
6359 * property is set and the number of render targets is greater than one.
6360 * \param fs_color_tmp_index index of the temp register that holds the
6361 * color to broadcast.
6362 */
6363 static void
6364 emit_broadcast_color_instructions(struct svga_shader_emitter_v10 *emit,
6365 unsigned fs_color_tmp_index)
6366 {
6367 const unsigned n = emit->key.fs.write_color0_to_n_cbufs;
6368 unsigned i;
6369 struct tgsi_full_src_register color_src =
6370 make_src_temp_reg(fs_color_tmp_index);
6371
6372 assert(emit->unit == PIPE_SHADER_FRAGMENT);
6373 assert(n > 1);
6374
6375 for (i = 0; i < n; i++) {
6376 unsigned output_reg = emit->fs.color_out_index[i];
6377 struct tgsi_full_dst_register color_dst =
6378 make_dst_output_reg(output_reg);
6379
6380 /* Fill in this semantic here since we'll use it later in
6381 * emit_dst_register().
6382 */
6383 emit->info.output_semantic_name[output_reg] = TGSI_SEMANTIC_COLOR;
6384
6385 /* MOV output.color[i], tempcolor */
6386 emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst,
6387 &color_src, FALSE); /* XXX saturate? */
6388 }
6389 }
6390
6391
6392 /**
6393 * Emit extra helper code after the original shader code, but before the
6394 * last END/RET instruction.
6395 * For vertex shaders this means emitting the extra code to apply the
6396 * prescale scale/translation.
6397 */
6398 static boolean
6399 emit_post_helpers(struct svga_shader_emitter_v10 *emit)
6400 {
6401 if (emit->unit == PIPE_SHADER_VERTEX) {
6402 emit_vertex_instructions(emit);
6403 }
6404 else if (emit->unit == PIPE_SHADER_FRAGMENT) {
6405 const unsigned fs_color_tmp_index = emit->fs.color_tmp_index;
6406
6407 /* We no longer want emit_dst_register() to substitute the
6408 * temporary fragment color register for the real color output.
6409 */
6410 emit->fs.color_tmp_index = INVALID_INDEX;
6411
6412 if (emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS) {
6413 emit_alpha_test_instructions(emit, fs_color_tmp_index);
6414 }
6415 if (emit->key.fs.write_color0_to_n_cbufs > 1) {
6416 emit_broadcast_color_instructions(emit, fs_color_tmp_index);
6417 }
6418 }
6419
6420 return TRUE;
6421 }
6422
6423
6424 /**
6425 * Translate the TGSI tokens into VGPU10 tokens.
6426 */
6427 static boolean
6428 emit_vgpu10_instructions(struct svga_shader_emitter_v10 *emit,
6429 const struct tgsi_token *tokens)
6430 {
6431 struct tgsi_parse_context parse;
6432 boolean ret = TRUE;
6433 boolean pre_helpers_emitted = FALSE;
6434 unsigned inst_number = 0;
6435
6436 tgsi_parse_init(&parse, tokens);
6437
6438 while (!tgsi_parse_end_of_tokens(&parse)) {
6439 tgsi_parse_token(&parse);
6440
6441 switch (parse.FullToken.Token.Type) {
6442 case TGSI_TOKEN_TYPE_IMMEDIATE:
6443 ret = emit_vgpu10_immediate(emit, &parse.FullToken.FullImmediate);
6444 if (!ret)
6445 goto done;
6446 break;
6447
6448 case TGSI_TOKEN_TYPE_DECLARATION:
6449 ret = emit_vgpu10_declaration(emit, &parse.FullToken.FullDeclaration);
6450 if (!ret)
6451 goto done;
6452 break;
6453
6454 case TGSI_TOKEN_TYPE_INSTRUCTION:
6455 if (!pre_helpers_emitted) {
6456 ret = emit_pre_helpers(emit);
6457 if (!ret)
6458 goto done;
6459 pre_helpers_emitted = TRUE;
6460 }
6461 ret = emit_vgpu10_instruction(emit, inst_number++,
6462 &parse.FullToken.FullInstruction);
6463 if (!ret)
6464 goto done;
6465 break;
6466
6467 case TGSI_TOKEN_TYPE_PROPERTY:
6468 ret = emit_vgpu10_property(emit, &parse.FullToken.FullProperty);
6469 if (!ret)
6470 goto done;
6471 break;
6472
6473 default:
6474 break;
6475 }
6476 }
6477
6478 done:
6479 tgsi_parse_free(&parse);
6480 return ret;
6481 }
6482
6483
6484 /**
6485 * Emit the first VGPU10 shader tokens.
6486 */
6487 static boolean
6488 emit_vgpu10_header(struct svga_shader_emitter_v10 *emit)
6489 {
6490 VGPU10ProgramToken ptoken;
6491
6492 /* First token: VGPU10ProgramToken (version info, program type (VS,GS,PS)) */
6493 ptoken.majorVersion = 4;
6494 ptoken.minorVersion = 0;
6495 ptoken.programType = translate_shader_type(emit->unit);
6496 if (!emit_dword(emit, ptoken.value))
6497 return FALSE;
6498
6499 /* Second token: total length of shader, in tokens. We can't fill this
6500 * in until we're all done. Emit zero for now.
6501 */
6502 return emit_dword(emit, 0);
6503 }
6504
6505
6506 static boolean
6507 emit_vgpu10_tail(struct svga_shader_emitter_v10 *emit)
6508 {
6509 VGPU10ProgramToken *tokens;
6510
6511 /* Replace the second token with total shader length */
6512 tokens = (VGPU10ProgramToken *) emit->buf;
6513 tokens[1].value = emit_get_num_tokens(emit);
6514
6515 return TRUE;
6516 }
6517
6518
6519 /**
6520 * Modify the FS to read the BCOLORs and use the FACE register
6521 * to choose between the front/back colors.
6522 */
6523 static const struct tgsi_token *
6524 transform_fs_twoside(const struct tgsi_token *tokens)
6525 {
6526 if (0) {
6527 debug_printf("Before tgsi_add_two_side ------------------\n");
6528 tgsi_dump(tokens,0);
6529 }
6530 tokens = tgsi_add_two_side(tokens);
6531 if (0) {
6532 debug_printf("After tgsi_add_two_side ------------------\n");
6533 tgsi_dump(tokens, 0);
6534 }
6535 return tokens;
6536 }
6537
6538
6539 /**
6540 * Modify the FS to do polygon stipple.
6541 */
6542 static const struct tgsi_token *
6543 transform_fs_pstipple(struct svga_shader_emitter_v10 *emit,
6544 const struct tgsi_token *tokens)
6545 {
6546 const struct tgsi_token *new_tokens;
6547 unsigned unit;
6548
6549 if (0) {
6550 debug_printf("Before pstipple ------------------\n");
6551 tgsi_dump(tokens,0);
6552 }
6553
6554 new_tokens = util_pstipple_create_fragment_shader(tokens, &unit, 0);
6555
6556 emit->fs.pstipple_sampler_unit = unit;
6557
6558 /* Setup texture state for stipple */
6559 emit->key.tex[unit].texture_target = PIPE_TEXTURE_2D;
6560 emit->key.tex[unit].swizzle_r = TGSI_SWIZZLE_X;
6561 emit->key.tex[unit].swizzle_g = TGSI_SWIZZLE_Y;
6562 emit->key.tex[unit].swizzle_b = TGSI_SWIZZLE_Z;
6563 emit->key.tex[unit].swizzle_a = TGSI_SWIZZLE_W;
6564
6565 if (0) {
6566 debug_printf("After pstipple ------------------\n");
6567 tgsi_dump(new_tokens, 0);
6568 }
6569
6570 return new_tokens;
6571 }
6572
6573 /**
6574 * Modify the FS to support anti-aliased points.
6575 */
6576 static const struct tgsi_token *
6577 transform_fs_aapoint(const struct tgsi_token *tokens,
6578 int aa_coord_index)
6579 {
6580 if (0) {
6581 debug_printf("Before tgsi_add_aa_point ------------------\n");
6582 tgsi_dump(tokens,0);
6583 }
6584 tokens = tgsi_add_aa_point(tokens, aa_coord_index);
6585 if (0) {
6586 debug_printf("After tgsi_add_aa_point ------------------\n");
6587 tgsi_dump(tokens, 0);
6588 }
6589 return tokens;
6590 }
6591
6592 /**
6593 * This is the main entrypoint for the TGSI -> VGPU10 translator.
6594 */
6595 struct svga_shader_variant *
6596 svga_tgsi_vgpu10_translate(struct svga_context *svga,
6597 const struct svga_shader *shader,
6598 const struct svga_compile_key *key,
6599 unsigned unit)
6600 {
6601 struct svga_shader_variant *variant = NULL;
6602 struct svga_shader_emitter_v10 *emit;
6603 const struct tgsi_token *tokens = shader->tokens;
6604 struct svga_vertex_shader *vs = svga->curr.vs;
6605 struct svga_geometry_shader *gs = svga->curr.gs;
6606
6607 assert(unit == PIPE_SHADER_VERTEX ||
6608 unit == PIPE_SHADER_GEOMETRY ||
6609 unit == PIPE_SHADER_FRAGMENT);
6610
6611 /* These two flags cannot be used together */
6612 assert(key->vs.need_prescale + key->vs.undo_viewport <= 1);
6613
6614 /*
6615 * Setup the code emitter
6616 */
6617 emit = alloc_emitter();
6618 if (!emit)
6619 return NULL;
6620
6621 emit->unit = unit;
6622 emit->key = *key;
6623
6624 emit->vposition.need_prescale = (emit->key.vs.need_prescale ||
6625 emit->key.gs.need_prescale);
6626 emit->vposition.tmp_index = INVALID_INDEX;
6627 emit->vposition.so_index = INVALID_INDEX;
6628 emit->vposition.out_index = INVALID_INDEX;
6629
6630 emit->fs.color_tmp_index = INVALID_INDEX;
6631 emit->fs.face_input_index = INVALID_INDEX;
6632 emit->fs.fragcoord_input_index = INVALID_INDEX;
6633
6634 emit->gs.prim_id_index = INVALID_INDEX;
6635
6636 emit->clip_dist_out_index = INVALID_INDEX;
6637 emit->clip_dist_tmp_index = INVALID_INDEX;
6638 emit->clip_dist_so_index = INVALID_INDEX;
6639 emit->clip_vertex_out_index = INVALID_INDEX;
6640
6641 if (emit->key.fs.alpha_func == SVGA3D_CMP_INVALID) {
6642 emit->key.fs.alpha_func = SVGA3D_CMP_ALWAYS;
6643 }
6644
6645 if (unit == PIPE_SHADER_FRAGMENT) {
6646 if (key->fs.light_twoside) {
6647 tokens = transform_fs_twoside(tokens);
6648 }
6649 if (key->fs.pstipple) {
6650 const struct tgsi_token *new_tokens =
6651 transform_fs_pstipple(emit, tokens);
6652 if (tokens != shader->tokens) {
6653 /* free the two-sided shader tokens */
6654 tgsi_free_tokens(tokens);
6655 }
6656 tokens = new_tokens;
6657 }
6658 if (key->fs.aa_point) {
6659 tokens = transform_fs_aapoint(tokens, key->fs.aa_point_coord_index);
6660 }
6661 }
6662
6663 if (SVGA_DEBUG & DEBUG_TGSI) {
6664 debug_printf("#####################################\n");
6665 debug_printf("### TGSI Shader %u\n", shader->id);
6666 tgsi_dump(tokens, 0);
6667 }
6668
6669 /**
6670 * Rescan the shader info if the token string differs from the one
6671 * stored in the shader; otherwise, the existing info is already up to date.
6672 */
6673 if (tokens != shader->tokens) {
6674 tgsi_scan_shader(tokens, &emit->info);
6675 } else {
6676 emit->info = shader->info;
6677 }
6678
6679 emit->num_outputs = emit->info.num_outputs;
6680
6681 if (unit == PIPE_SHADER_FRAGMENT) {
6682 /* Compute FS input remapping to match the output from VS/GS */
6683 if (gs) {
6684 svga_link_shaders(&gs->base.info, &emit->info, &emit->linkage);
6685 } else {
6686 assert(vs);
6687 svga_link_shaders(&vs->base.info, &emit->info, &emit->linkage);
6688 }
6689 } else if (unit == PIPE_SHADER_GEOMETRY) {
6690 assert(vs);
6691 svga_link_shaders(&vs->base.info, &emit->info, &emit->linkage);
6692 }
6693
6694 determine_clipping_mode(emit);
6695
6696 if (unit == PIPE_SHADER_GEOMETRY || unit == PIPE_SHADER_VERTEX) {
6697 if (shader->stream_output != NULL || emit->clip_mode == CLIP_DISTANCE) {
6698 /* if there are stream output declarations associated
6699 * with this shader or the shader writes to ClipDistance
6700 * then reserve extra registers for the non-adjusted vertex position
6701 * and the ClipDistance shadow copy
6702 */
6703 emit->vposition.so_index = emit->num_outputs++;
6704
6705 if (emit->clip_mode == CLIP_DISTANCE) {
6706 emit->clip_dist_so_index = emit->num_outputs++;
6707 if (emit->info.num_written_clipdistance > 4)
6708 emit->num_outputs++;
6709 }
6710 }
6711 }
6712
6713 /*
6714 * Do actual shader translation.
6715 */
6716 if (!emit_vgpu10_header(emit)) {
6717 debug_printf("svga: emit VGPU10 header failed\n");
6718 goto cleanup;
6719 }
6720
6721 if (!emit_vgpu10_instructions(emit, tokens)) {
6722 debug_printf("svga: emit VGPU10 instructions failed\n");
6723 goto cleanup;
6724 }
6725
6726 if (!emit_vgpu10_tail(emit)) {
6727 debug_printf("svga: emit VGPU10 tail failed\n");
6728 goto cleanup;
6729 }
6730
6731 if (emit->register_overflow) {
6732 goto cleanup;
6733 }
6734
6735 /*
6736 * Create, initialize the 'variant' object.
6737 */
6738 variant = CALLOC_STRUCT(svga_shader_variant);
6739 if (!variant)
6740 goto cleanup;
6741
6742 variant->shader = shader;
6743 variant->nr_tokens = emit_get_num_tokens(emit);
6744 variant->tokens = (const unsigned *)emit->buf;
6745 emit->buf = NULL; /* buffer is no longer owned by emitter context */
6746 memcpy(&variant->key, key, sizeof(*key));
6747 variant->id = UTIL_BITMASK_INVALID_INDEX;
6748
6749 /* The starting offset for the extra constants is the number of shader
6750 * constants declared in the shader.
6751 */
6752 variant->extra_const_start = emit->num_shader_consts[0];
6753 if (key->gs.wide_point) {
6754 /**
6755 * The extra constant added to the transformed shader for the inverse
6756 * viewport scale is supplied by the driver, so the extra constant
6757 * starting offset needs to be reduced by 1.
6758 */
6759 assert(variant->extra_const_start > 0);
6760 variant->extra_const_start--;
6761 }
6762
6763 variant->pstipple_sampler_unit = emit->fs.pstipple_sampler_unit;
6764
6765 /** Record in the variant whether flat interpolation is used
6766 * for any of the varyings.
6767 */
6768 variant->uses_flat_interp = emit->uses_flat_interp;
6769
6770 if (tokens != shader->tokens) {
6771 tgsi_free_tokens(tokens);
6772 }
6773
6774 cleanup:
6775 free_emitter(emit);
6776
6777 return variant;
6778 }