1 /**********************************************************
2 * Copyright 1998-2013 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 **********************************************************/
27 * @file svga_tgsi_vgpu10.c
29 * TGSI -> VGPU10 shader translation.
31 * \author Mingcheng Chen
35 #include "pipe/p_compiler.h"
36 #include "pipe/p_shader_tokens.h"
37 #include "pipe/p_defines.h"
38 #include "tgsi/tgsi_build.h"
39 #include "tgsi/tgsi_dump.h"
40 #include "tgsi/tgsi_info.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_scan.h"
43 #include "tgsi/tgsi_two_side.h"
44 #include "tgsi/tgsi_aa_point.h"
45 #include "tgsi/tgsi_util.h"
46 #include "util/u_math.h"
47 #include "util/u_memory.h"
48 #include "util/u_bitmask.h"
49 #include "util/u_debug.h"
50 #include "util/u_pstipple.h"
52 #include "svga_context.h"
53 #include "svga_debug.h"
54 #include "svga_link.h"
55 #include "svga_shader.h"
56 #include "svga_tgsi.h"
58 #include "VGPU10ShaderTokens.h"
61 #define INVALID_INDEX 99999
62 #define MAX_INTERNAL_TEMPS 3
63 #define MAX_SYSTEM_VALUES 4
64 #define MAX_IMMEDIATE_COUNT \
65 (VGPU10_MAX_IMMEDIATE_CONSTANT_BUFFER_ELEMENT_COUNT/4)
66 #define MAX_TEMP_ARRAYS 64 /* Enough? */
70 * Clipping is complicated. There's four different cases which we
71 * handle during VS/GS shader translation:
75 CLIP_NONE
, /**< No clipping enabled */
76 CLIP_LEGACY
, /**< The shader has no clipping declarations or code but
77 * one or more user-defined clip planes are enabled. We
78 * generate extra code to emit clip distances.
80 CLIP_DISTANCE
, /**< The shader already declares clip distance output
81 * registers and has code to write to them.
83 CLIP_VERTEX
/**< The shader declares a clip vertex output register and
84 * has code that writes to the register. We convert the
85 * clipvertex position into one or more clip distances.
90 struct svga_shader_emitter_v10
92 /* The token output buffer */
97 /* Information about the shader and state (does not change) */
98 struct svga_compile_key key
;
99 struct tgsi_shader_info info
;
102 unsigned inst_start_token
;
103 boolean discard_instruction
; /**< throw away current instruction? */
105 union tgsi_immediate_data immediates
[MAX_IMMEDIATE_COUNT
][4];
106 unsigned num_immediates
; /**< Number of immediates emitted */
107 unsigned common_immediate_pos
[8]; /**< literals for common immediates */
108 unsigned num_common_immediates
;
109 boolean immediates_emitted
;
111 unsigned num_outputs
; /**< include any extra outputs */
112 /** The first extra output is reserved for
113 * non-adjusted vertex position for
114 * stream output purpose
117 /* Temporary Registers */
118 unsigned num_shader_temps
; /**< num of temps used by original shader */
119 unsigned internal_temp_count
; /**< currently allocated internal temps */
121 unsigned start
, size
;
122 } temp_arrays
[MAX_TEMP_ARRAYS
];
123 unsigned num_temp_arrays
;
125 /** Map TGSI temp registers to VGPU10 temp array IDs and indexes */
127 unsigned arrayId
, index
;
128 } temp_map
[VGPU10_MAX_TEMPS
]; /**< arrayId, element */
130 /** Number of constants used by original shader for each constant buffer.
131 * The size should probably always match with that of svga_state.constbufs.
133 unsigned num_shader_consts
[SVGA_MAX_CONST_BUFS
];
136 unsigned num_samplers
;
137 boolean sampler_view
[PIPE_MAX_SAMPLERS
]; /**< True if sampler view exists*/
138 ubyte sampler_target
[PIPE_MAX_SAMPLERS
]; /**< TGSI_TEXTURE_x */
139 ubyte sampler_return_type
[PIPE_MAX_SAMPLERS
]; /**< TGSI_RETURN_TYPE_x */
141 /* Address regs (really implemented with temps) */
142 unsigned num_address_regs
;
143 unsigned address_reg_index
[MAX_VGPU10_ADDR_REGS
];
145 /* Output register usage masks */
146 ubyte output_usage_mask
[PIPE_MAX_SHADER_OUTPUTS
];
148 /* To map TGSI system value index to VGPU shader input indexes */
149 ubyte system_value_indexes
[MAX_SYSTEM_VALUES
];
152 /* vertex position scale/translation */
153 unsigned out_index
; /**< the real position output reg */
154 unsigned tmp_index
; /**< the fake/temp position output reg */
155 unsigned so_index
; /**< the non-adjusted position output reg */
156 unsigned prescale_scale_index
, prescale_trans_index
;
157 boolean need_prescale
;
160 /* For vertex shaders only */
162 /* viewport constant */
163 unsigned viewport_index
;
165 /* temp index of adjusted vertex attributes */
166 unsigned adjusted_input
[PIPE_MAX_SHADER_INPUTS
];
169 /* For fragment shaders only */
171 unsigned color_out_index
[PIPE_MAX_COLOR_BUFS
]; /**< the real color output regs */
172 unsigned num_color_outputs
;
173 unsigned color_tmp_index
; /**< fake/temp color output reg */
174 unsigned alpha_ref_index
; /**< immediate constant for alpha ref */
177 unsigned face_input_index
; /**< real fragment shader face reg (bool) */
178 unsigned face_tmp_index
; /**< temp face reg converted to -1 / +1 */
180 unsigned pstipple_sampler_unit
;
182 unsigned fragcoord_input_index
; /**< real fragment position input reg */
183 unsigned fragcoord_tmp_index
; /**< 1/w modified position temp reg */
185 /** Which texture units are doing shadow comparison in the FS code */
186 unsigned shadow_compare_units
;
189 /* For geometry shaders only */
191 VGPU10_PRIMITIVE prim_type
;/**< VGPU10 primitive type */
192 VGPU10_PRIMITIVE_TOPOLOGY prim_topology
; /**< VGPU10 primitive topology */
193 unsigned input_size
; /**< size of input arrays */
194 unsigned prim_id_index
; /**< primitive id register index */
195 unsigned max_out_vertices
; /**< maximum number of output vertices */
198 /* For vertex or geometry shaders */
199 enum clipping_mode clip_mode
;
200 unsigned clip_dist_out_index
; /**< clip distance output register index */
201 unsigned clip_dist_tmp_index
; /**< clip distance temporary register */
202 unsigned clip_dist_so_index
; /**< clip distance shadow copy */
204 /** Index of temporary holding the clipvertex coordinate */
205 unsigned clip_vertex_out_index
; /**< clip vertex output register index */
206 unsigned clip_vertex_tmp_index
; /**< clip vertex temporary index */
208 /* user clip plane constant slot indexes */
209 unsigned clip_plane_const
[PIPE_MAX_CLIP_PLANES
];
211 unsigned num_output_writes
;
212 boolean constant_color_output
;
214 boolean uses_flat_interp
;
216 /* For all shaders: const reg index for RECT coord scaling */
217 unsigned texcoord_scale_index
[PIPE_MAX_SAMPLERS
];
219 /* For all shaders: const reg index for texture buffer size */
220 unsigned texture_buffer_size_index
[PIPE_MAX_SAMPLERS
];
222 /* VS/GS/FS Linkage info */
223 struct shader_linkage linkage
;
225 bool register_overflow
; /**< Set if we exceed a VGPU10 register limit */
230 emit_post_helpers(struct svga_shader_emitter_v10
*emit
);
233 emit_vertex(struct svga_shader_emitter_v10
*emit
,
234 const struct tgsi_full_instruction
*inst
);
236 static char err_buf
[128];
239 expand(struct svga_shader_emitter_v10
*emit
)
242 unsigned newsize
= emit
->size
* 2;
244 if (emit
->buf
!= err_buf
)
245 new_buf
= REALLOC(emit
->buf
, emit
->size
, newsize
);
252 emit
->size
= sizeof(err_buf
);
256 emit
->size
= newsize
;
257 emit
->ptr
= new_buf
+ (emit
->ptr
- emit
->buf
);
263 * Create and initialize a new svga_shader_emitter_v10 object.
265 static struct svga_shader_emitter_v10
*
268 struct svga_shader_emitter_v10
*emit
= CALLOC(1, sizeof(*emit
));
273 /* to initialize the output buffer */
283 * Free an svga_shader_emitter_v10 object.
286 free_emitter(struct svga_shader_emitter_v10
*emit
)
289 FREE(emit
->buf
); /* will be NULL if translation succeeded */
293 static inline boolean
294 reserve(struct svga_shader_emitter_v10
*emit
,
297 while (emit
->ptr
- emit
->buf
+ nr_dwords
* sizeof(uint32
) >= emit
->size
) {
306 emit_dword(struct svga_shader_emitter_v10
*emit
, uint32 dword
)
308 if (!reserve(emit
, 1))
311 *(uint32
*)emit
->ptr
= dword
;
312 emit
->ptr
+= sizeof dword
;
317 emit_dwords(struct svga_shader_emitter_v10
*emit
,
318 const uint32
*dwords
,
321 if (!reserve(emit
, nr
))
324 memcpy(emit
->ptr
, dwords
, nr
* sizeof *dwords
);
325 emit
->ptr
+= nr
* sizeof *dwords
;
329 /** Return the number of tokens in the emitter's buffer */
331 emit_get_num_tokens(const struct svga_shader_emitter_v10
*emit
)
333 return (emit
->ptr
- emit
->buf
) / sizeof(unsigned);
/**
 * Check for register overflow.  If we overflow we'll set an
 * error flag.  This function can be called for register declarations
 * or use as src/dst instruction operands.
 * \param operandType  register type.  One of VGPU10_OPERAND_TYPE_x
 *                     or VGPU10_OPCODE_DCL_x
 * \param index  the register index
 */
static void
check_register_index(struct svga_shader_emitter_v10 *emit,
                     unsigned operandType, unsigned index)
{
   /* remember prior state so we only report the first overflow */
   bool overflow_before = emit->register_overflow;

   switch (operandType) {
   case VGPU10_OPERAND_TYPE_TEMP:
   case VGPU10_OPERAND_TYPE_INDEXABLE_TEMP:
   case VGPU10_OPCODE_DCL_TEMPS:
      if (index >= VGPU10_MAX_TEMPS) {
         emit->register_overflow = TRUE;
      }
      break;
   case VGPU10_OPERAND_TYPE_CONSTANT_BUFFER:
   case VGPU10_OPCODE_DCL_CONSTANT_BUFFER:
      if (index >= VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT) {
         emit->register_overflow = TRUE;
      }
      break;
   case VGPU10_OPERAND_TYPE_INPUT:
   case VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID:
   case VGPU10_OPCODE_DCL_INPUT:
   case VGPU10_OPCODE_DCL_INPUT_SGV:
   case VGPU10_OPCODE_DCL_INPUT_SIV:
   case VGPU10_OPCODE_DCL_INPUT_PS:
   case VGPU10_OPCODE_DCL_INPUT_PS_SGV:
   case VGPU10_OPCODE_DCL_INPUT_PS_SIV:
      /* input register limits differ per shader stage */
      if ((emit->unit == PIPE_SHADER_VERTEX &&
           index >= VGPU10_MAX_VS_INPUTS) ||
          (emit->unit == PIPE_SHADER_GEOMETRY &&
           index >= VGPU10_MAX_GS_INPUTS) ||
          (emit->unit == PIPE_SHADER_FRAGMENT &&
           index >= VGPU10_MAX_FS_INPUTS)) {
         emit->register_overflow = TRUE;
      }
      break;
   case VGPU10_OPERAND_TYPE_OUTPUT:
   case VGPU10_OPCODE_DCL_OUTPUT:
   case VGPU10_OPCODE_DCL_OUTPUT_SGV:
   case VGPU10_OPCODE_DCL_OUTPUT_SIV:
      /* output register limits also differ per shader stage */
      if ((emit->unit == PIPE_SHADER_VERTEX &&
           index >= VGPU10_MAX_VS_OUTPUTS) ||
          (emit->unit == PIPE_SHADER_GEOMETRY &&
           index >= VGPU10_MAX_GS_OUTPUTS) ||
          (emit->unit == PIPE_SHADER_FRAGMENT &&
           index >= VGPU10_MAX_FS_OUTPUTS)) {
         emit->register_overflow = TRUE;
      }
      break;
   case VGPU10_OPERAND_TYPE_SAMPLER:
   case VGPU10_OPCODE_DCL_SAMPLER:
      if (index >= VGPU10_MAX_SAMPLERS) {
         emit->register_overflow = TRUE;
      }
      break;
   case VGPU10_OPERAND_TYPE_RESOURCE:
   case VGPU10_OPCODE_DCL_RESOURCE:
      if (index >= VGPU10_MAX_RESOURCES) {
         emit->register_overflow = TRUE;
      }
      break;
   case VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER:
      if (index >= MAX_IMMEDIATE_COUNT) {
         emit->register_overflow = TRUE;
      }
      break;
   default:
      assert(0); /* not implemented */
   }

   /* Report only the first overflow, not every subsequent one */
   if (emit->register_overflow && !overflow_before) {
      debug_printf("svga: vgpu10 register overflow (reg %u, index %u)\n",
                   operandType, index);
   }
}
/**
 * Examine misc state to determine the clipping mode.
 * Sets emit->clip_mode to one of the clipping_mode enum values
 * based on what the shader writes and which user planes are enabled.
 */
static void
determine_clipping_mode(struct svga_shader_emitter_v10 *emit)
{
   if (emit->info.num_written_clipdistance > 0) {
      /* Shader writes clip distance outputs directly */
      emit->clip_mode = CLIP_DISTANCE;
   }
   else if (emit->info.writes_clipvertex) {
      /* Shader writes a clip vertex; it will be converted to distances */
      emit->clip_mode = CLIP_VERTEX;
   }
   else if (emit->key.clip_plane_enable) {
      /* User clip planes enabled but shader has no clipping code */
      emit->clip_mode = CLIP_LEGACY;
   }
   else {
      emit->clip_mode = CLIP_NONE;
   }
}
446 * For clip distance register declarations and clip distance register
447 * writes we need to mask the declaration usage or instruction writemask
448 * (respectively) against the set of the really-enabled clipping planes.
450 * The piglit test spec/glsl-1.30/execution/clipping/vs-clip-distance-enables
451 * has a VS that writes to all 8 clip distance registers, but the plane enable
452 * flags are a subset of that.
454 * This function is used to apply the plane enable flags to the register
455 * declaration or instruction writemask.
457 * \param writemask the declaration usage mask or instruction writemask
458 * \param clip_reg_index which clip plane register is being declared/written.
459 * The legal values are 0 and 1 (four clip planes per
460 * register, for a total of 8 clip planes)
static unsigned
apply_clip_plane_mask(struct svga_shader_emitter_v10 *emit,
                      unsigned writemask, unsigned clip_reg_index)
{
   unsigned shift;

   /* only two clip-distance registers exist (8 planes, 4 per register) */
   assert(clip_reg_index < 2);

   /* four clip planes per clip register: */
   shift = clip_reg_index * 4;
   /* keep only the components whose corresponding plane is enabled */
   writemask &= ((emit->key.clip_plane_enable >> shift) & 0xf);

   return writemask;
}
479 * Translate gallium shader type into VGPU10 type.
481 static VGPU10_PROGRAM_TYPE
482 translate_shader_type(unsigned type
)
485 case PIPE_SHADER_VERTEX
:
486 return VGPU10_VERTEX_SHADER
;
487 case PIPE_SHADER_GEOMETRY
:
488 return VGPU10_GEOMETRY_SHADER
;
489 case PIPE_SHADER_FRAGMENT
:
490 return VGPU10_PIXEL_SHADER
;
492 assert(!"Unexpected shader type");
493 return VGPU10_VERTEX_SHADER
;
/**
 * Translate a TGSI_OPCODE_x into a VGPU10_OPCODE_x.
 * Note: we only need to translate the opcodes for "simple" instructions,
 * as seen below.  All other opcodes are handled/translated specially.
 */
static VGPU10_OPCODE_TYPE
translate_opcode(enum tgsi_opcode opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_MOV:
      return VGPU10_OPCODE_MOV;
   case TGSI_OPCODE_MUL:
      return VGPU10_OPCODE_MUL;
   case TGSI_OPCODE_ADD:
      return VGPU10_OPCODE_ADD;
   case TGSI_OPCODE_DP3:
      return VGPU10_OPCODE_DP3;
   case TGSI_OPCODE_DP4:
      return VGPU10_OPCODE_DP4;
   case TGSI_OPCODE_MIN:
      return VGPU10_OPCODE_MIN;
   case TGSI_OPCODE_MAX:
      return VGPU10_OPCODE_MAX;
   case TGSI_OPCODE_MAD:
      return VGPU10_OPCODE_MAD;
   case TGSI_OPCODE_SQRT:
      return VGPU10_OPCODE_SQRT;
   case TGSI_OPCODE_FRC:
      return VGPU10_OPCODE_FRC;
   case TGSI_OPCODE_FLR:
      /* floor: round toward negative infinity */
      return VGPU10_OPCODE_ROUND_NI;
   case TGSI_OPCODE_FSEQ:
      return VGPU10_OPCODE_EQ;
   case TGSI_OPCODE_FSGE:
      return VGPU10_OPCODE_GE;
   case TGSI_OPCODE_FSNE:
      return VGPU10_OPCODE_NE;
   case TGSI_OPCODE_DDX:
      return VGPU10_OPCODE_DERIV_RTX;
   case TGSI_OPCODE_DDY:
      return VGPU10_OPCODE_DERIV_RTY;
   case TGSI_OPCODE_RET:
      return VGPU10_OPCODE_RET;
   case TGSI_OPCODE_DIV:
      return VGPU10_OPCODE_DIV;
   case TGSI_OPCODE_IDIV:
      return VGPU10_OPCODE_IDIV;
   case TGSI_OPCODE_DP2:
      return VGPU10_OPCODE_DP2;
   case TGSI_OPCODE_BRK:
      return VGPU10_OPCODE_BREAK;
   case TGSI_OPCODE_IF:
      return VGPU10_OPCODE_IF;
   case TGSI_OPCODE_ELSE:
      return VGPU10_OPCODE_ELSE;
   case TGSI_OPCODE_ENDIF:
      return VGPU10_OPCODE_ENDIF;
   case TGSI_OPCODE_CEIL:
      /* ceil: round toward positive infinity */
      return VGPU10_OPCODE_ROUND_PI;
   case TGSI_OPCODE_I2F:
      return VGPU10_OPCODE_ITOF;
   case TGSI_OPCODE_NOT:
      return VGPU10_OPCODE_NOT;
   case TGSI_OPCODE_TRUNC:
      /* trunc: round toward zero */
      return VGPU10_OPCODE_ROUND_Z;
   case TGSI_OPCODE_SHL:
      return VGPU10_OPCODE_ISHL;
   case TGSI_OPCODE_AND:
      return VGPU10_OPCODE_AND;
   case TGSI_OPCODE_OR:
      return VGPU10_OPCODE_OR;
   case TGSI_OPCODE_XOR:
      return VGPU10_OPCODE_XOR;
   case TGSI_OPCODE_CONT:
      return VGPU10_OPCODE_CONTINUE;
   case TGSI_OPCODE_EMIT:
      return VGPU10_OPCODE_EMIT;
   case TGSI_OPCODE_ENDPRIM:
      return VGPU10_OPCODE_CUT;
   case TGSI_OPCODE_BGNLOOP:
      return VGPU10_OPCODE_LOOP;
   case TGSI_OPCODE_ENDLOOP:
      return VGPU10_OPCODE_ENDLOOP;
   case TGSI_OPCODE_ENDSUB:
      /* subroutine end translates to a return */
      return VGPU10_OPCODE_RET;
   case TGSI_OPCODE_NOP:
      return VGPU10_OPCODE_NOP;
   case TGSI_OPCODE_END:
      /* shader end also translates to a return */
      return VGPU10_OPCODE_RET;
   case TGSI_OPCODE_F2I:
      return VGPU10_OPCODE_FTOI;
   case TGSI_OPCODE_IMAX:
      return VGPU10_OPCODE_IMAX;
   case TGSI_OPCODE_IMIN:
      return VGPU10_OPCODE_IMIN;
   case TGSI_OPCODE_UDIV:
   case TGSI_OPCODE_UMOD:
   case TGSI_OPCODE_MOD:
      /* all three are implemented with the VGPU10 UDIV instruction */
      return VGPU10_OPCODE_UDIV;
   case TGSI_OPCODE_IMUL_HI:
      return VGPU10_OPCODE_IMUL;
   case TGSI_OPCODE_INEG:
      return VGPU10_OPCODE_INEG;
   case TGSI_OPCODE_ISHR:
      return VGPU10_OPCODE_ISHR;
   case TGSI_OPCODE_ISGE:
      return VGPU10_OPCODE_IGE;
   case TGSI_OPCODE_ISLT:
      return VGPU10_OPCODE_ILT;
   case TGSI_OPCODE_F2U:
      return VGPU10_OPCODE_FTOU;
   case TGSI_OPCODE_UADD:
      return VGPU10_OPCODE_IADD;
   case TGSI_OPCODE_U2F:
      return VGPU10_OPCODE_UTOF;
   case TGSI_OPCODE_UCMP:
      return VGPU10_OPCODE_MOVC;
   case TGSI_OPCODE_UMAD:
      return VGPU10_OPCODE_UMAD;
   case TGSI_OPCODE_UMAX:
      return VGPU10_OPCODE_UMAX;
   case TGSI_OPCODE_UMIN:
      return VGPU10_OPCODE_UMIN;
   case TGSI_OPCODE_UMUL:
   case TGSI_OPCODE_UMUL_HI:
      /* both use the VGPU10 UMUL instruction */
      return VGPU10_OPCODE_UMUL;
   case TGSI_OPCODE_USEQ:
      return VGPU10_OPCODE_IEQ;
   case TGSI_OPCODE_USGE:
      return VGPU10_OPCODE_UGE;
   case TGSI_OPCODE_USHR:
      return VGPU10_OPCODE_USHR;
   case TGSI_OPCODE_USLT:
      return VGPU10_OPCODE_ULT;
   case TGSI_OPCODE_USNE:
      return VGPU10_OPCODE_INE;
   case TGSI_OPCODE_SWITCH:
      return VGPU10_OPCODE_SWITCH;
   case TGSI_OPCODE_CASE:
      return VGPU10_OPCODE_CASE;
   case TGSI_OPCODE_DEFAULT:
      return VGPU10_OPCODE_DEFAULT;
   case TGSI_OPCODE_ENDSWITCH:
      return VGPU10_OPCODE_ENDSWITCH;
   case TGSI_OPCODE_FSLT:
      return VGPU10_OPCODE_LT;
   case TGSI_OPCODE_ROUND:
      /* round to nearest even */
      return VGPU10_OPCODE_ROUND_NE;
   default:
      assert(!"Unexpected TGSI opcode in translate_opcode()");
      return VGPU10_OPCODE_NOP;
   }
}
/**
 * Translate a TGSI register file type into a VGPU10 operand type.
 * \param array  is the TGSI_FILE_TEMPORARY register an array?
 */
static VGPU10_OPERAND_TYPE
translate_register_file(enum tgsi_file_type file, boolean array)
{
   switch (file) {
   case TGSI_FILE_CONSTANT:
      return VGPU10_OPERAND_TYPE_CONSTANT_BUFFER;
   case TGSI_FILE_INPUT:
      return VGPU10_OPERAND_TYPE_INPUT;
   case TGSI_FILE_OUTPUT:
      return VGPU10_OPERAND_TYPE_OUTPUT;
   case TGSI_FILE_TEMPORARY:
      /* indexable (array) temps use a distinct operand type */
      return array ? VGPU10_OPERAND_TYPE_INDEXABLE_TEMP
                   : VGPU10_OPERAND_TYPE_TEMP;
   case TGSI_FILE_IMMEDIATE:
      /* all immediates are 32-bit values at this time so
       * VGPU10_OPERAND_TYPE_IMMEDIATE64 is not possible at this time.
       */
      return VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER;
   case TGSI_FILE_SAMPLER:
      return VGPU10_OPERAND_TYPE_SAMPLER;
   case TGSI_FILE_SYSTEM_VALUE:
      /* system values are declared as shader inputs */
      return VGPU10_OPERAND_TYPE_INPUT;
   /* XXX TODO more cases to finish */
   default:
      assert(!"Bad tgsi register file!");
      return VGPU10_OPERAND_TYPE_NULL;
   }
}
/**
 * Emit a null dst register.
 */
static void
emit_null_dst_register(struct svga_shader_emitter_v10 *emit)
{
   VGPU10OperandToken0 operand;

   operand.value = 0;
   operand.operandType = VGPU10_OPERAND_TYPE_NULL;
   operand.numComponents = VGPU10_OPERAND_0_COMPONENT;

   emit_dword(emit, operand.value);
}
/**
 * If the given register is a temporary, return the array ID.
 * Otherwise return zero (meaning "not an array").
 */
static unsigned
get_temp_array_id(const struct svga_shader_emitter_v10 *emit,
                  enum tgsi_file_type file, unsigned index)
{
   if (file == TGSI_FILE_TEMPORARY) {
      return emit->temp_map[index].arrayId;
   }
   else {
      return 0;
   }
}
/**
 * If the given register is a temporary, convert the index from a TGSI
 * TEMPORARY index to a VGPU10 temp index.  Other register files pass
 * their index through unchanged.
 */
static unsigned
remap_temp_index(const struct svga_shader_emitter_v10 *emit,
                 enum tgsi_file_type file, unsigned index)
{
   if (file == TGSI_FILE_TEMPORARY) {
      return emit->temp_map[index].index;
   }
   else {
      return index;
   }
}
740 * Setup the operand0 fields related to indexing (1D, 2D, relative, etc).
741 * Note: the operandType field must already be initialized.
743 static VGPU10OperandToken0
744 setup_operand0_indexing(struct svga_shader_emitter_v10
*emit
,
745 VGPU10OperandToken0 operand0
,
746 enum tgsi_file_type file
,
747 boolean indirect
, boolean index2D
,
748 unsigned tempArrayID
)
750 unsigned indexDim
, index0Rep
, index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
753 * Compute index dimensions
755 if (operand0
.operandType
== VGPU10_OPERAND_TYPE_IMMEDIATE32
||
756 operand0
.operandType
== VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
) {
757 /* there's no swizzle for in-line immediates */
758 indexDim
= VGPU10_OPERAND_INDEX_0D
;
759 assert(operand0
.selectionMode
== 0);
764 operand0
.operandType
== VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
) {
765 indexDim
= VGPU10_OPERAND_INDEX_2D
;
768 indexDim
= VGPU10_OPERAND_INDEX_1D
;
773 * Compute index representations (immediate, relative, etc).
775 if (tempArrayID
> 0) {
776 assert(file
== TGSI_FILE_TEMPORARY
);
777 /* First index is the array ID, second index is the array element */
778 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
780 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE
;
783 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
787 if (file
== TGSI_FILE_CONSTANT
) {
788 /* index[0] indicates which constant buffer while index[1] indicates
789 * the position in the constant buffer.
791 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
792 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE
;
795 /* All other register files are 1-dimensional */
796 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE
;
800 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
801 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
804 operand0
.indexDimension
= indexDim
;
805 operand0
.index0Representation
= index0Rep
;
806 operand0
.index1Representation
= index1Rep
;
/**
 * Emit the operand for expressing an address register for indirect indexing.
 * Note that the address register is really just a temp register.
 * \param addr_reg_index  which address register to use
 */
static void
emit_indirect_register(struct svga_shader_emitter_v10 *emit,
                       unsigned addr_reg_index)
{
   unsigned tmp_reg_index;
   VGPU10OperandToken0 operand0;

   assert(addr_reg_index < MAX_VGPU10_ADDR_REGS);

   /* look up the temp register that backs this address register */
   tmp_reg_index = emit->address_reg_index[addr_reg_index];

   /* operand0 is a simple temporary register, selecting one component */
   operand0.value = 0;
   operand0.operandType = VGPU10_OPERAND_TYPE_TEMP;
   operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
   operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
   operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
   operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
   /* identity swizzle: x,y,z,w */
   operand0.swizzleX = 0;
   operand0.swizzleY = 1;
   operand0.swizzleZ = 2;
   operand0.swizzleW = 3;

   emit_dword(emit, operand0.value);
   emit_dword(emit, remap_temp_index(emit, TGSI_FILE_TEMPORARY, tmp_reg_index));
}
846 * Translate the dst register of a TGSI instruction and emit VGPU10 tokens.
847 * \param emit the emitter context
848 * \param reg the TGSI dst register to translate
851 emit_dst_register(struct svga_shader_emitter_v10
*emit
,
852 const struct tgsi_full_dst_register
*reg
)
854 enum tgsi_file_type file
= reg
->Register
.File
;
855 unsigned index
= reg
->Register
.Index
;
856 const enum tgsi_semantic sem_name
= emit
->info
.output_semantic_name
[index
];
857 const unsigned sem_index
= emit
->info
.output_semantic_index
[index
];
858 unsigned writemask
= reg
->Register
.WriteMask
;
859 const boolean indirect
= reg
->Register
.Indirect
;
860 const unsigned tempArrayId
= get_temp_array_id(emit
, file
, index
);
861 const boolean index2d
= reg
->Register
.Dimension
;
862 VGPU10OperandToken0 operand0
;
864 if (file
== TGSI_FILE_OUTPUT
) {
865 if (emit
->unit
== PIPE_SHADER_VERTEX
||
866 emit
->unit
== PIPE_SHADER_GEOMETRY
) {
867 if (index
== emit
->vposition
.out_index
&&
868 emit
->vposition
.tmp_index
!= INVALID_INDEX
) {
869 /* replace OUTPUT[POS] with TEMP[POS]. We need to store the
870 * vertex position result in a temporary so that we can modify
871 * it in the post_helper() code.
873 file
= TGSI_FILE_TEMPORARY
;
874 index
= emit
->vposition
.tmp_index
;
876 else if (sem_name
== TGSI_SEMANTIC_CLIPDIST
&&
877 emit
->clip_dist_tmp_index
!= INVALID_INDEX
) {
878 /* replace OUTPUT[CLIPDIST] with TEMP[CLIPDIST].
879 * We store the clip distance in a temporary first, then
880 * we'll copy it to the shadow copy and to CLIPDIST with the
881 * enabled planes mask in emit_clip_distance_instructions().
883 file
= TGSI_FILE_TEMPORARY
;
884 index
= emit
->clip_dist_tmp_index
+ sem_index
;
886 else if (sem_name
== TGSI_SEMANTIC_CLIPVERTEX
&&
887 emit
->clip_vertex_tmp_index
!= INVALID_INDEX
) {
888 /* replace the CLIPVERTEX output register with a temporary */
889 assert(emit
->clip_mode
== CLIP_VERTEX
);
890 assert(sem_index
== 0);
891 file
= TGSI_FILE_TEMPORARY
;
892 index
= emit
->clip_vertex_tmp_index
;
895 else if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
896 if (sem_name
== TGSI_SEMANTIC_POSITION
) {
897 /* Fragment depth output register */
899 operand0
.operandType
= VGPU10_OPERAND_TYPE_OUTPUT_DEPTH
;
900 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_0D
;
901 operand0
.numComponents
= VGPU10_OPERAND_1_COMPONENT
;
902 emit_dword(emit
, operand0
.value
);
905 else if (index
== emit
->fs
.color_out_index
[0] &&
906 emit
->fs
.color_tmp_index
!= INVALID_INDEX
) {
907 /* replace OUTPUT[COLOR] with TEMP[COLOR]. We need to store the
908 * fragment color result in a temporary so that we can read it
909 * it in the post_helper() code.
911 file
= TGSI_FILE_TEMPORARY
;
912 index
= emit
->fs
.color_tmp_index
;
915 /* Typically, for fragment shaders, the output register index
916 * matches the color semantic index. But not when we write to
917 * the fragment depth register. In that case, OUT[0] will be
918 * fragdepth and OUT[1] will be the 0th color output. We need
919 * to use the semantic index for color outputs.
921 assert(sem_name
== TGSI_SEMANTIC_COLOR
);
922 index
= emit
->info
.output_semantic_index
[index
];
924 emit
->num_output_writes
++;
929 /* init operand tokens to all zero */
932 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
934 /* the operand has a writemask */
935 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_MASK_MODE
;
937 /* Which of the four dest components to write to. Note that we can use a
938 * simple assignment here since TGSI writemasks match VGPU10 writemasks.
940 STATIC_ASSERT(TGSI_WRITEMASK_X
== VGPU10_OPERAND_4_COMPONENT_MASK_X
);
941 operand0
.mask
= writemask
;
943 /* translate TGSI register file type to VGPU10 operand type */
944 operand0
.operandType
= translate_register_file(file
, tempArrayId
> 0);
946 check_register_index(emit
, operand0
.operandType
, index
);
948 operand0
= setup_operand0_indexing(emit
, operand0
, file
, indirect
,
949 index2d
, tempArrayId
);
952 emit_dword(emit
, operand0
.value
);
953 if (tempArrayId
> 0) {
954 emit_dword(emit
, tempArrayId
);
957 emit_dword(emit
, remap_temp_index(emit
, file
, index
));
960 emit_indirect_register(emit
, reg
->Indirect
.Index
);
966 * Translate a src register of a TGSI instruction and emit VGPU10 tokens.
969 emit_src_register(struct svga_shader_emitter_v10
*emit
,
970 const struct tgsi_full_src_register
*reg
)
972 enum tgsi_file_type file
= reg
->Register
.File
;
973 unsigned index
= reg
->Register
.Index
;
974 const boolean indirect
= reg
->Register
.Indirect
;
975 const unsigned tempArrayId
= get_temp_array_id(emit
, file
, index
);
976 const boolean index2d
= reg
->Register
.Dimension
;
977 const unsigned swizzleX
= reg
->Register
.SwizzleX
;
978 const unsigned swizzleY
= reg
->Register
.SwizzleY
;
979 const unsigned swizzleZ
= reg
->Register
.SwizzleZ
;
980 const unsigned swizzleW
= reg
->Register
.SwizzleW
;
981 const boolean absolute
= reg
->Register
.Absolute
;
982 const boolean negate
= reg
->Register
.Negate
;
983 bool is_prim_id
= FALSE
;
985 VGPU10OperandToken0 operand0
;
986 VGPU10OperandToken1 operand1
;
988 if (emit
->unit
== PIPE_SHADER_FRAGMENT
&&
989 file
== TGSI_FILE_INPUT
) {
990 if (index
== emit
->fs
.face_input_index
) {
991 /* Replace INPUT[FACE] with TEMP[FACE] */
992 file
= TGSI_FILE_TEMPORARY
;
993 index
= emit
->fs
.face_tmp_index
;
995 else if (index
== emit
->fs
.fragcoord_input_index
) {
996 /* Replace INPUT[POSITION] with TEMP[POSITION] */
997 file
= TGSI_FILE_TEMPORARY
;
998 index
= emit
->fs
.fragcoord_tmp_index
;
1001 /* We remap fragment shader inputs to that FS input indexes
1002 * match up with VS/GS output indexes.
1004 index
= emit
->linkage
.input_map
[index
];
1007 else if (emit
->unit
== PIPE_SHADER_GEOMETRY
&&
1008 file
== TGSI_FILE_INPUT
) {
1009 is_prim_id
= (index
== emit
->gs
.prim_id_index
);
1010 index
= emit
->linkage
.input_map
[index
];
1012 else if (emit
->unit
== PIPE_SHADER_VERTEX
) {
1013 if (file
== TGSI_FILE_INPUT
) {
1014 /* if input is adjusted... */
1015 if ((emit
->key
.vs
.adjust_attrib_w_1
|
1016 emit
->key
.vs
.adjust_attrib_itof
|
1017 emit
->key
.vs
.adjust_attrib_utof
|
1018 emit
->key
.vs
.attrib_is_bgra
|
1019 emit
->key
.vs
.attrib_puint_to_snorm
|
1020 emit
->key
.vs
.attrib_puint_to_uscaled
|
1021 emit
->key
.vs
.attrib_puint_to_sscaled
) & (1 << index
)) {
1022 file
= TGSI_FILE_TEMPORARY
;
1023 index
= emit
->vs
.adjusted_input
[index
];
1026 else if (file
== TGSI_FILE_SYSTEM_VALUE
) {
1027 assert(index
< ARRAY_SIZE(emit
->system_value_indexes
));
1028 index
= emit
->system_value_indexes
[index
];
1032 operand0
.value
= operand1
.value
= 0;
1035 /* NOTE: we should be using VGPU10_OPERAND_1_COMPONENT here, but
1036 * our virtual GPU accepts this as-is.
1038 operand0
.numComponents
= VGPU10_OPERAND_0_COMPONENT
;
1039 operand0
.operandType
= VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
;
1042 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
1043 operand0
.operandType
= translate_register_file(file
, tempArrayId
> 0);
1046 operand0
= setup_operand0_indexing(emit
, operand0
, file
, indirect
,
1047 index2d
, tempArrayId
);
1049 if (operand0
.operandType
!= VGPU10_OPERAND_TYPE_IMMEDIATE32
&&
1050 operand0
.operandType
!= VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
) {
1051 /* there's no swizzle for in-line immediates */
1052 if (swizzleX
== swizzleY
&&
1053 swizzleX
== swizzleZ
&&
1054 swizzleX
== swizzleW
) {
1055 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE
;
1058 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE
;
1061 operand0
.swizzleX
= swizzleX
;
1062 operand0
.swizzleY
= swizzleY
;
1063 operand0
.swizzleZ
= swizzleZ
;
1064 operand0
.swizzleW
= swizzleW
;
1066 if (absolute
|| negate
) {
1067 operand0
.extended
= 1;
1068 operand1
.extendedOperandType
= VGPU10_EXTENDED_OPERAND_MODIFIER
;
1069 if (absolute
&& !negate
)
1070 operand1
.operandModifier
= VGPU10_OPERAND_MODIFIER_ABS
;
1071 if (!absolute
&& negate
)
1072 operand1
.operandModifier
= VGPU10_OPERAND_MODIFIER_NEG
;
1073 if (absolute
&& negate
)
1074 operand1
.operandModifier
= VGPU10_OPERAND_MODIFIER_ABSNEG
;
1078 /* Emit the operand tokens */
1079 emit_dword(emit
, operand0
.value
);
1080 if (operand0
.extended
)
1081 emit_dword(emit
, operand1
.value
);
1083 if (operand0
.operandType
== VGPU10_OPERAND_TYPE_IMMEDIATE32
) {
1084 /* Emit the four float/int in-line immediate values */
1086 assert(index
< ARRAY_SIZE(emit
->immediates
));
1087 assert(file
== TGSI_FILE_IMMEDIATE
);
1088 assert(swizzleX
< 4);
1089 assert(swizzleY
< 4);
1090 assert(swizzleZ
< 4);
1091 assert(swizzleW
< 4);
1092 c
= (unsigned *) emit
->immediates
[index
];
1093 emit_dword(emit
, c
[swizzleX
]);
1094 emit_dword(emit
, c
[swizzleY
]);
1095 emit_dword(emit
, c
[swizzleZ
]);
1096 emit_dword(emit
, c
[swizzleW
]);
1098 else if (operand0
.indexDimension
>= VGPU10_OPERAND_INDEX_1D
) {
1099 /* Emit the register index(es) */
1101 operand0
.operandType
== VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
) {
1102 emit_dword(emit
, reg
->Dimension
.Index
);
1105 if (tempArrayId
> 0) {
1106 emit_dword(emit
, tempArrayId
);
1109 emit_dword(emit
, remap_temp_index(emit
, file
, index
));
1112 emit_indirect_register(emit
, reg
->Indirect
.Index
);
/**
 * Emit a resource operand (for use with a SAMPLE instruction).
 * Emits two dwords: the operand header and the resource register number.
 * \param resource_number  the resource register index
 */
static void
emit_resource_register(struct svga_shader_emitter_v10 *emit,
                       unsigned resource_number)
{
   VGPU10OperandToken0 operand0;

   check_register_index(emit, VGPU10_OPERAND_TYPE_RESOURCE, resource_number);

   /* init all operand token fields to zero */
   operand0.value = 0;

   /* A resource operand is 1D-indexed with an identity XYZW swizzle */
   operand0.operandType = VGPU10_OPERAND_TYPE_RESOURCE;
   operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
   operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
   operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
   operand0.swizzleX = VGPU10_COMPONENT_X;
   operand0.swizzleY = VGPU10_COMPONENT_Y;
   operand0.swizzleZ = VGPU10_COMPONENT_Z;
   operand0.swizzleW = VGPU10_COMPONENT_W;

   emit_dword(emit, operand0.value);
   emit_dword(emit, resource_number);
}
/**
 * Emit a sampler operand (for use with a SAMPLE instruction).
 * Emits two dwords: the operand header and the sampler register number.
 * \param sampler_number  the sampler register index
 */
static void
emit_sampler_register(struct svga_shader_emitter_v10 *emit,
                      unsigned sampler_number)
{
   VGPU10OperandToken0 operand0;

   check_register_index(emit, VGPU10_OPERAND_TYPE_SAMPLER, sampler_number);

   /* init all operand token fields to zero */
   operand0.value = 0;

   operand0.operandType = VGPU10_OPERAND_TYPE_SAMPLER;
   operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;

   emit_dword(emit, operand0.value);
   emit_dword(emit, sampler_number);
}
/**
 * Emit an operand which reads the IS_FRONT_FACING register.
 * The fragment shader input register that holds the front-facing value is
 * looked up through the linkage input map.  All four swizzle terms select
 * X so the scalar value is replicated across components.
 */
static void
emit_face_register(struct svga_shader_emitter_v10 *emit)
{
   VGPU10OperandToken0 operand0;
   unsigned index = emit->linkage.input_map[emit->fs.face_input_index];

   /* init all operand token fields to zero */
   operand0.value = 0;

   operand0.operandType = VGPU10_OPERAND_TYPE_INPUT;
   operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
   operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE;
   operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;

   /* .xxxx — broadcast the X component */
   operand0.swizzleX = VGPU10_COMPONENT_X;
   operand0.swizzleY = VGPU10_COMPONENT_X;
   operand0.swizzleZ = VGPU10_COMPONENT_X;
   operand0.swizzleW = VGPU10_COMPONENT_X;

   emit_dword(emit, operand0.value);
   emit_dword(emit, index);
}
/**
 * Emit the token for a VGPU10 opcode.
 * \param vgpu10_opcode  one of the VGPU10_OPCODE_x values
 * \param saturate  clamp result to [0,1]?
 */
static void
emit_opcode(struct svga_shader_emitter_v10 *emit,
            VGPU10_OPCODE_TYPE vgpu10_opcode, boolean saturate)
{
   VGPU10OpcodeToken0 token0;

   token0.value = 0;            /* init all fields to zero */
   token0.opcodeType = vgpu10_opcode;
   token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
   token0.saturate = saturate;

   emit_dword(emit, token0.value);
}
/**
 * Emit the token for a VGPU10 resinfo instruction.
 * \param modifier   return type modifier, _uint or _rcpFloat.
 *                   TODO: We may want to remove this parameter if it will
 *                   only ever be used as _uint.
 */
static void
emit_opcode_resinfo(struct svga_shader_emitter_v10 *emit,
                    VGPU10_RESINFO_RETURN_TYPE modifier)
{
   VGPU10OpcodeToken0 token0;

   token0.value = 0;            /* init all fields to zero */
   token0.opcodeType = VGPU10_OPCODE_RESINFO;
   token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
   token0.resinfoReturnType = modifier;

   emit_dword(emit, token0.value);
}
/**
 * Emit opcode tokens for a texture sample instruction.  Texture instructions
 * can be rather complicated (texel offsets, etc) so we have this specialized
 * function.
 * \param vgpu10_opcode  one of the VGPU10_OPCODE_SAMPLE_x opcodes
 * \param saturate  clamp result to [0,1]?
 * \param offsets  the x/y/z texel offsets; if any is non-zero an extended
 *                 opcode token with the sample controls is emitted too
 */
static void
emit_sample_opcode(struct svga_shader_emitter_v10 *emit,
                   unsigned vgpu10_opcode, boolean saturate,
                   const int offsets[3])
{
   VGPU10OpcodeToken0 token0;
   VGPU10OpcodeToken1 token1;

   token0.value = 0;            /* init all fields to zero */
   token0.opcodeType = vgpu10_opcode;
   token0.instructionLength = 0; /* Filled in by end_emit_instruction() */
   token0.saturate = saturate;

   if (offsets[0] || offsets[1] || offsets[2]) {
      /* Offsets must fit in the hardware's signed 4-bit range */
      assert(offsets[0] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
      assert(offsets[1] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
      assert(offsets[2] >= VGPU10_MIN_TEXEL_FETCH_OFFSET);
      assert(offsets[0] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
      assert(offsets[1] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);
      assert(offsets[2] <= VGPU10_MAX_TEXEL_FETCH_OFFSET);

      token0.extended = 1;

      /* init extended token with the texel offsets */
      token1.value = 0;
      token1.opcodeType = VGPU10_EXTENDED_OPCODE_SAMPLE_CONTROLS;
      token1.offsetU = offsets[0];
      token1.offsetV = offsets[1];
      token1.offsetW = offsets[2];
   }

   emit_dword(emit, token0.value);
   if (token0.extended) {
      emit_dword(emit, token1.value);
   }
}
/**
 * Emit a DISCARD opcode token.
 * If nonzero is set, we'll discard the fragment if the X component is not 0.
 * Otherwise, we'll discard the fragment if the X component is 0.
 * NOTE(review): the conditional setting of testBoolean was reconstructed
 * from the mangled source — confirm against the upstream file.
 */
static void
emit_discard_opcode(struct svga_shader_emitter_v10 *emit, boolean nonzero)
{
   VGPU10OpcodeToken0 opcode0;

   opcode0.value = 0;
   opcode0.opcodeType = VGPU10_OPCODE_DISCARD;
   if (nonzero)
      opcode0.testBoolean = VGPU10_INSTRUCTION_TEST_NONZERO;

   emit_dword(emit, opcode0.value);
}
/**
 * We need to call this before we begin emitting a VGPU10 instruction.
 * Records where the instruction starts so end_emit_instruction() can
 * patch its length afterwards.
 */
static void
begin_emit_instruction(struct svga_shader_emitter_v10 *emit)
{
   assert(emit->inst_start_token == 0);
   /* Save location of the instruction's VGPU10OpcodeToken0 token.
    * Note, we can't save a pointer because it would become invalid if
    * we have to realloc the output buffer.
    */
   emit->inst_start_token = emit_get_num_tokens(emit);
}
/**
 * We need to call this after we emit the last token of a VGPU10 instruction.
 * This function patches in the opcode token's instructionLength field.
 * If emit->discard_instruction was set, the whole instruction is dropped
 * instead by rewinding the output pointer.
 */
static void
end_emit_instruction(struct svga_shader_emitter_v10 *emit)
{
   VGPU10OpcodeToken0 *tokens = (VGPU10OpcodeToken0 *) emit->buf;
   unsigned inst_length;

   assert(emit->inst_start_token > 0);

   if (emit->discard_instruction) {
      /* Back up the emit->ptr to where this instruction started so
       * that we discard the current instruction.
       */
      emit->ptr = (char *) (tokens + emit->inst_start_token);
   }
   else {
      /* Compute instruction length and patch that into the start of
       * the instruction.
       */
      inst_length = emit_get_num_tokens(emit) - emit->inst_start_token;

      assert(inst_length > 0);

      tokens[emit->inst_start_token].instructionLength = inst_length;
   }

   emit->inst_start_token = 0; /* reset to zero for error checking */
   emit->discard_instruction = FALSE;
}
/**
 * Return index for a free temporary register.
 * Internal temps are allocated after the shader's own temporaries;
 * the counter is released in bulk by free_temp_indexes().
 */
static unsigned
get_temp_index(struct svga_shader_emitter_v10 *emit)
{
   assert(emit->internal_temp_count < MAX_INTERNAL_TEMPS);
   return emit->num_shader_temps + emit->internal_temp_count++;
}
/**
 * Release the temporaries which were generated by get_temp_index().
 */
static void
free_temp_indexes(struct svga_shader_emitter_v10 *emit)
{
   emit->internal_temp_count = 0;
}
/**
 * Create a tgsi_full_src_register.
 * \param file   the TGSI register file
 * \param index  the register index
 * \return a zero-initialized source register with an identity swizzle
 */
static struct tgsi_full_src_register
make_src_reg(enum tgsi_file_type file, unsigned index)
{
   struct tgsi_full_src_register reg;

   memset(&reg, 0, sizeof(reg));
   reg.Register.File = file;
   reg.Register.Index = index;
   reg.Register.SwizzleX = TGSI_SWIZZLE_X;
   reg.Register.SwizzleY = TGSI_SWIZZLE_Y;
   reg.Register.SwizzleZ = TGSI_SWIZZLE_Z;
   reg.Register.SwizzleW = TGSI_SWIZZLE_W;
   return reg;
}
/**
 * Create a tgsi_full_src_register for a temporary.
 */
static struct tgsi_full_src_register
make_src_temp_reg(unsigned index)
{
   return make_src_reg(TGSI_FILE_TEMPORARY, index);
}
/**
 * Create a tgsi_full_src_register for a constant.
 */
static struct tgsi_full_src_register
make_src_const_reg(unsigned index)
{
   return make_src_reg(TGSI_FILE_CONSTANT, index);
}
/**
 * Create a tgsi_full_src_register for an immediate constant.
 */
static struct tgsi_full_src_register
make_src_immediate_reg(unsigned index)
{
   return make_src_reg(TGSI_FILE_IMMEDIATE, index);
}
/**
 * Create a tgsi_full_dst_register.
 * \param file   the TGSI register file
 * \param index  the register index
 * \return a zero-initialized destination register writing all of XYZW
 */
static struct tgsi_full_dst_register
make_dst_reg(enum tgsi_file_type file, unsigned index)
{
   struct tgsi_full_dst_register reg;

   memset(&reg, 0, sizeof(reg));
   reg.Register.File = file;
   reg.Register.Index = index;
   reg.Register.WriteMask = TGSI_WRITEMASK_XYZW;
   return reg;
}
/**
 * Create a tgsi_full_dst_register for a temporary.
 */
static struct tgsi_full_dst_register
make_dst_temp_reg(unsigned index)
{
   return make_dst_reg(TGSI_FILE_TEMPORARY, index);
}
/**
 * Create a tgsi_full_dst_register for an output.
 */
static struct tgsi_full_dst_register
make_dst_output_reg(unsigned index)
{
   return make_dst_reg(TGSI_FILE_OUTPUT, index);
}
/**
 * Create negated tgsi_full_src_register.
 * Toggles (rather than sets) the Negate bit so double negation cancels.
 */
static struct tgsi_full_src_register
negate_src(const struct tgsi_full_src_register *reg)
{
   struct tgsi_full_src_register neg = *reg;
   neg.Register.Negate = !reg->Register.Negate;
   return neg;
}
/**
 * Create absolute value of a tgsi_full_src_register.
 */
static struct tgsi_full_src_register
absolute_src(const struct tgsi_full_src_register *reg)
{
   struct tgsi_full_src_register absolute = *reg;
   absolute.Register.Absolute = 1;
   return absolute;
}
1473 /** Return the named swizzle term from the src register */
1474 static inline unsigned
1475 get_swizzle(const struct tgsi_full_src_register
*reg
, enum tgsi_swizzle term
)
1478 case TGSI_SWIZZLE_X
:
1479 return reg
->Register
.SwizzleX
;
1480 case TGSI_SWIZZLE_Y
:
1481 return reg
->Register
.SwizzleY
;
1482 case TGSI_SWIZZLE_Z
:
1483 return reg
->Register
.SwizzleZ
;
1484 case TGSI_SWIZZLE_W
:
1485 return reg
->Register
.SwizzleW
;
1487 assert(!"Bad swizzle");
1488 return TGSI_SWIZZLE_X
;
/**
 * Create swizzled tgsi_full_src_register.
 * The new swizzle is applied on top of (composed with) the register's
 * current swizzle.
 */
static struct tgsi_full_src_register
swizzle_src(const struct tgsi_full_src_register *reg,
            enum tgsi_swizzle swizzleX, enum tgsi_swizzle swizzleY,
            enum tgsi_swizzle swizzleZ, enum tgsi_swizzle swizzleW)
{
   struct tgsi_full_src_register swizzled = *reg;
   /* Note: we swizzle the current swizzle */
   swizzled.Register.SwizzleX = get_swizzle(reg, swizzleX);
   swizzled.Register.SwizzleY = get_swizzle(reg, swizzleY);
   swizzled.Register.SwizzleZ = get_swizzle(reg, swizzleZ);
   swizzled.Register.SwizzleW = get_swizzle(reg, swizzleW);
   return swizzled;
}
/**
 * Create swizzled tgsi_full_src_register where all the swizzle
 * terms are the same (e.g. .xxxx or .yyyy).
 */
static struct tgsi_full_src_register
scalar_src(const struct tgsi_full_src_register *reg, enum tgsi_swizzle swizzle)
{
   struct tgsi_full_src_register swizzled = *reg;
   /* Note: we swizzle the current swizzle */
   swizzled.Register.SwizzleX =
   swizzled.Register.SwizzleY =
   swizzled.Register.SwizzleZ =
   swizzled.Register.SwizzleW = get_swizzle(reg, swizzle);
   return swizzled;
}
/**
 * Create new tgsi_full_dst_register with writemask.
 * \param mask  bitmask of TGSI_WRITEMASK_[XYZW]
 */
static struct tgsi_full_dst_register
writemask_dst(const struct tgsi_full_dst_register *reg, unsigned mask)
{
   struct tgsi_full_dst_register masked = *reg;
   masked.Register.WriteMask = mask;
   return masked;
}
1542 * Check if the register's swizzle is XXXX, YYYY, ZZZZ, or WWWW.
1545 same_swizzle_terms(const struct tgsi_full_src_register
*reg
)
1547 return (reg
->Register
.SwizzleX
== reg
->Register
.SwizzleY
&&
1548 reg
->Register
.SwizzleY
== reg
->Register
.SwizzleZ
&&
1549 reg
->Register
.SwizzleZ
== reg
->Register
.SwizzleW
);
/**
 * Search the vector for the value 'x' and return its position.
 * \return component index 0..3, or -1 if not found
 */
static int
find_imm_in_vec4(const union tgsi_immediate_data vec[4],
                 union tgsi_immediate_data x)
{
   unsigned i;
   /* compare bit patterns via .Int so float NaN/-0.0 compare exactly */
   for (i = 0; i < 4; i++) {
      if (vec[i].Int == x.Int)
         return i;
   }
   return -1;
}
/**
 * Helper used by make_immediate_reg(), make_immediate_reg_4().
 * Searches the pre-declared immediates for one whose vector contains x.
 * \param startIndex  first immediate slot to search from
 * \return index of the matching immediate, or -1 (after asserting)
 */
static int
find_immediate(struct svga_shader_emitter_v10 *emit,
               union tgsi_immediate_data x, unsigned startIndex)
{
   const unsigned endIndex = emit->num_immediates;
   unsigned i;

   assert(emit->immediates_emitted);

   /* Search immediates for x, y, z, w */
   for (i = startIndex; i < endIndex; i++) {
      if (x.Int == emit->immediates[i][0].Int ||
          x.Int == emit->immediates[i][1].Int ||
          x.Int == emit->immediates[i][2].Int ||
          x.Int == emit->immediates[i][3].Int) {
         return i;
      }
   }

   /* Should never try to use an immediate value that wasn't pre-declared */
   assert(!"find_immediate() failed!");
   return -1;
}
/**
 * Return a tgsi_full_src_register for an immediate/literal
 * union tgsi_immediate_data[4] value.
 * Note: the values must have been previously declared/allocated in
 * emit_pre_helpers().  And, all of x,y,z,w must be located in the same
 * immediate vector; the swizzle selects the components.
 */
static struct tgsi_full_src_register
make_immediate_reg_4(struct svga_shader_emitter_v10 *emit,
                     const union tgsi_immediate_data imm[4])
{
   struct tgsi_full_src_register reg;
   unsigned i;

   for (i = 0; i < emit->num_common_immediates; i++) {
      /* search for first component value */
      int immpos = find_immediate(emit, imm[0], i);
      int x, y, z, w;

      assert(immpos >= 0);

      /* find remaining components within the immediate vector */
      x = find_imm_in_vec4(emit->immediates[immpos], imm[0]);
      y = find_imm_in_vec4(emit->immediates[immpos], imm[1]);
      z = find_imm_in_vec4(emit->immediates[immpos], imm[2]);
      w = find_imm_in_vec4(emit->immediates[immpos], imm[3]);

      if (x >=0 && y >= 0 && z >= 0 && w >= 0) {
         /* found them all — build the swizzled immediate reference */
         memset(&reg, 0, sizeof(reg));
         reg.Register.File = TGSI_FILE_IMMEDIATE;
         reg.Register.Index = immpos;
         reg.Register.SwizzleX = x;
         reg.Register.SwizzleY = y;
         reg.Register.SwizzleZ = z;
         reg.Register.SwizzleW = w;
         return reg;
      }
      /* else, keep searching */
   }

   assert(!"Failed to find immediate register!");

   /* Just return IMM[0].xxxx */
   memset(&reg, 0, sizeof(reg));
   reg.Register.File = TGSI_FILE_IMMEDIATE;
   return reg;
}
/**
 * Return a tgsi_full_src_register for an immediate/literal
 * union tgsi_immediate_data value of the form {value, value, value, value}.
 * \sa make_immediate_reg_4() regarding allowed values.
 */
static struct tgsi_full_src_register
make_immediate_reg(struct svga_shader_emitter_v10 *emit,
                   union tgsi_immediate_data value)
{
   struct tgsi_full_src_register reg;
   int immpos = find_immediate(emit, value, 0);

   assert(immpos >= 0);

   memset(&reg, 0, sizeof(reg));
   reg.Register.File = TGSI_FILE_IMMEDIATE;
   reg.Register.Index = immpos;
   /* broadcast the matching component across all four swizzle terms */
   reg.Register.SwizzleX =
   reg.Register.SwizzleY =
   reg.Register.SwizzleZ =
   reg.Register.SwizzleW = find_imm_in_vec4(emit->immediates[immpos], value);

   return reg;
}
/**
 * Return a tgsi_full_src_register for an immediate/literal float[4] value.
 * \sa make_immediate_reg_4() regarding allowed values.
 * NOTE(review): the element assignments were lost in the mangled source and
 * reconstructed — confirm against upstream.
 */
static struct tgsi_full_src_register
make_immediate_reg_float4(struct svga_shader_emitter_v10 *emit,
                          float x, float y, float z, float w)
{
   union tgsi_immediate_data imm[4];
   imm[0].Float = x;
   imm[1].Float = y;
   imm[2].Float = z;
   imm[3].Float = w;
   return make_immediate_reg_4(emit, imm);
}
/**
 * Return a tgsi_full_src_register for an immediate/literal float value
 * of the form {value, value, value, value}.
 * \sa make_immediate_reg_4() regarding allowed values.
 * NOTE(review): the imm assignment was lost in the mangled source and
 * reconstructed — confirm against upstream.
 */
static struct tgsi_full_src_register
make_immediate_reg_float(struct svga_shader_emitter_v10 *emit, float value)
{
   union tgsi_immediate_data imm;
   imm.Float = value;
   return make_immediate_reg(emit, imm);
}
/**
 * Return a tgsi_full_src_register for an immediate/literal int[4] vector.
 * NOTE(review): the element assignments were lost in the mangled source and
 * reconstructed — confirm against upstream.
 */
static struct tgsi_full_src_register
make_immediate_reg_int4(struct svga_shader_emitter_v10 *emit,
                        int x, int y, int z, int w)
{
   union tgsi_immediate_data imm[4];
   imm[0].Int = x;
   imm[1].Int = y;
   imm[2].Int = z;
   imm[3].Int = w;
   return make_immediate_reg_4(emit, imm);
}
/**
 * Return a tgsi_full_src_register for an immediate/literal int value
 * of the form {value, value, value, value}.
 * \sa make_immediate_reg_4() regarding allowed values.
 * NOTE(review): the imm assignment was lost in the mangled source and
 * reconstructed — confirm against upstream.
 */
static struct tgsi_full_src_register
make_immediate_reg_int(struct svga_shader_emitter_v10 *emit, int value)
{
   union tgsi_immediate_data imm;
   imm.Int = value;
   return make_immediate_reg(emit, imm);
}
/**
 * Allocate space for a union tgsi_immediate_data[4] immediate.
 * Must be called before the immediates block is emitted
 * (see emit_vgpu10_immediates_block()).
 * \return the index/position of the immediate.
 */
static unsigned
alloc_immediate_4(struct svga_shader_emitter_v10 *emit,
                  const union tgsi_immediate_data imm[4])
{
   unsigned n = emit->num_immediates++;
   assert(!emit->immediates_emitted);
   assert(n < ARRAY_SIZE(emit->immediates));
   emit->immediates[n][0] = imm[0];
   emit->immediates[n][1] = imm[1];
   emit->immediates[n][2] = imm[2];
   emit->immediates[n][3] = imm[3];
   return n;
}
/**
 * Allocate space for a float[4] immediate.
 * \return the index/position of the immediate.
 * NOTE(review): the element assignments were lost in the mangled source and
 * reconstructed — confirm against upstream.
 */
static unsigned
alloc_immediate_float4(struct svga_shader_emitter_v10 *emit,
                       float x, float y, float z, float w)
{
   union tgsi_immediate_data imm[4];
   imm[0].Float = x;
   imm[1].Float = y;
   imm[2].Float = z;
   imm[3].Float = w;
   return alloc_immediate_4(emit, imm);
}
/**
 * Allocate space for an int[4] immediate.
 * \return the index/position of the immediate.
 * NOTE(review): the element assignments were lost in the mangled source and
 * reconstructed — confirm against upstream.
 */
static unsigned
alloc_immediate_int4(struct svga_shader_emitter_v10 *emit,
                     int x, int y, int z, int w)
{
   union tgsi_immediate_data imm[4];
   imm[0].Int = x;
   imm[1].Int = y;
   imm[2].Int = z;
   imm[3].Int = w;
   return alloc_immediate_4(emit, imm);
}
/**
 * Allocate a shader input to store a system value.
 * System-value inputs are placed after the highest regular input register.
 * \return the allocated input register index
 */
static unsigned
alloc_system_value_index(struct svga_shader_emitter_v10 *emit, unsigned index)
{
   const unsigned n = emit->info.file_max[TGSI_FILE_INPUT] + 1 + index;
   assert(index < ARRAY_SIZE(emit->system_value_indexes));
   emit->system_value_indexes[index] = n;
   return n;
}
/**
 * Translate a TGSI immediate value (union tgsi_immediate_data[4]) to VGPU10.
 * \return TRUE for success (the translation cannot fail here)
 */
static boolean
emit_vgpu10_immediate(struct svga_shader_emitter_v10 *emit,
                      const struct tgsi_full_immediate *imm)
{
   /* We don't actually emit any code here.  We just save the
    * immediate values and emit them later.
    */
   alloc_immediate_4(emit, imm->u);
   return TRUE;
}
/**
 * Emit a VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER block
 * containing all the immediate values previously allocated
 * with alloc_immediate_4().
 */
static void
emit_vgpu10_immediates_block(struct svga_shader_emitter_v10 *emit)
{
   VGPU10OpcodeToken0 token;

   assert(!emit->immediates_emitted);

   token.value = 0;
   token.opcodeType = VGPU10_OPCODE_CUSTOMDATA;
   token.customDataClass = VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER;

   /* Note: no begin/end_emit_instruction() calls */
   emit_dword(emit, token.value);
   /* total block length in dwords: 2 header tokens + 4 per immediate */
   emit_dword(emit, 2 + 4 * emit->num_immediates);
   emit_dwords(emit, (unsigned *) emit->immediates, 4 * emit->num_immediates);

   emit->immediates_emitted = TRUE;
}
/**
 * Translate a fragment shader's TGSI_INTERPOLATE_x mode to a vgpu10
 * interpolation mode.
 * TGSI_INTERPOLATE_COLOR is resolved first via the flatshade state.
 * \return a VGPU10_INTERPOLATION_x value
 */
static unsigned
translate_interpolation(const struct svga_shader_emitter_v10 *emit,
                        enum tgsi_interpolate_mode interp,
                        enum tgsi_interpolate_loc interpolate_loc)
{
   if (interp == TGSI_INTERPOLATE_COLOR) {
      interp = emit->key.fs.flatshade ?
         TGSI_INTERPOLATE_CONSTANT : TGSI_INTERPOLATE_PERSPECTIVE;
   }

   switch (interp) {
   case TGSI_INTERPOLATE_CONSTANT:
      return VGPU10_INTERPOLATION_CONSTANT;
   case TGSI_INTERPOLATE_LINEAR:
      return interpolate_loc == TGSI_INTERPOLATE_LOC_CENTROID ?
             VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID :
             VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE;
   case TGSI_INTERPOLATE_PERSPECTIVE:
      return interpolate_loc == TGSI_INTERPOLATE_LOC_CENTROID ?
             VGPU10_INTERPOLATION_LINEAR_CENTROID :
             VGPU10_INTERPOLATION_LINEAR;
   default:
      assert(!"Unexpected interpolation mode");
      return VGPU10_INTERPOLATION_CONSTANT;
   }
}
/**
 * Translate a TGSI property to VGPU10.
 * Don't emit any instructions yet, only need to gather the primitive property
 * information.  The output primitive topology might be changed later.  The
 * final property instructions will be emitted as part of the pre-helper code.
 * NOTE(review): break/default/return statements were lost in the mangled
 * source and reconstructed — confirm against upstream.
 */
static boolean
emit_vgpu10_property(struct svga_shader_emitter_v10 *emit,
                     const struct tgsi_full_property *prop)
{
   static const VGPU10_PRIMITIVE primType[] = {
      VGPU10_PRIMITIVE_POINT,           /* PIPE_PRIM_POINTS */
      VGPU10_PRIMITIVE_LINE,            /* PIPE_PRIM_LINES */
      VGPU10_PRIMITIVE_LINE,            /* PIPE_PRIM_LINE_LOOP */
      VGPU10_PRIMITIVE_LINE,            /* PIPE_PRIM_LINE_STRIP */
      VGPU10_PRIMITIVE_TRIANGLE,        /* PIPE_PRIM_TRIANGLES */
      VGPU10_PRIMITIVE_TRIANGLE,        /* PIPE_PRIM_TRIANGLE_STRIP */
      VGPU10_PRIMITIVE_TRIANGLE,        /* PIPE_PRIM_TRIANGLE_FAN */
      VGPU10_PRIMITIVE_UNDEFINED,       /* PIPE_PRIM_QUADS */
      VGPU10_PRIMITIVE_UNDEFINED,       /* PIPE_PRIM_QUAD_STRIP */
      VGPU10_PRIMITIVE_UNDEFINED,       /* PIPE_PRIM_POLYGON */
      VGPU10_PRIMITIVE_LINE_ADJ,        /* PIPE_PRIM_LINES_ADJACENCY */
      VGPU10_PRIMITIVE_LINE_ADJ,        /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
      VGPU10_PRIMITIVE_TRIANGLE_ADJ,    /* PIPE_PRIM_TRIANGLES_ADJACENCY */
      VGPU10_PRIMITIVE_TRIANGLE_ADJ     /* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
   };

   static const VGPU10_PRIMITIVE_TOPOLOGY primTopology[] = {
      VGPU10_PRIMITIVE_TOPOLOGY_POINTLIST,     /* PIPE_PRIM_POINTS */
      VGPU10_PRIMITIVE_TOPOLOGY_LINELIST,      /* PIPE_PRIM_LINES */
      VGPU10_PRIMITIVE_TOPOLOGY_LINELIST,      /* PIPE_PRIM_LINE_LOOP */
      VGPU10_PRIMITIVE_TOPOLOGY_LINESTRIP,     /* PIPE_PRIM_LINE_STRIP */
      VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST,  /* PIPE_PRIM_TRIANGLES */
      VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, /* PIPE_PRIM_TRIANGLE_STRIP */
      VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, /* PIPE_PRIM_TRIANGLE_FAN */
      VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED,     /* PIPE_PRIM_QUADS */
      VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED,     /* PIPE_PRIM_QUAD_STRIP */
      VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED,     /* PIPE_PRIM_POLYGON */
      VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ,  /* PIPE_PRIM_LINES_ADJACENCY */
      VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ,  /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
      VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
      VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ /* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
   };

   /* input array size indexed by VGPU10_PRIMITIVE_x; the two zero entries
    * cover unused enum slots between TRIANGLE and LINE_ADJ
    */
   static const unsigned inputArraySize[] = {
      0,       /* VGPU10_PRIMITIVE_UNDEFINED */
      1,       /* VGPU10_PRIMITIVE_POINT */
      2,       /* VGPU10_PRIMITIVE_LINE */
      3,       /* VGPU10_PRIMITIVE_TRIANGLE */
      0,
      0,
      4,       /* VGPU10_PRIMITIVE_LINE_ADJ */
      6        /* VGPU10_PRIMITIVE_TRIANGLE_ADJ */
   };

   switch (prop->Property.PropertyName) {
   case TGSI_PROPERTY_GS_INPUT_PRIM:
      assert(prop->u[0].Data < ARRAY_SIZE(primType));
      emit->gs.prim_type = primType[prop->u[0].Data];
      assert(emit->gs.prim_type != VGPU10_PRIMITIVE_UNDEFINED);
      emit->gs.input_size = inputArraySize[emit->gs.prim_type];
      break;

   case TGSI_PROPERTY_GS_OUTPUT_PRIM:
      assert(prop->u[0].Data < ARRAY_SIZE(primTopology));
      emit->gs.prim_topology = primTopology[prop->u[0].Data];
      assert(emit->gs.prim_topology != VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED);
      break;

   case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
      emit->gs.max_out_vertices = prop->u[0].Data;
      break;

   default:
      break;
   }

   return TRUE;
}
/* Emit one property declaration instruction, optionally followed by one
 * data dword.
 * NOTE(review): the final `unsigned data` parameter and the `if (nData)`
 * guard were lost in the mangled source and reconstructed — confirm
 * against upstream.
 */
static void
emit_property_instruction(struct svga_shader_emitter_v10 *emit,
                          VGPU10OpcodeToken0 opcode0, unsigned nData,
                          unsigned data)
{
   begin_emit_instruction(emit);
   emit_dword(emit, opcode0.value);
   if (nData)
      emit_dword(emit, data);
   end_emit_instruction(emit);
}
/**
 * Emit property instructions (geometry shaders only): input primitive type,
 * output primitive topology, and max output vertex count.
 */
static void
emit_property_instructions(struct svga_shader_emitter_v10 *emit)
{
   VGPU10OpcodeToken0 opcode0;

   assert(emit->unit == PIPE_SHADER_GEOMETRY);

   /* emit input primitive type declaration */
   opcode0.value = 0;
   opcode0.opcodeType = VGPU10_OPCODE_DCL_GS_INPUT_PRIMITIVE;
   opcode0.primitive = emit->gs.prim_type;
   emit_property_instruction(emit, opcode0, 0, 0);

   /* emit output primitive topology declaration */
   opcode0.value = 0;
   opcode0.opcodeType = VGPU10_OPCODE_DCL_GS_OUTPUT_PRIMITIVE_TOPOLOGY;
   opcode0.primitiveTopology = emit->gs.prim_topology;
   emit_property_instruction(emit, opcode0, 0, 0);

   /* emit max output vertices */
   opcode0.value = 0;
   opcode0.opcodeType = VGPU10_OPCODE_DCL_MAX_OUTPUT_VERTEX_COUNT;
   emit_property_instruction(emit, opcode0, 1, emit->gs.max_out_vertices);
}
/**
 * Emit a vgpu10 declaration "instruction".
 * \param index the register index
 * \param size array size of the operand. In most cases, it is 1,
 *             but for inputs to geometry shader, the array size varies
 *             depending on the primitive type.
 */
static void
emit_decl_instruction(struct svga_shader_emitter_v10 *emit,
                      VGPU10OpcodeToken0 opcode0,
                      VGPU10OperandToken0 operand0,
                      VGPU10NameToken name_token,
                      unsigned index, unsigned size)
{
   assert(opcode0.opcodeType);
   assert(operand0.mask);

   begin_emit_instruction(emit);
   emit_dword(emit, opcode0.value);

   emit_dword(emit, operand0.value);

   if (operand0.indexDimension == VGPU10_OPERAND_INDEX_1D) {
      /* Next token is the index of the register to declare */
      emit_dword(emit, index);
   }
   else if (operand0.indexDimension >= VGPU10_OPERAND_INDEX_2D) {
      /* Next token is the size of the register */
      emit_dword(emit, size);

      /* Followed by the index of the register */
      emit_dword(emit, index);
   }

   if (name_token.value) {
      emit_dword(emit, name_token.value);
   }

   end_emit_instruction(emit);
}
/**
 * Emit the declaration for a shader input.
 * \param opcodeType  opcode type, one of VGPU10_OPCODE_DCL_INPUTx
 * \param operandType operand type, one of VGPU10_OPERAND_TYPE_INPUT_x
 * \param dim         index dimension
 * \param index       the input register index
 * \param size        array size of the operand. In most cases, it is 1,
 *                    but for inputs to geometry shader, the array size varies
 *                    depending on the primitive type.
 * \param name        one of VGPU10_NAME_x
 * \param numComp     number of components
 * \param selMode     component selection mode
 * \param usageMask   bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
 * \param interpMode  interpolation mode
 * NOTE(review): the `unsigned usageMask` parameter line was lost in the
 * mangled source and reconstructed — confirm against upstream.
 */
static void
emit_input_declaration(struct svga_shader_emitter_v10 *emit,
                       VGPU10_OPCODE_TYPE opcodeType,
                       VGPU10_OPERAND_TYPE operandType,
                       VGPU10_OPERAND_INDEX_DIMENSION dim,
                       unsigned index, unsigned size,
                       VGPU10_SYSTEM_NAME name,
                       VGPU10_OPERAND_NUM_COMPONENTS numComp,
                       VGPU10_OPERAND_4_COMPONENT_SELECTION_MODE selMode,
                       unsigned usageMask,
                       VGPU10_INTERPOLATION_MODE interpMode)
{
   VGPU10OpcodeToken0 opcode0;
   VGPU10OperandToken0 operand0;
   VGPU10NameToken name_token;

   /* sanity-check all the enum-valued parameters */
   assert(usageMask <= VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
   assert(opcodeType == VGPU10_OPCODE_DCL_INPUT ||
          opcodeType == VGPU10_OPCODE_DCL_INPUT_SIV ||
          opcodeType == VGPU10_OPCODE_DCL_INPUT_PS ||
          opcodeType == VGPU10_OPCODE_DCL_INPUT_PS_SGV);
   assert(operandType == VGPU10_OPERAND_TYPE_INPUT ||
          operandType == VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID);
   assert(numComp <= VGPU10_OPERAND_4_COMPONENT);
   assert(selMode <= VGPU10_OPERAND_4_COMPONENT_MASK_MODE);
   assert(dim <= VGPU10_OPERAND_INDEX_3D);
   assert(name == VGPU10_NAME_UNDEFINED ||
          name == VGPU10_NAME_POSITION ||
          name == VGPU10_NAME_INSTANCE_ID ||
          name == VGPU10_NAME_VERTEX_ID ||
          name == VGPU10_NAME_PRIMITIVE_ID ||
          name == VGPU10_NAME_IS_FRONT_FACE);
   assert(interpMode == VGPU10_INTERPOLATION_UNDEFINED ||
          interpMode == VGPU10_INTERPOLATION_CONSTANT ||
          interpMode == VGPU10_INTERPOLATION_LINEAR ||
          interpMode == VGPU10_INTERPOLATION_LINEAR_CENTROID ||
          interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE ||
          interpMode == VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID);

   check_register_index(emit, opcodeType, index);

   opcode0.value = operand0.value = name_token.value = 0;

   opcode0.opcodeType = opcodeType;
   opcode0.interpolationMode = interpMode;

   operand0.operandType = operandType;
   operand0.numComponents = numComp;
   operand0.selectionMode = selMode;
   operand0.mask = usageMask;
   operand0.indexDimension = dim;
   operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
   if (dim == VGPU10_OPERAND_INDEX_2D)
      operand0.index1Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;

   name_token.name = name;

   emit_decl_instruction(emit, opcode0, operand0, name_token, index, size);
}
/**
 * Emit the declaration for a shader output.
 * \param type  one of VGPU10_OPCODE_DCL_OUTPUTx
 * \param index the output register index
 * \param name  one of VGPU10_NAME_x
 * \param usageMask  bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
 * NOTE(review): the `unsigned usageMask` parameter line was lost in the
 * mangled source and reconstructed — confirm against upstream.
 */
static void
emit_output_declaration(struct svga_shader_emitter_v10 *emit,
                        VGPU10_OPCODE_TYPE type, unsigned index,
                        VGPU10_SYSTEM_NAME name,
                        unsigned usageMask)
{
   VGPU10OpcodeToken0 opcode0;
   VGPU10OperandToken0 operand0;
   VGPU10NameToken name_token;

   assert(usageMask <= VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
   assert(type == VGPU10_OPCODE_DCL_OUTPUT ||
          type == VGPU10_OPCODE_DCL_OUTPUT_SGV ||
          type == VGPU10_OPCODE_DCL_OUTPUT_SIV);
   assert(name == VGPU10_NAME_UNDEFINED ||
          name == VGPU10_NAME_POSITION ||
          name == VGPU10_NAME_PRIMITIVE_ID ||
          name == VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX ||
          name == VGPU10_NAME_CLIP_DISTANCE);

   check_register_index(emit, type, index);

   opcode0.value = operand0.value = name_token.value = 0;

   opcode0.opcodeType = type;
   operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT;
   operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
   operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
   operand0.mask = usageMask;
   operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
   operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;

   name_token.name = name;

   emit_decl_instruction(emit, opcode0, operand0, name_token, index, 1);
}
/**
 * Emit the declaration for the fragment depth output.
 * The depth output is a single-component, 0-dimensional operand.
 */
static void
emit_fragdepth_output_declaration(struct svga_shader_emitter_v10 *emit)
{
   VGPU10OpcodeToken0 opcode0;
   VGPU10OperandToken0 operand0;
   VGPU10NameToken name_token;

   assert(emit->unit == PIPE_SHADER_FRAGMENT);

   opcode0.value = operand0.value = name_token.value = 0;

   opcode0.opcodeType = VGPU10_OPCODE_DCL_OUTPUT;
   operand0.operandType = VGPU10_OPERAND_TYPE_OUTPUT_DEPTH;
   operand0.numComponents = VGPU10_OPERAND_1_COMPONENT;
   operand0.indexDimension = VGPU10_OPERAND_INDEX_0D;
   operand0.mask = VGPU10_OPERAND_4_COMPONENT_MASK_ALL;

   emit_decl_instruction(emit, opcode0, operand0, name_token, 0, 1);
}
/**
 * Emit the declaration for a system value input/output.
 * Currently handles instance ID and vertex ID; other semantics fall through.
 * NOTE(review): break statements, the `index, 1,` argument lines and the
 * default branch were lost in the mangled source and reconstructed —
 * confirm against upstream.
 */
static void
emit_system_value_declaration(struct svga_shader_emitter_v10 *emit,
                              enum tgsi_semantic semantic_name, unsigned index)
{
   switch (semantic_name) {
   case TGSI_SEMANTIC_INSTANCEID:
      index = alloc_system_value_index(emit, index);
      emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_SIV,
                             VGPU10_OPERAND_TYPE_INPUT,
                             VGPU10_OPERAND_INDEX_1D,
                             index, 1,
                             VGPU10_NAME_INSTANCE_ID,
                             VGPU10_OPERAND_4_COMPONENT,
                             VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
                             VGPU10_OPERAND_4_COMPONENT_MASK_X,
                             VGPU10_INTERPOLATION_UNDEFINED);
      break;
   case TGSI_SEMANTIC_VERTEXID:
      index = alloc_system_value_index(emit, index);
      emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT_SIV,
                             VGPU10_OPERAND_TYPE_INPUT,
                             VGPU10_OPERAND_INDEX_1D,
                             index, 1,
                             VGPU10_NAME_VERTEX_ID,
                             VGPU10_OPERAND_4_COMPONENT,
                             VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
                             VGPU10_OPERAND_4_COMPONENT_MASK_X,
                             VGPU10_INTERPOLATION_UNDEFINED);
      break;
   default:
      /* unhandled system value semantics are silently ignored */
      break;
   }
}
/**
 * Translate a TGSI declaration to VGPU10.
 * Most register files are not declared immediately; instead we record
 * counts/attributes in 'emit' and emit the actual declarations later
 * (see emit_input_declarations(), emit_temporaries_declaration(), etc).
 * Returns TRUE on success, FALSE for unsupported declaration kinds.
 */
static boolean
emit_vgpu10_declaration(struct svga_shader_emitter_v10 *emit,
                        const struct tgsi_full_declaration *decl)
{
   switch (decl->Declaration.File) {
   case TGSI_FILE_INPUT:
      /* do nothing - see emit_input_declarations() */
      return TRUE;

   case TGSI_FILE_OUTPUT:
      assert(decl->Range.First == decl->Range.Last);
      /* Remember which components of each output are actually written */
      emit->output_usage_mask[decl->Range.First] = decl->Declaration.UsageMask;
      return TRUE;

   case TGSI_FILE_TEMPORARY:
      /* Don't declare the temps here.  Just keep track of how many
       * and emit the declaration later.
       */
      if (decl->Declaration.Array) {
         /* Indexed temporary array.  Save the start index of the array
          * and the size of the array.
          */
         const unsigned arrayID = MIN2(decl->Array.ArrayID, MAX_TEMP_ARRAYS);
         unsigned i;

         assert(arrayID < ARRAY_SIZE(emit->temp_arrays));

         /* Save this array so we can emit the declaration for it later */
         emit->temp_arrays[arrayID].start = decl->Range.First;
         emit->temp_arrays[arrayID].size =
            decl->Range.Last - decl->Range.First + 1;

         emit->num_temp_arrays = MAX2(emit->num_temp_arrays, arrayID + 1);
         assert(emit->num_temp_arrays <= MAX_TEMP_ARRAYS);
         /* clamp in case the assert above is compiled out */
         emit->num_temp_arrays = MIN2(emit->num_temp_arrays, MAX_TEMP_ARRAYS);

         /* Fill in the temp_map entries for this array */
         for (i = decl->Range.First; i <= decl->Range.Last; i++) {
            emit->temp_map[i].arrayId = arrayID;
            emit->temp_map[i].index = i - decl->Range.First;
         }
      }

      /* for all temps, indexed or not, keep track of highest index */
      emit->num_shader_temps = MAX2(emit->num_shader_temps,
                                    decl->Range.Last + 1);
      return TRUE;

   case TGSI_FILE_CONSTANT:
      /* Don't declare constants here.  Just keep track and emit later. */
      {
         unsigned constbuf = 0, num_consts;
         if (decl->Declaration.Dimension) {
            constbuf = decl->Dim.Index2D;
         }
         /* We throw an assertion here when, in fact, the shader should never
          * have linked due to constbuf index out of bounds, so we shouldn't
          * have reached here.
          */
         assert(constbuf < ARRAY_SIZE(emit->num_shader_consts));

         num_consts = MAX2(emit->num_shader_consts[constbuf],
                           decl->Range.Last + 1);

         if (num_consts > VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT) {
            debug_printf("Warning: constant buffer is declared to size [%u]"
                         " but [%u] is the limit.\n",
                         num_consts,
                         VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT);
         }
         /* The linker doesn't enforce the max UBO size so we clamp here */
         emit->num_shader_consts[constbuf] =
            MIN2(num_consts, VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT);
      }
      return TRUE;

   case TGSI_FILE_IMMEDIATE:
      assert(!"TGSI_FILE_IMMEDIATE not handled yet!");
      return FALSE;

   case TGSI_FILE_SYSTEM_VALUE:
      emit_system_value_declaration(emit, decl->Semantic.Name,
                                    decl->Range.First);
      return TRUE;

   case TGSI_FILE_SAMPLER:
      /* Don't declare samplers here.  Just keep track and emit later. */
      emit->num_samplers = MAX2(emit->num_samplers, decl->Range.Last + 1);
      return TRUE;

   case TGSI_FILE_RESOURCE:
      /*opcode0.opcodeType = VGPU10_OPCODE_DCL_RESOURCE;*/
      /* XXX more, VGPU10_RETURN_TYPE_FLOAT */
      assert(!"TGSI_FILE_RESOURCE not handled yet");
      return FALSE;

   case TGSI_FILE_ADDRESS:
      /* Address regs are emulated with temps; just count them here */
      emit->num_address_regs = MAX2(emit->num_address_regs,
                                    decl->Range.Last + 1);
      return TRUE;

   case TGSI_FILE_SAMPLER_VIEW:
      {
         unsigned unit = decl->Range.First;
         assert(decl->Range.First == decl->Range.Last);
         emit->sampler_target[unit] = decl->SamplerView.Resource;
         /* Note: we can ignore YZW return types for now */
         emit->sampler_return_type[unit] = decl->SamplerView.ReturnTypeX;
         emit->sampler_view[unit] = TRUE;
      }
      return TRUE;

   default:
      assert(!"Unexpected type of declaration");
      return FALSE;
   }
}
/**
 * Emit all input declarations.
 * The form of the declarations depends on the shader stage:
 * fragment shader inputs carry an interpolation mode, geometry shader
 * inputs are two-dimensional (vertex x attribute), and vertex shader
 * inputs are plain 1-D registers.
 */
static void
emit_input_declarations(struct svga_shader_emitter_v10 *emit)
{
   unsigned i;

   if (emit->unit == PIPE_SHADER_FRAGMENT) {

      for (i = 0; i < emit->linkage.num_inputs; i++) {
         enum tgsi_semantic semantic_name = emit->info.input_semantic_name[i];
         unsigned usage_mask = emit->info.input_usage_mask[i];
         unsigned index = emit->linkage.input_map[i];
         VGPU10_OPCODE_TYPE type;
         VGPU10_INTERPOLATION_MODE interpolationMode;
         VGPU10_SYSTEM_NAME name;

         if (usage_mask == 0)
            continue;  /* register is not actually used */

         if (semantic_name == TGSI_SEMANTIC_POSITION) {
            /* fragment position input */
            type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
            interpolationMode = VGPU10_INTERPOLATION_LINEAR;
            name = VGPU10_NAME_POSITION;
            if (usage_mask & TGSI_WRITEMASK_W) {
               /* we need to replace use of 'w' with '1/w' */
               emit->fs.fragcoord_input_index = i;
            }
         }
         else if (semantic_name == TGSI_SEMANTIC_FACE) {
            /* fragment front-facing input */
            type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
            interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
            name = VGPU10_NAME_IS_FRONT_FACE;
            emit->fs.face_input_index = i;
         }
         else if (semantic_name == TGSI_SEMANTIC_PRIMID) {
            /* primitive ID */
            type = VGPU10_OPCODE_DCL_INPUT_PS_SGV;
            interpolationMode = VGPU10_INTERPOLATION_CONSTANT;
            name = VGPU10_NAME_PRIMITIVE_ID;
         }
         else {
            /* general fragment input */
            type = VGPU10_OPCODE_DCL_INPUT_PS;
            interpolationMode =
               translate_interpolation(emit,
                                       emit->info.input_interpolate[i],
                                       emit->info.input_interpolate_loc[i]);

            /* keeps track if flat interpolation mode is being used */
            emit->uses_flat_interp |=
               (interpolationMode == VGPU10_INTERPOLATION_CONSTANT);

            name = VGPU10_NAME_UNDEFINED;
         }

         emit_input_declaration(emit, type,
                                VGPU10_OPERAND_TYPE_INPUT,
                                VGPU10_OPERAND_INDEX_1D, index, 1,
                                name,
                                VGPU10_OPERAND_4_COMPONENT,
                                VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
                                VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
                                interpolationMode);
      }
   }
   else if (emit->unit == PIPE_SHADER_GEOMETRY) {

      for (i = 0; i < emit->info.num_inputs; i++) {
         enum tgsi_semantic semantic_name = emit->info.input_semantic_name[i];
         unsigned usage_mask = emit->info.input_usage_mask[i];
         unsigned index = emit->linkage.input_map[i];
         VGPU10_OPCODE_TYPE opcodeType, operandType;
         VGPU10_OPERAND_NUM_COMPONENTS numComp;
         VGPU10_OPERAND_4_COMPONENT_SELECTION_MODE selMode;
         VGPU10_SYSTEM_NAME name;
         VGPU10_OPERAND_INDEX_DIMENSION dim;

         if (usage_mask == 0)
            continue;  /* register is not actually used */

         /* defaults for an ordinary GS input */
         opcodeType = VGPU10_OPCODE_DCL_INPUT;
         operandType = VGPU10_OPERAND_TYPE_INPUT;
         numComp = VGPU10_OPERAND_4_COMPONENT;
         selMode = VGPU10_OPERAND_4_COMPONENT_MASK_MODE;
         name = VGPU10_NAME_UNDEFINED;

         /* all geometry shader inputs are two dimensional except
          * gl_PrimitiveID
          */
         dim = VGPU10_OPERAND_INDEX_2D;

         if (semantic_name == TGSI_SEMANTIC_PRIMID) {
            /* Primitive ID is a dedicated, componentless 0-D operand */
            operandType = VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID;
            dim = VGPU10_OPERAND_INDEX_0D;
            numComp = VGPU10_OPERAND_0_COMPONENT;

            /* also save the register index so we can check for
             * primitive id when emit src register. We need to modify the
             * operand type, index dimension when emit primitive id src reg.
             */
            emit->gs.prim_id_index = i;
         }
         else if (semantic_name == TGSI_SEMANTIC_POSITION) {
            /* vertex position input */
            opcodeType = VGPU10_OPCODE_DCL_INPUT_SIV;
            name = VGPU10_NAME_POSITION;
         }

         emit_input_declaration(emit, opcodeType, operandType,
                                dim, index,
                                emit->gs.input_size,
                                name,
                                numComp, selMode,
                                VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
                                VGPU10_INTERPOLATION_UNDEFINED);
      }
   }
   else {
      assert(emit->unit == PIPE_SHADER_VERTEX);

      for (i = 0; i < emit->info.file_max[TGSI_FILE_INPUT] + 1; i++) {
         unsigned usage_mask = emit->info.input_usage_mask[i];
         unsigned index = i;

         if (usage_mask == 0)
            continue;  /* register is not actually used */

         emit_input_declaration(emit, VGPU10_OPCODE_DCL_INPUT,
                                VGPU10_OPERAND_TYPE_INPUT,
                                VGPU10_OPERAND_INDEX_1D, index, 1,
                                VGPU10_NAME_UNDEFINED,
                                VGPU10_OPERAND_4_COMPONENT,
                                VGPU10_OPERAND_4_COMPONENT_MASK_MODE,
                                VGPU10_OPERAND_4_COMPONENT_MASK_ALL,
                                VGPU10_INTERPOLATION_UNDEFINED);
      }
   }
}
/**
 * Emit all output declarations.
 * Fragment shader outputs (color, depth) are handled separately from
 * VS/GS outputs.  Afterward, extra "shadow copy" outputs used for
 * stream output (unmodified position, clip distances) are declared.
 */
static void
emit_output_declarations(struct svga_shader_emitter_v10 *emit)
{
   unsigned i;

   for (i = 0; i < emit->info.num_outputs; i++) {
      /*const unsigned usage_mask = emit->info.output_usage_mask[i];*/
      const enum tgsi_semantic semantic_name =
         emit->info.output_semantic_name[i];
      const unsigned semantic_index = emit->info.output_semantic_index[i];
      unsigned index = i;

      if (emit->unit == PIPE_SHADER_FRAGMENT) {
         if (semantic_name == TGSI_SEMANTIC_COLOR) {
            assert(semantic_index < ARRAY_SIZE(emit->fs.color_out_index));

            emit->fs.color_out_index[semantic_index] = index;

            emit->fs.num_color_outputs = MAX2(emit->fs.num_color_outputs,
                                              semantic_index + 1);

            /* The semantic index is the shader's color output/buffer index */
            emit_output_declaration(emit,
                                    VGPU10_OPCODE_DCL_OUTPUT, semantic_index,
                                    VGPU10_NAME_UNDEFINED,
                                    VGPU10_OPERAND_4_COMPONENT_MASK_ALL);

            if (semantic_index == 0) {
               if (emit->key.fs.write_color0_to_n_cbufs > 1) {
                  /* Emit declarations for the additional color outputs
                   * for broadcasting color 0 to multiple color buffers.
                   */
                  unsigned j;
                  for (j = 1; j < emit->key.fs.write_color0_to_n_cbufs; j++) {
                     /* Allocate a new output index */
                     unsigned idx = emit->info.num_outputs + j - 1;
                     emit->fs.color_out_index[j] = idx;
                     emit_output_declaration(emit,
                                        VGPU10_OPCODE_DCL_OUTPUT, idx,
                                        VGPU10_NAME_UNDEFINED,
                                        VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
                     emit->info.output_semantic_index[idx] = j;
                  }

                  emit->fs.num_color_outputs =
                     emit->key.fs.write_color0_to_n_cbufs;
               }
            }
            else {
               /* only color 0 may be broadcast */
               assert(!emit->key.fs.write_color0_to_n_cbufs);
            }
         }
         else if (semantic_name == TGSI_SEMANTIC_POSITION) {
            /* Fragment depth output */
            emit_fragdepth_output_declaration(emit);
         }
         else {
            assert(!"Bad output semantic name");
         }
      }
      else {
         /* VS or GS output */
         VGPU10_COMPONENT_NAME name;
         VGPU10_OPCODE_TYPE type;
         unsigned writemask = VGPU10_OPERAND_4_COMPONENT_MASK_ALL;

         switch (semantic_name) {
         case TGSI_SEMANTIC_POSITION:
            assert(emit->unit != PIPE_SHADER_FRAGMENT);
            type = VGPU10_OPCODE_DCL_OUTPUT_SIV;
            name = VGPU10_NAME_POSITION;
            /* Save the index of the vertex position output register */
            emit->vposition.out_index = index;
            break;
         case TGSI_SEMANTIC_CLIPDIST:
            type = VGPU10_OPCODE_DCL_OUTPUT_SIV;
            name = VGPU10_NAME_CLIP_DISTANCE;
            /* save the starting index of the clip distance output register */
            if (semantic_index == 0)
               emit->clip_dist_out_index = index;
            writemask = emit->output_usage_mask[index];
            writemask = apply_clip_plane_mask(emit, writemask, semantic_index);
            if (writemask == 0x0) {
               continue; /* discard this do-nothing declaration */
            }
            break;
         case TGSI_SEMANTIC_PRIMID:
            assert(emit->unit == PIPE_SHADER_GEOMETRY);
            type = VGPU10_OPCODE_DCL_OUTPUT_SGV;
            name = VGPU10_NAME_PRIMITIVE_ID;
            break;
         case TGSI_SEMANTIC_LAYER:
            assert(emit->unit == PIPE_SHADER_GEOMETRY);
            type = VGPU10_OPCODE_DCL_OUTPUT_SGV;
            name = VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            type = VGPU10_OPCODE_DCL_OUTPUT;
            name = VGPU10_NAME_UNDEFINED;
            /* remember where CLIPVERTEX goes so it can be converted to
             * clip distances later
             */
            emit->clip_vertex_out_index = index;
            break;
         default:
            /* generic output */
            type = VGPU10_OPCODE_DCL_OUTPUT;
            name = VGPU10_NAME_UNDEFINED;
         }

         emit_output_declaration(emit, type, index, name, writemask);
      }
   }

   if (emit->vposition.so_index != INVALID_INDEX &&
       emit->vposition.out_index != INVALID_INDEX) {

      assert(emit->unit != PIPE_SHADER_FRAGMENT);

      /* Emit the declaration for the non-adjusted vertex position
       * for stream output purpose
       */
      emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
                              emit->vposition.so_index,
                              VGPU10_NAME_UNDEFINED,
                              VGPU10_OPERAND_4_COMPONENT_MASK_ALL);
   }

   if (emit->clip_dist_so_index != INVALID_INDEX &&
       emit->clip_dist_out_index != INVALID_INDEX) {

      assert(emit->unit != PIPE_SHADER_FRAGMENT);

      /* Emit the declaration for the clip distance shadow copy which
       * will be used for stream output purpose and for clip distance
       * varying variable
       */
      emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
                              emit->clip_dist_so_index,
                              VGPU10_NAME_UNDEFINED,
                              emit->output_usage_mask[emit->clip_dist_out_index]);

      if (emit->info.num_written_clipdistance > 4) {
         /* for the second clip distance register, each handles 4 planes */
         emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT,
                                 emit->clip_dist_so_index + 1,
                                 VGPU10_NAME_UNDEFINED,
                                 emit->output_usage_mask[emit->clip_dist_out_index+1]);
      }
   }
}
/**
 * Emit the declaration for the temporary registers.
 * This also allocates the driver-internal temps (prescale, clip, face,
 * fragcoord, address-reg emulation, ...) and builds the TGSI->VGPU10
 * temp_map used for register renumbering.
 */
static void
emit_temporaries_declaration(struct svga_shader_emitter_v10 *emit)
{
   unsigned total_temps, reg, i;

   total_temps = emit->num_shader_temps;

   /* If there is indirect access to non-indexable temps in the shader,
    * convert those temps to indexable temps. This works around a bug
    * in the GLSL->TGSI translator exposed in piglit test
    * glsl-1.20/execution/fs-const-array-of-struct-of-array.shader_test.
    * Internal temps added by the driver remain as non-indexable temps.
    */
   if ((emit->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) &&
       emit->num_temp_arrays == 0) {
      unsigned arrayID;

      /* arrayId 0 means "not an array", so use array 1 */
      arrayID = 1;
      emit->num_temp_arrays = arrayID + 1;
      emit->temp_arrays[arrayID].start = 0;
      emit->temp_arrays[arrayID].size = total_temps;

      /* Fill in the temp_map entries for this temp array */
      for (i = 0; i < total_temps; i++) {
         emit->temp_map[i].arrayId = arrayID;
         emit->temp_map[i].index = i;
      }
   }

   /* Allocate extra temps for specially-implemented instructions,
    * such as LIT.
    */
   total_temps += MAX_INTERNAL_TEMPS;

   if (emit->unit == PIPE_SHADER_VERTEX || emit->unit == PIPE_SHADER_GEOMETRY) {
      if (emit->vposition.need_prescale || emit->key.vs.undo_viewport ||
          emit->key.clip_plane_enable ||
          emit->vposition.so_index != INVALID_INDEX) {
         /* temp to hold the unmodified vertex position */
         emit->vposition.tmp_index = total_temps;
         total_temps += 1;
      }

      if (emit->unit == PIPE_SHADER_VERTEX) {
         /* one temp per attribute needing any draw-time adjustment */
         unsigned attrib_mask = (emit->key.vs.adjust_attrib_w_1 |
                                 emit->key.vs.adjust_attrib_itof |
                                 emit->key.vs.adjust_attrib_utof |
                                 emit->key.vs.attrib_is_bgra |
                                 emit->key.vs.attrib_puint_to_snorm |
                                 emit->key.vs.attrib_puint_to_uscaled |
                                 emit->key.vs.attrib_puint_to_sscaled);
         while (attrib_mask) {
            unsigned index = u_bit_scan(&attrib_mask);
            emit->vs.adjusted_input[index] = total_temps++;
         }
      }

      if (emit->clip_mode == CLIP_DISTANCE) {
         /* We need to write the clip distance to a temporary register
          * first. Then it will be copied to the shadow copy for
          * the clip distance varying variable and stream output purpose.
          * It will also be copied to the actual CLIPDIST register
          * according to the enabled clip planes
          */
         emit->clip_dist_tmp_index = total_temps++;
         if (emit->info.num_written_clipdistance > 4)
            total_temps++; /* second clip register */
      }
      else if (emit->clip_mode == CLIP_VERTEX) {
         /* We need to convert the TGSI CLIPVERTEX output to one or more
          * clip distances.  Allocate a temp reg for the clipvertex here.
          */
         assert(emit->info.writes_clipvertex > 0);
         emit->clip_vertex_tmp_index = total_temps;
         total_temps++;
      }
   }
   else if (emit->unit == PIPE_SHADER_FRAGMENT) {
      if (emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS ||
          emit->key.fs.write_color0_to_n_cbufs > 1) {
         /* Allocate a temp to hold the output color */
         emit->fs.color_tmp_index = total_temps;
         total_temps += 1;
      }

      if (emit->fs.face_input_index != INVALID_INDEX) {
         /* Allocate a temp for the +/-1 face register */
         emit->fs.face_tmp_index = total_temps;
         total_temps += 1;
      }

      if (emit->fs.fragcoord_input_index != INVALID_INDEX) {
         /* Allocate a temp for modified fragment position register */
         emit->fs.fragcoord_tmp_index = total_temps;
         total_temps += 1;
      }
   }

   /* Address registers are emulated with ordinary temps */
   for (i = 0; i < emit->num_address_regs; i++) {
      emit->address_reg_index[i] = total_temps++;
   }

   /* Initialize the temp_map array which maps TGSI temp indexes to VGPU10
    * temp indexes.  Basically, we compact all the non-array temp register
    * indexes into a consecutive series.
    *
    * Before, we may have some TGSI declarations like:
    *   DCL TEMP[0..1], LOCAL
    *   DCL TEMP[2..4], ARRAY(1), LOCAL
    *   DCL TEMP[5..7], ARRAY(2), LOCAL
    *   plus, some extra temps, like TEMP[8], TEMP[9] for misc things
    *
    * After, we'll have a map like this:
    *   temp_map[0] = { array 0, index 0 }
    *   temp_map[1] = { array 0, index 1 }
    *   temp_map[2] = { array 1, index 0 }
    *   temp_map[3] = { array 1, index 1 }
    *   temp_map[4] = { array 1, index 2 }
    *   temp_map[5] = { array 2, index 0 }
    *   temp_map[6] = { array 2, index 1 }
    *   temp_map[7] = { array 2, index 2 }
    *   temp_map[8] = { array 0, index 2 }
    *   temp_map[9] = { array 0, index 3 }
    *
    * We'll declare two arrays of 3 elements, plus a set of four non-indexed
    * temps numbered 0..3
    *
    * Any time we emit a temporary register index, we'll have to use the
    * temp_map[] table to convert the TGSI index to the VGPU10 index.
    *
    * Finally, we recompute the total_temps value here.
    */
   reg = 0;
   for (i = 0; i < total_temps; i++) {
      if (emit->temp_map[i].arrayId == 0) {
         emit->temp_map[i].index = reg++;
      }
   }

   if (0) {
      /* debug: dump the final temp mapping */
      debug_printf("total_temps %u\n", total_temps);
      for (i = 0; i < total_temps; i++) {
         debug_printf("temp %u -> array %u  index %u\n",
                      i, emit->temp_map[i].arrayId, emit->temp_map[i].index);
      }
   }

   total_temps = reg;

   /* Emit declaration of ordinary temp registers */
   if (total_temps > 0) {
      VGPU10OpcodeToken0 opcode0;

      opcode0.value = 0;
      opcode0.opcodeType = VGPU10_OPCODE_DCL_TEMPS;

      begin_emit_instruction(emit);
      emit_dword(emit, opcode0.value);
      emit_dword(emit, total_temps);
      end_emit_instruction(emit);
   }

   /* Emit declarations for indexable temp arrays.  Skip 0th entry since
    * that's unused.
    */
   for (i = 1; i < emit->num_temp_arrays; i++) {
      unsigned num_temps = emit->temp_arrays[i].size;

      if (num_temps > 0) {
         VGPU10OpcodeToken0 opcode0;

         opcode0.value = 0;
         opcode0.opcodeType = VGPU10_OPCODE_DCL_INDEXABLE_TEMP;

         begin_emit_instruction(emit);
         emit_dword(emit, opcode0.value);
         emit_dword(emit, i); /* which array */
         emit_dword(emit, num_temps);
         emit_dword(emit, 4); /* num components */
         end_emit_instruction(emit);

         total_temps += num_temps;
      }
   }

   /* Check that the grand total of all regular and indexed temps is
    * under the device limit.
    */
   check_register_index(emit, VGPU10_OPCODE_DCL_TEMPS, total_temps - 1);
}
/**
 * Emit declarations for the constant buffers: buffer[0] holds the
 * shader's constants plus the driver's "extra" constants; buffers
 * 1..N are user UBOs.
 */
static void
emit_constant_declaration(struct svga_shader_emitter_v10 *emit)
{
   VGPU10OpcodeToken0 opcode0;
   VGPU10OperandToken0 operand0;
   unsigned total_consts, i;

   opcode0.value = 0;
   opcode0.opcodeType = VGPU10_OPCODE_DCL_CONSTANT_BUFFER;
   opcode0.accessPattern = VGPU10_CB_IMMEDIATE_INDEXED;
   /* XXX or, access pattern = VGPU10_CB_DYNAMIC_INDEXED */

   operand0.value = 0;
   operand0.numComponents = VGPU10_OPERAND_4_COMPONENT;
   operand0.indexDimension = VGPU10_OPERAND_INDEX_2D;
   operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
   operand0.index1Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;
   operand0.operandType = VGPU10_OPERAND_TYPE_CONSTANT_BUFFER;
   operand0.selectionMode = VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE;
   /* identity swizzle xyzw */
   operand0.swizzleX = 0;
   operand0.swizzleY = 1;
   operand0.swizzleZ = 2;
   operand0.swizzleW = 3;

   /**
    * Emit declaration for constant buffer [0].  We also allocate
    * room for the extra constants here.
    */
   total_consts = emit->num_shader_consts[0];

   /* Now, allocate constant slots for the "extra" constants.
    * Note: it's critical that these extra constant locations
    * exactly match what's emitted by the "extra" constants code
    * in svga_state_constants.c
    */

   /* Vertex position scale/translation */
   if (emit->vposition.need_prescale) {
      emit->vposition.prescale_scale_index = total_consts++;
      emit->vposition.prescale_trans_index = total_consts++;
   }

   if (emit->unit == PIPE_SHADER_VERTEX) {
      if (emit->key.vs.undo_viewport) {
         emit->vs.viewport_index = total_consts++;
      }
   }

   /* user-defined clip planes */
   if (emit->key.clip_plane_enable) {
      unsigned n = util_bitcount(emit->key.clip_plane_enable);
      assert(emit->unit == PIPE_SHADER_VERTEX ||
             emit->unit == PIPE_SHADER_GEOMETRY);
      for (i = 0; i < n; i++) {
         emit->clip_plane_const[i] = total_consts++;
      }
   }

   for (i = 0; i < emit->num_samplers; i++) {

      if (emit->sampler_view[i]) {

         /* Texcoord scale factors for RECT textures */
         if (emit->key.tex[i].unnormalized) {
            emit->texcoord_scale_index[i] = total_consts++;
         }

         /* Texture buffer sizes */
         if (emit->sampler_target[i] == TGSI_TEXTURE_BUFFER) {
            emit->texture_buffer_size_index[i] = total_consts++;
         }
      }
   }

   if (total_consts > 0) {
      begin_emit_instruction(emit);
      emit_dword(emit, opcode0.value);
      emit_dword(emit, operand0.value);
      emit_dword(emit, 0);  /* which const buffer slot */
      emit_dword(emit, total_consts);
      end_emit_instruction(emit);
   }

   /* Declare remaining constant buffers (UBOs) */
   for (i = 1; i < ARRAY_SIZE(emit->num_shader_consts); i++) {
      if (emit->num_shader_consts[i] > 0) {
         begin_emit_instruction(emit);
         emit_dword(emit, opcode0.value);
         emit_dword(emit, operand0.value);
         emit_dword(emit, i);  /* which const buffer slot */
         emit_dword(emit, emit->num_shader_consts[i]);
         end_emit_instruction(emit);
      }
   }
}
/**
 * Emit declarations for samplers.
 */
static void
emit_sampler_declarations(struct svga_shader_emitter_v10 *emit)
{
   unsigned i;

   for (i = 0; i < emit->num_samplers; i++) {
      VGPU10OpcodeToken0 opcode0;
      VGPU10OperandToken0 operand0;

      opcode0.value = 0;
      opcode0.opcodeType = VGPU10_OPCODE_DCL_SAMPLER;
      opcode0.samplerMode = VGPU10_SAMPLER_MODE_DEFAULT;

      operand0.value = 0;
      operand0.numComponents = VGPU10_OPERAND_0_COMPONENT;
      operand0.operandType = VGPU10_OPERAND_TYPE_SAMPLER;
      operand0.indexDimension = VGPU10_OPERAND_INDEX_1D;
      operand0.index0Representation = VGPU10_OPERAND_INDEX_IMMEDIATE32;

      begin_emit_instruction(emit);
      emit_dword(emit, opcode0.value);
      emit_dword(emit, operand0.value);
      emit_dword(emit, i); /* sampler register index */
      end_emit_instruction(emit);
   }
}
2978 * Translate TGSI_TEXTURE_x to VGAPU10_RESOURCE_DIMENSION_x.
2981 tgsi_texture_to_resource_dimension(enum tgsi_texture_type target
,
2985 case TGSI_TEXTURE_BUFFER
:
2986 return VGPU10_RESOURCE_DIMENSION_BUFFER
;
2987 case TGSI_TEXTURE_1D
:
2988 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D
;
2989 case TGSI_TEXTURE_2D
:
2990 case TGSI_TEXTURE_RECT
:
2991 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D
;
2992 case TGSI_TEXTURE_3D
:
2993 return VGPU10_RESOURCE_DIMENSION_TEXTURE3D
;
2994 case TGSI_TEXTURE_CUBE
:
2995 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBE
;
2996 case TGSI_TEXTURE_SHADOW1D
:
2997 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D
;
2998 case TGSI_TEXTURE_SHADOW2D
:
2999 case TGSI_TEXTURE_SHADOWRECT
:
3000 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D
;
3001 case TGSI_TEXTURE_1D_ARRAY
:
3002 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
3003 return is_array
? VGPU10_RESOURCE_DIMENSION_TEXTURE1DARRAY
3004 : VGPU10_RESOURCE_DIMENSION_TEXTURE1D
;
3005 case TGSI_TEXTURE_2D_ARRAY
:
3006 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
3007 return is_array
? VGPU10_RESOURCE_DIMENSION_TEXTURE2DARRAY
3008 : VGPU10_RESOURCE_DIMENSION_TEXTURE2D
;
3009 case TGSI_TEXTURE_SHADOWCUBE
:
3010 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBE
;
3011 case TGSI_TEXTURE_2D_MSAA
:
3012 return VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS
;
3013 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
3014 return is_array
? VGPU10_RESOURCE_DIMENSION_TEXTURE2DMSARRAY
3015 : VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS
;
3016 case TGSI_TEXTURE_CUBE_ARRAY
:
3017 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBEARRAY
;
3019 assert(!"Unexpected resource type");
3020 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D
;
3026 * Given a tgsi_return_type, return true iff it is an integer type.
3029 is_integer_type(enum tgsi_return_type type
)
3032 case TGSI_RETURN_TYPE_SINT
:
3033 case TGSI_RETURN_TYPE_UINT
:
3035 case TGSI_RETURN_TYPE_FLOAT
:
3036 case TGSI_RETURN_TYPE_UNORM
:
3037 case TGSI_RETURN_TYPE_SNORM
:
3039 case TGSI_RETURN_TYPE_COUNT
:
3041 assert(!"is_integer_type: Unknown tgsi_return_type");
3048 * Emit declarations for resources.
3049 * XXX When we're sure that all TGSI shaders will be generated with
3050 * sampler view declarations (Ex: DCL SVIEW[n], 2D, UINT) we may
3054 emit_resource_declarations(struct svga_shader_emitter_v10
*emit
)
3058 /* Emit resource decl for each sampler */
3059 for (i
= 0; i
< emit
->num_samplers
; i
++) {
3060 VGPU10OpcodeToken0 opcode0
;
3061 VGPU10OperandToken0 operand0
;
3062 VGPU10ResourceReturnTypeToken return_type
;
3063 VGPU10_RESOURCE_RETURN_TYPE rt
;
3066 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_RESOURCE
;
3067 opcode0
.resourceDimension
=
3068 tgsi_texture_to_resource_dimension(emit
->sampler_target
[i
],
3069 emit
->key
.tex
[i
].is_array
);
3071 operand0
.numComponents
= VGPU10_OPERAND_0_COMPONENT
;
3072 operand0
.operandType
= VGPU10_OPERAND_TYPE_RESOURCE
;
3073 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
3074 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
3077 /* convert TGSI_RETURN_TYPE_x to VGPU10_RETURN_TYPE_x */
3078 STATIC_ASSERT(VGPU10_RETURN_TYPE_UNORM
== TGSI_RETURN_TYPE_UNORM
+ 1);
3079 STATIC_ASSERT(VGPU10_RETURN_TYPE_SNORM
== TGSI_RETURN_TYPE_SNORM
+ 1);
3080 STATIC_ASSERT(VGPU10_RETURN_TYPE_SINT
== TGSI_RETURN_TYPE_SINT
+ 1);
3081 STATIC_ASSERT(VGPU10_RETURN_TYPE_UINT
== TGSI_RETURN_TYPE_UINT
+ 1);
3082 STATIC_ASSERT(VGPU10_RETURN_TYPE_FLOAT
== TGSI_RETURN_TYPE_FLOAT
+ 1);
3083 assert(emit
->sampler_return_type
[i
] <= TGSI_RETURN_TYPE_FLOAT
);
3084 rt
= emit
->sampler_return_type
[i
] + 1;
3086 switch (emit
->sampler_return_type
[i
]) {
3087 case TGSI_RETURN_TYPE_UNORM
: rt
= VGPU10_RETURN_TYPE_UNORM
; break;
3088 case TGSI_RETURN_TYPE_SNORM
: rt
= VGPU10_RETURN_TYPE_SNORM
; break;
3089 case TGSI_RETURN_TYPE_SINT
: rt
= VGPU10_RETURN_TYPE_SINT
; break;
3090 case TGSI_RETURN_TYPE_UINT
: rt
= VGPU10_RETURN_TYPE_UINT
; break;
3091 case TGSI_RETURN_TYPE_FLOAT
: rt
= VGPU10_RETURN_TYPE_FLOAT
; break;
3092 case TGSI_RETURN_TYPE_COUNT
:
3094 rt
= VGPU10_RETURN_TYPE_FLOAT
;
3095 assert(!"emit_resource_declarations: Unknown tgsi_return_type");
3099 return_type
.value
= 0;
3100 return_type
.component0
= rt
;
3101 return_type
.component1
= rt
;
3102 return_type
.component2
= rt
;
3103 return_type
.component3
= rt
;
3105 begin_emit_instruction(emit
);
3106 emit_dword(emit
, opcode0
.value
);
3107 emit_dword(emit
, operand0
.value
);
3108 emit_dword(emit
, i
);
3109 emit_dword(emit
, return_type
.value
);
3110 end_emit_instruction(emit
);
/**
 * Emit a one-operand instruction: opcode dst, src.
 * The emission order (opcode token, dst, src) is the VGPU10 token
 * layout and must not be changed.
 */
static void
emit_instruction_op1(struct svga_shader_emitter_v10 *emit,
                     VGPU10_OPCODE_TYPE opcode,
                     const struct tgsi_full_dst_register *dst,
                     const struct tgsi_full_src_register *src,
                     boolean saturate)
{
   begin_emit_instruction(emit);
   emit_opcode(emit, opcode, saturate);
   emit_dst_register(emit, dst);
   emit_src_register(emit, src);
   end_emit_instruction(emit);
}
/**
 * Emit a two-operand instruction: opcode dst, src1, src2.
 */
static void
emit_instruction_op2(struct svga_shader_emitter_v10 *emit,
                     VGPU10_OPCODE_TYPE opcode,
                     const struct tgsi_full_dst_register *dst,
                     const struct tgsi_full_src_register *src1,
                     const struct tgsi_full_src_register *src2,
                     boolean saturate)
{
   begin_emit_instruction(emit);
   emit_opcode(emit, opcode, saturate);
   emit_dst_register(emit, dst);
   emit_src_register(emit, src1);
   emit_src_register(emit, src2);
   end_emit_instruction(emit);
}
/**
 * Emit a three-operand instruction: opcode dst, src1, src2, src3.
 */
static void
emit_instruction_op3(struct svga_shader_emitter_v10 *emit,
                     VGPU10_OPCODE_TYPE opcode,
                     const struct tgsi_full_dst_register *dst,
                     const struct tgsi_full_src_register *src1,
                     const struct tgsi_full_src_register *src2,
                     const struct tgsi_full_src_register *src3,
                     boolean saturate)
{
   begin_emit_instruction(emit);
   emit_opcode(emit, opcode, saturate);
   emit_dst_register(emit, dst);
   emit_src_register(emit, src1);
   emit_src_register(emit, src2);
   emit_src_register(emit, src3);
   end_emit_instruction(emit);
}
/**
 * Emit the actual clip distance instructions to be used for clipping
 * by copying the clip distance from the temporary registers to the
 * CLIPDIST registers written with the enabled planes mask.
 * Also copy the clip distance from the temporary to the clip distance
 * shadow copy register which will be referenced by the input shader
 */
static void
emit_clip_distance_instructions(struct svga_shader_emitter_v10 *emit)
{
   struct tgsi_full_src_register tmp_clip_dist_src;
   struct tgsi_full_dst_register clip_dist_dst;

   unsigned i;
   unsigned clip_plane_enable = emit->key.clip_plane_enable;
   unsigned clip_dist_tmp_index = emit->clip_dist_tmp_index;
   int num_written_clipdist = emit->info.num_written_clipdistance;

   assert(emit->clip_dist_out_index != INVALID_INDEX);
   assert(emit->clip_dist_tmp_index != INVALID_INDEX);

   /**
    * Temporary reset the temporary clip dist register index so
    * that the copy to the real clip dist register will not
    * attempt to copy to the temporary register again
    */
   emit->clip_dist_tmp_index = INVALID_INDEX;

   /* up to two clip dist registers, four distances per register */
   for (i = 0; i < 2 && num_written_clipdist > 0; i++, num_written_clipdist-=4) {

      tmp_clip_dist_src = make_src_temp_reg(clip_dist_tmp_index + i);

      /**
       * copy to the shadow copy for use by varying variable and
       * stream output. All clip distances
       * will be written regardless of the enabled clipping planes.
       */
      clip_dist_dst = make_dst_reg(TGSI_FILE_OUTPUT,
                                   emit->clip_dist_so_index + i);

      /* MOV clip_dist_so, tmp_clip_dist */
      emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &clip_dist_dst,
                           &tmp_clip_dist_src, FALSE);

      /**
       * copy those clip distances to enabled clipping planes
       * to CLIPDIST registers for clipping
       */
      if (clip_plane_enable & 0xf) {
         clip_dist_dst = make_dst_reg(TGSI_FILE_OUTPUT,
                                      emit->clip_dist_out_index + i);
         clip_dist_dst = writemask_dst(&clip_dist_dst, clip_plane_enable & 0xf);

         /* MOV CLIPDIST, tmp_clip_dist */
         emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &clip_dist_dst,
                              &tmp_clip_dist_src, FALSE);
      }

      /* four clip planes per clip register */
      clip_plane_enable >>= 4;
   }

   /**
    * set the temporary clip dist register index back to the
    * temporary index for the next vertex
    */
   emit->clip_dist_tmp_index = clip_dist_tmp_index;
}
/* Declare clip distance output registers for user-defined clip planes
 * or the TGSI_CLIPVERTEX output.
 */
static void
emit_clip_distance_declarations(struct svga_shader_emitter_v10 *emit)
{
   unsigned num_clip_planes = util_bitcount(emit->key.clip_plane_enable);
   unsigned index = emit->num_outputs;
   unsigned plane_mask;

   assert(emit->unit == PIPE_SHADER_VERTEX ||
          emit->unit == PIPE_SHADER_GEOMETRY);
   assert(num_clip_planes <= 8);

   /* Only needed when the driver synthesizes the clip distances; a shader
    * with explicit CLIPDIST outputs declares them itself.
    */
   if (emit->clip_mode != CLIP_LEGACY &&
       emit->clip_mode != CLIP_VERTEX) {
      return;
   }

   if (num_clip_planes == 0)
      return;

   /* Declare one or two clip output registers.  The number of components
    * in the mask reflects the number of clip planes.  For example, if 5
    * clip planes are needed, we'll declare outputs similar to:
    * dcl_output_siv o2.xyzw, clip_distance
    * dcl_output_siv o3.x, clip_distance
    */
   emit->clip_dist_out_index = index; /* save the starting clip dist reg index */

   plane_mask = (1 << num_clip_planes) - 1;
   if (plane_mask & 0xf) {
      unsigned cmask = plane_mask & VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
      emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT_SIV, index,
                              VGPU10_NAME_CLIP_DISTANCE, cmask);
      emit->num_outputs++;
   }
   if (plane_mask & 0xf0) {
      /* planes 4..7 go in a second register */
      unsigned cmask = (plane_mask >> 4) & VGPU10_OPERAND_4_COMPONENT_MASK_ALL;
      emit_output_declaration(emit, VGPU10_OPCODE_DCL_OUTPUT_SIV, index + 1,
                              VGPU10_NAME_CLIP_DISTANCE, cmask);
      emit->num_outputs++;
   }
}
3278 * Emit the instructions for writing to the clip distance registers
3279 * to handle legacy/automatic clip planes.
3280 * For each clip plane, the distance is the dot product of the vertex
3281 * position (found in TEMP[vpos_tmp_index]) and the clip plane coefficients.
3282 * This is not used when the shader has an explicit CLIPVERTEX or CLIPDISTANCE
3283 * output registers already declared.
3286 emit_clip_distance_from_vpos(struct svga_shader_emitter_v10
*emit
,
3287 unsigned vpos_tmp_index
)
3289 unsigned i
, num_clip_planes
= util_bitcount(emit
->key
.clip_plane_enable
);
3291 assert(emit
->clip_mode
== CLIP_LEGACY
);
3292 assert(num_clip_planes
<= 8);
3294 assert(emit
->unit
== PIPE_SHADER_VERTEX
||
3295 emit
->unit
== PIPE_SHADER_GEOMETRY
);
3297 for (i
= 0; i
< num_clip_planes
; i
++) {
3298 struct tgsi_full_dst_register dst
;
3299 struct tgsi_full_src_register plane_src
, vpos_src
;
3300 unsigned reg_index
= emit
->clip_dist_out_index
+ i
/ 4;
3301 unsigned comp
= i
% 4;
3302 unsigned writemask
= VGPU10_OPERAND_4_COMPONENT_MASK_X
<< comp
;
3304 /* create dst, src regs */
3305 dst
= make_dst_reg(TGSI_FILE_OUTPUT
, reg_index
);
3306 dst
= writemask_dst(&dst
, writemask
);
3308 plane_src
= make_src_const_reg(emit
->clip_plane_const
[i
]);
3309 vpos_src
= make_src_temp_reg(vpos_tmp_index
);
3311 /* DP4 clip_dist, plane, vpos */
3312 emit_instruction_op2(emit
, VGPU10_OPCODE_DP4
, &dst
,
3313 &plane_src
, &vpos_src
, FALSE
);
3319 * Emit the instructions for computing the clip distance results from
3320 * the clip vertex temporary.
3321 * For each clip plane, the distance is the dot product of the clip vertex
3322 * position (found in a temp reg) and the clip plane coefficients.
3325 emit_clip_vertex_instructions(struct svga_shader_emitter_v10
*emit
)
3327 const unsigned num_clip
= util_bitcount(emit
->key
.clip_plane_enable
);
3329 struct tgsi_full_dst_register dst
;
3330 struct tgsi_full_src_register clipvert_src
;
3331 const unsigned clip_vertex_tmp
= emit
->clip_vertex_tmp_index
;
3333 assert(emit
->unit
== PIPE_SHADER_VERTEX
||
3334 emit
->unit
== PIPE_SHADER_GEOMETRY
);
3336 assert(emit
->clip_mode
== CLIP_VERTEX
);
3338 clipvert_src
= make_src_temp_reg(clip_vertex_tmp
);
3340 for (i
= 0; i
< num_clip
; i
++) {
3341 struct tgsi_full_src_register plane_src
;
3342 unsigned reg_index
= emit
->clip_dist_out_index
+ i
/ 4;
3343 unsigned comp
= i
% 4;
3344 unsigned writemask
= VGPU10_OPERAND_4_COMPONENT_MASK_X
<< comp
;
3346 /* create dst, src regs */
3347 dst
= make_dst_reg(TGSI_FILE_OUTPUT
, reg_index
);
3348 dst
= writemask_dst(&dst
, writemask
);
3350 plane_src
= make_src_const_reg(emit
->clip_plane_const
[i
]);
3352 /* DP4 clip_dist, plane, vpos */
3353 emit_instruction_op2(emit
, VGPU10_OPCODE_DP4
, &dst
,
3354 &plane_src
, &clipvert_src
, FALSE
);
3357 /* copy temporary clip vertex register to the clip vertex register */
3359 assert(emit
->clip_vertex_out_index
!= INVALID_INDEX
);
3362 * temporary reset the temporary clip vertex register index so
3363 * that copy to the clip vertex register will not attempt
3364 * to copy to the temporary register again
3366 emit
->clip_vertex_tmp_index
= INVALID_INDEX
;
3368 /* MOV clip_vertex, clip_vertex_tmp */
3369 dst
= make_dst_reg(TGSI_FILE_OUTPUT
, emit
->clip_vertex_out_index
);
3370 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
3371 &dst
, &clipvert_src
, FALSE
);
3374 * set the temporary clip vertex register index back to the
3375 * temporary index for the next vertex
3377 emit
->clip_vertex_tmp_index
= clip_vertex_tmp
;
3381 * Emit code to convert RGBA to BGRA
3384 emit_swap_r_b(struct svga_shader_emitter_v10
*emit
,
3385 const struct tgsi_full_dst_register
*dst
,
3386 const struct tgsi_full_src_register
*src
)
3388 struct tgsi_full_src_register bgra_src
=
3389 swizzle_src(src
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_X
, TGSI_SWIZZLE_W
);
3391 begin_emit_instruction(emit
);
3392 emit_opcode(emit
, VGPU10_OPCODE_MOV
, FALSE
);
3393 emit_dst_register(emit
, dst
);
3394 emit_src_register(emit
, &bgra_src
);
3395 end_emit_instruction(emit
);
3399 /** Convert from 10_10_10_2 normalized to 10_10_10_2_snorm */
3401 emit_puint_to_snorm(struct svga_shader_emitter_v10
*emit
,
3402 const struct tgsi_full_dst_register
*dst
,
3403 const struct tgsi_full_src_register
*src
)
3405 struct tgsi_full_src_register half
= make_immediate_reg_float(emit
, 0.5f
);
3406 struct tgsi_full_src_register two
=
3407 make_immediate_reg_float4(emit
, 2.0f
, 2.0f
, 2.0f
, 3.0f
);
3408 struct tgsi_full_src_register neg_two
=
3409 make_immediate_reg_float4(emit
, -2.0f
, -2.0f
, -2.0f
, -1.66666f
);
3411 unsigned val_tmp
= get_temp_index(emit
);
3412 struct tgsi_full_dst_register val_dst
= make_dst_temp_reg(val_tmp
);
3413 struct tgsi_full_src_register val_src
= make_src_temp_reg(val_tmp
);
3415 unsigned bias_tmp
= get_temp_index(emit
);
3416 struct tgsi_full_dst_register bias_dst
= make_dst_temp_reg(bias_tmp
);
3417 struct tgsi_full_src_register bias_src
= make_src_temp_reg(bias_tmp
);
3419 /* val = src * 2.0 */
3420 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &val_dst
,
3423 /* bias = src > 0.5 */
3424 emit_instruction_op2(emit
, VGPU10_OPCODE_GE
, &bias_dst
,
3427 /* bias = bias & -2.0 */
3428 emit_instruction_op2(emit
, VGPU10_OPCODE_AND
, &bias_dst
,
3429 &bias_src
, &neg_two
, FALSE
);
3431 /* dst = val + bias */
3432 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, dst
,
3433 &val_src
, &bias_src
, FALSE
);
3435 free_temp_indexes(emit
);
3439 /** Convert from 10_10_10_2_unorm to 10_10_10_2_uscaled */
3441 emit_puint_to_uscaled(struct svga_shader_emitter_v10
*emit
,
3442 const struct tgsi_full_dst_register
*dst
,
3443 const struct tgsi_full_src_register
*src
)
3445 struct tgsi_full_src_register scale
=
3446 make_immediate_reg_float4(emit
, 1023.0f
, 1023.0f
, 1023.0f
, 3.0f
);
3448 /* dst = src * scale */
3449 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, dst
, src
, &scale
, FALSE
);
3453 /** Convert from R32_UINT to 10_10_10_2_sscaled */
3455 emit_puint_to_sscaled(struct svga_shader_emitter_v10
*emit
,
3456 const struct tgsi_full_dst_register
*dst
,
3457 const struct tgsi_full_src_register
*src
)
3459 struct tgsi_full_src_register lshift
=
3460 make_immediate_reg_int4(emit
, 22, 12, 2, 0);
3461 struct tgsi_full_src_register rshift
=
3462 make_immediate_reg_int4(emit
, 22, 22, 22, 30);
3464 struct tgsi_full_src_register src_xxxx
= scalar_src(src
, TGSI_SWIZZLE_X
);
3466 unsigned tmp
= get_temp_index(emit
);
3467 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3468 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3471 * r = (pixel << 22) >> 22; # signed int in [511, -512]
3472 * g = (pixel << 12) >> 22; # signed int in [511, -512]
3473 * b = (pixel << 2) >> 22; # signed int in [511, -512]
3474 * a = (pixel << 0) >> 30; # signed int in [1, -2]
3475 * dst = i_to_f(r,g,b,a); # convert to float
3477 emit_instruction_op2(emit
, VGPU10_OPCODE_ISHL
, &tmp_dst
,
3478 &src_xxxx
, &lshift
, FALSE
);
3479 emit_instruction_op2(emit
, VGPU10_OPCODE_ISHR
, &tmp_dst
,
3480 &tmp_src
, &rshift
, FALSE
);
3481 emit_instruction_op1(emit
, VGPU10_OPCODE_ITOF
, dst
, &tmp_src
, FALSE
);
3483 free_temp_indexes(emit
);
3488 * Emit code for TGSI_OPCODE_ARL or TGSI_OPCODE_UARL instruction.
3491 emit_arl_uarl(struct svga_shader_emitter_v10
*emit
,
3492 const struct tgsi_full_instruction
*inst
)
3494 unsigned index
= inst
->Dst
[0].Register
.Index
;
3495 struct tgsi_full_dst_register dst
;
3496 VGPU10_OPCODE_TYPE opcode
;
3498 assert(index
< MAX_VGPU10_ADDR_REGS
);
3499 dst
= make_dst_temp_reg(emit
->address_reg_index
[index
]);
3503 * FTOI address_tmp, s0
3507 * MOV address_tmp, s0
3509 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_ARL
)
3510 opcode
= VGPU10_OPCODE_FTOI
;
3512 opcode
= VGPU10_OPCODE_MOV
;
3514 emit_instruction_op1(emit
, opcode
, &dst
, &inst
->Src
[0], FALSE
);
3521 * Emit code for TGSI_OPCODE_CAL instruction.
3524 emit_cal(struct svga_shader_emitter_v10
*emit
,
3525 const struct tgsi_full_instruction
*inst
)
3527 unsigned label
= inst
->Label
.Label
;
3528 VGPU10OperandToken0 operand
;
3530 operand
.operandType
= VGPU10_OPERAND_TYPE_LABEL
;
3532 begin_emit_instruction(emit
);
3533 emit_dword(emit
, operand
.value
);
3534 emit_dword(emit
, label
);
3535 end_emit_instruction(emit
);
3542 * Emit code for TGSI_OPCODE_IABS instruction.
3545 emit_iabs(struct svga_shader_emitter_v10
*emit
,
3546 const struct tgsi_full_instruction
*inst
)
3548 /* dst.x = (src0.x < 0) ? -src0.x : src0.x
3549 * dst.y = (src0.y < 0) ? -src0.y : src0.y
3550 * dst.z = (src0.z < 0) ? -src0.z : src0.z
3551 * dst.w = (src0.w < 0) ? -src0.w : src0.w
3554 * IMAX dst, src, neg(src)
3556 struct tgsi_full_src_register neg_src
= negate_src(&inst
->Src
[0]);
3557 emit_instruction_op2(emit
, VGPU10_OPCODE_IMAX
, &inst
->Dst
[0],
3558 &inst
->Src
[0], &neg_src
, FALSE
);
3565 * Emit code for TGSI_OPCODE_CMP instruction.
3568 emit_cmp(struct svga_shader_emitter_v10
*emit
,
3569 const struct tgsi_full_instruction
*inst
)
3571 /* dst.x = (src0.x < 0) ? src1.x : src2.x
3572 * dst.y = (src0.y < 0) ? src1.y : src2.y
3573 * dst.z = (src0.z < 0) ? src1.z : src2.z
3574 * dst.w = (src0.w < 0) ? src1.w : src2.w
3578 * MOVC dst, tmp, src1, src2
3580 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
3581 unsigned tmp
= get_temp_index(emit
);
3582 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3583 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3585 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
,
3586 &inst
->Src
[0], &zero
, FALSE
);
3587 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0],
3588 &tmp_src
, &inst
->Src
[1], &inst
->Src
[2],
3589 inst
->Instruction
.Saturate
);
3591 free_temp_indexes(emit
);
3598 * Emit code for TGSI_OPCODE_DST instruction.
3601 emit_dst(struct svga_shader_emitter_v10
*emit
,
3602 const struct tgsi_full_instruction
*inst
)
3606 * dst.y = src0.y * src1.y
3611 struct tgsi_full_src_register s0_yyyy
=
3612 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Y
);
3613 struct tgsi_full_src_register s0_zzzz
=
3614 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Z
);
3615 struct tgsi_full_src_register s1_yyyy
=
3616 scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_Y
);
3617 struct tgsi_full_src_register s1_wwww
=
3618 scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_W
);
3621 * If dst and either src0 and src1 are the same we need
3622 * to create a temporary for it and insert a extra move.
3624 unsigned tmp_move
= get_temp_index(emit
);
3625 struct tgsi_full_src_register move_src
= make_src_temp_reg(tmp_move
);
3626 struct tgsi_full_dst_register move_dst
= make_dst_temp_reg(tmp_move
);
3628 /* MOV dst.x, 1.0 */
3629 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
3630 struct tgsi_full_dst_register dst_x
=
3631 writemask_dst(&move_dst
, TGSI_WRITEMASK_X
);
3632 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
3634 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_x
, &one
, FALSE
);
3637 /* MUL dst.y, s0.y, s1.y */
3638 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
3639 struct tgsi_full_dst_register dst_y
=
3640 writemask_dst(&move_dst
, TGSI_WRITEMASK_Y
);
3642 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &dst_y
, &s0_yyyy
,
3643 &s1_yyyy
, inst
->Instruction
.Saturate
);
3646 /* MOV dst.z, s0.z */
3647 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
3648 struct tgsi_full_dst_register dst_z
=
3649 writemask_dst(&move_dst
, TGSI_WRITEMASK_Z
);
3651 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_z
, &s0_zzzz
,
3652 inst
->Instruction
.Saturate
);
3655 /* MOV dst.w, s1.w */
3656 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
3657 struct tgsi_full_dst_register dst_w
=
3658 writemask_dst(&move_dst
, TGSI_WRITEMASK_W
);
3660 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &s1_wwww
,
3661 inst
->Instruction
.Saturate
);
3664 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &move_src
,
3666 free_temp_indexes(emit
);
3674 * Emit code for TGSI_OPCODE_ENDPRIM (GS only)
3677 emit_endprim(struct svga_shader_emitter_v10
*emit
,
3678 const struct tgsi_full_instruction
*inst
)
3680 assert(emit
->unit
== PIPE_SHADER_GEOMETRY
);
3682 /* We can't use emit_simple() because the TGSI instruction has one
3683 * operand (vertex stream number) which we must ignore for VGPU10.
3685 begin_emit_instruction(emit
);
3686 emit_opcode(emit
, VGPU10_OPCODE_CUT
, FALSE
);
3687 end_emit_instruction(emit
);
3693 * Emit code for TGSI_OPCODE_EX2 (2^x) instruction.
3696 emit_ex2(struct svga_shader_emitter_v10
*emit
,
3697 const struct tgsi_full_instruction
*inst
)
3699 /* Note that TGSI_OPCODE_EX2 computes only one value from src.x
3700 * while VGPU10 computes four values.
3703 * dst.xyzw = 2.0 ^ src.x
3706 struct tgsi_full_src_register src_xxxx
=
3707 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
3708 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
3710 /* EXP tmp, s0.xxxx */
3711 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &inst
->Dst
[0], &src_xxxx
,
3712 inst
->Instruction
.Saturate
);
3719 * Emit code for TGSI_OPCODE_EXP instruction.
3722 emit_exp(struct svga_shader_emitter_v10
*emit
,
3723 const struct tgsi_full_instruction
*inst
)
3726 * dst.x = 2 ^ floor(s0.x)
3727 * dst.y = s0.x - floor(s0.x)
3732 struct tgsi_full_src_register src_xxxx
=
3733 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_X
);
3734 unsigned tmp
= get_temp_index(emit
);
3735 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3736 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3739 * If dst and src are the same we need to create
3740 * a temporary for it and insert a extra move.
3742 unsigned tmp_move
= get_temp_index(emit
);
3743 struct tgsi_full_src_register move_src
= make_src_temp_reg(tmp_move
);
3744 struct tgsi_full_dst_register move_dst
= make_dst_temp_reg(tmp_move
);
3746 /* only use X component of temp reg */
3747 tmp_dst
= writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
3748 tmp_src
= scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
3750 /* ROUND_NI tmp.x, s0.x */
3751 emit_instruction_op1(emit
, VGPU10_OPCODE_ROUND_NI
, &tmp_dst
,
3752 &src_xxxx
, FALSE
); /* round to -infinity */
3754 /* EXP dst.x, tmp.x */
3755 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
3756 struct tgsi_full_dst_register dst_x
=
3757 writemask_dst(&move_dst
, TGSI_WRITEMASK_X
);
3759 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &dst_x
, &tmp_src
,
3760 inst
->Instruction
.Saturate
);
3763 /* ADD dst.y, s0.x, -tmp */
3764 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
3765 struct tgsi_full_dst_register dst_y
=
3766 writemask_dst(&move_dst
, TGSI_WRITEMASK_Y
);
3767 struct tgsi_full_src_register neg_tmp_src
= negate_src(&tmp_src
);
3769 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &dst_y
, &src_xxxx
,
3770 &neg_tmp_src
, inst
->Instruction
.Saturate
);
3773 /* EXP dst.z, s0.x */
3774 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
3775 struct tgsi_full_dst_register dst_z
=
3776 writemask_dst(&move_dst
, TGSI_WRITEMASK_Z
);
3778 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &dst_z
, &src_xxxx
,
3779 inst
->Instruction
.Saturate
);
3782 /* MOV dst.w, 1.0 */
3783 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
3784 struct tgsi_full_dst_register dst_w
=
3785 writemask_dst(&move_dst
, TGSI_WRITEMASK_W
);
3786 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
3788 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &one
,
3792 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &move_src
,
3795 free_temp_indexes(emit
);
3802 * Emit code for TGSI_OPCODE_IF instruction.
3805 emit_if(struct svga_shader_emitter_v10
*emit
,
3806 const struct tgsi_full_instruction
*inst
)
3808 VGPU10OpcodeToken0 opcode0
;
3810 /* The src register should be a scalar */
3811 assert(inst
->Src
[0].Register
.SwizzleX
== inst
->Src
[0].Register
.SwizzleY
&&
3812 inst
->Src
[0].Register
.SwizzleX
== inst
->Src
[0].Register
.SwizzleZ
&&
3813 inst
->Src
[0].Register
.SwizzleX
== inst
->Src
[0].Register
.SwizzleW
);
3815 /* The only special thing here is that we need to set the
3816 * VGPU10_INSTRUCTION_TEST_NONZERO flag since we want to test if
3817 * src.x is non-zero.
3820 opcode0
.opcodeType
= VGPU10_OPCODE_IF
;
3821 opcode0
.testBoolean
= VGPU10_INSTRUCTION_TEST_NONZERO
;
3823 begin_emit_instruction(emit
);
3824 emit_dword(emit
, opcode0
.value
);
3825 emit_src_register(emit
, &inst
->Src
[0]);
3826 end_emit_instruction(emit
);
3833 * Emit code for TGSI_OPCODE_KILL_IF instruction (kill fragment if any of
3834 * the register components are negative).
3837 emit_kill_if(struct svga_shader_emitter_v10
*emit
,
3838 const struct tgsi_full_instruction
*inst
)
3840 unsigned tmp
= get_temp_index(emit
);
3841 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3842 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3844 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
3846 struct tgsi_full_dst_register tmp_dst_x
=
3847 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
3848 struct tgsi_full_src_register tmp_src_xxxx
=
3849 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
3851 /* tmp = src[0] < 0.0 */
3852 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
, &inst
->Src
[0],
3855 if (!same_swizzle_terms(&inst
->Src
[0])) {
3856 /* If the swizzle is not XXXX, YYYY, ZZZZ or WWWW we need to
3857 * logically OR the swizzle terms. Most uses of KILL_IF only
3858 * test one channel so it's good to avoid these extra steps.
3860 struct tgsi_full_src_register tmp_src_yyyy
=
3861 scalar_src(&tmp_src
, TGSI_SWIZZLE_Y
);
3862 struct tgsi_full_src_register tmp_src_zzzz
=
3863 scalar_src(&tmp_src
, TGSI_SWIZZLE_Z
);
3864 struct tgsi_full_src_register tmp_src_wwww
=
3865 scalar_src(&tmp_src
, TGSI_SWIZZLE_W
);
3867 emit_instruction_op2(emit
, VGPU10_OPCODE_OR
, &tmp_dst_x
, &tmp_src_xxxx
,
3868 &tmp_src_yyyy
, FALSE
);
3869 emit_instruction_op2(emit
, VGPU10_OPCODE_OR
, &tmp_dst_x
, &tmp_src_xxxx
,
3870 &tmp_src_zzzz
, FALSE
);
3871 emit_instruction_op2(emit
, VGPU10_OPCODE_OR
, &tmp_dst_x
, &tmp_src_xxxx
,
3872 &tmp_src_wwww
, FALSE
);
3875 begin_emit_instruction(emit
);
3876 emit_discard_opcode(emit
, TRUE
); /* discard if src0.x is non-zero */
3877 emit_src_register(emit
, &tmp_src_xxxx
);
3878 end_emit_instruction(emit
);
3880 free_temp_indexes(emit
);
3887 * Emit code for TGSI_OPCODE_KILL instruction (unconditional discard).
3890 emit_kill(struct svga_shader_emitter_v10
*emit
,
3891 const struct tgsi_full_instruction
*inst
)
3893 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
3895 /* DISCARD if 0.0 is zero */
3896 begin_emit_instruction(emit
);
3897 emit_discard_opcode(emit
, FALSE
);
3898 emit_src_register(emit
, &zero
);
3899 end_emit_instruction(emit
);
3906 * Emit code for TGSI_OPCODE_LG2 instruction.
3909 emit_lg2(struct svga_shader_emitter_v10
*emit
,
3910 const struct tgsi_full_instruction
*inst
)
3912 /* Note that TGSI_OPCODE_LG2 computes only one value from src.x
3913 * while VGPU10 computes four values.
3916 * dst.xyzw = log2(src.x)
3919 struct tgsi_full_src_register src_xxxx
=
3920 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
3921 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
3923 /* LOG tmp, s0.xxxx */
3924 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &inst
->Dst
[0], &src_xxxx
,
3925 inst
->Instruction
.Saturate
);
3932 * Emit code for TGSI_OPCODE_LIT instruction.
3935 emit_lit(struct svga_shader_emitter_v10
*emit
,
3936 const struct tgsi_full_instruction
*inst
)
3938 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
3941 * If dst and src are the same we need to create
3942 * a temporary for it and insert a extra move.
3944 unsigned tmp_move
= get_temp_index(emit
);
3945 struct tgsi_full_src_register move_src
= make_src_temp_reg(tmp_move
);
3946 struct tgsi_full_dst_register move_dst
= make_dst_temp_reg(tmp_move
);
3950 * dst.y = max(src.x, 0)
3951 * dst.z = (src.x > 0) ? max(src.y, 0)^{clamp(src.w, -128, 128))} : 0
3955 /* MOV dst.x, 1.0 */
3956 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
3957 struct tgsi_full_dst_register dst_x
=
3958 writemask_dst(&move_dst
, TGSI_WRITEMASK_X
);
3959 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_x
, &one
, FALSE
);
3962 /* MOV dst.w, 1.0 */
3963 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
3964 struct tgsi_full_dst_register dst_w
=
3965 writemask_dst(&move_dst
, TGSI_WRITEMASK_W
);
3966 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &one
, FALSE
);
3969 /* MAX dst.y, src.x, 0.0 */
3970 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
3971 struct tgsi_full_dst_register dst_y
=
3972 writemask_dst(&move_dst
, TGSI_WRITEMASK_Y
);
3973 struct tgsi_full_src_register zero
=
3974 make_immediate_reg_float(emit
, 0.0f
);
3975 struct tgsi_full_src_register src_xxxx
=
3976 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
3977 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
3979 emit_instruction_op2(emit
, VGPU10_OPCODE_MAX
, &dst_y
, &src_xxxx
,
3980 &zero
, inst
->Instruction
.Saturate
);
3984 * tmp1 = clamp(src.w, -128, 128);
3985 * MAX tmp1, src.w, -128
3986 * MIN tmp1, tmp1, 128
3988 * tmp2 = max(tmp2, 0);
3989 * MAX tmp2, src.y, 0
3991 * tmp1 = pow(tmp2, tmp1);
3993 * MUL tmp1, tmp2, tmp1
3996 * tmp1 = (src.w == 0) ? 1 : tmp1;
3998 * MOVC tmp1, tmp2, 1.0, tmp1
4000 * dst.z = (0 < src.x) ? tmp1 : 0;
4002 * MOVC dst.z, tmp2, tmp1, 0.0
4004 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
4005 struct tgsi_full_dst_register dst_z
=
4006 writemask_dst(&move_dst
, TGSI_WRITEMASK_Z
);
4008 unsigned tmp1
= get_temp_index(emit
);
4009 struct tgsi_full_src_register tmp1_src
= make_src_temp_reg(tmp1
);
4010 struct tgsi_full_dst_register tmp1_dst
= make_dst_temp_reg(tmp1
);
4011 unsigned tmp2
= get_temp_index(emit
);
4012 struct tgsi_full_src_register tmp2_src
= make_src_temp_reg(tmp2
);
4013 struct tgsi_full_dst_register tmp2_dst
= make_dst_temp_reg(tmp2
);
4015 struct tgsi_full_src_register src_xxxx
=
4016 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_X
);
4017 struct tgsi_full_src_register src_yyyy
=
4018 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Y
);
4019 struct tgsi_full_src_register src_wwww
=
4020 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_W
);
4022 struct tgsi_full_src_register zero
=
4023 make_immediate_reg_float(emit
, 0.0f
);
4024 struct tgsi_full_src_register lowerbound
=
4025 make_immediate_reg_float(emit
, -128.0f
);
4026 struct tgsi_full_src_register upperbound
=
4027 make_immediate_reg_float(emit
, 128.0f
);
4029 emit_instruction_op2(emit
, VGPU10_OPCODE_MAX
, &tmp1_dst
, &src_wwww
,
4030 &lowerbound
, FALSE
);
4031 emit_instruction_op2(emit
, VGPU10_OPCODE_MIN
, &tmp1_dst
, &tmp1_src
,
4032 &upperbound
, FALSE
);
4033 emit_instruction_op2(emit
, VGPU10_OPCODE_MAX
, &tmp2_dst
, &src_yyyy
,
4036 /* POW tmp1, tmp2, tmp1 */
4037 /* LOG tmp2, tmp2 */
4038 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &tmp2_dst
, &tmp2_src
,
4041 /* MUL tmp1, tmp2, tmp1 */
4042 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp1_dst
, &tmp2_src
,
4045 /* EXP tmp1, tmp1 */
4046 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &tmp1_dst
, &tmp1_src
,
4049 /* EQ tmp2, 0, src.w */
4050 emit_instruction_op2(emit
, VGPU10_OPCODE_EQ
, &tmp2_dst
, &zero
,
4052 /* MOVC tmp1.z, tmp2, tmp1, 1.0 */
4053 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &tmp1_dst
,
4054 &tmp2_src
, &one
, &tmp1_src
, FALSE
);
4056 /* LT tmp2, 0, src.x */
4057 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp2_dst
, &zero
,
4059 /* MOVC dst.z, tmp2, tmp1, 0.0 */
4060 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &dst_z
,
4061 &tmp2_src
, &tmp1_src
, &zero
, FALSE
);
4064 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &move_src
,
4066 free_temp_indexes(emit
);
4073 * Emit code for TGSI_OPCODE_LOG instruction.
4076 emit_log(struct svga_shader_emitter_v10
*emit
,
4077 const struct tgsi_full_instruction
*inst
)
4080 * dst.x = floor(lg2(abs(s0.x)))
4081 * dst.y = abs(s0.x) / (2 ^ floor(lg2(abs(s0.x))))
4082 * dst.z = lg2(abs(s0.x))
4086 struct tgsi_full_src_register src_xxxx
=
4087 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_X
);
4088 unsigned tmp
= get_temp_index(emit
);
4089 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4090 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4091 struct tgsi_full_src_register abs_src_xxxx
= absolute_src(&src_xxxx
);
4093 /* only use X component of temp reg */
4094 tmp_dst
= writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4095 tmp_src
= scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4097 /* LOG tmp.x, abs(s0.x) */
4098 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_XYZ
) {
4099 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &tmp_dst
,
4100 &abs_src_xxxx
, FALSE
);
4103 /* MOV dst.z, tmp.x */
4104 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
4105 struct tgsi_full_dst_register dst_z
=
4106 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_Z
);
4108 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_z
,
4109 &tmp_src
, inst
->Instruction
.Saturate
);
4112 /* FLR tmp.x, tmp.x */
4113 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_XY
) {
4114 emit_instruction_op1(emit
, VGPU10_OPCODE_ROUND_NI
, &tmp_dst
,
4118 /* MOV dst.x, tmp.x */
4119 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
4120 struct tgsi_full_dst_register dst_x
=
4121 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_X
);
4123 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_x
, &tmp_src
,
4124 inst
->Instruction
.Saturate
);
4127 /* EXP tmp.x, tmp.x */
4128 /* DIV dst.y, abs(s0.x), tmp.x */
4129 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
4130 struct tgsi_full_dst_register dst_y
=
4131 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_Y
);
4133 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &tmp_dst
, &tmp_src
,
4135 emit_instruction_op2(emit
, VGPU10_OPCODE_DIV
, &dst_y
, &abs_src_xxxx
,
4136 &tmp_src
, inst
->Instruction
.Saturate
);
4139 /* MOV dst.w, 1.0 */
4140 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
4141 struct tgsi_full_dst_register dst_w
=
4142 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_W
);
4143 struct tgsi_full_src_register one
=
4144 make_immediate_reg_float(emit
, 1.0f
);
4146 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &one
, FALSE
);
4149 free_temp_indexes(emit
);
4156 * Emit code for TGSI_OPCODE_LRP instruction.
4159 emit_lrp(struct svga_shader_emitter_v10
*emit
,
4160 const struct tgsi_full_instruction
*inst
)
4162 /* dst = LRP(s0, s1, s2):
4163 * dst = s0 * (s1 - s2) + s2
4165 * SUB tmp, s1, s2; tmp = s1 - s2
4166 * MAD dst, s0, tmp, s2; dst = s0 * t1 + s2
4168 unsigned tmp
= get_temp_index(emit
);
4169 struct tgsi_full_src_register src_tmp
= make_src_temp_reg(tmp
);
4170 struct tgsi_full_dst_register dst_tmp
= make_dst_temp_reg(tmp
);
4171 struct tgsi_full_src_register neg_src2
= negate_src(&inst
->Src
[2]);
4173 /* ADD tmp, s1, -s2 */
4174 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &dst_tmp
,
4175 &inst
->Src
[1], &neg_src2
, FALSE
);
4177 /* MAD dst, s1, tmp, s3 */
4178 emit_instruction_op3(emit
, VGPU10_OPCODE_MAD
, &inst
->Dst
[0],
4179 &inst
->Src
[0], &src_tmp
, &inst
->Src
[2],
4180 inst
->Instruction
.Saturate
);
4182 free_temp_indexes(emit
);
4189 * Emit code for TGSI_OPCODE_POW instruction.
4192 emit_pow(struct svga_shader_emitter_v10
*emit
,
4193 const struct tgsi_full_instruction
*inst
)
4195 /* Note that TGSI_OPCODE_POW computes only one value from src0.x and
4196 * src1.x while VGPU10 computes four values.
4198 * dst = POW(src0, src1):
4199 * dst.xyzw = src0.x ^ src1.x
4201 unsigned tmp
= get_temp_index(emit
);
4202 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4203 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4204 struct tgsi_full_src_register src0_xxxx
=
4205 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
4206 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
4207 struct tgsi_full_src_register src1_xxxx
=
4208 swizzle_src(&inst
->Src
[1], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
4209 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
4211 /* LOG tmp, s0.xxxx */
4212 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &tmp_dst
, &src0_xxxx
,
4215 /* MUL tmp, tmp, s1.xxxx */
4216 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp_dst
, &tmp_src
,
4219 /* EXP tmp, s0.xxxx */
4220 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &inst
->Dst
[0],
4221 &tmp_src
, inst
->Instruction
.Saturate
);
4224 free_temp_indexes(emit
);
4231 * Emit code for TGSI_OPCODE_RCP (reciprocal) instruction.
4234 emit_rcp(struct svga_shader_emitter_v10
*emit
,
4235 const struct tgsi_full_instruction
*inst
)
4237 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4239 unsigned tmp
= get_temp_index(emit
);
4240 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4241 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4243 struct tgsi_full_dst_register tmp_dst_x
=
4244 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4245 struct tgsi_full_src_register tmp_src_xxxx
=
4246 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4248 /* DIV tmp.x, 1.0, s0 */
4249 emit_instruction_op2(emit
, VGPU10_OPCODE_DIV
, &tmp_dst_x
, &one
,
4250 &inst
->Src
[0], FALSE
);
4252 /* MOV dst, tmp.xxxx */
4253 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0],
4254 &tmp_src_xxxx
, inst
->Instruction
.Saturate
);
4256 free_temp_indexes(emit
);
4263 * Emit code for TGSI_OPCODE_RSQ instruction.
4266 emit_rsq(struct svga_shader_emitter_v10
*emit
,
4267 const struct tgsi_full_instruction
*inst
)
4270 * dst.xyzw = 1 / sqrt(src.x)
4276 unsigned tmp
= get_temp_index(emit
);
4277 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4278 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4280 struct tgsi_full_dst_register tmp_dst_x
=
4281 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4282 struct tgsi_full_src_register tmp_src_xxxx
=
4283 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4285 /* RSQ tmp, src.x */
4286 emit_instruction_op1(emit
, VGPU10_OPCODE_RSQ
, &tmp_dst_x
,
4287 &inst
->Src
[0], FALSE
);
4289 /* MOV dst, tmp.xxxx */
4290 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0],
4291 &tmp_src_xxxx
, inst
->Instruction
.Saturate
);
4294 free_temp_indexes(emit
);
4301 * Emit code for TGSI_OPCODE_SEQ (Set Equal) instruction.
4304 emit_seq(struct svga_shader_emitter_v10
*emit
,
4305 const struct tgsi_full_instruction
*inst
)
4307 /* dst = SEQ(s0, s1):
4308 * dst = s0 == s1 ? 1.0 : 0.0 (per component)
4310 * EQ tmp, s0, s1; tmp = s0 == s1 : 0xffffffff : 0 (per comp)
4311 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4313 unsigned tmp
= get_temp_index(emit
);
4314 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4315 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4316 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4317 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4319 /* EQ tmp, s0, s1 */
4320 emit_instruction_op2(emit
, VGPU10_OPCODE_EQ
, &tmp_dst
, &inst
->Src
[0],
4321 &inst
->Src
[1], FALSE
);
4323 /* MOVC dst, tmp, one, zero */
4324 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4325 &one
, &zero
, FALSE
);
4327 free_temp_indexes(emit
);
4334 * Emit code for TGSI_OPCODE_SGE (Set Greater than or Equal) instruction.
4337 emit_sge(struct svga_shader_emitter_v10
*emit
,
4338 const struct tgsi_full_instruction
*inst
)
4340 /* dst = SGE(s0, s1):
4341 * dst = s0 >= s1 ? 1.0 : 0.0 (per component)
4343 * GE tmp, s0, s1; tmp = s0 >= s1 : 0xffffffff : 0 (per comp)
4344 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4346 unsigned tmp
= get_temp_index(emit
);
4347 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4348 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4349 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4350 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4352 /* GE tmp, s0, s1 */
4353 emit_instruction_op2(emit
, VGPU10_OPCODE_GE
, &tmp_dst
, &inst
->Src
[0],
4354 &inst
->Src
[1], FALSE
);
4356 /* MOVC dst, tmp, one, zero */
4357 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4358 &one
, &zero
, FALSE
);
4360 free_temp_indexes(emit
);
4367 * Emit code for TGSI_OPCODE_SGT (Set Greater than) instruction.
4370 emit_sgt(struct svga_shader_emitter_v10
*emit
,
4371 const struct tgsi_full_instruction
*inst
)
4373 /* dst = SGT(s0, s1):
4374 * dst = s0 > s1 ? 1.0 : 0.0 (per component)
4376 * LT tmp, s1, s0; tmp = s1 < s0 ? 0xffffffff : 0 (per comp)
4377 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4379 unsigned tmp
= get_temp_index(emit
);
4380 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4381 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4382 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4383 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4385 /* LT tmp, s1, s0 */
4386 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
, &inst
->Src
[1],
4387 &inst
->Src
[0], FALSE
);
4389 /* MOVC dst, tmp, one, zero */
4390 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4391 &one
, &zero
, FALSE
);
4393 free_temp_indexes(emit
);
/**
 * Emit code for TGSI_OPCODE_SIN and TGSI_OPCODE_COS instructions.
 *
 * VGPU10's SINCOS instruction computes both results at once: the first
 * destination register receives the sine, the second the cosine.  We
 * direct the result we want into tmp.x and discard the other by emitting
 * a null destination, then MOV tmp.xxxx into the real destination.
 */
static boolean
emit_sincos(struct svga_shader_emitter_v10 *emit,
            const struct tgsi_full_instruction *inst)
{
   unsigned tmp = get_temp_index(emit);
   struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
   struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);

   /* only the x component of the temp is ever written/read */
   struct tgsi_full_src_register tmp_src_xxxx =
      scalar_src(&tmp_src, TGSI_SWIZZLE_X);
   struct tgsi_full_dst_register tmp_dst_x =
      writemask_dst(&tmp_dst, TGSI_WRITEMASK_X);

   begin_emit_instruction(emit);
   emit_opcode(emit, VGPU10_OPCODE_SINCOS, FALSE);

   if(inst->Instruction.Opcode == TGSI_OPCODE_SIN)
   {
      /* SIN: keep the first (sine) result, discard the second */
      emit_dst_register(emit, &tmp_dst_x);  /* first destination register */
      emit_null_dst_register(emit);  /* second destination register */
   }
   else {
      /* COS: discard the first result, keep the second (cosine) */
      emit_null_dst_register(emit);
      emit_dst_register(emit, &tmp_dst_x);
   }

   emit_src_register(emit, &inst->Src[0]);
   end_emit_instruction(emit);

   /* MOV dst, tmp.xxxx  (saturate modifier applied here, not on SINCOS) */
   emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0],
                        &tmp_src_xxxx, inst->Instruction.Saturate);

   free_temp_indexes(emit);

   return TRUE;
}
4441 * Emit code for TGSI_OPCODE_SLE (Set Less than or Equal) instruction.
4444 emit_sle(struct svga_shader_emitter_v10
*emit
,
4445 const struct tgsi_full_instruction
*inst
)
4447 /* dst = SLE(s0, s1):
4448 * dst = s0 <= s1 ? 1.0 : 0.0 (per component)
4450 * GE tmp, s1, s0; tmp = s1 >= s0 : 0xffffffff : 0 (per comp)
4451 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4453 unsigned tmp
= get_temp_index(emit
);
4454 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4455 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4456 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4457 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4459 /* GE tmp, s1, s0 */
4460 emit_instruction_op2(emit
, VGPU10_OPCODE_GE
, &tmp_dst
, &inst
->Src
[1],
4461 &inst
->Src
[0], FALSE
);
4463 /* MOVC dst, tmp, one, zero */
4464 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4465 &one
, &zero
, FALSE
);
4467 free_temp_indexes(emit
);
4474 * Emit code for TGSI_OPCODE_SLT (Set Less than) instruction.
4477 emit_slt(struct svga_shader_emitter_v10
*emit
,
4478 const struct tgsi_full_instruction
*inst
)
4480 /* dst = SLT(s0, s1):
4481 * dst = s0 < s1 ? 1.0 : 0.0 (per component)
4483 * LT tmp, s0, s1; tmp = s0 < s1 ? 0xffffffff : 0 (per comp)
4484 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4486 unsigned tmp
= get_temp_index(emit
);
4487 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4488 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4489 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4490 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4492 /* LT tmp, s0, s1 */
4493 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
, &inst
->Src
[0],
4494 &inst
->Src
[1], FALSE
);
4496 /* MOVC dst, tmp, one, zero */
4497 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4498 &one
, &zero
, FALSE
);
4500 free_temp_indexes(emit
);
/**
 * Emit code for TGSI_OPCODE_SNE (Set Not Equal) instruction.
 */
static boolean
emit_sne(struct svga_shader_emitter_v10 *emit,
         const struct tgsi_full_instruction *inst)
{
   /* dst = SNE(s0, s1):
    *   dst = s0 != s1 ? 1.0 : 0.0  (per component)
    * Translates into:
    *   NE tmp, s0, s1;           tmp = s0 != s1 ? 0xffffffff : 0 (per comp)
    *   MOVC dst, tmp, 1.0, 0.0;  dst = tmp ? 1.0 : 0.0 (per component)
    */
   unsigned tmp = get_temp_index(emit);
   struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
   struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
   struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);
   struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);

   /* NE tmp, s0, s1 */
   emit_instruction_op2(emit, VGPU10_OPCODE_NE, &tmp_dst, &inst->Src[0],
                        &inst->Src[1], FALSE);

   /* MOVC dst, tmp, one, zero */
   emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp_src,
                        &one, &zero, FALSE);

   free_temp_indexes(emit);

   return TRUE;
}
/**
 * Emit code for TGSI_OPCODE_SSG (Set Sign) instruction.
 */
static boolean
emit_ssg(struct svga_shader_emitter_v10 *emit,
         const struct tgsi_full_instruction *inst)
{
   /* dst.x = (src.x > 0.0) ? 1.0 : (src.x < 0.0) ? -1.0 : 0.0
    * dst.y = (src.y > 0.0) ? 1.0 : (src.y < 0.0) ? -1.0 : 0.0
    * dst.z = (src.z > 0.0) ? 1.0 : (src.z < 0.0) ? -1.0 : 0.0
    * dst.w = (src.w > 0.0) ? 1.0 : (src.w < 0.0) ? -1.0 : 0.0
    * Translates into:
    *   LT tmp1, src, zero;           tmp1 = src < zero ? 0xffffffff : 0 (per comp)
    *   MOVC tmp2, tmp1, -1.0, 0.0;   tmp2 = tmp1 ? -1.0 : 0.0 (per component)
    *   LT tmp1, zero, src;           tmp1 = zero < src ? 0xffffffff : 0 (per comp)
    *   MOVC dst, tmp1, 1.0, tmp2;    dst = tmp1 ? 1.0 : tmp2 (per component)
    */
   struct tgsi_full_src_register zero =
      make_immediate_reg_float(emit, 0.0f);
   struct tgsi_full_src_register one =
      make_immediate_reg_float(emit, 1.0f);
   struct tgsi_full_src_register neg_one =
      make_immediate_reg_float(emit, -1.0f);

   unsigned tmp1 = get_temp_index(emit);
   struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
   struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);

   unsigned tmp2 = get_temp_index(emit);
   struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
   struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);

   /* LT tmp1, src, zero */
   emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp1_dst, &inst->Src[0],
                        &zero, FALSE);
   /* MOVC tmp2, tmp1, -1.0, 0.0 */
   emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &tmp2_dst, &tmp1_src,
                        &neg_one, &zero, FALSE);
   /* LT tmp1, zero, src */
   emit_instruction_op2(emit, VGPU10_OPCODE_LT, &tmp1_dst, &zero,
                        &inst->Src[0], FALSE);
   /* MOVC dst, tmp1, 1.0, tmp2 */
   emit_instruction_op3(emit, VGPU10_OPCODE_MOVC, &inst->Dst[0], &tmp1_src,
                        &one, &tmp2_src, FALSE);

   free_temp_indexes(emit);

   return TRUE;
}
/**
 * Emit code for TGSI_OPCODE_ISSG (Integer Set Sign) instruction.
 */
static boolean
emit_issg(struct svga_shader_emitter_v10 *emit,
          const struct tgsi_full_instruction *inst)
{
   /* dst.x = (src.x > 0) ? 1 : (src.x < 0) ? -1 : 0
    * dst.y = (src.y > 0) ? 1 : (src.y < 0) ? -1 : 0
    * dst.z = (src.z > 0) ? 1 : (src.z < 0) ? -1 : 0
    * dst.w = (src.w > 0) ? 1 : (src.w < 0) ? -1 : 0
    * Translates into:
    *   ILT tmp1, src, 0           tmp1 = src < 0 ? -1 : 0 (per component)
    *   ILT tmp2, 0, src           tmp2 = 0 < src ? -1 : 0 (per component)
    *   IADD dst, tmp1, neg(tmp2)  dst = tmp1 - tmp2 (per component)
    */
   /* NOTE(review): a float 0.0f immediate is used with the integer ILT
    * compare; the bit pattern of 0.0f and integer 0 are identical, so
    * this works — presumably intentional to share the immediate.
    */
   struct tgsi_full_src_register zero = make_immediate_reg_float(emit, 0.0f);

   unsigned tmp1 = get_temp_index(emit);
   struct tgsi_full_src_register tmp1_src = make_src_temp_reg(tmp1);
   struct tgsi_full_dst_register tmp1_dst = make_dst_temp_reg(tmp1);

   unsigned tmp2 = get_temp_index(emit);
   struct tgsi_full_src_register tmp2_src = make_src_temp_reg(tmp2);
   struct tgsi_full_dst_register tmp2_dst = make_dst_temp_reg(tmp2);

   /* tmp1 - tmp2 is expressed as tmp1 + (-tmp2) */
   struct tgsi_full_src_register neg_tmp2 = negate_src(&tmp2_src);

   emit_instruction_op2(emit, VGPU10_OPCODE_ILT, &tmp1_dst,
                        &inst->Src[0], &zero, FALSE);
   emit_instruction_op2(emit, VGPU10_OPCODE_ILT, &tmp2_dst,
                        &zero, &inst->Src[0], FALSE);
   emit_instruction_op2(emit, VGPU10_OPCODE_IADD, &inst->Dst[0],
                        &tmp1_src, &neg_tmp2, FALSE);

   free_temp_indexes(emit);

   return TRUE;
}
/**
 * Emit a comparison instruction.  The dest register will get
 * 0 or ~0 values depending on the outcome of comparing src0 to src1.
 *
 * \param func  the SVGA3D comparison function (SVGA3D_CMP_x)
 * \param dst   destination register (receives 0 / 0xffffffff per component)
 * \param src0  left-hand comparison operand
 * \param src1  right-hand comparison operand
 */
static void
emit_comparison(struct svga_shader_emitter_v10 *emit,
                SVGA3dCmpFunc func,
                const struct tgsi_full_dst_register *dst,
                const struct tgsi_full_src_register *src0,
                const struct tgsi_full_src_register *src1)
{
   struct tgsi_full_src_register immediate;
   VGPU10OpcodeToken0 opcode0;
   /* VGPU10 has no LE/GT opcodes; those funcs use GE/LT with operands
    * swapped at emit time.
    */
   boolean swapSrc = FALSE;

   /* Sanity checks for svga vs. gallium enums */
   STATIC_ASSERT(SVGA3D_CMP_LESS == (PIPE_FUNC_LESS + 1));
   STATIC_ASSERT(SVGA3D_CMP_GREATEREQUAL == (PIPE_FUNC_GEQUAL + 1));

   opcode0.value = 0;

   switch (func) {
   case SVGA3D_CMP_NEVER:
      /* result is constant false: MOV dst, {0} and return early */
      immediate = make_immediate_reg_int(emit, 0);
      begin_emit_instruction(emit);
      emit_dword(emit, VGPU10_OPCODE_MOV);
      emit_dst_register(emit, dst);
      emit_src_register(emit, &immediate);
      end_emit_instruction(emit);
      return;
   case SVGA3D_CMP_ALWAYS:
      /* result is constant true: MOV dst, {-1} (all bits set) */
      immediate = make_immediate_reg_int(emit, -1);
      begin_emit_instruction(emit);
      emit_dword(emit, VGPU10_OPCODE_MOV);
      emit_dst_register(emit, dst);
      emit_src_register(emit, &immediate);
      end_emit_instruction(emit);
      return;
   case SVGA3D_CMP_LESS:
      opcode0.opcodeType = VGPU10_OPCODE_LT;
      break;
   case SVGA3D_CMP_EQUAL:
      opcode0.opcodeType = VGPU10_OPCODE_EQ;
      break;
   case SVGA3D_CMP_LESSEQUAL:
      /* s0 <= s1  iff  s1 >= s0 */
      opcode0.opcodeType = VGPU10_OPCODE_GE;
      swapSrc = TRUE;
      break;
   case SVGA3D_CMP_GREATER:
      /* s0 > s1  iff  s1 < s0 */
      opcode0.opcodeType = VGPU10_OPCODE_LT;
      swapSrc = TRUE;
      break;
   case SVGA3D_CMP_NOTEQUAL:
      opcode0.opcodeType = VGPU10_OPCODE_NE;
      break;
   case SVGA3D_CMP_GREATEREQUAL:
      opcode0.opcodeType = VGPU10_OPCODE_GE;
      break;
   default:
      assert(!"Unexpected comparison mode");
      opcode0.opcodeType = VGPU10_OPCODE_EQ;
   }

   begin_emit_instruction(emit);
   emit_dword(emit, opcode0.value);
   emit_dst_register(emit, dst);
   if (swapSrc) {
      emit_src_register(emit, src1);
      emit_src_register(emit, src0);
   }
   else {
      emit_src_register(emit, src0);
      emit_src_register(emit, src1);
   }
   end_emit_instruction(emit);
}
/**
 * Get texel/address offsets for a texture instruction.
 *
 * \param inst     the TGSI texture instruction
 * \param offsets  receives the three signed texel offsets (0 if none)
 */
static void
get_texel_offsets(const struct svga_shader_emitter_v10 *emit,
                  const struct tgsi_full_instruction *inst, int offsets[3])
{
   if (inst->Texture.NumOffsets == 1) {
      /* According to OpenGL Shader Language spec the offsets are only
       * fetched from a previously-declared immediate/literal.
       */
      const struct tgsi_texture_offset *off = inst->TexOffsets;
      const unsigned index = off[0].Index;
      const unsigned swizzleX = off[0].SwizzleX;
      const unsigned swizzleY = off[0].SwizzleY;
      const unsigned swizzleZ = off[0].SwizzleZ;
      const union tgsi_immediate_data *imm = emit->immediates[index];

      assert(inst->TexOffsets[0].File == TGSI_FILE_IMMEDIATE);

      /* pick the offset components out of the immediate via the swizzles */
      offsets[0] = imm[swizzleX].Int;
      offsets[1] = imm[swizzleY].Int;
      offsets[2] = imm[swizzleZ].Int;
   }
   else {
      offsets[0] = offsets[1] = offsets[2] = 0;
   }
}
/**
 * Set up the coordinate register for texture sampling.
 * When we're sampling from a RECT texture we have to scale the
 * unnormalized coordinate to a normalized coordinate.
 * We do that by multiplying the coordinate by an "extra" constant.
 * An alternative would be to use the RESINFO instruction to query the
 * texture size at run time.
 *
 * \param unit   the texture unit
 * \param coord  the incoming (possibly unnormalized) texcoord
 * \return the register holding the coordinate to sample with
 */
static struct tgsi_full_src_register
setup_texcoord(struct svga_shader_emitter_v10 *emit,
               unsigned unit,
               const struct tgsi_full_src_register *coord)
{
   if (emit->key.tex[unit].unnormalized) {
      /* 1/width, 1/height scale factors live in a dedicated constant */
      unsigned scale_index = emit->texcoord_scale_index[unit];
      unsigned tmp = get_temp_index(emit);
      struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
      struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
      struct tgsi_full_src_register scale_src = make_src_const_reg(scale_index);

      if (emit->key.tex[unit].texel_bias) {
         /* to fix texture coordinate rounding issue, 0.0001 offset is
          * been added. This fixes piglit test fbo-blit-scaled-linear. */
         struct tgsi_full_src_register offset =
            make_immediate_reg_float(emit, 0.0001f);

         /* ADD tmp, coord, offset */
         emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp_dst,
                              coord, &offset, FALSE);
         /* MUL tmp, tmp, scale */
         emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst,
                              &tmp_src, &scale_src, FALSE);
      }
      else {
         /* MUL tmp, coord, const[] */
         emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_dst,
                              coord, &scale_src, FALSE);
      }

      return tmp_src;
   }
   else {
      /* use texcoord as-is */
      return *coord;
   }
}
/**
 * For SAMPLE_C instructions, emit the extra src register which indicates
 * the reference/comparision value.
 *
 * \param target  the TGSI texture target (must be a shadow target)
 * \param coord   the texture coordinate; one of its components holds the
 *                reference value to compare the fetched depth against
 */
static void
emit_tex_compare_refcoord(struct svga_shader_emitter_v10 *emit,
                          enum tgsi_texture_type target,
                          const struct tgsi_full_src_register *coord)
{
   struct tgsi_full_src_register coord_src_ref;
   int component;

   assert(tgsi_is_shadow_target(target));

   /* which coord component carries the shadow reference value */
   component = tgsi_util_get_shadow_ref_src_index(target) % 4;
   assert(component >= 0);

   coord_src_ref = scalar_src(coord, component);

   emit_src_register(emit, &coord_src_ref);
}
/**
 * Info for implementing texture swizzles.
 * The begin_tex_swizzle(), get_tex_swizzle_dst() and end_tex_swizzle()
 * functions use this to encapsulate the extra steps needed to perform
 * a texture swizzle, or shadow/depth comparisons.
 * The shadow/depth comparison is only done here if for the cases where
 * there's no VGPU10 opcode (like texture bias lookup w/ shadow compare).
 */
struct tex_swizzle_info
{
   boolean swizzled;             /**< any non-identity swizzle term? */
   boolean shadow_compare;       /**< do shadow/depth comparison here? */
   unsigned unit;                /**< texture unit */
   enum tgsi_texture_type texture_target;  /**< TGSI_TEXTURE_x */
   struct tgsi_full_src_register tmp_src;  /**< temp holding raw sample */
   struct tgsi_full_dst_register tmp_dst;  /**< temp the sample writes to */
   const struct tgsi_full_dst_register *inst_dst;  /**< real instruction dst */
   const struct tgsi_full_src_register *coord_src; /**< texcoord source */
};
/**
 * Do setup for handling texture swizzles or shadow compares.
 * \param unit  the texture unit
 * \param inst  the TGSI texture instruction
 * \param shadow_compare  do shadow/depth comparison?
 * \param swz  returns the swizzle info
 */
static void
begin_tex_swizzle(struct svga_shader_emitter_v10 *emit,
                  unsigned unit,
                  const struct tgsi_full_instruction *inst,
                  boolean shadow_compare,
                  struct tex_swizzle_info *swz)
{
   /* a swizzle step is only needed if any channel maps differently */
   swz->swizzled = (emit->key.tex[unit].swizzle_r != TGSI_SWIZZLE_X ||
                    emit->key.tex[unit].swizzle_g != TGSI_SWIZZLE_Y ||
                    emit->key.tex[unit].swizzle_b != TGSI_SWIZZLE_Z ||
                    emit->key.tex[unit].swizzle_a != TGSI_SWIZZLE_W);

   swz->unit = unit;
   swz->shadow_compare = shadow_compare;
   swz->texture_target = inst->Texture.Texture;

   if (swz->swizzled || shadow_compare) {
      /* Allocate temp register for the result of the SAMPLE instruction
       * and the source of the MOV/compare/swizzle instructions.
       */
      unsigned tmp = get_temp_index(emit);
      swz->tmp_src = make_src_temp_reg(tmp);
      swz->tmp_dst = make_dst_temp_reg(tmp);
   }

   swz->inst_dst = &inst->Dst[0];
   swz->coord_src = &inst->Src[0];

   /* record which units do shadow compares (one bit per unit) */
   emit->fs.shadow_compare_units |= shadow_compare << unit;
}
4868 * Returns the register to put the SAMPLE instruction results into.
4869 * This will either be the original instruction dst reg (if no swizzle
4870 * and no shadow comparison) or a temporary reg if there is a swizzle.
4872 static const struct tgsi_full_dst_register
*
4873 get_tex_swizzle_dst(const struct tex_swizzle_info
*swz
)
4875 return (swz
->swizzled
|| swz
->shadow_compare
)
4876 ? &swz
->tmp_dst
: swz
->inst_dst
;
/**
 * This emits the MOV instruction that actually implements a texture swizzle
 * and/or shadow comparison.
 */
static void
end_tex_swizzle(struct svga_shader_emitter_v10 *emit,
                const struct tex_swizzle_info *swz)
{
   if (swz->shadow_compare) {
      /* Emit extra instructions to compare the fetched texel value against
       * a texture coordinate component.  The result of the comparison
       * is 0.0 or 1.0.
       */
      struct tgsi_full_src_register coord_src;
      struct tgsi_full_src_register texel_src =
         scalar_src(&swz->tmp_src, TGSI_SWIZZLE_X);
      struct tgsi_full_src_register one =
         make_immediate_reg_float(emit, 1.0f);
      /* convert gallium comparison func to SVGA comparison func */
      SVGA3dCmpFunc compare_func = emit->key.tex[swz->unit].compare_func + 1;
      int component;

      assert(emit->unit == PIPE_SHADER_FRAGMENT);

      /* which coord component holds the shadow reference value */
      component =
         tgsi_util_get_shadow_ref_src_index(swz->texture_target) % 4;
      assert(component >= 0);
      coord_src = scalar_src(swz->coord_src, component);

      /* COMPARE tmp, coord, texel */
      emit_comparison(emit, compare_func,
                      &swz->tmp_dst, &coord_src, &texel_src);

      /* AND dest, tmp, {1.0}: maps the 0/0xffffffff compare result
       * to a float 0.0/1.0 (the AND keeps exactly the bits of 1.0f).
       */
      begin_emit_instruction(emit);
      emit_opcode(emit, VGPU10_OPCODE_AND, FALSE);
      if (swz->swizzled) {
         /* a swizzle step still follows; keep the value in the temp */
         emit_dst_register(emit, &swz->tmp_dst);
      }
      else {
         emit_dst_register(emit, swz->inst_dst);
      }
      emit_src_register(emit, &swz->tmp_src);
      emit_src_register(emit, &one);
      end_emit_instruction(emit);
   }

   if (swz->swizzled) {
      unsigned swz_r = emit->key.tex[swz->unit].swizzle_r;
      unsigned swz_g = emit->key.tex[swz->unit].swizzle_g;
      unsigned swz_b = emit->key.tex[swz->unit].swizzle_b;
      unsigned swz_a = emit->key.tex[swz->unit].swizzle_a;
      unsigned writemask_0 = 0, writemask_1 = 0;
      boolean int_tex = is_integer_type(emit->sampler_return_type[swz->unit]);

      /* Swizzle w/out zero/one terms (those channels are patched below) */
      struct tgsi_full_src_register src_swizzled =
         swizzle_src(&swz->tmp_src,
                     swz_r < PIPE_SWIZZLE_0 ? swz_r : PIPE_SWIZZLE_X,
                     swz_g < PIPE_SWIZZLE_0 ? swz_g : PIPE_SWIZZLE_Y,
                     swz_b < PIPE_SWIZZLE_0 ? swz_b : PIPE_SWIZZLE_Z,
                     swz_a < PIPE_SWIZZLE_0 ? swz_a : PIPE_SWIZZLE_W);

      /* MOV dst, color(tmp).<swizzle> */
      emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
                           swz->inst_dst, &src_swizzled, FALSE);

      /* handle swizzle zero terms */
      writemask_0 = (((swz_r == PIPE_SWIZZLE_0) << 0) |
                     ((swz_g == PIPE_SWIZZLE_0) << 1) |
                     ((swz_b == PIPE_SWIZZLE_0) << 2) |
                     ((swz_a == PIPE_SWIZZLE_0) << 3));
      writemask_0 &= swz->inst_dst->Register.WriteMask;

      if (writemask_0) {
         /* integer textures need an integer 0, float textures 0.0f */
         struct tgsi_full_src_register zero = int_tex ?
            make_immediate_reg_int(emit, 0) :
            make_immediate_reg_float(emit, 0.0f);
         struct tgsi_full_dst_register dst =
            writemask_dst(swz->inst_dst, writemask_0);

         /* MOV dst.writemask_0, {0,0,0,0} */
         emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
                              &dst, &zero, FALSE);
      }

      /* handle swizzle one terms */
      writemask_1 = (((swz_r == PIPE_SWIZZLE_1) << 0) |
                     ((swz_g == PIPE_SWIZZLE_1) << 1) |
                     ((swz_b == PIPE_SWIZZLE_1) << 2) |
                     ((swz_a == PIPE_SWIZZLE_1) << 3));
      writemask_1 &= swz->inst_dst->Register.WriteMask;

      if (writemask_1) {
         struct tgsi_full_src_register one = int_tex ?
            make_immediate_reg_int(emit, 1) :
            make_immediate_reg_float(emit, 1.0f);
         struct tgsi_full_dst_register dst =
            writemask_dst(swz->inst_dst, writemask_1);

         /* MOV dst.writemask_1, {1,1,1,1} */
         emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &dst, &one, FALSE);
      }
   }
}
/**
 * Emit code for TGSI_OPCODE_SAMPLE instruction.
 */
static boolean
emit_sample(struct svga_shader_emitter_v10 *emit,
            const struct tgsi_full_instruction *inst)
{
   /* SAMPLE takes separate resource (Src[1]) and sampler (Src[2]) operands */
   const unsigned resource_unit = inst->Src[1].Register.Index;
   const unsigned sampler_unit = inst->Src[2].Register.Index;
   struct tgsi_full_src_register coord;
   int offsets[3];
   struct tex_swizzle_info swz_info;

   begin_tex_swizzle(emit, sampler_unit, inst, FALSE, &swz_info);

   get_texel_offsets(emit, inst, offsets);

   coord = setup_texcoord(emit, resource_unit, &inst->Src[0]);

   /* SAMPLE dst, coord(s0), resource, sampler */
   begin_emit_instruction(emit);

   /* NOTE: for non-fragment shaders, we should use VGPU10_OPCODE_SAMPLE_L
    * with LOD=0.  But our virtual GPU accepts this as-is.
    */
   emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE,
                      inst->Instruction.Saturate, offsets);
   emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
   emit_src_register(emit, &coord);
   emit_resource_register(emit, resource_unit);
   emit_sampler_register(emit, sampler_unit);
   end_emit_instruction(emit);

   end_tex_swizzle(emit, &swz_info);

   free_temp_indexes(emit);

   return TRUE;
}
/**
 * Check if a texture instruction is valid.
 * An example of an invalid texture instruction is doing shadow comparison
 * with an integer-valued texture.
 * If we detect an invalid texture instruction, we replace it with:
 *   MOV dst, {1,1,1,1};
 * \return TRUE if valid, FALSE if invalid.
 */
static boolean
is_valid_tex_instruction(struct svga_shader_emitter_v10 *emit,
                         const struct tgsi_full_instruction *inst)
{
   const unsigned unit = inst->Src[1].Register.Index;
   const enum tgsi_texture_type target = inst->Texture.Texture;
   boolean valid = TRUE;

   if (tgsi_is_shadow_target(target) &&
       is_integer_type(emit->sampler_return_type[unit])) {
      debug_printf("Invalid SAMPLE_C with an integer texture!\n");
      valid = FALSE;
   }
   /* XXX might check for other conditions in the future here */

   if (!valid) {
      /* emit a MOV dst, {1,1,1,1} instruction. */
      struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
      begin_emit_instruction(emit);
      emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
      emit_dst_register(emit, &inst->Dst[0]);
      emit_src_register(emit, &one);
      end_emit_instruction(emit);
   }

   return valid;
}
/**
 * Emit code for TGSI_OPCODE_TEX (simple texture lookup)
 */
static boolean
emit_tex(struct svga_shader_emitter_v10 *emit,
         const struct tgsi_full_instruction *inst)
{
   const uint unit = inst->Src[1].Register.Index;
   const enum tgsi_texture_type target = inst->Texture.Texture;
   VGPU10_OPCODE_TYPE opcode;
   struct tgsi_full_src_register coord;
   int offsets[3];
   struct tex_swizzle_info swz_info;

   /* check that the sampler returns a float */
   if (!is_valid_tex_instruction(emit, inst))
      return TRUE;   /* replacement MOV was already emitted */

   begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);

   get_texel_offsets(emit, inst, offsets);

   coord = setup_texcoord(emit, unit, &inst->Src[0]);

   /* SAMPLE dst, coord(s0), resource, sampler */
   begin_emit_instruction(emit);

   /* shadow targets need the comparing SAMPLE_C variant */
   if (tgsi_is_shadow_target(target))
      opcode = VGPU10_OPCODE_SAMPLE_C;
   else
      opcode = VGPU10_OPCODE_SAMPLE;

   emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
   emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
   emit_src_register(emit, &coord);
   emit_resource_register(emit, unit);
   emit_sampler_register(emit, unit);
   if (opcode == VGPU10_OPCODE_SAMPLE_C) {
      /* extra operand: the shadow reference value */
      emit_tex_compare_refcoord(emit, target, &coord);
   }
   end_emit_instruction(emit);

   end_tex_swizzle(emit, &swz_info);

   free_temp_indexes(emit);

   return TRUE;
}
/**
 * Emit code for TGSI_OPCODE_TXP (projective texture)
 */
static boolean
emit_txp(struct svga_shader_emitter_v10 *emit,
         const struct tgsi_full_instruction *inst)
{
   const uint unit = inst->Src[1].Register.Index;
   const enum tgsi_texture_type target = inst->Texture.Texture;
   VGPU10_OPCODE_TYPE opcode;
   int offsets[3];
   unsigned tmp = get_temp_index(emit);
   struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
   struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
   struct tgsi_full_src_register src0_wwww =
      scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
   struct tgsi_full_src_register coord;
   struct tex_swizzle_info swz_info;

   /* check that the sampler returns a float */
   if (!is_valid_tex_instruction(emit, inst))
      return TRUE;   /* replacement MOV was already emitted */

   begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);

   get_texel_offsets(emit, inst, offsets);

   coord = setup_texcoord(emit, unit, &inst->Src[0]);

   /* DIV tmp, coord, coord.wwww — do the projective divide */
   emit_instruction_op2(emit, VGPU10_OPCODE_DIV, &tmp_dst,
                        &coord, &src0_wwww, FALSE);

   /* SAMPLE dst, coord(tmp), resource, sampler */
   begin_emit_instruction(emit);

   if (tgsi_is_shadow_target(target))
      /* NOTE: for non-fragment shaders, we should use
       * VGPU10_OPCODE_SAMPLE_C_LZ, but our virtual GPU accepts this as-is.
       */
      opcode = VGPU10_OPCODE_SAMPLE_C;
   else
      opcode = VGPU10_OPCODE_SAMPLE;

   emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
   emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
   emit_src_register(emit, &tmp_src);  /* projected coord */
   emit_resource_register(emit, unit);
   emit_sampler_register(emit, unit);
   if (opcode == VGPU10_OPCODE_SAMPLE_C) {
      /* extra operand: the shadow reference value */
      emit_tex_compare_refcoord(emit, target, &tmp_src);
   }
   end_emit_instruction(emit);

   end_tex_swizzle(emit, &swz_info);

   free_temp_indexes(emit);

   return TRUE;
}
/**
 * Emit code for TGSI_OPCODE_TXD (explicit derivatives)
 */
static boolean
emit_txd(struct svga_shader_emitter_v10 *emit,
         const struct tgsi_full_instruction *inst)
{
   /* for TXD the sampler operand is Src[3] (Src[1]/Src[2] are derivatives) */
   const uint unit = inst->Src[3].Register.Index;
   const enum tgsi_texture_type target = inst->Texture.Texture;
   int offsets[3];
   struct tgsi_full_src_register coord;
   struct tex_swizzle_info swz_info;

   /* SAMPLE_D has no shadow-compare form; do the compare in the epilog */
   begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
                     &swz_info);

   get_texel_offsets(emit, inst, offsets);

   coord = setup_texcoord(emit, unit, &inst->Src[0]);

   /* SAMPLE_D dst, coord(s0), resource, sampler, Xderiv(s1), Yderiv(s2) */
   begin_emit_instruction(emit);
   emit_sample_opcode(emit, VGPU10_OPCODE_SAMPLE_D,
                      inst->Instruction.Saturate, offsets);
   emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
   emit_src_register(emit, &coord);
   emit_resource_register(emit, unit);
   emit_sampler_register(emit, unit);
   emit_src_register(emit, &inst->Src[1]);  /* Xderiv */
   emit_src_register(emit, &inst->Src[2]);  /* Yderiv */
   end_emit_instruction(emit);

   end_tex_swizzle(emit, &swz_info);

   free_temp_indexes(emit);

   return TRUE;
}
/**
 * Emit code for TGSI_OPCODE_TXF (texel fetch)
 */
static boolean
emit_txf(struct svga_shader_emitter_v10 *emit,
         const struct tgsi_full_instruction *inst)
{
   const uint unit = inst->Src[1].Register.Index;
   const boolean msaa = tgsi_is_msaa_target(inst->Texture.Texture);
   int offsets[3];
   struct tex_swizzle_info swz_info;

   begin_tex_swizzle(emit, unit, inst, FALSE, &swz_info);

   get_texel_offsets(emit, inst, offsets);

   if (msaa) {
      /* Fetch one sample from an MSAA texture */
      struct tgsi_full_src_register sampleIndex =
         scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
      /* LD_MS dst, coord(s0), resource, sampleIndex */
      begin_emit_instruction(emit);
      emit_sample_opcode(emit, VGPU10_OPCODE_LD_MS,
                         inst->Instruction.Saturate, offsets);
      emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
      emit_src_register(emit, &inst->Src[0]);
      emit_resource_register(emit, unit);
      emit_src_register(emit, &sampleIndex);
      end_emit_instruction(emit);
   }
   else {
      /* Fetch one texel specified by integer coordinate */
      /* LD dst, coord(s0), resource */
      begin_emit_instruction(emit);
      emit_sample_opcode(emit, VGPU10_OPCODE_LD,
                         inst->Instruction.Saturate, offsets);
      emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
      emit_src_register(emit, &inst->Src[0]);
      emit_resource_register(emit, unit);
      end_emit_instruction(emit);
   }

   end_tex_swizzle(emit, &swz_info);

   free_temp_indexes(emit);

   return TRUE;
}
/**
 * Emit code for TGSI_OPCODE_TXL (explicit LOD) or TGSI_OPCODE_TXB (LOD bias)
 * or TGSI_OPCODE_TXB2 (for cube shadow maps).
 */
static boolean
emit_txl_txb(struct svga_shader_emitter_v10 *emit,
             const struct tgsi_full_instruction *inst)
{
   const enum tgsi_texture_type target = inst->Texture.Texture;
   VGPU10_OPCODE_TYPE opcode;
   unsigned unit;
   int offsets[3];
   struct tgsi_full_src_register coord, lod_bias;
   struct tex_swizzle_info swz_info;

   assert(inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
          inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
          inst->Instruction.Opcode == TGSI_OPCODE_TXB2);

   if (inst->Instruction.Opcode == TGSI_OPCODE_TXB2) {
      /* TXB2: lod/bias comes from Src[1].x, sampler from Src[2] */
      lod_bias = scalar_src(&inst->Src[1], TGSI_SWIZZLE_X);
      unit = inst->Src[2].Register.Index;
   }
   else {
      /* TXL/TXB: lod/bias is packed in coord.w, sampler is Src[1] */
      lod_bias = scalar_src(&inst->Src[0], TGSI_SWIZZLE_W);
      unit = inst->Src[1].Register.Index;
   }

   /* SAMPLE_L/B have no shadow-compare form; compare in the epilog */
   begin_tex_swizzle(emit, unit, inst, tgsi_is_shadow_target(target),
                     &swz_info);

   get_texel_offsets(emit, inst, offsets);

   coord = setup_texcoord(emit, unit, &inst->Src[0]);

   /* SAMPLE_L/B dst, coord(s0), resource, sampler, lod(s3) */
   begin_emit_instruction(emit);
   if (inst->Instruction.Opcode == TGSI_OPCODE_TXL) {
      opcode = VGPU10_OPCODE_SAMPLE_L;
   }
   else {
      opcode = VGPU10_OPCODE_SAMPLE_B;
   }
   emit_sample_opcode(emit, opcode, inst->Instruction.Saturate, offsets);
   emit_dst_register(emit, get_tex_swizzle_dst(&swz_info));
   emit_src_register(emit, &coord);
   emit_resource_register(emit, unit);
   emit_sampler_register(emit, unit);
   emit_src_register(emit, &lod_bias);
   end_emit_instruction(emit);

   end_tex_swizzle(emit, &swz_info);

   free_temp_indexes(emit);

   return TRUE;
}
/**
 * Emit code for TGSI_OPCODE_TXQ (texture query) instruction.
 */
static boolean
emit_txq(struct svga_shader_emitter_v10 *emit,
         const struct tgsi_full_instruction *inst)
{
   const uint unit = inst->Src[1].Register.Index;

   if (emit->sampler_target[unit] == TGSI_TEXTURE_BUFFER) {
      /* RESINFO does not support querying texture buffers, so we instead
       * store texture buffer sizes in shader constants, then copy them to
       * implement TXQ instead of emitting RESINFO.
       * MOV dst, const[texture_buffer_size_index[unit]]
       */
      struct tgsi_full_src_register size_src =
         make_src_const_reg(emit->texture_buffer_size_index[unit]);
      emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &inst->Dst[0], &size_src,
                           FALSE);
   }
   else {
      /* RESINFO dst, srcMipLevel, resource */
      begin_emit_instruction(emit);
      emit_opcode_resinfo(emit, VGPU10_RESINFO_RETURN_UINT);
      emit_dst_register(emit, &inst->Dst[0]);
      emit_src_register(emit, &inst->Src[0]);
      emit_resource_register(emit, unit);
      end_emit_instruction(emit);
   }

   free_temp_indexes(emit);

   return TRUE;
}
/**
 * Emit a simple instruction (like ADD, MUL, MIN, etc).
 *
 * The TGSI opcode is translated 1:1 to a VGPU10 opcode and all of the
 * instruction's dst/src operands are emitted in order, using the operand
 * counts from the TGSI opcode info table.
 *
 * \return TRUE (always succeeds)
 */
static boolean
emit_simple(struct svga_shader_emitter_v10 *emit,
            const struct tgsi_full_instruction *inst)
{
   const enum tgsi_opcode opcode = inst->Instruction.Opcode;
   const struct tgsi_opcode_info *op = tgsi_get_opcode_info(opcode);
   unsigned i;

   begin_emit_instruction(emit);
   /* Saturate modifier carries straight through to the VGPU10 opcode token */
   emit_opcode(emit, translate_opcode(opcode), inst->Instruction.Saturate);
   for (i = 0; i < op->num_dst; i++) {
      emit_dst_register(emit, &inst->Dst[i]);
   }
   for (i = 0; i < op->num_src; i++) {
      emit_src_register(emit, &inst->Src[i]);
   }
   end_emit_instruction(emit);

   return TRUE;
}
/**
 * We only special case the MOV instruction to try to detect constant
 * color writes in the fragment shader.
 *
 * When the FS moves a (non-indirect) constant directly into color
 * output 0, the constant_color_output flag is set so later code can
 * take advantage of the known-constant color.  The actual MOV is then
 * emitted via emit_simple().
 */
static boolean
emit_mov(struct svga_shader_emitter_v10 *emit,
         const struct tgsi_full_instruction *inst)
{
   const struct tgsi_full_src_register *src = &inst->Src[0];
   const struct tgsi_full_dst_register *dst = &inst->Dst[0];

   /* Detect "MOV OUT[0], CONST[n]" in a fragment shader */
   if (emit->unit == PIPE_SHADER_FRAGMENT &&
       dst->Register.File == TGSI_FILE_OUTPUT &&
       dst->Register.Index == 0 &&
       src->Register.File == TGSI_FILE_CONSTANT &&
       !src->Register.Indirect) {
      emit->constant_color_output = TRUE;
   }

   return emit_simple(emit, inst);
}
/**
 * Emit a simple VGPU10 instruction which writes to multiple dest registers,
 * where TGSI only uses one dest register.
 *
 * The single TGSI dst is routed to position \p dst_index among the
 * \p dst_count VGPU10 destinations; the remaining destinations are
 * emitted as null registers.  Used for UDIV/IDIV/UMUL/etc. where VGPU10
 * produces both quotient and remainder (or lo/hi product).
 *
 * \param dst_count  number of VGPU10 destination operands to emit
 * \param dst_index  which of them receives the TGSI Dst[0]
 * \return TRUE (always succeeds)
 */
static boolean
emit_simple_1dst(struct svga_shader_emitter_v10 *emit,
                 const struct tgsi_full_instruction *inst,
                 unsigned dst_count,
                 unsigned dst_index)
{
   const enum tgsi_opcode opcode = inst->Instruction.Opcode;
   const struct tgsi_opcode_info *op = tgsi_get_opcode_info(opcode);
   unsigned i;

   begin_emit_instruction(emit);
   emit_opcode(emit, translate_opcode(opcode), inst->Instruction.Saturate);

   for (i = 0; i < dst_count; i++) {
      if (i == dst_index) {
         emit_dst_register(emit, &inst->Dst[0]);
      } else {
         /* unused VGPU10 result slot */
         emit_null_dst_register(emit);
      }
   }

   for (i = 0; i < op->num_src; i++) {
      emit_src_register(emit, &inst->Src[i]);
   }
   end_emit_instruction(emit);

   return TRUE;
}
/**
 * Translate a single TGSI instruction to VGPU10.
 *
 * Dispatches on the TGSI opcode: instructions with a direct VGPU10
 * equivalent go through emit_simple(); the rest have dedicated
 * emit_* helpers.  On TGSI_OPCODE_END the post-shader helper code
 * (prescale, alpha test, etc.) is emitted before the final RET/END.
 *
 * \param inst_number  position of the instruction in the shader
 * \param inst  the TGSI instruction to translate
 * \return TRUE for success, FALSE if the opcode is unimplemented or a
 *         helper fails
 */
static boolean
emit_vgpu10_instruction(struct svga_shader_emitter_v10 *emit,
                        unsigned inst_number,
                        const struct tgsi_full_instruction *inst)
{
   const enum tgsi_opcode opcode = inst->Instruction.Opcode;

   switch (opcode) {
   case TGSI_OPCODE_ADD:
   case TGSI_OPCODE_AND:
   case TGSI_OPCODE_BGNLOOP:
   case TGSI_OPCODE_BRK:
   case TGSI_OPCODE_CEIL:
   case TGSI_OPCODE_CONT:
   case TGSI_OPCODE_DDX:
   case TGSI_OPCODE_DDY:
   case TGSI_OPCODE_DIV:
   case TGSI_OPCODE_DP2:
   case TGSI_OPCODE_DP3:
   case TGSI_OPCODE_DP4:
   case TGSI_OPCODE_ELSE:
   case TGSI_OPCODE_ENDIF:
   case TGSI_OPCODE_ENDLOOP:
   case TGSI_OPCODE_ENDSUB:
   case TGSI_OPCODE_F2I:
   case TGSI_OPCODE_F2U:
   case TGSI_OPCODE_FLR:
   case TGSI_OPCODE_FRC:
   case TGSI_OPCODE_FSEQ:
   case TGSI_OPCODE_FSGE:
   case TGSI_OPCODE_FSLT:
   case TGSI_OPCODE_FSNE:
   case TGSI_OPCODE_I2F:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_INEG:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_ISLT:
   case TGSI_OPCODE_MAD:
   case TGSI_OPCODE_MAX:
   case TGSI_OPCODE_MIN:
   case TGSI_OPCODE_MUL:
   case TGSI_OPCODE_NOP:
   case TGSI_OPCODE_NOT:
   case TGSI_OPCODE_OR:
   case TGSI_OPCODE_RET:
   case TGSI_OPCODE_UADD:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_UMIN:
   case TGSI_OPCODE_UMAD:
   case TGSI_OPCODE_UMAX:
   case TGSI_OPCODE_ROUND:
   case TGSI_OPCODE_SQRT:
   case TGSI_OPCODE_SHL:
   case TGSI_OPCODE_TRUNC:
   case TGSI_OPCODE_U2F:
   case TGSI_OPCODE_UCMP:
   case TGSI_OPCODE_USHR:
   case TGSI_OPCODE_USNE:
   case TGSI_OPCODE_XOR:
      /* simple instructions */
      return emit_simple(emit, inst);

   case TGSI_OPCODE_MOV:
      return emit_mov(emit, inst);
   case TGSI_OPCODE_EMIT:
      return emit_vertex(emit, inst);
   case TGSI_OPCODE_ENDPRIM:
      return emit_endprim(emit, inst);
   case TGSI_OPCODE_IABS:
      return emit_iabs(emit, inst);
   case TGSI_OPCODE_ARL:
      /* fall-through */
   case TGSI_OPCODE_UARL:
      return emit_arl_uarl(emit, inst);
   case TGSI_OPCODE_BGNSUB:
      /* no-op: subroutine bodies are emitted inline after CAL handling */
      return TRUE;
   case TGSI_OPCODE_CAL:
      return emit_cal(emit, inst);
   case TGSI_OPCODE_CMP:
      return emit_cmp(emit, inst);
   case TGSI_OPCODE_COS:
      return emit_sincos(emit, inst);
   case TGSI_OPCODE_DST:
      return emit_dst(emit, inst);
   case TGSI_OPCODE_EX2:
      return emit_ex2(emit, inst);
   case TGSI_OPCODE_EXP:
      return emit_exp(emit, inst);
   case TGSI_OPCODE_IF:
      return emit_if(emit, inst);
   case TGSI_OPCODE_KILL:
      return emit_kill(emit, inst);
   case TGSI_OPCODE_KILL_IF:
      return emit_kill_if(emit, inst);
   case TGSI_OPCODE_LG2:
      return emit_lg2(emit, inst);
   case TGSI_OPCODE_LIT:
      return emit_lit(emit, inst);
   case TGSI_OPCODE_LOG:
      return emit_log(emit, inst);
   case TGSI_OPCODE_LRP:
      return emit_lrp(emit, inst);
   case TGSI_OPCODE_POW:
      return emit_pow(emit, inst);
   case TGSI_OPCODE_RCP:
      return emit_rcp(emit, inst);
   case TGSI_OPCODE_RSQ:
      return emit_rsq(emit, inst);
   case TGSI_OPCODE_SAMPLE:
      return emit_sample(emit, inst);
   case TGSI_OPCODE_SEQ:
      return emit_seq(emit, inst);
   case TGSI_OPCODE_SGE:
      return emit_sge(emit, inst);
   case TGSI_OPCODE_SGT:
      return emit_sgt(emit, inst);
   case TGSI_OPCODE_SIN:
      return emit_sincos(emit, inst);
   case TGSI_OPCODE_SLE:
      return emit_sle(emit, inst);
   case TGSI_OPCODE_SLT:
      return emit_slt(emit, inst);
   case TGSI_OPCODE_SNE:
      return emit_sne(emit, inst);
   case TGSI_OPCODE_SSG:
      return emit_ssg(emit, inst);
   case TGSI_OPCODE_ISSG:
      return emit_issg(emit, inst);
   case TGSI_OPCODE_TEX:
      return emit_tex(emit, inst);
   case TGSI_OPCODE_TXP:
      return emit_txp(emit, inst);
   case TGSI_OPCODE_TXB:
   case TGSI_OPCODE_TXB2:
   case TGSI_OPCODE_TXL:
      return emit_txl_txb(emit, inst);
   case TGSI_OPCODE_TXD:
      return emit_txd(emit, inst);
   case TGSI_OPCODE_TXF:
      return emit_txf(emit, inst);
   case TGSI_OPCODE_TXQ:
      return emit_txq(emit, inst);
   case TGSI_OPCODE_UIF:
      return emit_if(emit, inst);
   case TGSI_OPCODE_UMUL_HI:
   case TGSI_OPCODE_IMUL_HI:
   case TGSI_OPCODE_UDIV:
   case TGSI_OPCODE_IDIV:
      /* These cases use only the FIRST of two destination registers */
      return emit_simple_1dst(emit, inst, 2, 0);
   case TGSI_OPCODE_UMUL:
   case TGSI_OPCODE_UMOD:
   case TGSI_OPCODE_MOD:
      /* These cases use only the SECOND of two destination registers */
      return emit_simple_1dst(emit, inst, 2, 1);
   case TGSI_OPCODE_END:
      /* Emit the epilog helpers (prescale/alpha test/etc.) before END */
      if (!emit_post_helpers(emit))
         return FALSE;
      return emit_simple(emit, inst);

   default:
      debug_printf("Unimplemented tgsi instruction %s\n",
                   tgsi_get_opcode_name(opcode));
      return FALSE;
   }

   return TRUE;
}
/**
 * Emit the extra instructions to adjust the vertex position.
 * There are two possible adjustments:
 * 1. Converting from Gallium to VGPU10 coordinate space by applying the
 *    "prescale" and "pretranslate" values.
 * 2. Undoing the viewport transformation when we use the swtnl/draw path.
 * \param vs_pos_tmp_index  which temporary register contains the vertex pos.
 */
static void
emit_vpos_instructions(struct svga_shader_emitter_v10 *emit,
                       unsigned vs_pos_tmp_index)
{
   struct tgsi_full_src_register tmp_pos_src;
   struct tgsi_full_dst_register pos_dst;

   /* Don't bother to emit any extra vertex instructions if vertex position is
    * not written out
    */
   if (emit->vposition.out_index == INVALID_INDEX)
      return;

   tmp_pos_src = make_src_temp_reg(vs_pos_tmp_index);
   pos_dst = make_dst_output_reg(emit->vposition.out_index);

   /* If non-adjusted vertex position register index
    * is valid, copy the vertex position from the temporary
    * vertex position register before it is modified by the
    * prescale computation.
    */
   if (emit->vposition.so_index != INVALID_INDEX) {
      struct tgsi_full_dst_register pos_so_dst =
         make_dst_output_reg(emit->vposition.so_index);

      /* MOV pos_so, tmp_pos */
      emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &pos_so_dst,
                           &tmp_pos_src, FALSE);
   }

   if (emit->vposition.need_prescale) {
      /* This code adjusts the vertex position to match the VGPU10 convention.
       * If p is the position computed by the shader (usually by applying the
       * modelview and projection matrices), the new position q is computed by:
       *
       * q.x = p.w * trans.x + p.x * scale.x
       * q.y = p.w * trans.y + p.y * scale.y
       * q.z = p.w * trans.z + p.z * scale.z;
       * q.w = p.w * trans.w + p.w;
       */
      struct tgsi_full_src_register tmp_pos_src_w =
         scalar_src(&tmp_pos_src, TGSI_SWIZZLE_W);
      struct tgsi_full_dst_register tmp_pos_dst =
         make_dst_temp_reg(vs_pos_tmp_index);
      struct tgsi_full_dst_register tmp_pos_dst_xyz =
         writemask_dst(&tmp_pos_dst, TGSI_WRITEMASK_XYZ);

      struct tgsi_full_src_register prescale_scale =
         make_src_const_reg(emit->vposition.prescale_scale_index);
      struct tgsi_full_src_register prescale_trans =
         make_src_const_reg(emit->vposition.prescale_trans_index);

      /* MUL tmp_pos.xyz, tmp_pos, prescale.scale */
      emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_pos_dst_xyz,
                           &tmp_pos_src, &prescale_scale, FALSE);

      /* MAD pos, tmp_pos.wwww, prescale.trans, tmp_pos */
      emit_instruction_op3(emit, VGPU10_OPCODE_MAD, &pos_dst, &tmp_pos_src_w,
                           &prescale_trans, &tmp_pos_src, FALSE);
   }
   else if (emit->key.vs.undo_viewport) {
      /* This code computes the final vertex position from the temporary
       * vertex position by undoing the viewport transformation and the
       * divide-by-W operation (we convert window coords back to clip coords).
       * This is needed when we use the 'draw' module for fallbacks.
       * If p is the temp pos in window coords, then the NDC coord q is:
       *   q.x = (p.x - vp.x_trans) / vp.x_scale * p.w
       *   q.y = (p.y - vp.y_trans) / vp.y_scale * p.w
       *
       * CONST[vs_viewport_index] contains:
       *   { 1/vp.x_scale, 1/vp.y_scale, -vp.x_trans, -vp.y_trans }
       */
      struct tgsi_full_dst_register tmp_pos_dst =
         make_dst_temp_reg(vs_pos_tmp_index);
      struct tgsi_full_dst_register tmp_pos_dst_xy =
         writemask_dst(&tmp_pos_dst, TGSI_WRITEMASK_XY);
      struct tgsi_full_src_register tmp_pos_src_wwww =
         scalar_src(&tmp_pos_src, TGSI_SWIZZLE_W);

      struct tgsi_full_dst_register pos_dst_xyz =
         writemask_dst(&pos_dst, TGSI_WRITEMASK_XYZ);
      struct tgsi_full_dst_register pos_dst_w =
         writemask_dst(&pos_dst, TGSI_WRITEMASK_W);

      struct tgsi_full_src_register vp_xyzw =
         make_src_const_reg(emit->vs.viewport_index);
      struct tgsi_full_src_register vp_zwww =
         swizzle_src(&vp_xyzw, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W,
                     TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);

      /* ADD tmp_pos.xy, tmp_pos.xy, viewport.zwww */
      emit_instruction_op2(emit, VGPU10_OPCODE_ADD, &tmp_pos_dst_xy,
                           &tmp_pos_src, &vp_zwww, FALSE);

      /* MUL tmp_pos.xy, tmp_pos.xyzw, viewport.xyzy */
      emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &tmp_pos_dst_xy,
                           &tmp_pos_src, &vp_xyzw, FALSE);

      /* MUL pos.xyz, tmp_pos.xyz, tmp_pos.www */
      emit_instruction_op2(emit, VGPU10_OPCODE_MUL, &pos_dst_xyz,
                           &tmp_pos_src, &tmp_pos_src_wwww, FALSE);

      /* MOV pos.w, tmp_pos.w */
      emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &pos_dst_w,
                           &tmp_pos_src, FALSE);
   }
   else if (vs_pos_tmp_index != INVALID_INDEX) {
      /* This code is to handle the case where the temporary vertex
       * position register is created when the vertex shader has stream
       * output and prescale is disabled because rasterization is to be
       * discarded.
       */
      struct tgsi_full_dst_register pos_dst =
         make_dst_output_reg(emit->vposition.out_index);

      /* MOV pos, tmp_pos */
      begin_emit_instruction(emit);
      emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
      emit_dst_register(emit, &pos_dst);
      emit_src_register(emit, &tmp_pos_src);
      end_emit_instruction(emit);
   }
}
/**
 * Emit the extra code to compute/copy clip distances, according to the
 * shader's clipping mode (CLIP_DISTANCE, CLIP_VERTEX or CLIP_LEGACY).
 */
static void
emit_clipping_instructions(struct svga_shader_emitter_v10 *emit)
{
   if (emit->clip_mode == CLIP_DISTANCE) {
      /* Copy from copy distance temporary to CLIPDIST & the shadow copy */
      emit_clip_distance_instructions(emit);
   } else if (emit->clip_mode == CLIP_VERTEX) {
      /* Convert TGSI CLIPVERTEX to CLIPDIST */
      emit_clip_vertex_instructions(emit);
   }

   /**
    * Emit vertex position and take care of legacy user planes only if
    * there is a valid vertex position register index.
    * This is to take care of the case
    * where the shader doesn't output vertex position. Then in
    * this case, don't bother to emit more vertex instructions.
    */
   if (emit->vposition.out_index == INVALID_INDEX)
      return;

   /**
    * Emit per-vertex clipping instructions for legacy user defined clip planes.
    * NOTE: we must emit the clip distance instructions before the
    * emit_vpos_instructions() call since the later function will change
    * the TEMP[vs_pos_tmp_index] value.
    */
   if (emit->clip_mode == CLIP_LEGACY) {
      /* Emit CLIPDIST for legacy user defined clip planes */
      emit_clip_distance_from_vpos(emit, emit->vposition.tmp_index);
   }
}
/**
 * Emit extra per-vertex instructions.  This includes clip-coordinate
 * space conversion and computing clip distances.  This is called for
 * each GS emit-vertex instruction and at the end of VS translation.
 */
static void
emit_vertex_instructions(struct svga_shader_emitter_v10 *emit)
{
   const unsigned vs_pos_tmp_index = emit->vposition.tmp_index;

   /* Emit clipping instructions based on clipping mode */
   emit_clipping_instructions(emit);

   /**
    * Reset the temporary vertex position register index
    * so that emit_dst_register() will use the real vertex position output
    */
   emit->vposition.tmp_index = INVALID_INDEX;

   /* Emit vertex position instructions */
   emit_vpos_instructions(emit, vs_pos_tmp_index);

   /* Restore original vposition.tmp_index value for the next GS vertex.
    * It doesn't matter for VS.
    */
   emit->vposition.tmp_index = vs_pos_tmp_index;
}
/**
 * Translate the TGSI_OPCODE_EMIT GS instruction.
 *
 * Emits the per-vertex helper code (position/clip-distance adjustment)
 * followed by the VGPU10 EMIT token.
 *
 * \return TRUE (always succeeds)
 */
static boolean
emit_vertex(struct svga_shader_emitter_v10 *emit,
            const struct tgsi_full_instruction *inst)
{
   unsigned ret = TRUE;

   assert(emit->unit == PIPE_SHADER_GEOMETRY);

   emit_vertex_instructions(emit);

   /* We can't use emit_simple() because the TGSI instruction has one
    * operand (vertex stream number) which we must ignore for VGPU10.
    */
   begin_emit_instruction(emit);
   emit_opcode(emit, VGPU10_OPCODE_EMIT, FALSE);
   end_emit_instruction(emit);

   return ret;
}
/**
 * Emit the extra code to convert from VGPU10's boolean front-face
 * register to TGSI's signed front-face register.
 *
 * TODO: Make temporary front-face register a scalar.
 */
static void
emit_frontface_instructions(struct svga_shader_emitter_v10 *emit)
{
   assert(emit->unit == PIPE_SHADER_FRAGMENT);

   if (emit->fs.face_input_index != INVALID_INDEX) {
      /* convert vgpu10 boolean face register to gallium +/-1 value */
      struct tgsi_full_dst_register tmp_dst =
         make_dst_temp_reg(emit->fs.face_tmp_index);
      struct tgsi_full_src_register one =
         make_immediate_reg_float(emit, 1.0f);
      struct tgsi_full_src_register neg_one =
         make_immediate_reg_float(emit, -1.0f);

      /* MOVC face_tmp, IS_FRONT_FACE.x, 1.0, -1.0 */
      begin_emit_instruction(emit);
      emit_opcode(emit, VGPU10_OPCODE_MOVC, FALSE);
      emit_dst_register(emit, &tmp_dst);
      emit_face_register(emit);
      emit_src_register(emit, &one);
      emit_src_register(emit, &neg_one);
      end_emit_instruction(emit);
   }
}
/**
 * Emit the extra code to convert from VGPU10's fragcoord.w value to 1/w.
 *
 * Gallium expects TGSI_SEMANTIC_POSITION.w to be 1/w while VGPU10
 * supplies w, so the fragcoord is copied to a temp with its w replaced
 * by the reciprocal.
 */
static void
emit_fragcoord_instructions(struct svga_shader_emitter_v10 *emit)
{
   assert(emit->unit == PIPE_SHADER_FRAGMENT);

   if (emit->fs.fragcoord_input_index != INVALID_INDEX) {
      struct tgsi_full_dst_register tmp_dst =
         make_dst_temp_reg(emit->fs.fragcoord_tmp_index);
      struct tgsi_full_dst_register tmp_dst_xyz =
         writemask_dst(&tmp_dst, TGSI_WRITEMASK_XYZ);
      struct tgsi_full_dst_register tmp_dst_w =
         writemask_dst(&tmp_dst, TGSI_WRITEMASK_W);
      struct tgsi_full_src_register one =
         make_immediate_reg_float(emit, 1.0f);
      struct tgsi_full_src_register fragcoord =
         make_src_reg(TGSI_FILE_INPUT, emit->fs.fragcoord_input_index);

      /* save the input index */
      unsigned fragcoord_input_index = emit->fs.fragcoord_input_index;
      /* set to invalid to prevent substitution in emit_src_register() */
      emit->fs.fragcoord_input_index = INVALID_INDEX;

      /* MOV fragcoord_tmp.xyz, fragcoord.xyz */
      begin_emit_instruction(emit);
      emit_opcode(emit, VGPU10_OPCODE_MOV, FALSE);
      emit_dst_register(emit, &tmp_dst_xyz);
      emit_src_register(emit, &fragcoord);
      end_emit_instruction(emit);

      /* DIV fragcoord_tmp.w, 1.0, fragcoord.w */
      begin_emit_instruction(emit);
      emit_opcode(emit, VGPU10_OPCODE_DIV, FALSE);
      emit_dst_register(emit, &tmp_dst_w);
      emit_src_register(emit, &one);
      emit_src_register(emit, &fragcoord);
      end_emit_instruction(emit);

      /* restore saved value */
      emit->fs.fragcoord_input_index = fragcoord_input_index;
   }
}
/**
 * Emit extra instructions to adjust VS inputs/attributes.  This can
 * mean casting a vertex attribute from int to float or setting the
 * W component to 1, or both.
 *
 * For each attribute flagged in the compile key, the raw input is
 * converted/copied into the per-attribute "adjusted" temp register;
 * later emit_src_register() calls read the temp instead of the input.
 */
static void
emit_vertex_attrib_instructions(struct svga_shader_emitter_v10 *emit)
{
   const unsigned save_w_1_mask = emit->key.vs.adjust_attrib_w_1;
   const unsigned save_itof_mask = emit->key.vs.adjust_attrib_itof;
   const unsigned save_utof_mask = emit->key.vs.adjust_attrib_utof;
   const unsigned save_is_bgra_mask = emit->key.vs.attrib_is_bgra;
   const unsigned save_puint_to_snorm_mask = emit->key.vs.attrib_puint_to_snorm;
   const unsigned save_puint_to_uscaled_mask = emit->key.vs.attrib_puint_to_uscaled;
   const unsigned save_puint_to_sscaled_mask = emit->key.vs.attrib_puint_to_sscaled;

   /* union of all attributes needing any kind of adjustment */
   unsigned adjust_mask = (save_w_1_mask |
                           save_itof_mask |
                           save_utof_mask |
                           save_is_bgra_mask |
                           save_puint_to_snorm_mask |
                           save_puint_to_uscaled_mask |
                           save_puint_to_sscaled_mask);

   assert(emit->unit == PIPE_SHADER_VERTEX);

   if (adjust_mask) {
      struct tgsi_full_src_register one =
         make_immediate_reg_float(emit, 1.0f);

      struct tgsi_full_src_register one_int =
         make_immediate_reg_int(emit, 1);

      /* We need to turn off these bitmasks while emitting the
       * instructions below, then restore them afterward.
       */
      emit->key.vs.adjust_attrib_w_1 = 0;
      emit->key.vs.adjust_attrib_itof = 0;
      emit->key.vs.adjust_attrib_utof = 0;
      emit->key.vs.attrib_is_bgra = 0;
      emit->key.vs.attrib_puint_to_snorm = 0;
      emit->key.vs.attrib_puint_to_uscaled = 0;
      emit->key.vs.attrib_puint_to_sscaled = 0;

      while (adjust_mask) {
         unsigned index = u_bit_scan(&adjust_mask);

         /* skip the instruction if this vertex attribute is not being used */
         if (emit->info.input_usage_mask[index] == 0)
            continue;

         unsigned tmp = emit->vs.adjusted_input[index];
         struct tgsi_full_src_register input_src =
            make_src_reg(TGSI_FILE_INPUT, index);

         struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
         struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
         struct tgsi_full_dst_register tmp_dst_w =
            writemask_dst(&tmp_dst, TGSI_WRITEMASK_W);

         /* ITOF/UTOF/MOV tmp, input[index] */
         if (save_itof_mask & (1 << index)) {
            emit_instruction_op1(emit, VGPU10_OPCODE_ITOF,
                                 &tmp_dst, &input_src, FALSE);
         }
         else if (save_utof_mask & (1 << index)) {
            emit_instruction_op1(emit, VGPU10_OPCODE_UTOF,
                                 &tmp_dst, &input_src, FALSE);
         }
         else if (save_puint_to_snorm_mask & (1 << index)) {
            emit_puint_to_snorm(emit, &tmp_dst, &input_src);
         }
         else if (save_puint_to_uscaled_mask & (1 << index)) {
            emit_puint_to_uscaled(emit, &tmp_dst, &input_src);
         }
         else if (save_puint_to_sscaled_mask & (1 << index)) {
            emit_puint_to_sscaled(emit, &tmp_dst, &input_src);
         }
         else {
            /* only w-1 and/or BGRA adjustment remain: plain copy first */
            assert((save_w_1_mask | save_is_bgra_mask) & (1 << index));
            emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
                                 &tmp_dst, &input_src, FALSE);
         }

         if (save_is_bgra_mask & (1 << index)) {
            emit_swap_r_b(emit, &tmp_dst, &tmp_src);
         }

         if (save_w_1_mask & (1 << index)) {
            /* MOV tmp.w, 1.0 */
            if (emit->key.vs.attrib_is_pure_int & (1 << index)) {
               emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
                                    &tmp_dst_w, &one_int, FALSE);
            }
            else {
               emit_instruction_op1(emit, VGPU10_OPCODE_MOV,
                                    &tmp_dst_w, &one, FALSE);
            }
         }
      }

      /* restore the key bitmasks for subsequent emit_src_register() calls */
      emit->key.vs.adjust_attrib_w_1 = save_w_1_mask;
      emit->key.vs.adjust_attrib_itof = save_itof_mask;
      emit->key.vs.adjust_attrib_utof = save_utof_mask;
      emit->key.vs.attrib_is_bgra = save_is_bgra_mask;
      emit->key.vs.attrib_puint_to_snorm = save_puint_to_snorm_mask;
      emit->key.vs.attrib_puint_to_uscaled = save_puint_to_uscaled_mask;
      emit->key.vs.attrib_puint_to_sscaled = save_puint_to_sscaled_mask;
   }
}
/**
 * Some common values like 0.0, 1.0, 0.5, etc. are frequently needed
 * to implement some instructions.  We pre-allocate those values here
 * in the immediate constant buffer.
 */
static void
alloc_common_immediates(struct svga_shader_emitter_v10 *emit)
{
   unsigned i, n = 0;

   emit->common_immediate_pos[n++] =
      alloc_immediate_float4(emit, 0.0f, 1.0f, 0.5f, -1.0f);

   if (emit->info.opcode_count[TGSI_OPCODE_LIT] > 0) {
      /* LIT clamps the specular exponent to [-128, 128] */
      emit->common_immediate_pos[n++] =
         alloc_immediate_float4(emit, 128.0f, -128.0f, 0.0f, 0.0f);
   }

   emit->common_immediate_pos[n++] =
      alloc_immediate_int4(emit, 0, 1, 0, -1);

   if (emit->key.vs.attrib_puint_to_snorm) {
      emit->common_immediate_pos[n++] =
         alloc_immediate_float4(emit, -2.0f, 2.0f, 3.0f, -1.66666f);
   }

   if (emit->key.vs.attrib_puint_to_uscaled) {
      emit->common_immediate_pos[n++] =
         alloc_immediate_float4(emit, 1023.0f, 3.0f, 0.0f, 0.0f);
   }

   if (emit->key.vs.attrib_puint_to_sscaled) {
      /* bit-shift constants used for unpacking 10_10_10_2 attribs */
      emit->common_immediate_pos[n++] =
         alloc_immediate_int4(emit, 22, 12, 2, 0);

      emit->common_immediate_pos[n++] =
         alloc_immediate_int4(emit, 22, 30, 0, 0);
   }

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      if (emit->key.tex[i].texel_bias) {
         /* Replace 0.0f if more immediate float value is needed */
         emit->common_immediate_pos[n++] =
            alloc_immediate_float4(emit, 0.0001f, 0.0f, 0.0f, 0.0f);
         break;
      }
   }

   assert(n <= ARRAY_SIZE(emit->common_immediate_pos));
   emit->num_common_immediates = n;
}
/**
 * Emit any extra/helper declarations/code that we might need between
 * the declaration section and code section.
 *
 * \return TRUE for success, FALSE if an input/output declaration failed
 */
static boolean
emit_pre_helpers(struct svga_shader_emitter_v10 *emit)
{
   /* Properties (e.g. GS primitive types) must precede declarations */
   if (emit->unit == PIPE_SHADER_GEOMETRY)
      emit_property_instructions(emit);

   /* Declare inputs */
   if (!emit_input_declarations(emit))
      return FALSE;

   /* Declare outputs */
   if (!emit_output_declarations(emit))
      return FALSE;

   /* Declare temporary registers */
   emit_temporaries_declaration(emit);

   /* Declare constant registers */
   emit_constant_declaration(emit);

   /* Declare samplers and resources */
   emit_sampler_declarations(emit);
   emit_resource_declarations(emit);

   /* Declare clip distance output registers */
   if (emit->unit == PIPE_SHADER_VERTEX ||
       emit->unit == PIPE_SHADER_GEOMETRY) {
      emit_clip_distance_declarations(emit);
   }

   alloc_common_immediates(emit);

   if (emit->unit == PIPE_SHADER_FRAGMENT &&
       emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS) {
      /* pre-allocate the alpha-test reference value as an immediate */
      float alpha = emit->key.fs.alpha_ref;
      emit->fs.alpha_ref_index =
         alloc_immediate_float4(emit, alpha, alpha, alpha, alpha);
   }

   /* Now, emit the constant block containing all the immediates
    * declared by shader, as well as the extra ones seen above.
    */
   emit_vgpu10_immediates_block(emit);

   if (emit->unit == PIPE_SHADER_FRAGMENT) {
      emit_frontface_instructions(emit);
      emit_fragcoord_instructions(emit);
   }
   else if (emit->unit == PIPE_SHADER_VERTEX) {
      emit_vertex_attrib_instructions(emit);
   }

   return TRUE;
}
/**
 * The device has no direct support for the pipe_blend_state::alpha_to_one
 * option so we implement it here with shader code.
 *
 * Note that this is kind of pointless, actually.  Here we're clobbering
 * the alpha value with 1.0.  So if alpha-to-coverage is enabled, we'll wind
 * up with 100% coverage.  That's almost certainly not what the user wants.
 * The work-around is to add extra shader code to compute coverage from alpha
 * and write it to the coverage output register (if the user's shader doesn't
 * do so already).  We'll probably do that in the future.
 *
 * \param fs_color_tmp_index  temp register holding color output 0, or
 *                            INVALID_INDEX if color 0 is written directly
 */
static void
emit_alpha_to_one_instructions(struct svga_shader_emitter_v10 *emit,
                               unsigned fs_color_tmp_index)
{
   struct tgsi_full_src_register one = make_immediate_reg_float(emit, 1.0f);
   unsigned i;

   /* Note: it's not 100% clear from the spec if we're supposed to clobber
    * the alpha for all render targets.  But that's what NVIDIA does and
    * that's what Piglit tests.
    */
   for (i = 0; i < emit->fs.num_color_outputs; i++) {
      struct tgsi_full_dst_register color_dst;

      if (fs_color_tmp_index != INVALID_INDEX && i == 0) {
         /* write to the temp color register */
         color_dst = make_dst_temp_reg(fs_color_tmp_index);
      }
      else {
         /* write directly to the color[i] output */
         color_dst = make_dst_output_reg(emit->fs.color_out_index[i]);
      }

      color_dst = writemask_dst(&color_dst, TGSI_WRITEMASK_W);

      emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst, &one, FALSE);
   }
}
/**
 * Emit alpha test code.  This compares TEMP[fs_color_tmp_index].w
 * against the alpha reference value and discards the fragment if the
 * comparison fails.
 *
 * \param fs_color_tmp_index  temp register holding the fragment color
 */
static void
emit_alpha_test_instructions(struct svga_shader_emitter_v10 *emit,
                             unsigned fs_color_tmp_index)
{
   /* compare output color's alpha to alpha ref and kill */
   unsigned tmp = get_temp_index(emit);
   struct tgsi_full_src_register tmp_src = make_src_temp_reg(tmp);
   struct tgsi_full_src_register tmp_src_x =
      scalar_src(&tmp_src, TGSI_SWIZZLE_X);
   struct tgsi_full_dst_register tmp_dst = make_dst_temp_reg(tmp);
   struct tgsi_full_src_register color_src =
      make_src_temp_reg(fs_color_tmp_index);
   struct tgsi_full_src_register color_src_w =
      scalar_src(&color_src, TGSI_SWIZZLE_W);
   struct tgsi_full_src_register ref_src =
      make_src_immediate_reg(emit->fs.alpha_ref_index);
   struct tgsi_full_dst_register color_dst =
      make_dst_output_reg(emit->fs.color_out_index[0]);

   assert(emit->unit == PIPE_SHADER_FRAGMENT);

   /* dst = src0 'alpha_func' src1 */
   emit_comparison(emit, emit->key.fs.alpha_func, &tmp_dst,
                   &color_src_w, &ref_src);

   /* DISCARD if dst.x == 0 */
   begin_emit_instruction(emit);
   emit_discard_opcode(emit, FALSE);  /* discard if src0.x is zero */
   emit_src_register(emit, &tmp_src_x);
   end_emit_instruction(emit);

   /* If we don't need to broadcast the color below, emit the final color here.
    */
   if (emit->key.fs.write_color0_to_n_cbufs <= 1) {
      /* MOV output.color, tempcolor */
      emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst,
                           &color_src, FALSE);     /* XXX saturate? */
   }

   free_temp_indexes(emit);
}
/**
 * Emit instructions for writing a single color output to multiple
 * color buffers.
 * This is used when the TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS
 * property is set and the number of render targets is greater than one
 * (or when key.fs.white_fragments is true).
 * \param fs_color_tmp_index  index of the temp register that holds the
 *                            color to broadcast.
 */
static void
emit_broadcast_color_instructions(struct svga_shader_emitter_v10 *emit,
                                  unsigned fs_color_tmp_index)
{
   const unsigned n = emit->key.fs.write_color0_to_n_cbufs;
   unsigned i;
   struct tgsi_full_src_register color_src;

   if (emit->key.fs.white_fragments) {
      /* set all color outputs to white */
      color_src = make_immediate_reg_float(emit, 1.0f);
   }
   else {
      /* set all color outputs to TEMP[fs_color_tmp_index] */
      assert(fs_color_tmp_index != INVALID_INDEX);
      color_src = make_src_temp_reg(fs_color_tmp_index);
   }

   assert(emit->unit == PIPE_SHADER_FRAGMENT);

   for (i = 0; i < n; i++) {
      unsigned output_reg = emit->fs.color_out_index[i];
      struct tgsi_full_dst_register color_dst =
         make_dst_output_reg(output_reg);

      /* Fill in this semantic here since we'll use it later in
       * emit_dst_register().
       */
      emit->info.output_semantic_name[output_reg] = TGSI_SEMANTIC_COLOR;

      /* MOV output.color[i], tempcolor */
      emit_instruction_op1(emit, VGPU10_OPCODE_MOV, &color_dst,
                           &color_src, FALSE);     /* XXX saturate? */
   }
}
/**
 * Emit extra helper code after the original shader code, but before the
 * last END/RET instruction.
 * For vertex shaders this means emitting the extra code to apply the
 * prescale scale/translation.
 * For fragment shaders this covers alpha-to-one, alpha test and
 * color broadcast.
 *
 * \return TRUE (always succeeds)
 */
static boolean
emit_post_helpers(struct svga_shader_emitter_v10 *emit)
{
   if (emit->unit == PIPE_SHADER_VERTEX) {
      emit_vertex_instructions(emit);
   }
   else if (emit->unit == PIPE_SHADER_FRAGMENT) {
      const unsigned fs_color_tmp_index = emit->fs.color_tmp_index;

      assert(!(emit->key.fs.white_fragments &&
               emit->key.fs.write_color0_to_n_cbufs == 0));

      /* We no longer want emit_dst_register() to substitute the
       * temporary fragment color register for the real color output.
       */
      emit->fs.color_tmp_index = INVALID_INDEX;

      if (emit->key.fs.alpha_to_one) {
         emit_alpha_to_one_instructions(emit, fs_color_tmp_index);
      }
      if (emit->key.fs.alpha_func != SVGA3D_CMP_ALWAYS) {
         emit_alpha_test_instructions(emit, fs_color_tmp_index);
      }
      if (emit->key.fs.write_color0_to_n_cbufs > 1 ||
          emit->key.fs.white_fragments) {
         emit_broadcast_color_instructions(emit, fs_color_tmp_index);
      }
   }

   return TRUE;
}
/**
 * Translate the TGSI tokens into VGPU10 tokens.
 *
 * Walks the TGSI token stream, dispatching immediates, declarations,
 * instructions and properties to their emit_vgpu10_* handlers.  The
 * pre-shader helper code is emitted once, just before the first
 * instruction token.
 *
 * \return TRUE for success, FALSE on the first failing handler
 */
static boolean
emit_vgpu10_instructions(struct svga_shader_emitter_v10 *emit,
                         const struct tgsi_token *tokens)
{
   struct tgsi_parse_context parse;
   boolean ret = TRUE;
   boolean pre_helpers_emitted = FALSE;
   unsigned inst_number = 0;

   tgsi_parse_init(&parse, tokens);

   while (!tgsi_parse_end_of_tokens(&parse)) {
      tgsi_parse_token(&parse);

      switch (parse.FullToken.Token.Type) {
      case TGSI_TOKEN_TYPE_IMMEDIATE:
         ret = emit_vgpu10_immediate(emit, &parse.FullToken.FullImmediate);
         if (!ret)
            goto done;
         break;

      case TGSI_TOKEN_TYPE_DECLARATION:
         ret = emit_vgpu10_declaration(emit, &parse.FullToken.FullDeclaration);
         if (!ret)
            goto done;
         break;

      case TGSI_TOKEN_TYPE_INSTRUCTION:
         if (!pre_helpers_emitted) {
            /* emit helper code just before the first instruction */
            ret = emit_pre_helpers(emit);
            if (!ret)
               goto done;
            pre_helpers_emitted = TRUE;
         }
         ret = emit_vgpu10_instruction(emit, inst_number++,
                                       &parse.FullToken.FullInstruction);
         if (!ret)
            goto done;
         break;

      case TGSI_TOKEN_TYPE_PROPERTY:
         ret = emit_vgpu10_property(emit, &parse.FullToken.FullProperty);
         if (!ret)
            goto done;
         break;

      default:
         break;
      }
   }

done:
   tgsi_parse_free(&parse);
   return ret;
}
/**
 * Emit the first VGPU10 shader tokens.
 *
 * Token 0 is the program header (SM 4.0 version + program type);
 * token 1 is the total shader length, patched later by
 * emit_vgpu10_tail().
 *
 * \return TRUE for success, FALSE if the token buffer is full
 */
static boolean
emit_vgpu10_header(struct svga_shader_emitter_v10 *emit)
{
   VGPU10ProgramToken ptoken;

   /* First token: VGPU10ProgramToken  (version info, program type (VS,GS,PS)) */
   ptoken.majorVersion = 4;
   ptoken.minorVersion = 0;
   ptoken.programType = translate_shader_type(emit->unit);
   if (!emit_dword(emit, ptoken.value))
      return FALSE;

   /* Second token: total length of shader, in tokens.  We can't fill this
    * in until we're all done.  Emit zero for now.
    */
   return emit_dword(emit, 0);
}
/**
 * Finish the shader: patch the length placeholder (second token,
 * written as zero by emit_vgpu10_header()) with the final token count.
 *
 * \return TRUE (always succeeds)
 */
static boolean
emit_vgpu10_tail(struct svga_shader_emitter_v10 *emit)
{
   VGPU10ProgramToken *tokens;

   /* Replace the second token with total shader length */
   tokens = (VGPU10ProgramToken *) emit->buf;
   tokens[1].value = emit_get_num_tokens(emit);

   return TRUE;
}
/**
 * Modify the FS to read the BCOLORs and use the FACE register
 * to choose between the front/back colors.
 * Returns a newly allocated token string (caller frees with
 * tgsi_free_tokens() when it differs from the original).
 */
static const struct tgsi_token *
transform_fs_twoside(const struct tgsi_token *tokens)
{
   if (0) {
      debug_printf("Before tgsi_add_two_side ------------------\n");
      tgsi_dump(tokens, 0);
   }
   tokens = tgsi_add_two_side(tokens);
   if (0) {
      debug_printf("After tgsi_add_two_side ------------------\n");
      tgsi_dump(tokens, 0);
   }
   return tokens;
}
6436 * Modify the FS to do polygon stipple.
6438 static const struct tgsi_token
*
6439 transform_fs_pstipple(struct svga_shader_emitter_v10
*emit
,
6440 const struct tgsi_token
*tokens
)
6442 const struct tgsi_token
*new_tokens
;
6446 debug_printf("Before pstipple ------------------\n");
6447 tgsi_dump(tokens
,0);
6450 new_tokens
= util_pstipple_create_fragment_shader(tokens
, &unit
, 0,
6453 emit
->fs
.pstipple_sampler_unit
= unit
;
6455 /* Setup texture state for stipple */
6456 emit
->sampler_target
[unit
] = TGSI_TEXTURE_2D
;
6457 emit
->key
.tex
[unit
].swizzle_r
= TGSI_SWIZZLE_X
;
6458 emit
->key
.tex
[unit
].swizzle_g
= TGSI_SWIZZLE_Y
;
6459 emit
->key
.tex
[unit
].swizzle_b
= TGSI_SWIZZLE_Z
;
6460 emit
->key
.tex
[unit
].swizzle_a
= TGSI_SWIZZLE_W
;
6463 debug_printf("After pstipple ------------------\n");
6464 tgsi_dump(new_tokens
, 0);
/**
 * Modify the FS to support anti-aliasing point.
 *
 * \param tokens  the original fragment shader tokens
 * \param aa_coord_index  generic input index carrying the AA point coord
 * Returns a newly allocated token string (caller frees with
 * tgsi_free_tokens() when it differs from the original).
 */
static const struct tgsi_token *
transform_fs_aapoint(const struct tgsi_token *tokens,
                     int aa_coord_index)
{
   if (0) {
      debug_printf("Before tgsi_add_aa_point ------------------\n");
      tgsi_dump(tokens, 0);
   }
   tokens = tgsi_add_aa_point(tokens, aa_coord_index);
   if (0) {
      debug_printf("After tgsi_add_aa_point ------------------\n");
      tgsi_dump(tokens, 0);
   }
   return tokens;
}
6490 * This is the main entrypoint for the TGSI -> VPGU10 translator.
6492 struct svga_shader_variant
*
6493 svga_tgsi_vgpu10_translate(struct svga_context
*svga
,
6494 const struct svga_shader
*shader
,
6495 const struct svga_compile_key
*key
,
6496 enum pipe_shader_type unit
)
6498 struct svga_shader_variant
*variant
= NULL
;
6499 struct svga_shader_emitter_v10
*emit
;
6500 const struct tgsi_token
*tokens
= shader
->tokens
;
6501 struct svga_vertex_shader
*vs
= svga
->curr
.vs
;
6502 struct svga_geometry_shader
*gs
= svga
->curr
.gs
;
6504 assert(unit
== PIPE_SHADER_VERTEX
||
6505 unit
== PIPE_SHADER_GEOMETRY
||
6506 unit
== PIPE_SHADER_FRAGMENT
);
6508 /* These two flags cannot be used together */
6509 assert(key
->vs
.need_prescale
+ key
->vs
.undo_viewport
<= 1);
6511 SVGA_STATS_TIME_PUSH(svga_sws(svga
), SVGA_STATS_TIME_TGSIVGPU10TRANSLATE
);
6513 * Setup the code emitter
6515 emit
= alloc_emitter();
6522 emit
->vposition
.need_prescale
= (emit
->key
.vs
.need_prescale
||
6523 emit
->key
.gs
.need_prescale
);
6524 emit
->vposition
.tmp_index
= INVALID_INDEX
;
6525 emit
->vposition
.so_index
= INVALID_INDEX
;
6526 emit
->vposition
.out_index
= INVALID_INDEX
;
6528 emit
->fs
.color_tmp_index
= INVALID_INDEX
;
6529 emit
->fs
.face_input_index
= INVALID_INDEX
;
6530 emit
->fs
.fragcoord_input_index
= INVALID_INDEX
;
6532 emit
->gs
.prim_id_index
= INVALID_INDEX
;
6534 emit
->clip_dist_out_index
= INVALID_INDEX
;
6535 emit
->clip_dist_tmp_index
= INVALID_INDEX
;
6536 emit
->clip_dist_so_index
= INVALID_INDEX
;
6537 emit
->clip_vertex_out_index
= INVALID_INDEX
;
6539 if (emit
->key
.fs
.alpha_func
== SVGA3D_CMP_INVALID
) {
6540 emit
->key
.fs
.alpha_func
= SVGA3D_CMP_ALWAYS
;
6543 if (unit
== PIPE_SHADER_FRAGMENT
) {
6544 if (key
->fs
.light_twoside
) {
6545 tokens
= transform_fs_twoside(tokens
);
6547 if (key
->fs
.pstipple
) {
6548 const struct tgsi_token
*new_tokens
=
6549 transform_fs_pstipple(emit
, tokens
);
6550 if (tokens
!= shader
->tokens
) {
6551 /* free the two-sided shader tokens */
6552 tgsi_free_tokens(tokens
);
6554 tokens
= new_tokens
;
6556 if (key
->fs
.aa_point
) {
6557 tokens
= transform_fs_aapoint(tokens
, key
->fs
.aa_point_coord_index
);
6561 if (SVGA_DEBUG
& DEBUG_TGSI
) {
6562 debug_printf("#####################################\n");
6563 debug_printf("### TGSI Shader %u\n", shader
->id
);
6564 tgsi_dump(tokens
, 0);
6568 * Rescan the header if the token string is different from the one
6569 * included in the shader; otherwise, the header info is already up-to-date
6571 if (tokens
!= shader
->tokens
) {
6572 tgsi_scan_shader(tokens
, &emit
->info
);
6574 emit
->info
= shader
->info
;
6577 emit
->num_outputs
= emit
->info
.num_outputs
;
6579 if (unit
== PIPE_SHADER_FRAGMENT
) {
6580 /* Compute FS input remapping to match the output from VS/GS */
6582 svga_link_shaders(&gs
->base
.info
, &emit
->info
, &emit
->linkage
);
6585 svga_link_shaders(&vs
->base
.info
, &emit
->info
, &emit
->linkage
);
6587 } else if (unit
== PIPE_SHADER_GEOMETRY
) {
6589 svga_link_shaders(&vs
->base
.info
, &emit
->info
, &emit
->linkage
);
6592 determine_clipping_mode(emit
);
6594 if (unit
== PIPE_SHADER_GEOMETRY
|| unit
== PIPE_SHADER_VERTEX
) {
6595 if (shader
->stream_output
!= NULL
|| emit
->clip_mode
== CLIP_DISTANCE
) {
6596 /* if there is stream output declarations associated
6597 * with this shader or the shader writes to ClipDistance
6598 * then reserve extra registers for the non-adjusted vertex position
6599 * and the ClipDistance shadow copy
6601 emit
->vposition
.so_index
= emit
->num_outputs
++;
6603 if (emit
->clip_mode
== CLIP_DISTANCE
) {
6604 emit
->clip_dist_so_index
= emit
->num_outputs
++;
6605 if (emit
->info
.num_written_clipdistance
> 4)
6606 emit
->num_outputs
++;
6612 * Do actual shader translation.
6614 if (!emit_vgpu10_header(emit
)) {
6615 debug_printf("svga: emit VGPU10 header failed\n");
6619 if (!emit_vgpu10_instructions(emit
, tokens
)) {
6620 debug_printf("svga: emit VGPU10 instructions failed\n");
6624 if (!emit_vgpu10_tail(emit
)) {
6625 debug_printf("svga: emit VGPU10 tail failed\n");
6629 if (emit
->register_overflow
) {
6634 * Create, initialize the 'variant' object.
6636 variant
= svga_new_shader_variant(svga
);
6640 variant
->shader
= shader
;
6641 variant
->nr_tokens
= emit_get_num_tokens(emit
);
6642 variant
->tokens
= (const unsigned *)emit
->buf
;
6643 emit
->buf
= NULL
; /* buffer is no longer owed by emitter context */
6644 memcpy(&variant
->key
, key
, sizeof(*key
));
6645 variant
->id
= UTIL_BITMASK_INVALID_INDEX
;
6647 /* The extra constant starting offset starts with the number of
6648 * shader constants declared in the shader.
6650 variant
->extra_const_start
= emit
->num_shader_consts
[0];
6651 if (key
->gs
.wide_point
) {
6653 * The extra constant added in the transformed shader
6654 * for inverse viewport scale is to be supplied by the driver.
6655 * So the extra constant starting offset needs to be reduced by 1.
6657 assert(variant
->extra_const_start
> 0);
6658 variant
->extra_const_start
--;
6661 variant
->pstipple_sampler_unit
= emit
->fs
.pstipple_sampler_unit
;
6663 /* If there was exactly one write to a fragment shader output register
6664 * and it came from a constant buffer, we know all fragments will have
6665 * the same color (except for blending).
6667 variant
->constant_color_output
=
6668 emit
->constant_color_output
&& emit
->num_output_writes
== 1;
6670 /** keep track in the variant if flat interpolation is used
6671 * for any of the varyings.
6673 variant
->uses_flat_interp
= emit
->uses_flat_interp
;
6675 variant
->fs_shadow_compare_units
= emit
->fs
.shadow_compare_units
;
6677 if (tokens
!= shader
->tokens
) {
6678 tgsi_free_tokens(tokens
);
6685 SVGA_STATS_TIME_POP(svga_sws(svga
));