1 /**********************************************************
2 * Copyright 1998-2013 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 **********************************************************/
27 * @file svga_tgsi_vgpu10.c
29 * TGSI -> VGPU10 shader translation.
31 * \author Mingcheng Chen
35 #include "pipe/p_compiler.h"
36 #include "pipe/p_shader_tokens.h"
37 #include "pipe/p_defines.h"
38 #include "tgsi/tgsi_build.h"
39 #include "tgsi/tgsi_dump.h"
40 #include "tgsi/tgsi_info.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_scan.h"
43 #include "tgsi/tgsi_two_side.h"
44 #include "tgsi/tgsi_aa_point.h"
45 #include "tgsi/tgsi_util.h"
46 #include "util/u_math.h"
47 #include "util/u_memory.h"
48 #include "util/u_bitmask.h"
49 #include "util/u_debug.h"
50 #include "util/u_pstipple.h"
52 #include "svga_context.h"
53 #include "svga_debug.h"
54 #include "svga_link.h"
55 #include "svga_shader.h"
56 #include "svga_tgsi.h"
58 #include "VGPU10ShaderTokens.h"
61 #define INVALID_INDEX 99999
62 #define MAX_INTERNAL_TEMPS 3
63 #define MAX_SYSTEM_VALUES 4
64 #define MAX_IMMEDIATE_COUNT \
65 (VGPU10_MAX_IMMEDIATE_CONSTANT_BUFFER_ELEMENT_COUNT/4)
66 #define MAX_TEMP_ARRAYS 64 /* Enough? */
70 * Clipping is complicated. There's four different cases which we
71 * handle during VS/GS shader translation:
75 CLIP_NONE
, /**< No clipping enabled */
76 CLIP_LEGACY
, /**< The shader has no clipping declarations or code but
77 * one or more user-defined clip planes are enabled. We
78 * generate extra code to emit clip distances.
80 CLIP_DISTANCE
, /**< The shader already declares clip distance output
81 * registers and has code to write to them.
83 CLIP_VERTEX
/**< The shader declares a clip vertex output register and
84 * has code that writes to the register. We convert the
85 * clipvertex position into one or more clip distances.
90 struct svga_shader_emitter_v10
92 /* The token output buffer */
97 /* Information about the shader and state (does not change) */
98 struct svga_compile_key key
;
99 struct tgsi_shader_info info
;
101 unsigned version
; /**< Either 40 or 41 at this time */
103 unsigned inst_start_token
;
104 boolean discard_instruction
; /**< throw away current instruction? */
106 union tgsi_immediate_data immediates
[MAX_IMMEDIATE_COUNT
][4];
107 unsigned num_immediates
; /**< Number of immediates emitted */
108 unsigned common_immediate_pos
[8]; /**< literals for common immediates */
109 unsigned num_common_immediates
;
110 boolean immediates_emitted
;
112 unsigned num_outputs
; /**< include any extra outputs */
113 /** The first extra output is reserved for
114 * non-adjusted vertex position for
115 * stream output purpose
118 /* Temporary Registers */
119 unsigned num_shader_temps
; /**< num of temps used by original shader */
120 unsigned internal_temp_count
; /**< currently allocated internal temps */
122 unsigned start
, size
;
123 } temp_arrays
[MAX_TEMP_ARRAYS
];
124 unsigned num_temp_arrays
;
126 /** Map TGSI temp registers to VGPU10 temp array IDs and indexes */
128 unsigned arrayId
, index
;
129 } temp_map
[VGPU10_MAX_TEMPS
]; /**< arrayId, element */
131 /** Number of constants used by original shader for each constant buffer.
132 * The size should probably always match with that of svga_state.constbufs.
134 unsigned num_shader_consts
[SVGA_MAX_CONST_BUFS
];
137 unsigned num_samplers
;
138 boolean sampler_view
[PIPE_MAX_SAMPLERS
]; /**< True if sampler view exists*/
139 ubyte sampler_target
[PIPE_MAX_SAMPLERS
]; /**< TGSI_TEXTURE_x */
140 ubyte sampler_return_type
[PIPE_MAX_SAMPLERS
]; /**< TGSI_RETURN_TYPE_x */
142 /* Address regs (really implemented with temps) */
143 unsigned num_address_regs
;
144 unsigned address_reg_index
[MAX_VGPU10_ADDR_REGS
];
146 /* Output register usage masks */
147 ubyte output_usage_mask
[PIPE_MAX_SHADER_OUTPUTS
];
149 /* To map TGSI system value index to VGPU shader input indexes */
150 ubyte system_value_indexes
[MAX_SYSTEM_VALUES
];
153 /* vertex position scale/translation */
154 unsigned out_index
; /**< the real position output reg */
155 unsigned tmp_index
; /**< the fake/temp position output reg */
156 unsigned so_index
; /**< the non-adjusted position output reg */
157 unsigned prescale_scale_index
, prescale_trans_index
;
158 boolean need_prescale
;
161 /* For vertex shaders only */
163 /* viewport constant */
164 unsigned viewport_index
;
166 /* temp index of adjusted vertex attributes */
167 unsigned adjusted_input
[PIPE_MAX_SHADER_INPUTS
];
170 /* For fragment shaders only */
172 unsigned color_out_index
[PIPE_MAX_COLOR_BUFS
]; /**< the real color output regs */
173 unsigned num_color_outputs
;
174 unsigned color_tmp_index
; /**< fake/temp color output reg */
175 unsigned alpha_ref_index
; /**< immediate constant for alpha ref */
178 unsigned face_input_index
; /**< real fragment shader face reg (bool) */
179 unsigned face_tmp_index
; /**< temp face reg converted to -1 / +1 */
181 unsigned pstipple_sampler_unit
;
183 unsigned fragcoord_input_index
; /**< real fragment position input reg */
184 unsigned fragcoord_tmp_index
; /**< 1/w modified position temp reg */
186 /** Which texture units are doing shadow comparison in the FS code */
187 unsigned shadow_compare_units
;
189 unsigned sample_id_sys_index
; /**< TGSI index of sample id sys value */
191 unsigned sample_pos_sys_index
; /**< TGSI index of sample pos sys value */
192 unsigned sample_pos_tmp_index
; /**< which temp reg has the sample pos */
195 /* For geometry shaders only */
197 VGPU10_PRIMITIVE prim_type
;/**< VGPU10 primitive type */
198 VGPU10_PRIMITIVE_TOPOLOGY prim_topology
; /**< VGPU10 primitive topology */
199 unsigned input_size
; /**< size of input arrays */
200 unsigned prim_id_index
; /**< primitive id register index */
201 unsigned max_out_vertices
; /**< maximum number of output vertices */
204 /* For vertex or geometry shaders */
205 enum clipping_mode clip_mode
;
206 unsigned clip_dist_out_index
; /**< clip distance output register index */
207 unsigned clip_dist_tmp_index
; /**< clip distance temporary register */
208 unsigned clip_dist_so_index
; /**< clip distance shadow copy */
210 /** Index of temporary holding the clipvertex coordinate */
211 unsigned clip_vertex_out_index
; /**< clip vertex output register index */
212 unsigned clip_vertex_tmp_index
; /**< clip vertex temporary index */
214 /* user clip plane constant slot indexes */
215 unsigned clip_plane_const
[PIPE_MAX_CLIP_PLANES
];
217 unsigned num_output_writes
;
218 boolean constant_color_output
;
220 boolean uses_flat_interp
;
222 /* For all shaders: const reg index for RECT coord scaling */
223 unsigned texcoord_scale_index
[PIPE_MAX_SAMPLERS
];
225 /* For all shaders: const reg index for texture buffer size */
226 unsigned texture_buffer_size_index
[PIPE_MAX_SAMPLERS
];
228 /* VS/GS/FS Linkage info */
229 struct shader_linkage linkage
;
231 bool register_overflow
; /**< Set if we exceed a VGPU10 register limit */
236 emit_post_helpers(struct svga_shader_emitter_v10
*emit
);
239 emit_vertex(struct svga_shader_emitter_v10
*emit
,
240 const struct tgsi_full_instruction
*inst
);
242 static char err_buf
[128];
245 expand(struct svga_shader_emitter_v10
*emit
)
248 unsigned newsize
= emit
->size
* 2;
250 if (emit
->buf
!= err_buf
)
251 new_buf
= REALLOC(emit
->buf
, emit
->size
, newsize
);
258 emit
->size
= sizeof(err_buf
);
262 emit
->size
= newsize
;
263 emit
->ptr
= new_buf
+ (emit
->ptr
- emit
->buf
);
269 * Create and initialize a new svga_shader_emitter_v10 object.
271 static struct svga_shader_emitter_v10
*
274 struct svga_shader_emitter_v10
*emit
= CALLOC(1, sizeof(*emit
));
279 /* to initialize the output buffer */
289 * Free an svga_shader_emitter_v10 object.
292 free_emitter(struct svga_shader_emitter_v10
*emit
)
295 FREE(emit
->buf
); /* will be NULL if translation succeeded */
299 static inline boolean
300 reserve(struct svga_shader_emitter_v10
*emit
,
303 while (emit
->ptr
- emit
->buf
+ nr_dwords
* sizeof(uint32
) >= emit
->size
) {
312 emit_dword(struct svga_shader_emitter_v10
*emit
, uint32 dword
)
314 if (!reserve(emit
, 1))
317 *(uint32
*)emit
->ptr
= dword
;
318 emit
->ptr
+= sizeof dword
;
323 emit_dwords(struct svga_shader_emitter_v10
*emit
,
324 const uint32
*dwords
,
327 if (!reserve(emit
, nr
))
330 memcpy(emit
->ptr
, dwords
, nr
* sizeof *dwords
);
331 emit
->ptr
+= nr
* sizeof *dwords
;
335 /** Return the number of tokens in the emitter's buffer */
337 emit_get_num_tokens(const struct svga_shader_emitter_v10
*emit
)
339 return (emit
->ptr
- emit
->buf
) / sizeof(unsigned);
344 * Check for register overflow. If we overflow we'll set an
345 * error flag. This function can be called for register declarations
346 * or use as src/dst instruction operands.
347 * \param type register type. One of VGPU10_OPERAND_TYPE_x
348 or VGPU10_OPCODE_DCL_x
349 * \param index the register index
352 check_register_index(struct svga_shader_emitter_v10
*emit
,
353 unsigned operandType
, unsigned index
)
355 bool overflow_before
= emit
->register_overflow
;
357 switch (operandType
) {
358 case VGPU10_OPERAND_TYPE_TEMP
:
359 case VGPU10_OPERAND_TYPE_INDEXABLE_TEMP
:
360 case VGPU10_OPCODE_DCL_TEMPS
:
361 if (index
>= VGPU10_MAX_TEMPS
) {
362 emit
->register_overflow
= TRUE
;
365 case VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
:
366 case VGPU10_OPCODE_DCL_CONSTANT_BUFFER
:
367 if (index
>= VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT
) {
368 emit
->register_overflow
= TRUE
;
371 case VGPU10_OPERAND_TYPE_INPUT
:
372 case VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
:
373 case VGPU10_OPCODE_DCL_INPUT
:
374 case VGPU10_OPCODE_DCL_INPUT_SGV
:
375 case VGPU10_OPCODE_DCL_INPUT_SIV
:
376 case VGPU10_OPCODE_DCL_INPUT_PS
:
377 case VGPU10_OPCODE_DCL_INPUT_PS_SGV
:
378 case VGPU10_OPCODE_DCL_INPUT_PS_SIV
:
379 if ((emit
->unit
== PIPE_SHADER_VERTEX
&&
380 index
>= VGPU10_MAX_VS_INPUTS
) ||
381 (emit
->unit
== PIPE_SHADER_GEOMETRY
&&
382 index
>= VGPU10_MAX_GS_INPUTS
) ||
383 (emit
->unit
== PIPE_SHADER_FRAGMENT
&&
384 index
>= VGPU10_MAX_FS_INPUTS
)) {
385 emit
->register_overflow
= TRUE
;
388 case VGPU10_OPERAND_TYPE_OUTPUT
:
389 case VGPU10_OPCODE_DCL_OUTPUT
:
390 case VGPU10_OPCODE_DCL_OUTPUT_SGV
:
391 case VGPU10_OPCODE_DCL_OUTPUT_SIV
:
392 if ((emit
->unit
== PIPE_SHADER_VERTEX
&&
393 index
>= VGPU10_MAX_VS_OUTPUTS
) ||
394 (emit
->unit
== PIPE_SHADER_GEOMETRY
&&
395 index
>= VGPU10_MAX_GS_OUTPUTS
) ||
396 (emit
->unit
== PIPE_SHADER_FRAGMENT
&&
397 index
>= VGPU10_MAX_FS_OUTPUTS
)) {
398 emit
->register_overflow
= TRUE
;
401 case VGPU10_OPERAND_TYPE_SAMPLER
:
402 case VGPU10_OPCODE_DCL_SAMPLER
:
403 if (index
>= VGPU10_MAX_SAMPLERS
) {
404 emit
->register_overflow
= TRUE
;
407 case VGPU10_OPERAND_TYPE_RESOURCE
:
408 case VGPU10_OPCODE_DCL_RESOURCE
:
409 if (index
>= VGPU10_MAX_RESOURCES
) {
410 emit
->register_overflow
= TRUE
;
413 case VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER
:
414 if (index
>= MAX_IMMEDIATE_COUNT
) {
415 emit
->register_overflow
= TRUE
;
418 case VGPU10_OPERAND_TYPE_OUTPUT_COVERAGE_MASK
:
426 if (emit
->register_overflow
&& !overflow_before
) {
427 debug_printf("svga: vgpu10 register overflow (reg %u, index %u)\n",
434 * Examine misc state to determine the clipping mode.
437 determine_clipping_mode(struct svga_shader_emitter_v10
*emit
)
439 if (emit
->info
.num_written_clipdistance
> 0) {
440 emit
->clip_mode
= CLIP_DISTANCE
;
442 else if (emit
->info
.writes_clipvertex
) {
443 emit
->clip_mode
= CLIP_VERTEX
;
445 else if (emit
->key
.clip_plane_enable
) {
446 emit
->clip_mode
= CLIP_LEGACY
;
449 emit
->clip_mode
= CLIP_NONE
;
455 * For clip distance register declarations and clip distance register
456 * writes we need to mask the declaration usage or instruction writemask
457 * (respectively) against the set of the really-enabled clipping planes.
459 * The piglit test spec/glsl-1.30/execution/clipping/vs-clip-distance-enables
460 * has a VS that writes to all 8 clip distance registers, but the plane enable
461 * flags are a subset of that.
463 * This function is used to apply the plane enable flags to the register
464 * declaration or instruction writemask.
466 * \param writemask the declaration usage mask or instruction writemask
467 * \param clip_reg_index which clip plane register is being declared/written.
468 * The legal values are 0 and 1 (two clip planes per
469 * register, for a total of 8 clip planes)
472 apply_clip_plane_mask(struct svga_shader_emitter_v10
*emit
,
473 unsigned writemask
, unsigned clip_reg_index
)
477 assert(clip_reg_index
< 2);
479 /* four clip planes per clip register: */
480 shift
= clip_reg_index
* 4;
481 writemask
&= ((emit
->key
.clip_plane_enable
>> shift
) & 0xf);
488 * Translate gallium shader type into VGPU10 type.
490 static VGPU10_PROGRAM_TYPE
491 translate_shader_type(unsigned type
)
494 case PIPE_SHADER_VERTEX
:
495 return VGPU10_VERTEX_SHADER
;
496 case PIPE_SHADER_GEOMETRY
:
497 return VGPU10_GEOMETRY_SHADER
;
498 case PIPE_SHADER_FRAGMENT
:
499 return VGPU10_PIXEL_SHADER
;
501 assert(!"Unexpected shader type");
502 return VGPU10_VERTEX_SHADER
;
508 * Translate a TGSI_OPCODE_x into a VGPU10_OPCODE_x
509 * Note: we only need to translate the opcodes for "simple" instructions,
510 * as seen below. All other opcodes are handled/translated specially.
512 static VGPU10_OPCODE_TYPE
513 translate_opcode(enum tgsi_opcode opcode
)
516 case TGSI_OPCODE_MOV
:
517 return VGPU10_OPCODE_MOV
;
518 case TGSI_OPCODE_MUL
:
519 return VGPU10_OPCODE_MUL
;
520 case TGSI_OPCODE_ADD
:
521 return VGPU10_OPCODE_ADD
;
522 case TGSI_OPCODE_DP3
:
523 return VGPU10_OPCODE_DP3
;
524 case TGSI_OPCODE_DP4
:
525 return VGPU10_OPCODE_DP4
;
526 case TGSI_OPCODE_MIN
:
527 return VGPU10_OPCODE_MIN
;
528 case TGSI_OPCODE_MAX
:
529 return VGPU10_OPCODE_MAX
;
530 case TGSI_OPCODE_MAD
:
531 return VGPU10_OPCODE_MAD
;
532 case TGSI_OPCODE_SQRT
:
533 return VGPU10_OPCODE_SQRT
;
534 case TGSI_OPCODE_FRC
:
535 return VGPU10_OPCODE_FRC
;
536 case TGSI_OPCODE_FLR
:
537 return VGPU10_OPCODE_ROUND_NI
;
538 case TGSI_OPCODE_FSEQ
:
539 return VGPU10_OPCODE_EQ
;
540 case TGSI_OPCODE_FSGE
:
541 return VGPU10_OPCODE_GE
;
542 case TGSI_OPCODE_FSNE
:
543 return VGPU10_OPCODE_NE
;
544 case TGSI_OPCODE_DDX
:
545 return VGPU10_OPCODE_DERIV_RTX
;
546 case TGSI_OPCODE_DDY
:
547 return VGPU10_OPCODE_DERIV_RTY
;
548 case TGSI_OPCODE_RET
:
549 return VGPU10_OPCODE_RET
;
550 case TGSI_OPCODE_DIV
:
551 return VGPU10_OPCODE_DIV
;
552 case TGSI_OPCODE_IDIV
:
553 return VGPU10_OPCODE_IDIV
;
554 case TGSI_OPCODE_DP2
:
555 return VGPU10_OPCODE_DP2
;
556 case TGSI_OPCODE_BRK
:
557 return VGPU10_OPCODE_BREAK
;
559 return VGPU10_OPCODE_IF
;
560 case TGSI_OPCODE_ELSE
:
561 return VGPU10_OPCODE_ELSE
;
562 case TGSI_OPCODE_ENDIF
:
563 return VGPU10_OPCODE_ENDIF
;
564 case TGSI_OPCODE_CEIL
:
565 return VGPU10_OPCODE_ROUND_PI
;
566 case TGSI_OPCODE_I2F
:
567 return VGPU10_OPCODE_ITOF
;
568 case TGSI_OPCODE_NOT
:
569 return VGPU10_OPCODE_NOT
;
570 case TGSI_OPCODE_TRUNC
:
571 return VGPU10_OPCODE_ROUND_Z
;
572 case TGSI_OPCODE_SHL
:
573 return VGPU10_OPCODE_ISHL
;
574 case TGSI_OPCODE_AND
:
575 return VGPU10_OPCODE_AND
;
577 return VGPU10_OPCODE_OR
;
578 case TGSI_OPCODE_XOR
:
579 return VGPU10_OPCODE_XOR
;
580 case TGSI_OPCODE_CONT
:
581 return VGPU10_OPCODE_CONTINUE
;
582 case TGSI_OPCODE_EMIT
:
583 return VGPU10_OPCODE_EMIT
;
584 case TGSI_OPCODE_ENDPRIM
:
585 return VGPU10_OPCODE_CUT
;
586 case TGSI_OPCODE_BGNLOOP
:
587 return VGPU10_OPCODE_LOOP
;
588 case TGSI_OPCODE_ENDLOOP
:
589 return VGPU10_OPCODE_ENDLOOP
;
590 case TGSI_OPCODE_ENDSUB
:
591 return VGPU10_OPCODE_RET
;
592 case TGSI_OPCODE_NOP
:
593 return VGPU10_OPCODE_NOP
;
594 case TGSI_OPCODE_END
:
595 return VGPU10_OPCODE_RET
;
596 case TGSI_OPCODE_F2I
:
597 return VGPU10_OPCODE_FTOI
;
598 case TGSI_OPCODE_IMAX
:
599 return VGPU10_OPCODE_IMAX
;
600 case TGSI_OPCODE_IMIN
:
601 return VGPU10_OPCODE_IMIN
;
602 case TGSI_OPCODE_UDIV
:
603 case TGSI_OPCODE_UMOD
:
604 case TGSI_OPCODE_MOD
:
605 return VGPU10_OPCODE_UDIV
;
606 case TGSI_OPCODE_IMUL_HI
:
607 return VGPU10_OPCODE_IMUL
;
608 case TGSI_OPCODE_INEG
:
609 return VGPU10_OPCODE_INEG
;
610 case TGSI_OPCODE_ISHR
:
611 return VGPU10_OPCODE_ISHR
;
612 case TGSI_OPCODE_ISGE
:
613 return VGPU10_OPCODE_IGE
;
614 case TGSI_OPCODE_ISLT
:
615 return VGPU10_OPCODE_ILT
;
616 case TGSI_OPCODE_F2U
:
617 return VGPU10_OPCODE_FTOU
;
618 case TGSI_OPCODE_UADD
:
619 return VGPU10_OPCODE_IADD
;
620 case TGSI_OPCODE_U2F
:
621 return VGPU10_OPCODE_UTOF
;
622 case TGSI_OPCODE_UCMP
:
623 return VGPU10_OPCODE_MOVC
;
624 case TGSI_OPCODE_UMAD
:
625 return VGPU10_OPCODE_UMAD
;
626 case TGSI_OPCODE_UMAX
:
627 return VGPU10_OPCODE_UMAX
;
628 case TGSI_OPCODE_UMIN
:
629 return VGPU10_OPCODE_UMIN
;
630 case TGSI_OPCODE_UMUL
:
631 case TGSI_OPCODE_UMUL_HI
:
632 return VGPU10_OPCODE_UMUL
;
633 case TGSI_OPCODE_USEQ
:
634 return VGPU10_OPCODE_IEQ
;
635 case TGSI_OPCODE_USGE
:
636 return VGPU10_OPCODE_UGE
;
637 case TGSI_OPCODE_USHR
:
638 return VGPU10_OPCODE_USHR
;
639 case TGSI_OPCODE_USLT
:
640 return VGPU10_OPCODE_ULT
;
641 case TGSI_OPCODE_USNE
:
642 return VGPU10_OPCODE_INE
;
643 case TGSI_OPCODE_SWITCH
:
644 return VGPU10_OPCODE_SWITCH
;
645 case TGSI_OPCODE_CASE
:
646 return VGPU10_OPCODE_CASE
;
647 case TGSI_OPCODE_DEFAULT
:
648 return VGPU10_OPCODE_DEFAULT
;
649 case TGSI_OPCODE_ENDSWITCH
:
650 return VGPU10_OPCODE_ENDSWITCH
;
651 case TGSI_OPCODE_FSLT
:
652 return VGPU10_OPCODE_LT
;
653 case TGSI_OPCODE_ROUND
:
654 return VGPU10_OPCODE_ROUND_NE
;
655 case TGSI_OPCODE_SAMPLE_POS
:
656 /* Note: we never actually get this opcode because there's no GLSL
657 * function to query multisample resource sample positions. There's
658 * only the TGSI_SEMANTIC_SAMPLEPOS system value which contains the
659 * position of the current sample in the render target.
662 case TGSI_OPCODE_SAMPLE_INFO
:
663 /* NOTE: we never actually get this opcode because the GLSL compiler
664 * implements the gl_NumSamples variable with a simple constant in the
669 assert(!"Unexpected TGSI opcode in translate_opcode()");
670 return VGPU10_OPCODE_NOP
;
676 * Translate a TGSI register file type into a VGPU10 operand type.
677 * \param array is the TGSI_FILE_TEMPORARY register an array?
679 static VGPU10_OPERAND_TYPE
680 translate_register_file(enum tgsi_file_type file
, boolean array
)
683 case TGSI_FILE_CONSTANT
:
684 return VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
;
685 case TGSI_FILE_INPUT
:
686 return VGPU10_OPERAND_TYPE_INPUT
;
687 case TGSI_FILE_OUTPUT
:
688 return VGPU10_OPERAND_TYPE_OUTPUT
;
689 case TGSI_FILE_TEMPORARY
:
690 return array
? VGPU10_OPERAND_TYPE_INDEXABLE_TEMP
691 : VGPU10_OPERAND_TYPE_TEMP
;
692 case TGSI_FILE_IMMEDIATE
:
693 /* all immediates are 32-bit values at this time so
694 * VGPU10_OPERAND_TYPE_IMMEDIATE64 is not possible at this time.
696 return VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER
;
697 case TGSI_FILE_SAMPLER
:
698 return VGPU10_OPERAND_TYPE_SAMPLER
;
699 case TGSI_FILE_SYSTEM_VALUE
:
700 return VGPU10_OPERAND_TYPE_INPUT
;
702 /* XXX TODO more cases to finish */
705 assert(!"Bad tgsi register file!");
706 return VGPU10_OPERAND_TYPE_NULL
;
712 * Emit a null dst register
715 emit_null_dst_register(struct svga_shader_emitter_v10
*emit
)
717 VGPU10OperandToken0 operand
;
720 operand
.operandType
= VGPU10_OPERAND_TYPE_NULL
;
721 operand
.numComponents
= VGPU10_OPERAND_0_COMPONENT
;
723 emit_dword(emit
, operand
.value
);
728 * If the given register is a temporary, return the array ID.
732 get_temp_array_id(const struct svga_shader_emitter_v10
*emit
,
733 enum tgsi_file_type file
, unsigned index
)
735 if (file
== TGSI_FILE_TEMPORARY
) {
736 return emit
->temp_map
[index
].arrayId
;
745 * If the given register is a temporary, convert the index from a TGSI
746 * TEMPORARY index to a VGPU10 temp index.
749 remap_temp_index(const struct svga_shader_emitter_v10
*emit
,
750 enum tgsi_file_type file
, unsigned index
)
752 if (file
== TGSI_FILE_TEMPORARY
) {
753 return emit
->temp_map
[index
].index
;
762 * Setup the operand0 fields related to indexing (1D, 2D, relative, etc).
763 * Note: the operandType field must already be initialized.
765 static VGPU10OperandToken0
766 setup_operand0_indexing(struct svga_shader_emitter_v10
*emit
,
767 VGPU10OperandToken0 operand0
,
768 enum tgsi_file_type file
,
769 boolean indirect
, boolean index2D
,
770 unsigned tempArrayID
)
772 unsigned indexDim
, index0Rep
, index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
775 * Compute index dimensions
777 if (operand0
.operandType
== VGPU10_OPERAND_TYPE_IMMEDIATE32
||
778 operand0
.operandType
== VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
) {
779 /* there's no swizzle for in-line immediates */
780 indexDim
= VGPU10_OPERAND_INDEX_0D
;
781 assert(operand0
.selectionMode
== 0);
786 operand0
.operandType
== VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
) {
787 indexDim
= VGPU10_OPERAND_INDEX_2D
;
790 indexDim
= VGPU10_OPERAND_INDEX_1D
;
795 * Compute index representations (immediate, relative, etc).
797 if (tempArrayID
> 0) {
798 assert(file
== TGSI_FILE_TEMPORARY
);
799 /* First index is the array ID, second index is the array element */
800 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
802 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE
;
805 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
809 if (file
== TGSI_FILE_CONSTANT
) {
810 /* index[0] indicates which constant buffer while index[1] indicates
811 * the position in the constant buffer.
813 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
814 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE
;
817 /* All other register files are 1-dimensional */
818 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE
;
822 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
823 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
826 operand0
.indexDimension
= indexDim
;
827 operand0
.index0Representation
= index0Rep
;
828 operand0
.index1Representation
= index1Rep
;
835 * Emit the operand for expressing an address register for indirect indexing.
836 * Note that the address register is really just a temp register.
837 * \param addr_reg_index which address register to use
840 emit_indirect_register(struct svga_shader_emitter_v10
*emit
,
841 unsigned addr_reg_index
)
843 unsigned tmp_reg_index
;
844 VGPU10OperandToken0 operand0
;
846 assert(addr_reg_index
< MAX_VGPU10_ADDR_REGS
);
848 tmp_reg_index
= emit
->address_reg_index
[addr_reg_index
];
850 /* operand0 is a simple temporary register, selecting one component */
852 operand0
.operandType
= VGPU10_OPERAND_TYPE_TEMP
;
853 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
854 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
855 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
856 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE
;
857 operand0
.swizzleX
= 0;
858 operand0
.swizzleY
= 1;
859 operand0
.swizzleZ
= 2;
860 operand0
.swizzleW
= 3;
862 emit_dword(emit
, operand0
.value
);
863 emit_dword(emit
, remap_temp_index(emit
, TGSI_FILE_TEMPORARY
, tmp_reg_index
));
868 * Translate the dst register of a TGSI instruction and emit VGPU10 tokens.
869 * \param emit the emitter context
870 * \param reg the TGSI dst register to translate
873 emit_dst_register(struct svga_shader_emitter_v10
*emit
,
874 const struct tgsi_full_dst_register
*reg
)
876 enum tgsi_file_type file
= reg
->Register
.File
;
877 unsigned index
= reg
->Register
.Index
;
878 const enum tgsi_semantic sem_name
= emit
->info
.output_semantic_name
[index
];
879 const unsigned sem_index
= emit
->info
.output_semantic_index
[index
];
880 unsigned writemask
= reg
->Register
.WriteMask
;
881 const boolean indirect
= reg
->Register
.Indirect
;
882 const unsigned tempArrayId
= get_temp_array_id(emit
, file
, index
);
883 const boolean index2d
= reg
->Register
.Dimension
;
884 VGPU10OperandToken0 operand0
;
886 if (file
== TGSI_FILE_OUTPUT
) {
887 if (emit
->unit
== PIPE_SHADER_VERTEX
||
888 emit
->unit
== PIPE_SHADER_GEOMETRY
) {
889 if (index
== emit
->vposition
.out_index
&&
890 emit
->vposition
.tmp_index
!= INVALID_INDEX
) {
891 /* replace OUTPUT[POS] with TEMP[POS]. We need to store the
892 * vertex position result in a temporary so that we can modify
893 * it in the post_helper() code.
895 file
= TGSI_FILE_TEMPORARY
;
896 index
= emit
->vposition
.tmp_index
;
898 else if (sem_name
== TGSI_SEMANTIC_CLIPDIST
&&
899 emit
->clip_dist_tmp_index
!= INVALID_INDEX
) {
900 /* replace OUTPUT[CLIPDIST] with TEMP[CLIPDIST].
901 * We store the clip distance in a temporary first, then
902 * we'll copy it to the shadow copy and to CLIPDIST with the
903 * enabled planes mask in emit_clip_distance_instructions().
905 file
= TGSI_FILE_TEMPORARY
;
906 index
= emit
->clip_dist_tmp_index
+ sem_index
;
908 else if (sem_name
== TGSI_SEMANTIC_CLIPVERTEX
&&
909 emit
->clip_vertex_tmp_index
!= INVALID_INDEX
) {
910 /* replace the CLIPVERTEX output register with a temporary */
911 assert(emit
->clip_mode
== CLIP_VERTEX
);
912 assert(sem_index
== 0);
913 file
= TGSI_FILE_TEMPORARY
;
914 index
= emit
->clip_vertex_tmp_index
;
917 else if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
918 if (sem_name
== TGSI_SEMANTIC_POSITION
) {
919 /* Fragment depth output register */
921 operand0
.operandType
= VGPU10_OPERAND_TYPE_OUTPUT_DEPTH
;
922 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_0D
;
923 operand0
.numComponents
= VGPU10_OPERAND_1_COMPONENT
;
924 emit_dword(emit
, operand0
.value
);
927 else if (sem_name
== TGSI_SEMANTIC_SAMPLEMASK
) {
928 /* Fragment sample mask output */
930 operand0
.operandType
= VGPU10_OPERAND_TYPE_OUTPUT_COVERAGE_MASK
;
931 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_0D
;
932 operand0
.numComponents
= VGPU10_OPERAND_1_COMPONENT
;
933 emit_dword(emit
, operand0
.value
);
936 else if (index
== emit
->fs
.color_out_index
[0] &&
937 emit
->fs
.color_tmp_index
!= INVALID_INDEX
) {
938 /* replace OUTPUT[COLOR] with TEMP[COLOR]. We need to store the
939 * fragment color result in a temporary so that we can read it
940 * it in the post_helper() code.
942 file
= TGSI_FILE_TEMPORARY
;
943 index
= emit
->fs
.color_tmp_index
;
946 /* Typically, for fragment shaders, the output register index
947 * matches the color semantic index. But not when we write to
948 * the fragment depth register. In that case, OUT[0] will be
949 * fragdepth and OUT[1] will be the 0th color output. We need
950 * to use the semantic index for color outputs.
952 assert(sem_name
== TGSI_SEMANTIC_COLOR
);
953 index
= emit
->info
.output_semantic_index
[index
];
955 emit
->num_output_writes
++;
960 /* init operand tokens to all zero */
963 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
965 /* the operand has a writemask */
966 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_MASK_MODE
;
968 /* Which of the four dest components to write to. Note that we can use a
969 * simple assignment here since TGSI writemasks match VGPU10 writemasks.
971 STATIC_ASSERT(TGSI_WRITEMASK_X
== VGPU10_OPERAND_4_COMPONENT_MASK_X
);
972 operand0
.mask
= writemask
;
974 /* translate TGSI register file type to VGPU10 operand type */
975 operand0
.operandType
= translate_register_file(file
, tempArrayId
> 0);
977 check_register_index(emit
, operand0
.operandType
, index
);
979 operand0
= setup_operand0_indexing(emit
, operand0
, file
, indirect
,
980 index2d
, tempArrayId
);
983 emit_dword(emit
, operand0
.value
);
984 if (tempArrayId
> 0) {
985 emit_dword(emit
, tempArrayId
);
988 emit_dword(emit
, remap_temp_index(emit
, file
, index
));
991 emit_indirect_register(emit
, reg
->Indirect
.Index
);
997 * Translate a src register of a TGSI instruction and emit VGPU10 tokens.
998 * In quite a few cases, we do register substitution. For example, if
999 * the TGSI register is the front/back-face register, we replace that with
1000 * a temp register containing a value we computed earlier.
1003 emit_src_register(struct svga_shader_emitter_v10
*emit
,
1004 const struct tgsi_full_src_register
*reg
)
1006 enum tgsi_file_type file
= reg
->Register
.File
;
1007 unsigned index
= reg
->Register
.Index
;
1008 const boolean indirect
= reg
->Register
.Indirect
;
1009 const unsigned tempArrayId
= get_temp_array_id(emit
, file
, index
);
1010 const boolean index2d
= reg
->Register
.Dimension
;
1011 const unsigned swizzleX
= reg
->Register
.SwizzleX
;
1012 const unsigned swizzleY
= reg
->Register
.SwizzleY
;
1013 const unsigned swizzleZ
= reg
->Register
.SwizzleZ
;
1014 const unsigned swizzleW
= reg
->Register
.SwizzleW
;
1015 const boolean absolute
= reg
->Register
.Absolute
;
1016 const boolean negate
= reg
->Register
.Negate
;
1017 bool is_prim_id
= FALSE
;
1019 VGPU10OperandToken0 operand0
;
1020 VGPU10OperandToken1 operand1
;
1022 if (emit
->unit
== PIPE_SHADER_FRAGMENT
){
1023 if (file
== TGSI_FILE_INPUT
) {
1024 if (index
== emit
->fs
.face_input_index
) {
1025 /* Replace INPUT[FACE] with TEMP[FACE] */
1026 file
= TGSI_FILE_TEMPORARY
;
1027 index
= emit
->fs
.face_tmp_index
;
1029 else if (index
== emit
->fs
.fragcoord_input_index
) {
1030 /* Replace INPUT[POSITION] with TEMP[POSITION] */
1031 file
= TGSI_FILE_TEMPORARY
;
1032 index
= emit
->fs
.fragcoord_tmp_index
;
1035 /* We remap fragment shader inputs to that FS input indexes
1036 * match up with VS/GS output indexes.
1038 index
= emit
->linkage
.input_map
[index
];
1041 else if (file
== TGSI_FILE_SYSTEM_VALUE
) {
1042 if (index
== emit
->fs
.sample_pos_sys_index
) {
1043 assert(emit
->version
>= 41);
1044 /* Current sample position is in a temp register */
1045 file
= TGSI_FILE_TEMPORARY
;
1046 index
= emit
->fs
.sample_pos_tmp_index
;
1049 /* Map the TGSI system value to a VGPU10 input register */
1050 assert(index
< ARRAY_SIZE(emit
->system_value_indexes
));
1051 file
= TGSI_FILE_INPUT
;
1052 index
= emit
->system_value_indexes
[index
];
1056 else if (emit
->unit
== PIPE_SHADER_GEOMETRY
) {
1057 if (file
== TGSI_FILE_INPUT
) {
1058 is_prim_id
= (index
== emit
->gs
.prim_id_index
);
1059 index
= emit
->linkage
.input_map
[index
];
1062 else if (emit
->unit
== PIPE_SHADER_VERTEX
) {
1063 if (file
== TGSI_FILE_INPUT
) {
1064 /* if input is adjusted... */
1065 if ((emit
->key
.vs
.adjust_attrib_w_1
|
1066 emit
->key
.vs
.adjust_attrib_itof
|
1067 emit
->key
.vs
.adjust_attrib_utof
|
1068 emit
->key
.vs
.attrib_is_bgra
|
1069 emit
->key
.vs
.attrib_puint_to_snorm
|
1070 emit
->key
.vs
.attrib_puint_to_uscaled
|
1071 emit
->key
.vs
.attrib_puint_to_sscaled
) & (1 << index
)) {
1072 file
= TGSI_FILE_TEMPORARY
;
1073 index
= emit
->vs
.adjusted_input
[index
];
1076 else if (file
== TGSI_FILE_SYSTEM_VALUE
) {
1077 /* Map the TGSI system value to a VGPU10 input register */
1078 assert(index
< ARRAY_SIZE(emit
->system_value_indexes
));
1079 file
= TGSI_FILE_INPUT
;
1080 index
= emit
->system_value_indexes
[index
];
1084 operand0
.value
= operand1
.value
= 0;
1087 /* NOTE: we should be using VGPU10_OPERAND_1_COMPONENT here, but
1088 * our virtual GPU accepts this as-is.
1090 operand0
.numComponents
= VGPU10_OPERAND_0_COMPONENT
;
1091 operand0
.operandType
= VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
;
1094 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
1095 operand0
.operandType
= translate_register_file(file
, tempArrayId
> 0);
1098 operand0
= setup_operand0_indexing(emit
, operand0
, file
, indirect
,
1099 index2d
, tempArrayId
);
1101 if (operand0
.operandType
!= VGPU10_OPERAND_TYPE_IMMEDIATE32
&&
1102 operand0
.operandType
!= VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
) {
1103 /* there's no swizzle for in-line immediates */
1104 if (swizzleX
== swizzleY
&&
1105 swizzleX
== swizzleZ
&&
1106 swizzleX
== swizzleW
) {
1107 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE
;
1110 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE
;
1113 operand0
.swizzleX
= swizzleX
;
1114 operand0
.swizzleY
= swizzleY
;
1115 operand0
.swizzleZ
= swizzleZ
;
1116 operand0
.swizzleW
= swizzleW
;
1118 if (absolute
|| negate
) {
1119 operand0
.extended
= 1;
1120 operand1
.extendedOperandType
= VGPU10_EXTENDED_OPERAND_MODIFIER
;
1121 if (absolute
&& !negate
)
1122 operand1
.operandModifier
= VGPU10_OPERAND_MODIFIER_ABS
;
1123 if (!absolute
&& negate
)
1124 operand1
.operandModifier
= VGPU10_OPERAND_MODIFIER_NEG
;
1125 if (absolute
&& negate
)
1126 operand1
.operandModifier
= VGPU10_OPERAND_MODIFIER_ABSNEG
;
1130 /* Emit the operand tokens */
1131 emit_dword(emit
, operand0
.value
);
1132 if (operand0
.extended
)
1133 emit_dword(emit
, operand1
.value
);
1135 if (operand0
.operandType
== VGPU10_OPERAND_TYPE_IMMEDIATE32
) {
1136 /* Emit the four float/int in-line immediate values */
1138 assert(index
< ARRAY_SIZE(emit
->immediates
));
1139 assert(file
== TGSI_FILE_IMMEDIATE
);
1140 assert(swizzleX
< 4);
1141 assert(swizzleY
< 4);
1142 assert(swizzleZ
< 4);
1143 assert(swizzleW
< 4);
1144 c
= (unsigned *) emit
->immediates
[index
];
1145 emit_dword(emit
, c
[swizzleX
]);
1146 emit_dword(emit
, c
[swizzleY
]);
1147 emit_dword(emit
, c
[swizzleZ
]);
1148 emit_dword(emit
, c
[swizzleW
]);
1150 else if (operand0
.indexDimension
>= VGPU10_OPERAND_INDEX_1D
) {
1151 /* Emit the register index(es) */
1153 operand0
.operandType
== VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
) {
1154 emit_dword(emit
, reg
->Dimension
.Index
);
1157 if (tempArrayId
> 0) {
1158 emit_dword(emit
, tempArrayId
);
1161 emit_dword(emit
, remap_temp_index(emit
, file
, index
));
1164 emit_indirect_register(emit
, reg
->Indirect
.Index
);
1171 * Emit a resource operand (for use with a SAMPLE instruction).
1174 emit_resource_register(struct svga_shader_emitter_v10
*emit
,
1175 unsigned resource_number
)
1177 VGPU10OperandToken0 operand0
;
1179 check_register_index(emit
, VGPU10_OPERAND_TYPE_RESOURCE
, resource_number
);
1184 operand0
.operandType
= VGPU10_OPERAND_TYPE_RESOURCE
;
1185 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
1186 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
1187 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE
;
1188 operand0
.swizzleX
= VGPU10_COMPONENT_X
;
1189 operand0
.swizzleY
= VGPU10_COMPONENT_Y
;
1190 operand0
.swizzleZ
= VGPU10_COMPONENT_Z
;
1191 operand0
.swizzleW
= VGPU10_COMPONENT_W
;
1193 emit_dword(emit
, operand0
.value
);
1194 emit_dword(emit
, resource_number
);
1199 * Emit a sampler operand (for use with a SAMPLE instruction).
1202 emit_sampler_register(struct svga_shader_emitter_v10
*emit
,
1203 unsigned sampler_number
)
1205 VGPU10OperandToken0 operand0
;
1207 check_register_index(emit
, VGPU10_OPERAND_TYPE_SAMPLER
, sampler_number
);
1212 operand0
.operandType
= VGPU10_OPERAND_TYPE_SAMPLER
;
1213 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
1215 emit_dword(emit
, operand0
.value
);
1216 emit_dword(emit
, sampler_number
);
1221 * Emit an operand which reads the IS_FRONT_FACING register.
1224 emit_face_register(struct svga_shader_emitter_v10
*emit
)
1226 VGPU10OperandToken0 operand0
;
1227 unsigned index
= emit
->linkage
.input_map
[emit
->fs
.face_input_index
];
1232 operand0
.operandType
= VGPU10_OPERAND_TYPE_INPUT
;
1233 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
1234 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE
;
1235 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
1237 operand0
.swizzleX
= VGPU10_COMPONENT_X
;
1238 operand0
.swizzleY
= VGPU10_COMPONENT_X
;
1239 operand0
.swizzleZ
= VGPU10_COMPONENT_X
;
1240 operand0
.swizzleW
= VGPU10_COMPONENT_X
;
1242 emit_dword(emit
, operand0
.value
);
1243 emit_dword(emit
, index
);
1248 * Emit tokens for the "rasterizer" register used by the SAMPLE_POS
1252 emit_rasterizer_register(struct svga_shader_emitter_v10
*emit
)
1254 VGPU10OperandToken0 operand0
;
1259 /* No register index for rasterizer index (there's only one) */
1260 operand0
.operandType
= VGPU10_OPERAND_TYPE_RASTERIZER
;
1261 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_0D
;
1262 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
1263 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE
;
1264 operand0
.swizzleX
= VGPU10_COMPONENT_X
;
1265 operand0
.swizzleY
= VGPU10_COMPONENT_Y
;
1266 operand0
.swizzleZ
= VGPU10_COMPONENT_Z
;
1267 operand0
.swizzleW
= VGPU10_COMPONENT_W
;
1269 emit_dword(emit
, operand0
.value
);
1274 * Emit the token for a VGPU10 opcode.
1275 * \param saturate clamp result to [0,1]?
1278 emit_opcode(struct svga_shader_emitter_v10
*emit
,
1279 VGPU10_OPCODE_TYPE vgpu10_opcode
, boolean saturate
)
1281 VGPU10OpcodeToken0 token0
;
1283 token0
.value
= 0; /* init all fields to zero */
1284 token0
.opcodeType
= vgpu10_opcode
;
1285 token0
.instructionLength
= 0; /* Filled in by end_emit_instruction() */
1286 token0
.saturate
= saturate
;
1288 emit_dword(emit
, token0
.value
);
1293 * Emit the token for a VGPU10 resinfo instruction.
1294 * \param modifier return type modifier, _uint or _rcpFloat.
1295 * TODO: We may want to remove this parameter if it will
1296 * only ever be used as _uint.
1299 emit_opcode_resinfo(struct svga_shader_emitter_v10
*emit
,
1300 VGPU10_RESINFO_RETURN_TYPE modifier
)
1302 VGPU10OpcodeToken0 token0
;
1304 token0
.value
= 0; /* init all fields to zero */
1305 token0
.opcodeType
= VGPU10_OPCODE_RESINFO
;
1306 token0
.instructionLength
= 0; /* Filled in by end_emit_instruction() */
1307 token0
.resinfoReturnType
= modifier
;
1309 emit_dword(emit
, token0
.value
);
1314 * Emit opcode tokens for a texture sample instruction. Texture instructions
1315 * can be rather complicated (texel offsets, etc) so we have this specialized
1319 emit_sample_opcode(struct svga_shader_emitter_v10
*emit
,
1320 unsigned vgpu10_opcode
, boolean saturate
,
1321 const int offsets
[3])
1323 VGPU10OpcodeToken0 token0
;
1324 VGPU10OpcodeToken1 token1
;
1326 token0
.value
= 0; /* init all fields to zero */
1327 token0
.opcodeType
= vgpu10_opcode
;
1328 token0
.instructionLength
= 0; /* Filled in by end_emit_instruction() */
1329 token0
.saturate
= saturate
;
1331 if (offsets
[0] || offsets
[1] || offsets
[2]) {
1332 assert(offsets
[0] >= VGPU10_MIN_TEXEL_FETCH_OFFSET
);
1333 assert(offsets
[1] >= VGPU10_MIN_TEXEL_FETCH_OFFSET
);
1334 assert(offsets
[2] >= VGPU10_MIN_TEXEL_FETCH_OFFSET
);
1335 assert(offsets
[0] <= VGPU10_MAX_TEXEL_FETCH_OFFSET
);
1336 assert(offsets
[1] <= VGPU10_MAX_TEXEL_FETCH_OFFSET
);
1337 assert(offsets
[2] <= VGPU10_MAX_TEXEL_FETCH_OFFSET
);
1339 token0
.extended
= 1;
1341 token1
.opcodeType
= VGPU10_EXTENDED_OPCODE_SAMPLE_CONTROLS
;
1342 token1
.offsetU
= offsets
[0];
1343 token1
.offsetV
= offsets
[1];
1344 token1
.offsetW
= offsets
[2];
1347 emit_dword(emit
, token0
.value
);
1348 if (token0
.extended
) {
1349 emit_dword(emit
, token1
.value
);
1355 * Emit a DISCARD opcode token.
1356 * If nonzero is set, we'll discard the fragment if the X component is not 0.
1357 * Otherwise, we'll discard the fragment if the X component is 0.
1360 emit_discard_opcode(struct svga_shader_emitter_v10
*emit
, boolean nonzero
)
1362 VGPU10OpcodeToken0 opcode0
;
1365 opcode0
.opcodeType
= VGPU10_OPCODE_DISCARD
;
1367 opcode0
.testBoolean
= VGPU10_INSTRUCTION_TEST_NONZERO
;
1369 emit_dword(emit
, opcode0
.value
);
1374 * We need to call this before we begin emitting a VGPU10 instruction.
1377 begin_emit_instruction(struct svga_shader_emitter_v10
*emit
)
1379 assert(emit
->inst_start_token
== 0);
1380 /* Save location of the instruction's VGPU10OpcodeToken0 token.
1381 * Note, we can't save a pointer because it would become invalid if
1382 * we have to realloc the output buffer.
1384 emit
->inst_start_token
= emit_get_num_tokens(emit
);
1389 * We need to call this after we emit the last token of a VGPU10 instruction.
1390 * This function patches in the opcode token's instructionLength field.
1393 end_emit_instruction(struct svga_shader_emitter_v10
*emit
)
1395 VGPU10OpcodeToken0
*tokens
= (VGPU10OpcodeToken0
*) emit
->buf
;
1396 unsigned inst_length
;
1398 assert(emit
->inst_start_token
> 0);
1400 if (emit
->discard_instruction
) {
1401 /* Back up the emit->ptr to where this instruction started so
1402 * that we discard the current instruction.
1404 emit
->ptr
= (char *) (tokens
+ emit
->inst_start_token
);
1407 /* Compute instruction length and patch that into the start of
1410 inst_length
= emit_get_num_tokens(emit
) - emit
->inst_start_token
;
1412 assert(inst_length
> 0);
1414 tokens
[emit
->inst_start_token
].instructionLength
= inst_length
;
1417 emit
->inst_start_token
= 0; /* reset to zero for error checking */
1418 emit
->discard_instruction
= FALSE
;
1423 * Return index for a free temporary register.
1426 get_temp_index(struct svga_shader_emitter_v10
*emit
)
1428 assert(emit
->internal_temp_count
< MAX_INTERNAL_TEMPS
);
1429 return emit
->num_shader_temps
+ emit
->internal_temp_count
++;
1434 * Release the temporaries which were generated by get_temp_index().
1437 free_temp_indexes(struct svga_shader_emitter_v10
*emit
)
1439 emit
->internal_temp_count
= 0;
1444 * Create a tgsi_full_src_register.
1446 static struct tgsi_full_src_register
1447 make_src_reg(enum tgsi_file_type file
, unsigned index
)
1449 struct tgsi_full_src_register reg
;
1451 memset(®
, 0, sizeof(reg
));
1452 reg
.Register
.File
= file
;
1453 reg
.Register
.Index
= index
;
1454 reg
.Register
.SwizzleX
= TGSI_SWIZZLE_X
;
1455 reg
.Register
.SwizzleY
= TGSI_SWIZZLE_Y
;
1456 reg
.Register
.SwizzleZ
= TGSI_SWIZZLE_Z
;
1457 reg
.Register
.SwizzleW
= TGSI_SWIZZLE_W
;
1463 * Create a tgsi_full_src_register with a swizzle such that all four
1464 * vector components have the same scalar value.
1466 static struct tgsi_full_src_register
1467 make_src_scalar_reg(enum tgsi_file_type file
, unsigned index
, unsigned component
)
1469 struct tgsi_full_src_register reg
;
1471 assert(component
>= TGSI_SWIZZLE_X
);
1472 assert(component
<= TGSI_SWIZZLE_W
);
1474 memset(®
, 0, sizeof(reg
));
1475 reg
.Register
.File
= file
;
1476 reg
.Register
.Index
= index
;
1477 reg
.Register
.SwizzleX
=
1478 reg
.Register
.SwizzleY
=
1479 reg
.Register
.SwizzleZ
=
1480 reg
.Register
.SwizzleW
= component
;
1486 * Create a tgsi_full_src_register for a temporary.
1488 static struct tgsi_full_src_register
1489 make_src_temp_reg(unsigned index
)
1491 return make_src_reg(TGSI_FILE_TEMPORARY
, index
);
1496 * Create a tgsi_full_src_register for a constant.
1498 static struct tgsi_full_src_register
1499 make_src_const_reg(unsigned index
)
1501 return make_src_reg(TGSI_FILE_CONSTANT
, index
);
1506 * Create a tgsi_full_src_register for an immediate constant.
1508 static struct tgsi_full_src_register
1509 make_src_immediate_reg(unsigned index
)
1511 return make_src_reg(TGSI_FILE_IMMEDIATE
, index
);
1516 * Create a tgsi_full_dst_register.
1518 static struct tgsi_full_dst_register
1519 make_dst_reg(enum tgsi_file_type file
, unsigned index
)
1521 struct tgsi_full_dst_register reg
;
1523 memset(®
, 0, sizeof(reg
));
1524 reg
.Register
.File
= file
;
1525 reg
.Register
.Index
= index
;
1526 reg
.Register
.WriteMask
= TGSI_WRITEMASK_XYZW
;
1532 * Create a tgsi_full_dst_register for a temporary.
1534 static struct tgsi_full_dst_register
1535 make_dst_temp_reg(unsigned index
)
1537 return make_dst_reg(TGSI_FILE_TEMPORARY
, index
);
1542 * Create a tgsi_full_dst_register for an output.
1544 static struct tgsi_full_dst_register
1545 make_dst_output_reg(unsigned index
)
1547 return make_dst_reg(TGSI_FILE_OUTPUT
, index
);
1552 * Create negated tgsi_full_src_register.
1554 static struct tgsi_full_src_register
1555 negate_src(const struct tgsi_full_src_register
*reg
)
1557 struct tgsi_full_src_register neg
= *reg
;
1558 neg
.Register
.Negate
= !reg
->Register
.Negate
;
1563 * Create absolute value of a tgsi_full_src_register.
1565 static struct tgsi_full_src_register
1566 absolute_src(const struct tgsi_full_src_register
*reg
)
1568 struct tgsi_full_src_register absolute
= *reg
;
1569 absolute
.Register
.Absolute
= 1;
1574 /** Return the named swizzle term from the src register */
1575 static inline unsigned
1576 get_swizzle(const struct tgsi_full_src_register
*reg
, enum tgsi_swizzle term
)
1579 case TGSI_SWIZZLE_X
:
1580 return reg
->Register
.SwizzleX
;
1581 case TGSI_SWIZZLE_Y
:
1582 return reg
->Register
.SwizzleY
;
1583 case TGSI_SWIZZLE_Z
:
1584 return reg
->Register
.SwizzleZ
;
1585 case TGSI_SWIZZLE_W
:
1586 return reg
->Register
.SwizzleW
;
1588 assert(!"Bad swizzle");
1589 return TGSI_SWIZZLE_X
;
1595 * Create swizzled tgsi_full_src_register.
1597 static struct tgsi_full_src_register
1598 swizzle_src(const struct tgsi_full_src_register
*reg
,
1599 enum tgsi_swizzle swizzleX
, enum tgsi_swizzle swizzleY
,
1600 enum tgsi_swizzle swizzleZ
, enum tgsi_swizzle swizzleW
)
1602 struct tgsi_full_src_register swizzled
= *reg
;
1603 /* Note: we swizzle the current swizzle */
1604 swizzled
.Register
.SwizzleX
= get_swizzle(reg
, swizzleX
);
1605 swizzled
.Register
.SwizzleY
= get_swizzle(reg
, swizzleY
);
1606 swizzled
.Register
.SwizzleZ
= get_swizzle(reg
, swizzleZ
);
1607 swizzled
.Register
.SwizzleW
= get_swizzle(reg
, swizzleW
);
1613 * Create swizzled tgsi_full_src_register where all the swizzle
1614 * terms are the same.
1616 static struct tgsi_full_src_register
1617 scalar_src(const struct tgsi_full_src_register
*reg
, enum tgsi_swizzle swizzle
)
1619 struct tgsi_full_src_register swizzled
= *reg
;
1620 /* Note: we swizzle the current swizzle */
1621 swizzled
.Register
.SwizzleX
=
1622 swizzled
.Register
.SwizzleY
=
1623 swizzled
.Register
.SwizzleZ
=
1624 swizzled
.Register
.SwizzleW
= get_swizzle(reg
, swizzle
);
1630 * Create new tgsi_full_dst_register with writemask.
1631 * \param mask bitmask of TGSI_WRITEMASK_[XYZW]
1633 static struct tgsi_full_dst_register
1634 writemask_dst(const struct tgsi_full_dst_register
*reg
, unsigned mask
)
1636 struct tgsi_full_dst_register masked
= *reg
;
1637 masked
.Register
.WriteMask
= mask
;
1643 * Check if the register's swizzle is XXXX, YYYY, ZZZZ, or WWWW.
1646 same_swizzle_terms(const struct tgsi_full_src_register
*reg
)
1648 return (reg
->Register
.SwizzleX
== reg
->Register
.SwizzleY
&&
1649 reg
->Register
.SwizzleY
== reg
->Register
.SwizzleZ
&&
1650 reg
->Register
.SwizzleZ
== reg
->Register
.SwizzleW
);
1655 * Search the vector for the value 'x' and return its position.
1658 find_imm_in_vec4(const union tgsi_immediate_data vec
[4],
1659 union tgsi_immediate_data x
)
1662 for (i
= 0; i
< 4; i
++) {
1663 if (vec
[i
].Int
== x
.Int
)
1671 * Helper used by make_immediate_reg(), make_immediate_reg_4().
1674 find_immediate(struct svga_shader_emitter_v10
*emit
,
1675 union tgsi_immediate_data x
, unsigned startIndex
)
1677 const unsigned endIndex
= emit
->num_immediates
;
1680 assert(emit
->immediates_emitted
);
1682 /* Search immediates for x, y, z, w */
1683 for (i
= startIndex
; i
< endIndex
; i
++) {
1684 if (x
.Int
== emit
->immediates
[i
][0].Int
||
1685 x
.Int
== emit
->immediates
[i
][1].Int
||
1686 x
.Int
== emit
->immediates
[i
][2].Int
||
1687 x
.Int
== emit
->immediates
[i
][3].Int
) {
1691 /* Should never try to use an immediate value that wasn't pre-declared */
1692 assert(!"find_immediate() failed!");
1698 * Return a tgsi_full_src_register for an immediate/literal
1699 * union tgsi_immediate_data[4] value.
1700 * Note: the values must have been previously declared/allocated in
1701 * emit_pre_helpers(). And, all of x,y,z,w must be located in the same
1704 static struct tgsi_full_src_register
1705 make_immediate_reg_4(struct svga_shader_emitter_v10
*emit
,
1706 const union tgsi_immediate_data imm
[4])
1708 struct tgsi_full_src_register reg
;
1711 for (i
= 0; i
< emit
->num_common_immediates
; i
++) {
1712 /* search for first component value */
1713 int immpos
= find_immediate(emit
, imm
[0], i
);
1716 assert(immpos
>= 0);
1718 /* find remaining components within the immediate vector */
1719 x
= find_imm_in_vec4(emit
->immediates
[immpos
], imm
[0]);
1720 y
= find_imm_in_vec4(emit
->immediates
[immpos
], imm
[1]);
1721 z
= find_imm_in_vec4(emit
->immediates
[immpos
], imm
[2]);
1722 w
= find_imm_in_vec4(emit
->immediates
[immpos
], imm
[3]);
1724 if (x
>=0 && y
>= 0 && z
>= 0 && w
>= 0) {
1725 /* found them all */
1726 memset(®
, 0, sizeof(reg
));
1727 reg
.Register
.File
= TGSI_FILE_IMMEDIATE
;
1728 reg
.Register
.Index
= immpos
;
1729 reg
.Register
.SwizzleX
= x
;
1730 reg
.Register
.SwizzleY
= y
;
1731 reg
.Register
.SwizzleZ
= z
;
1732 reg
.Register
.SwizzleW
= w
;
1735 /* else, keep searching */
1738 assert(!"Failed to find immediate register!");
1740 /* Just return IMM[0].xxxx */
1741 memset(®
, 0, sizeof(reg
));
1742 reg
.Register
.File
= TGSI_FILE_IMMEDIATE
;
1748 * Return a tgsi_full_src_register for an immediate/literal
1749 * union tgsi_immediate_data value of the form {value, value, value, value}.
1750 * \sa make_immediate_reg_4() regarding allowed values.
1752 static struct tgsi_full_src_register
1753 make_immediate_reg(struct svga_shader_emitter_v10
*emit
,
1754 union tgsi_immediate_data value
)
1756 struct tgsi_full_src_register reg
;
1757 int immpos
= find_immediate(emit
, value
, 0);
1759 assert(immpos
>= 0);
1761 memset(®
, 0, sizeof(reg
));
1762 reg
.Register
.File
= TGSI_FILE_IMMEDIATE
;
1763 reg
.Register
.Index
= immpos
;
1764 reg
.Register
.SwizzleX
=
1765 reg
.Register
.SwizzleY
=
1766 reg
.Register
.SwizzleZ
=
1767 reg
.Register
.SwizzleW
= find_imm_in_vec4(emit
->immediates
[immpos
], value
);
1774 * Return a tgsi_full_src_register for an immediate/literal float[4] value.
1775 * \sa make_immediate_reg_4() regarding allowed values.
1777 static struct tgsi_full_src_register
1778 make_immediate_reg_float4(struct svga_shader_emitter_v10
*emit
,
1779 float x
, float y
, float z
, float w
)
1781 union tgsi_immediate_data imm
[4];
1786 return make_immediate_reg_4(emit
, imm
);
1791 * Return a tgsi_full_src_register for an immediate/literal float value
1792 * of the form {value, value, value, value}.
1793 * \sa make_immediate_reg_4() regarding allowed values.
1795 static struct tgsi_full_src_register
1796 make_immediate_reg_float(struct svga_shader_emitter_v10
*emit
, float value
)
1798 union tgsi_immediate_data imm
;
1800 return make_immediate_reg(emit
, imm
);
1805 * Return a tgsi_full_src_register for an immediate/literal int[4] vector.
1807 static struct tgsi_full_src_register
1808 make_immediate_reg_int4(struct svga_shader_emitter_v10
*emit
,
1809 int x
, int y
, int z
, int w
)
1811 union tgsi_immediate_data imm
[4];
1816 return make_immediate_reg_4(emit
, imm
);
1821 * Return a tgsi_full_src_register for an immediate/literal int value
1822 * of the form {value, value, value, value}.
1823 * \sa make_immediate_reg_4() regarding allowed values.
1825 static struct tgsi_full_src_register
1826 make_immediate_reg_int(struct svga_shader_emitter_v10
*emit
, int value
)
1828 union tgsi_immediate_data imm
;
1830 return make_immediate_reg(emit
, imm
);
1835 * Allocate space for a union tgsi_immediate_data[4] immediate.
1836 * \return the index/position of the immediate.
1839 alloc_immediate_4(struct svga_shader_emitter_v10
*emit
,
1840 const union tgsi_immediate_data imm
[4])
1842 unsigned n
= emit
->num_immediates
++;
1843 assert(!emit
->immediates_emitted
);
1844 assert(n
< ARRAY_SIZE(emit
->immediates
));
1845 emit
->immediates
[n
][0] = imm
[0];
1846 emit
->immediates
[n
][1] = imm
[1];
1847 emit
->immediates
[n
][2] = imm
[2];
1848 emit
->immediates
[n
][3] = imm
[3];
1854 * Allocate space for a float[4] immediate.
1855 * \return the index/position of the immediate.
1858 alloc_immediate_float4(struct svga_shader_emitter_v10
*emit
,
1859 float x
, float y
, float z
, float w
)
1861 union tgsi_immediate_data imm
[4];
1866 return alloc_immediate_4(emit
, imm
);
1871 * Allocate space for an int[4] immediate.
1872 * \return the index/position of the immediate.
1875 alloc_immediate_int4(struct svga_shader_emitter_v10
*emit
,
1876 int x
, int y
, int z
, int w
)
1878 union tgsi_immediate_data imm
[4];
1883 return alloc_immediate_4(emit
, imm
);
1888 * Allocate a shader input to store a system value.
1891 alloc_system_value_index(struct svga_shader_emitter_v10
*emit
, unsigned index
)
1893 const unsigned n
= emit
->linkage
.input_map_max
+ 1 + index
;
1894 assert(index
< ARRAY_SIZE(emit
->system_value_indexes
));
1895 emit
->system_value_indexes
[index
] = n
;
1901 * Translate a TGSI immediate value (union tgsi_immediate_data[4]) to VGPU10.
1904 emit_vgpu10_immediate(struct svga_shader_emitter_v10
*emit
,
1905 const struct tgsi_full_immediate
*imm
)
1907 /* We don't actually emit any code here. We just save the
1908 * immediate values and emit them later.
1910 alloc_immediate_4(emit
, imm
->u
);
1916 * Emit a VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER block
1917 * containing all the immediate values previously allocated
1918 * with alloc_immediate_4().
1921 emit_vgpu10_immediates_block(struct svga_shader_emitter_v10
*emit
)
1923 VGPU10OpcodeToken0 token
;
1925 assert(!emit
->immediates_emitted
);
1928 token
.opcodeType
= VGPU10_OPCODE_CUSTOMDATA
;
1929 token
.customDataClass
= VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER
;
1931 /* Note: no begin/end_emit_instruction() calls */
1932 emit_dword(emit
, token
.value
);
1933 emit_dword(emit
, 2 + 4 * emit
->num_immediates
);
1934 emit_dwords(emit
, (unsigned *) emit
->immediates
, 4 * emit
->num_immediates
);
1936 emit
->immediates_emitted
= TRUE
;
1943 * Translate a fragment shader's TGSI_INTERPOLATE_x mode to a vgpu10
1944 * interpolation mode.
1945 * \return a VGPU10_INTERPOLATION_x value
1948 translate_interpolation(const struct svga_shader_emitter_v10
*emit
,
1949 enum tgsi_interpolate_mode interp
,
1950 enum tgsi_interpolate_loc interpolate_loc
)
1952 if (interp
== TGSI_INTERPOLATE_COLOR
) {
1953 interp
= emit
->key
.fs
.flatshade
?
1954 TGSI_INTERPOLATE_CONSTANT
: TGSI_INTERPOLATE_PERSPECTIVE
;
1958 case TGSI_INTERPOLATE_CONSTANT
:
1959 return VGPU10_INTERPOLATION_CONSTANT
;
1960 case TGSI_INTERPOLATE_LINEAR
:
1961 if (interpolate_loc
== TGSI_INTERPOLATE_LOC_CENTROID
) {
1962 return VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID
;
1963 } else if (interpolate_loc
== TGSI_INTERPOLATE_LOC_SAMPLE
&&
1964 emit
->version
>= 41) {
1965 return VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_SAMPLE
;
1967 return VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE
;
1970 case TGSI_INTERPOLATE_PERSPECTIVE
:
1971 if (interpolate_loc
== TGSI_INTERPOLATE_LOC_CENTROID
) {
1972 return VGPU10_INTERPOLATION_LINEAR_CENTROID
;
1973 } else if (interpolate_loc
== TGSI_INTERPOLATE_LOC_SAMPLE
&&
1974 emit
->version
>= 41) {
1975 return VGPU10_INTERPOLATION_LINEAR_SAMPLE
;
1977 return VGPU10_INTERPOLATION_LINEAR
;
1981 assert(!"Unexpected interpolation mode");
1982 return VGPU10_INTERPOLATION_CONSTANT
;
1988 * Translate a TGSI property to VGPU10.
1989 * Don't emit any instructions yet, only need to gather the primitive property
1990 * information. The output primitive topology might be changed later. The
1991 * final property instructions will be emitted as part of the pre-helper code.
1994 emit_vgpu10_property(struct svga_shader_emitter_v10
*emit
,
1995 const struct tgsi_full_property
*prop
)
1997 static const VGPU10_PRIMITIVE primType
[] = {
1998 VGPU10_PRIMITIVE_POINT
, /* PIPE_PRIM_POINTS */
1999 VGPU10_PRIMITIVE_LINE
, /* PIPE_PRIM_LINES */
2000 VGPU10_PRIMITIVE_LINE
, /* PIPE_PRIM_LINE_LOOP */
2001 VGPU10_PRIMITIVE_LINE
, /* PIPE_PRIM_LINE_STRIP */
2002 VGPU10_PRIMITIVE_TRIANGLE
, /* PIPE_PRIM_TRIANGLES */
2003 VGPU10_PRIMITIVE_TRIANGLE
, /* PIPE_PRIM_TRIANGLE_STRIP */
2004 VGPU10_PRIMITIVE_TRIANGLE
, /* PIPE_PRIM_TRIANGLE_FAN */
2005 VGPU10_PRIMITIVE_UNDEFINED
, /* PIPE_PRIM_QUADS */
2006 VGPU10_PRIMITIVE_UNDEFINED
, /* PIPE_PRIM_QUAD_STRIP */
2007 VGPU10_PRIMITIVE_UNDEFINED
, /* PIPE_PRIM_POLYGON */
2008 VGPU10_PRIMITIVE_LINE_ADJ
, /* PIPE_PRIM_LINES_ADJACENCY */
2009 VGPU10_PRIMITIVE_LINE_ADJ
, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
2010 VGPU10_PRIMITIVE_TRIANGLE_ADJ
, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
2011 VGPU10_PRIMITIVE_TRIANGLE_ADJ
/* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
2014 static const VGPU10_PRIMITIVE_TOPOLOGY primTopology
[] = {
2015 VGPU10_PRIMITIVE_TOPOLOGY_POINTLIST
, /* PIPE_PRIM_POINTS */
2016 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST
, /* PIPE_PRIM_LINES */
2017 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST
, /* PIPE_PRIM_LINE_LOOP */
2018 VGPU10_PRIMITIVE_TOPOLOGY_LINESTRIP
, /* PIPE_PRIM_LINE_STRIP */
2019 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST
, /* PIPE_PRIM_TRIANGLES */
2020 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP
, /* PIPE_PRIM_TRIANGLE_STRIP */
2021 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP
, /* PIPE_PRIM_TRIANGLE_FAN */
2022 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED
, /* PIPE_PRIM_QUADS */
2023 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED
, /* PIPE_PRIM_QUAD_STRIP */
2024 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED
, /* PIPE_PRIM_POLYGON */
2025 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ
, /* PIPE_PRIM_LINES_ADJACENCY */
2026 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ
, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
2027 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ
, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
2028 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ
/* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
2031 static const unsigned inputArraySize
[] = {
2032 0, /* VGPU10_PRIMITIVE_UNDEFINED */
2033 1, /* VGPU10_PRIMITIVE_POINT */
2034 2, /* VGPU10_PRIMITIVE_LINE */
2035 3, /* VGPU10_PRIMITIVE_TRIANGLE */
2038 4, /* VGPU10_PRIMITIVE_LINE_ADJ */
2039 6 /* VGPU10_PRIMITIVE_TRIANGLE_ADJ */
2042 switch (prop
->Property
.PropertyName
) {
2043 case TGSI_PROPERTY_GS_INPUT_PRIM
:
2044 assert(prop
->u
[0].Data
< ARRAY_SIZE(primType
));
2045 emit
->gs
.prim_type
= primType
[prop
->u
[0].Data
];
2046 assert(emit
->gs
.prim_type
!= VGPU10_PRIMITIVE_UNDEFINED
);
2047 emit
->gs
.input_size
= inputArraySize
[emit
->gs
.prim_type
];
2050 case TGSI_PROPERTY_GS_OUTPUT_PRIM
:
2051 assert(prop
->u
[0].Data
< ARRAY_SIZE(primTopology
));
2052 emit
->gs
.prim_topology
= primTopology
[prop
->u
[0].Data
];
2053 assert(emit
->gs
.prim_topology
!= VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED
);
2056 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES
:
2057 emit
->gs
.max_out_vertices
= prop
->u
[0].Data
;
2069 emit_property_instruction(struct svga_shader_emitter_v10
*emit
,
2070 VGPU10OpcodeToken0 opcode0
, unsigned nData
,
2073 begin_emit_instruction(emit
);
2074 emit_dword(emit
, opcode0
.value
);
2076 emit_dword(emit
, data
);
2077 end_emit_instruction(emit
);
2082 * Emit property instructions
2085 emit_property_instructions(struct svga_shader_emitter_v10
*emit
)
2087 VGPU10OpcodeToken0 opcode0
;
2089 assert(emit
->unit
== PIPE_SHADER_GEOMETRY
);
2091 /* emit input primitive type declaration */
2093 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_GS_INPUT_PRIMITIVE
;
2094 opcode0
.primitive
= emit
->gs
.prim_type
;
2095 emit_property_instruction(emit
, opcode0
, 0, 0);
2097 /* emit output primitive topology declaration */
2099 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_GS_OUTPUT_PRIMITIVE_TOPOLOGY
;
2100 opcode0
.primitiveTopology
= emit
->gs
.prim_topology
;
2101 emit_property_instruction(emit
, opcode0
, 0, 0);
2103 /* emit max output vertices */
2105 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_MAX_OUTPUT_VERTEX_COUNT
;
2106 emit_property_instruction(emit
, opcode0
, 1, emit
->gs
.max_out_vertices
);
2111 * Emit a vgpu10 declaration "instruction".
2112 * \param index the register index
2113 * \param size array size of the operand. In most cases, it is 1,
2114 * but for inputs to geometry shader, the array size varies
2115 * depending on the primitive type.
2118 emit_decl_instruction(struct svga_shader_emitter_v10
*emit
,
2119 VGPU10OpcodeToken0 opcode0
,
2120 VGPU10OperandToken0 operand0
,
2121 VGPU10NameToken name_token
,
2122 unsigned index
, unsigned size
)
2124 assert(opcode0
.opcodeType
);
2125 assert(operand0
.mask
||
2126 (operand0
.operandType
== VGPU10_OPERAND_TYPE_OUTPUT_DEPTH
) ||
2127 (operand0
.operandType
== VGPU10_OPERAND_TYPE_OUTPUT_COVERAGE_MASK
));
2129 begin_emit_instruction(emit
);
2130 emit_dword(emit
, opcode0
.value
);
2132 emit_dword(emit
, operand0
.value
);
2134 if (operand0
.indexDimension
== VGPU10_OPERAND_INDEX_1D
) {
2135 /* Next token is the index of the register to declare */
2136 emit_dword(emit
, index
);
2138 else if (operand0
.indexDimension
>= VGPU10_OPERAND_INDEX_2D
) {
2139 /* Next token is the size of the register */
2140 emit_dword(emit
, size
);
2142 /* Followed by the index of the register */
2143 emit_dword(emit
, index
);
2146 if (name_token
.value
) {
2147 emit_dword(emit
, name_token
.value
);
2150 end_emit_instruction(emit
);
2155 * Emit the declaration for a shader input.
2156 * \param opcodeType opcode type, one of VGPU10_OPCODE_DCL_INPUTx
2157 * \param operandType operand type, one of VGPU10_OPERAND_TYPE_INPUT_x
2158 * \param dim index dimension
2159 * \param index the input register index
2160 * \param size array size of the operand. In most cases, it is 1,
2161 * but for inputs to geometry shader, the array size varies
2162 * depending on the primitive type.
2163 * \param name one of VGPU10_NAME_x
2164 * \parma numComp number of components
2165 * \param selMode component selection mode
2166 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2167 * \param interpMode interpolation mode
2170 emit_input_declaration(struct svga_shader_emitter_v10
*emit
,
2171 VGPU10_OPCODE_TYPE opcodeType
,
2172 VGPU10_OPERAND_TYPE operandType
,
2173 VGPU10_OPERAND_INDEX_DIMENSION dim
,
2174 unsigned index
, unsigned size
,
2175 VGPU10_SYSTEM_NAME name
,
2176 VGPU10_OPERAND_NUM_COMPONENTS numComp
,
2177 VGPU10_OPERAND_4_COMPONENT_SELECTION_MODE selMode
,
2179 VGPU10_INTERPOLATION_MODE interpMode
)
2181 VGPU10OpcodeToken0 opcode0
;
2182 VGPU10OperandToken0 operand0
;
2183 VGPU10NameToken name_token
;
2185 assert(usageMask
<= VGPU10_OPERAND_4_COMPONENT_MASK_ALL
);
2186 assert(opcodeType
== VGPU10_OPCODE_DCL_INPUT
||
2187 opcodeType
== VGPU10_OPCODE_DCL_INPUT_SIV
||
2188 opcodeType
== VGPU10_OPCODE_DCL_INPUT_PS
||
2189 opcodeType
== VGPU10_OPCODE_DCL_INPUT_PS_SIV
||
2190 opcodeType
== VGPU10_OPCODE_DCL_INPUT_PS_SGV
);
2191 assert(operandType
== VGPU10_OPERAND_TYPE_INPUT
||
2192 operandType
== VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
);
2193 assert(numComp
<= VGPU10_OPERAND_4_COMPONENT
);
2194 assert(selMode
<= VGPU10_OPERAND_4_COMPONENT_MASK_MODE
);
2195 assert(dim
<= VGPU10_OPERAND_INDEX_3D
);
2196 assert(name
== VGPU10_NAME_UNDEFINED
||
2197 name
== VGPU10_NAME_POSITION
||
2198 name
== VGPU10_NAME_INSTANCE_ID
||
2199 name
== VGPU10_NAME_VERTEX_ID
||
2200 name
== VGPU10_NAME_PRIMITIVE_ID
||
2201 name
== VGPU10_NAME_IS_FRONT_FACE
||
2202 name
== VGPU10_NAME_SAMPLE_INDEX
);
2204 assert(interpMode
== VGPU10_INTERPOLATION_UNDEFINED
||
2205 interpMode
== VGPU10_INTERPOLATION_CONSTANT
||
2206 interpMode
== VGPU10_INTERPOLATION_LINEAR
||
2207 interpMode
== VGPU10_INTERPOLATION_LINEAR_CENTROID
||
2208 interpMode
== VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE
||
2209 interpMode
== VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID
||
2210 interpMode
== VGPU10_INTERPOLATION_LINEAR_SAMPLE
||
2211 interpMode
== VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_SAMPLE
);
2213 check_register_index(emit
, opcodeType
, index
);
2215 opcode0
.value
= operand0
.value
= name_token
.value
= 0;
2217 opcode0
.opcodeType
= opcodeType
;
2218 opcode0
.interpolationMode
= interpMode
;
2220 operand0
.operandType
= operandType
;
2221 operand0
.numComponents
= numComp
;
2222 operand0
.selectionMode
= selMode
;
2223 operand0
.mask
= usageMask
;
2224 operand0
.indexDimension
= dim
;
2225 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
2226 if (dim
== VGPU10_OPERAND_INDEX_2D
)
2227 operand0
.index1Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
2229 name_token
.name
= name
;
2231 emit_decl_instruction(emit
, opcode0
, operand0
, name_token
, index
, size
);
2236 * Emit the declaration for a shader output.
2237 * \param type one of VGPU10_OPCODE_DCL_OUTPUTx
2238 * \param index the output register index
2239 * \param name one of VGPU10_NAME_x
2240 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2243 emit_output_declaration(struct svga_shader_emitter_v10
*emit
,
2244 VGPU10_OPCODE_TYPE type
, unsigned index
,
2245 VGPU10_SYSTEM_NAME name
,
2248 VGPU10OpcodeToken0 opcode0
;
2249 VGPU10OperandToken0 operand0
;
2250 VGPU10NameToken name_token
;
2252 assert(usageMask
<= VGPU10_OPERAND_4_COMPONENT_MASK_ALL
);
2253 assert(type
== VGPU10_OPCODE_DCL_OUTPUT
||
2254 type
== VGPU10_OPCODE_DCL_OUTPUT_SGV
||
2255 type
== VGPU10_OPCODE_DCL_OUTPUT_SIV
);
2256 assert(name
== VGPU10_NAME_UNDEFINED
||
2257 name
== VGPU10_NAME_POSITION
||
2258 name
== VGPU10_NAME_PRIMITIVE_ID
||
2259 name
== VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX
||
2260 name
== VGPU10_NAME_CLIP_DISTANCE
);
2262 check_register_index(emit
, type
, index
);
2264 opcode0
.value
= operand0
.value
= name_token
.value
= 0;
2266 opcode0
.opcodeType
= type
;
2267 operand0
.operandType
= VGPU10_OPERAND_TYPE_OUTPUT
;
2268 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
2269 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_MASK_MODE
;
2270 operand0
.mask
= usageMask
;
2271 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
2272 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
2274 name_token
.name
= name
;
2276 emit_decl_instruction(emit
, opcode0
, operand0
, name_token
, index
, 1);
2281 * Emit the declaration for the fragment depth output.
2284 emit_fragdepth_output_declaration(struct svga_shader_emitter_v10
*emit
)
2286 VGPU10OpcodeToken0 opcode0
;
2287 VGPU10OperandToken0 operand0
;
2288 VGPU10NameToken name_token
;
2290 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
2292 opcode0
.value
= operand0
.value
= name_token
.value
= 0;
2294 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_OUTPUT
;
2295 operand0
.operandType
= VGPU10_OPERAND_TYPE_OUTPUT_DEPTH
;
2296 operand0
.numComponents
= VGPU10_OPERAND_1_COMPONENT
;
2297 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_0D
;
2300 emit_decl_instruction(emit
, opcode0
, operand0
, name_token
, 0, 1);
2305 * Emit the declaration for the fragment sample mask/coverage output.
2308 emit_samplemask_output_declaration(struct svga_shader_emitter_v10
*emit
)
2310 VGPU10OpcodeToken0 opcode0
;
2311 VGPU10OperandToken0 operand0
;
2312 VGPU10NameToken name_token
;
2314 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
2315 assert(emit
->version
>= 41);
2317 opcode0
.value
= operand0
.value
= name_token
.value
= 0;
2319 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_OUTPUT
;
2320 operand0
.operandType
= VGPU10_OPERAND_TYPE_OUTPUT_COVERAGE_MASK
;
2321 operand0
.numComponents
= VGPU10_OPERAND_0_COMPONENT
;
2322 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_0D
;
2325 emit_decl_instruction(emit
, opcode0
, operand0
, name_token
, 0, 1);
2330 * Emit the declaration for a system value input/output.
2333 emit_system_value_declaration(struct svga_shader_emitter_v10
*emit
,
2334 enum tgsi_semantic semantic_name
, unsigned index
)
2336 switch (semantic_name
) {
2337 case TGSI_SEMANTIC_INSTANCEID
:
2338 index
= alloc_system_value_index(emit
, index
);
2339 emit_input_declaration(emit
, VGPU10_OPCODE_DCL_INPUT_SIV
,
2340 VGPU10_OPERAND_TYPE_INPUT
,
2341 VGPU10_OPERAND_INDEX_1D
,
2343 VGPU10_NAME_INSTANCE_ID
,
2344 VGPU10_OPERAND_4_COMPONENT
,
2345 VGPU10_OPERAND_4_COMPONENT_MASK_MODE
,
2346 VGPU10_OPERAND_4_COMPONENT_MASK_X
,
2347 VGPU10_INTERPOLATION_UNDEFINED
);
2349 case TGSI_SEMANTIC_VERTEXID
:
2350 index
= alloc_system_value_index(emit
, index
);
2351 emit_input_declaration(emit
, VGPU10_OPCODE_DCL_INPUT_SIV
,
2352 VGPU10_OPERAND_TYPE_INPUT
,
2353 VGPU10_OPERAND_INDEX_1D
,
2355 VGPU10_NAME_VERTEX_ID
,
2356 VGPU10_OPERAND_4_COMPONENT
,
2357 VGPU10_OPERAND_4_COMPONENT_MASK_MODE
,
2358 VGPU10_OPERAND_4_COMPONENT_MASK_X
,
2359 VGPU10_INTERPOLATION_UNDEFINED
);
2361 case TGSI_SEMANTIC_SAMPLEID
:
2362 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
2363 emit
->fs
.sample_id_sys_index
= index
;
2364 index
= alloc_system_value_index(emit
, index
);
2365 emit_input_declaration(emit
, VGPU10_OPCODE_DCL_INPUT_PS_SIV
,
2366 VGPU10_OPERAND_TYPE_INPUT
,
2367 VGPU10_OPERAND_INDEX_1D
,
2369 VGPU10_NAME_SAMPLE_INDEX
,
2370 VGPU10_OPERAND_4_COMPONENT
,
2371 VGPU10_OPERAND_4_COMPONENT_MASK_MODE
,
2372 VGPU10_OPERAND_4_COMPONENT_MASK_X
,
2373 VGPU10_INTERPOLATION_CONSTANT
);
2375 case TGSI_SEMANTIC_SAMPLEPOS
:
2376 /* This system value contains the position of the current sample
2377 * when using per-sample shading. We implement this by calling
2378 * the VGPU10_OPCODE_SAMPLE_POS instruction with the current sample
2379 * index as the argument. See emit_sample_position_instructions().
2381 assert(emit
->version
>= 41);
2382 emit
->fs
.sample_pos_sys_index
= index
;
2383 index
= alloc_system_value_index(emit
, index
);
2386 debug_printf("unexpected sytem value semantic index %u\n",
2392 * Translate a TGSI declaration to VGPU10.
2395 emit_vgpu10_declaration(struct svga_shader_emitter_v10
*emit
,
2396 const struct tgsi_full_declaration
*decl
)
2398 switch (decl
->Declaration
.File
) {
2399 case TGSI_FILE_INPUT
:
2400 /* do nothing - see emit_input_declarations() */
2403 case TGSI_FILE_OUTPUT
:
2404 assert(decl
->Range
.First
== decl
->Range
.Last
);
2405 emit
->output_usage_mask
[decl
->Range
.First
] = decl
->Declaration
.UsageMask
;
2408 case TGSI_FILE_TEMPORARY
:
2409 /* Don't declare the temps here. Just keep track of how many
2410 * and emit the declaration later.
2412 if (decl
->Declaration
.Array
) {
2413 /* Indexed temporary array. Save the start index of the array
2414 * and the size of the array.
2416 const unsigned arrayID
= MIN2(decl
->Array
.ArrayID
, MAX_TEMP_ARRAYS
);
2419 assert(arrayID
< ARRAY_SIZE(emit
->temp_arrays
));
2421 /* Save this array so we can emit the declaration for it later */
2422 emit
->temp_arrays
[arrayID
].start
= decl
->Range
.First
;
2423 emit
->temp_arrays
[arrayID
].size
=
2424 decl
->Range
.Last
- decl
->Range
.First
+ 1;
2426 emit
->num_temp_arrays
= MAX2(emit
->num_temp_arrays
, arrayID
+ 1);
2427 assert(emit
->num_temp_arrays
<= MAX_TEMP_ARRAYS
);
2428 emit
->num_temp_arrays
= MIN2(emit
->num_temp_arrays
, MAX_TEMP_ARRAYS
);
2430 /* Fill in the temp_map entries for this array */
2431 for (i
= decl
->Range
.First
; i
<= decl
->Range
.Last
; i
++) {
2432 emit
->temp_map
[i
].arrayId
= arrayID
;
2433 emit
->temp_map
[i
].index
= i
- decl
->Range
.First
;
2437 /* for all temps, indexed or not, keep track of highest index */
2438 emit
->num_shader_temps
= MAX2(emit
->num_shader_temps
,
2439 decl
->Range
.Last
+ 1);
2442 case TGSI_FILE_CONSTANT
:
2443 /* Don't declare constants here. Just keep track and emit later. */
2445 unsigned constbuf
= 0, num_consts
;
2446 if (decl
->Declaration
.Dimension
) {
2447 constbuf
= decl
->Dim
.Index2D
;
2449 /* We throw an assertion here when, in fact, the shader should never
2450 * have linked due to constbuf index out of bounds, so we shouldn't
2451 * have reached here.
2453 assert(constbuf
< ARRAY_SIZE(emit
->num_shader_consts
));
2455 num_consts
= MAX2(emit
->num_shader_consts
[constbuf
],
2456 decl
->Range
.Last
+ 1);
2458 if (num_consts
> VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT
) {
2459 debug_printf("Warning: constant buffer is declared to size [%u]"
2460 " but [%u] is the limit.\n",
2462 VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT
);
2464 /* The linker doesn't enforce the max UBO size so we clamp here */
2465 emit
->num_shader_consts
[constbuf
] =
2466 MIN2(num_consts
, VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT
);
2470 case TGSI_FILE_IMMEDIATE
:
2471 assert(!"TGSI_FILE_IMMEDIATE not handled yet!");
2474 case TGSI_FILE_SYSTEM_VALUE
:
2475 emit_system_value_declaration(emit
, decl
->Semantic
.Name
,
2479 case TGSI_FILE_SAMPLER
:
2480 /* Don't declare samplers here. Just keep track and emit later. */
2481 emit
->num_samplers
= MAX2(emit
->num_samplers
, decl
->Range
.Last
+ 1);
2485 case TGSI_FILE_RESOURCE
:
2486 /*opcode0.opcodeType = VGPU10_OPCODE_DCL_RESOURCE;*/
2487 /* XXX more, VGPU10_RETURN_TYPE_FLOAT */
2488 assert(!"TGSI_FILE_RESOURCE not handled yet");
2492 case TGSI_FILE_ADDRESS
:
2493 emit
->num_address_regs
= MAX2(emit
->num_address_regs
,
2494 decl
->Range
.Last
+ 1);
2497 case TGSI_FILE_SAMPLER_VIEW
:
2499 unsigned unit
= decl
->Range
.First
;
2500 assert(decl
->Range
.First
== decl
->Range
.Last
);
2501 emit
->sampler_target
[unit
] = decl
->SamplerView
.Resource
;
2502 /* Note: we can ignore YZW return types for now */
2503 emit
->sampler_return_type
[unit
] = decl
->SamplerView
.ReturnTypeX
;
2504 emit
->sampler_view
[unit
] = TRUE
;
2509 assert(!"Unexpected type of declaration");
2517 * Emit all input declarations.
2520 emit_input_declarations(struct svga_shader_emitter_v10
*emit
)
2524 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
2526 for (i
= 0; i
< emit
->linkage
.num_inputs
; i
++) {
2527 enum tgsi_semantic semantic_name
= emit
->info
.input_semantic_name
[i
];
2528 unsigned usage_mask
= emit
->info
.input_usage_mask
[i
];
2529 unsigned index
= emit
->linkage
.input_map
[i
];
2530 VGPU10_OPCODE_TYPE type
;
2531 VGPU10_INTERPOLATION_MODE interpolationMode
;
2532 VGPU10_SYSTEM_NAME name
;
2534 if (usage_mask
== 0)
2535 continue; /* register is not actually used */
2537 if (semantic_name
== TGSI_SEMANTIC_POSITION
) {
2538 /* fragment position input */
2539 type
= VGPU10_OPCODE_DCL_INPUT_PS_SGV
;
2540 interpolationMode
= VGPU10_INTERPOLATION_LINEAR
;
2541 name
= VGPU10_NAME_POSITION
;
2542 if (usage_mask
& TGSI_WRITEMASK_W
) {
2543 /* we need to replace use of 'w' with '1/w' */
2544 emit
->fs
.fragcoord_input_index
= i
;
2547 else if (semantic_name
== TGSI_SEMANTIC_FACE
) {
2548 /* fragment front-facing input */
2549 type
= VGPU10_OPCODE_DCL_INPUT_PS_SGV
;
2550 interpolationMode
= VGPU10_INTERPOLATION_CONSTANT
;
2551 name
= VGPU10_NAME_IS_FRONT_FACE
;
2552 emit
->fs
.face_input_index
= i
;
2554 else if (semantic_name
== TGSI_SEMANTIC_PRIMID
) {
2556 type
= VGPU10_OPCODE_DCL_INPUT_PS_SGV
;
2557 interpolationMode
= VGPU10_INTERPOLATION_CONSTANT
;
2558 name
= VGPU10_NAME_PRIMITIVE_ID
;
2560 else if (semantic_name
== TGSI_SEMANTIC_SAMPLEID
) {
2561 /* sample index / ID */
2562 type
= VGPU10_OPCODE_DCL_INPUT_PS_SGV
;
2563 interpolationMode
= VGPU10_INTERPOLATION_CONSTANT
;
2564 name
= VGPU10_NAME_SAMPLE_INDEX
;
2567 /* general fragment input */
2568 type
= VGPU10_OPCODE_DCL_INPUT_PS
;
2570 translate_interpolation(emit
,
2571 emit
->info
.input_interpolate
[i
],
2572 emit
->info
.input_interpolate_loc
[i
]);
2574 /* keeps track if flat interpolation mode is being used */
2575 emit
->uses_flat_interp
|=
2576 (interpolationMode
== VGPU10_INTERPOLATION_CONSTANT
);
2578 name
= VGPU10_NAME_UNDEFINED
;
2581 emit_input_declaration(emit
, type
,
2582 VGPU10_OPERAND_TYPE_INPUT
,
2583 VGPU10_OPERAND_INDEX_1D
, index
, 1,
2585 VGPU10_OPERAND_4_COMPONENT
,
2586 VGPU10_OPERAND_4_COMPONENT_MASK_MODE
,
2587 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
,
2591 else if (emit
->unit
== PIPE_SHADER_GEOMETRY
) {
2593 for (i
= 0; i
< emit
->info
.num_inputs
; i
++) {
2594 enum tgsi_semantic semantic_name
= emit
->info
.input_semantic_name
[i
];
2595 unsigned usage_mask
= emit
->info
.input_usage_mask
[i
];
2596 unsigned index
= emit
->linkage
.input_map
[i
];
2597 VGPU10_OPCODE_TYPE opcodeType
, operandType
;
2598 VGPU10_OPERAND_NUM_COMPONENTS numComp
;
2599 VGPU10_OPERAND_4_COMPONENT_SELECTION_MODE selMode
;
2600 VGPU10_SYSTEM_NAME name
;
2601 VGPU10_OPERAND_INDEX_DIMENSION dim
;
2603 if (usage_mask
== 0)
2604 continue; /* register is not actually used */
2606 opcodeType
= VGPU10_OPCODE_DCL_INPUT
;
2607 operandType
= VGPU10_OPERAND_TYPE_INPUT
;
2608 numComp
= VGPU10_OPERAND_4_COMPONENT
;
2609 selMode
= VGPU10_OPERAND_4_COMPONENT_MASK_MODE
;
2610 name
= VGPU10_NAME_UNDEFINED
;
2612 /* all geometry shader inputs are two dimensional except
2615 dim
= VGPU10_OPERAND_INDEX_2D
;
2617 if (semantic_name
== TGSI_SEMANTIC_PRIMID
) {
2619 operandType
= VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
;
2620 dim
= VGPU10_OPERAND_INDEX_0D
;
2621 numComp
= VGPU10_OPERAND_0_COMPONENT
;
2624 /* also save the register index so we can check for
2625 * primitive id when emit src register. We need to modify the
2626 * operand type, index dimension when emit primitive id src reg.
2628 emit
->gs
.prim_id_index
= i
;
2630 else if (semantic_name
== TGSI_SEMANTIC_POSITION
) {
2631 /* vertex position input */
2632 opcodeType
= VGPU10_OPCODE_DCL_INPUT_SIV
;
2633 name
= VGPU10_NAME_POSITION
;
2636 emit_input_declaration(emit
, opcodeType
, operandType
,
2638 emit
->gs
.input_size
,
2641 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
,
2642 VGPU10_INTERPOLATION_UNDEFINED
);
2646 assert(emit
->unit
== PIPE_SHADER_VERTEX
);
2648 for (i
= 0; i
< emit
->info
.file_max
[TGSI_FILE_INPUT
] + 1; i
++) {
2649 unsigned usage_mask
= emit
->info
.input_usage_mask
[i
];
2652 if (usage_mask
== 0)
2653 continue; /* register is not actually used */
2655 emit_input_declaration(emit
, VGPU10_OPCODE_DCL_INPUT
,
2656 VGPU10_OPERAND_TYPE_INPUT
,
2657 VGPU10_OPERAND_INDEX_1D
, index
, 1,
2658 VGPU10_NAME_UNDEFINED
,
2659 VGPU10_OPERAND_4_COMPONENT
,
2660 VGPU10_OPERAND_4_COMPONENT_MASK_MODE
,
2661 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
,
2662 VGPU10_INTERPOLATION_UNDEFINED
);
2671 * Emit all output declarations.
2674 emit_output_declarations(struct svga_shader_emitter_v10
*emit
)
2678 for (i
= 0; i
< emit
->info
.num_outputs
; i
++) {
2679 /*const unsigned usage_mask = emit->info.output_usage_mask[i];*/
2680 const enum tgsi_semantic semantic_name
=
2681 emit
->info
.output_semantic_name
[i
];
2682 const unsigned semantic_index
= emit
->info
.output_semantic_index
[i
];
2685 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
2686 if (semantic_name
== TGSI_SEMANTIC_COLOR
) {
2687 assert(semantic_index
< ARRAY_SIZE(emit
->fs
.color_out_index
));
2689 emit
->fs
.color_out_index
[semantic_index
] = index
;
2691 emit
->fs
.num_color_outputs
= MAX2(emit
->fs
.num_color_outputs
,
2694 /* The semantic index is the shader's color output/buffer index */
2695 emit_output_declaration(emit
,
2696 VGPU10_OPCODE_DCL_OUTPUT
, semantic_index
,
2697 VGPU10_NAME_UNDEFINED
,
2698 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
);
2700 if (semantic_index
== 0) {
2701 if (emit
->key
.fs
.write_color0_to_n_cbufs
> 1) {
2702 /* Emit declarations for the additional color outputs
2706 for (j
= 1; j
< emit
->key
.fs
.write_color0_to_n_cbufs
; j
++) {
2707 /* Allocate a new output index */
2708 unsigned idx
= emit
->info
.num_outputs
+ j
- 1;
2709 emit
->fs
.color_out_index
[j
] = idx
;
2710 emit_output_declaration(emit
,
2711 VGPU10_OPCODE_DCL_OUTPUT
, idx
,
2712 VGPU10_NAME_UNDEFINED
,
2713 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
);
2714 emit
->info
.output_semantic_index
[idx
] = j
;
2717 emit
->fs
.num_color_outputs
=
2718 emit
->key
.fs
.write_color0_to_n_cbufs
;
2722 assert(!emit
->key
.fs
.write_color0_to_n_cbufs
);
2725 else if (semantic_name
== TGSI_SEMANTIC_POSITION
) {
2726 /* Fragment depth output */
2727 emit_fragdepth_output_declaration(emit
);
2729 else if (semantic_name
== TGSI_SEMANTIC_SAMPLEMASK
) {
2730 /* Fragment depth output */
2731 emit_samplemask_output_declaration(emit
);
2734 assert(!"Bad output semantic name");
2739 VGPU10_COMPONENT_NAME name
;
2740 VGPU10_OPCODE_TYPE type
;
2741 unsigned writemask
= VGPU10_OPERAND_4_COMPONENT_MASK_ALL
;
2743 switch (semantic_name
) {
2744 case TGSI_SEMANTIC_POSITION
:
2745 assert(emit
->unit
!= PIPE_SHADER_FRAGMENT
);
2746 type
= VGPU10_OPCODE_DCL_OUTPUT_SIV
;
2747 name
= VGPU10_NAME_POSITION
;
2748 /* Save the index of the vertex position output register */
2749 emit
->vposition
.out_index
= index
;
2751 case TGSI_SEMANTIC_CLIPDIST
:
2752 type
= VGPU10_OPCODE_DCL_OUTPUT_SIV
;
2753 name
= VGPU10_NAME_CLIP_DISTANCE
;
2754 /* save the starting index of the clip distance output register */
2755 if (semantic_index
== 0)
2756 emit
->clip_dist_out_index
= index
;
2757 writemask
= emit
->output_usage_mask
[index
];
2758 writemask
= apply_clip_plane_mask(emit
, writemask
, semantic_index
);
2759 if (writemask
== 0x0) {
2760 continue; /* discard this do-nothing declaration */
2763 case TGSI_SEMANTIC_PRIMID
:
2764 assert(emit
->unit
== PIPE_SHADER_GEOMETRY
);
2765 type
= VGPU10_OPCODE_DCL_OUTPUT_SGV
;
2766 name
= VGPU10_NAME_PRIMITIVE_ID
;
2768 case TGSI_SEMANTIC_LAYER
:
2769 assert(emit
->unit
== PIPE_SHADER_GEOMETRY
);
2770 type
= VGPU10_OPCODE_DCL_OUTPUT_SGV
;
2771 name
= VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX
;
2773 case TGSI_SEMANTIC_CLIPVERTEX
:
2774 type
= VGPU10_OPCODE_DCL_OUTPUT
;
2775 name
= VGPU10_NAME_UNDEFINED
;
2776 emit
->clip_vertex_out_index
= index
;
2779 /* generic output */
2780 type
= VGPU10_OPCODE_DCL_OUTPUT
;
2781 name
= VGPU10_NAME_UNDEFINED
;
2784 emit_output_declaration(emit
, type
, index
, name
, writemask
);
2788 if (emit
->vposition
.so_index
!= INVALID_INDEX
&&
2789 emit
->vposition
.out_index
!= INVALID_INDEX
) {
2791 assert(emit
->unit
!= PIPE_SHADER_FRAGMENT
);
2793 /* Emit the declaration for the non-adjusted vertex position
2794 * for stream output purpose
2796 emit_output_declaration(emit
, VGPU10_OPCODE_DCL_OUTPUT
,
2797 emit
->vposition
.so_index
,
2798 VGPU10_NAME_UNDEFINED
,
2799 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
);
2802 if (emit
->clip_dist_so_index
!= INVALID_INDEX
&&
2803 emit
->clip_dist_out_index
!= INVALID_INDEX
) {
2805 assert(emit
->unit
!= PIPE_SHADER_FRAGMENT
);
2807 /* Emit the declaration for the clip distance shadow copy which
2808 * will be used for stream output purpose and for clip distance
2811 emit_output_declaration(emit
, VGPU10_OPCODE_DCL_OUTPUT
,
2812 emit
->clip_dist_so_index
,
2813 VGPU10_NAME_UNDEFINED
,
2814 emit
->output_usage_mask
[emit
->clip_dist_out_index
]);
2816 if (emit
->info
.num_written_clipdistance
> 4) {
2817 /* for the second clip distance register, each handles 4 planes */
2818 emit_output_declaration(emit
, VGPU10_OPCODE_DCL_OUTPUT
,
2819 emit
->clip_dist_so_index
+ 1,
2820 VGPU10_NAME_UNDEFINED
,
2821 emit
->output_usage_mask
[emit
->clip_dist_out_index
+1]);
2830 * Emit the declaration for the temporary registers.
2833 emit_temporaries_declaration(struct svga_shader_emitter_v10
*emit
)
2835 unsigned total_temps
, reg
, i
;
2837 total_temps
= emit
->num_shader_temps
;
2839 /* If there is indirect access to non-indexable temps in the shader,
2840 * convert those temps to indexable temps. This works around a bug
2841 * in the GLSL->TGSI translator exposed in piglit test
2842 * glsl-1.20/execution/fs-const-array-of-struct-of-array.shader_test.
2843 * Internal temps added by the driver remain as non-indexable temps.
2845 if ((emit
->info
.indirect_files
& (1 << TGSI_FILE_TEMPORARY
)) &&
2846 emit
->num_temp_arrays
== 0) {
2850 emit
->num_temp_arrays
= arrayID
+ 1;
2851 emit
->temp_arrays
[arrayID
].start
= 0;
2852 emit
->temp_arrays
[arrayID
].size
= total_temps
;
2854 /* Fill in the temp_map entries for this temp array */
2855 for (i
= 0; i
< total_temps
; i
++) {
2856 emit
->temp_map
[i
].arrayId
= arrayID
;
2857 emit
->temp_map
[i
].index
= i
;
2861 /* Allocate extra temps for specially-implemented instructions,
2864 total_temps
+= MAX_INTERNAL_TEMPS
;
2866 if (emit
->unit
== PIPE_SHADER_VERTEX
|| emit
->unit
== PIPE_SHADER_GEOMETRY
) {
2867 if (emit
->vposition
.need_prescale
|| emit
->key
.vs
.undo_viewport
||
2868 emit
->key
.clip_plane_enable
||
2869 emit
->vposition
.so_index
!= INVALID_INDEX
) {
2870 emit
->vposition
.tmp_index
= total_temps
;
2874 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
2875 unsigned attrib_mask
= (emit
->key
.vs
.adjust_attrib_w_1
|
2876 emit
->key
.vs
.adjust_attrib_itof
|
2877 emit
->key
.vs
.adjust_attrib_utof
|
2878 emit
->key
.vs
.attrib_is_bgra
|
2879 emit
->key
.vs
.attrib_puint_to_snorm
|
2880 emit
->key
.vs
.attrib_puint_to_uscaled
|
2881 emit
->key
.vs
.attrib_puint_to_sscaled
);
2882 while (attrib_mask
) {
2883 unsigned index
= u_bit_scan(&attrib_mask
);
2884 emit
->vs
.adjusted_input
[index
] = total_temps
++;
2888 if (emit
->clip_mode
== CLIP_DISTANCE
) {
2889 /* We need to write the clip distance to a temporary register
2890 * first. Then it will be copied to the shadow copy for
2891 * the clip distance varying variable and stream output purpose.
2892 * It will also be copied to the actual CLIPDIST register
2893 * according to the enabled clip planes
2895 emit
->clip_dist_tmp_index
= total_temps
++;
2896 if (emit
->info
.num_written_clipdistance
> 4)
2897 total_temps
++; /* second clip register */
2899 else if (emit
->clip_mode
== CLIP_VERTEX
) {
2900 /* We need to convert the TGSI CLIPVERTEX output to one or more
2901 * clip distances. Allocate a temp reg for the clipvertex here.
2903 assert(emit
->info
.writes_clipvertex
> 0);
2904 emit
->clip_vertex_tmp_index
= total_temps
;
2908 else if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
2909 if (emit
->key
.fs
.alpha_func
!= SVGA3D_CMP_ALWAYS
||
2910 emit
->key
.fs
.write_color0_to_n_cbufs
> 1) {
2911 /* Allocate a temp to hold the output color */
2912 emit
->fs
.color_tmp_index
= total_temps
;
2916 if (emit
->fs
.face_input_index
!= INVALID_INDEX
) {
2917 /* Allocate a temp for the +/-1 face register */
2918 emit
->fs
.face_tmp_index
= total_temps
;
2922 if (emit
->fs
.fragcoord_input_index
!= INVALID_INDEX
) {
2923 /* Allocate a temp for modified fragment position register */
2924 emit
->fs
.fragcoord_tmp_index
= total_temps
;
2928 if (emit
->fs
.sample_pos_sys_index
!= INVALID_INDEX
) {
2929 /* Allocate a temp for the sample position */
2930 emit
->fs
.sample_pos_tmp_index
= total_temps
++;
2934 for (i
= 0; i
< emit
->num_address_regs
; i
++) {
2935 emit
->address_reg_index
[i
] = total_temps
++;
2938 /* Initialize the temp_map array which maps TGSI temp indexes to VGPU10
2939 * temp indexes. Basically, we compact all the non-array temp register
2940 * indexes into a consecutive series.
2942 * Before, we may have some TGSI declarations like:
2943 * DCL TEMP[0..1], LOCAL
2944 * DCL TEMP[2..4], ARRAY(1), LOCAL
2945 * DCL TEMP[5..7], ARRAY(2), LOCAL
2946 * plus, some extra temps, like TEMP[8], TEMP[9] for misc things
2948 * After, we'll have a map like this:
2949 * temp_map[0] = { array 0, index 0 }
2950 * temp_map[1] = { array 0, index 1 }
2951 * temp_map[2] = { array 1, index 0 }
2952 * temp_map[3] = { array 1, index 1 }
2953 * temp_map[4] = { array 1, index 2 }
2954 * temp_map[5] = { array 2, index 0 }
2955 * temp_map[6] = { array 2, index 1 }
2956 * temp_map[7] = { array 2, index 2 }
2957 * temp_map[8] = { array 0, index 2 }
2958 * temp_map[9] = { array 0, index 3 }
2960 * We'll declare two arrays of 3 elements, plus a set of four non-indexed
2961 * temps numbered 0..3
2963 * Any time we emit a temporary register index, we'll have to use the
2964 * temp_map[] table to convert the TGSI index to the VGPU10 index.
2966 * Finally, we recompute the total_temps value here.
2969 for (i
= 0; i
< total_temps
; i
++) {
2970 if (emit
->temp_map
[i
].arrayId
== 0) {
2971 emit
->temp_map
[i
].index
= reg
++;
2976 debug_printf("total_temps %u\n", total_temps
);
2977 for (i
= 0; i
< total_temps
; i
++) {
2978 debug_printf("temp %u -> array %u index %u\n",
2979 i
, emit
->temp_map
[i
].arrayId
, emit
->temp_map
[i
].index
);
2985 /* Emit declaration of ordinary temp registers */
2986 if (total_temps
> 0) {
2987 VGPU10OpcodeToken0 opcode0
;
2990 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_TEMPS
;
2992 begin_emit_instruction(emit
);
2993 emit_dword(emit
, opcode0
.value
);
2994 emit_dword(emit
, total_temps
);
2995 end_emit_instruction(emit
);
2998 /* Emit declarations for indexable temp arrays. Skip 0th entry since
3001 for (i
= 1; i
< emit
->num_temp_arrays
; i
++) {
3002 unsigned num_temps
= emit
->temp_arrays
[i
].size
;
3004 if (num_temps
> 0) {
3005 VGPU10OpcodeToken0 opcode0
;
3008 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_INDEXABLE_TEMP
;
3010 begin_emit_instruction(emit
);
3011 emit_dword(emit
, opcode0
.value
);
3012 emit_dword(emit
, i
); /* which array */
3013 emit_dword(emit
, num_temps
);
3014 emit_dword(emit
, 4); /* num components */
3015 end_emit_instruction(emit
);
3017 total_temps
+= num_temps
;
3021 /* Check that the grand total of all regular and indexed temps is
3024 check_register_index(emit
, VGPU10_OPCODE_DCL_TEMPS
, total_temps
- 1);
3031 emit_constant_declaration(struct svga_shader_emitter_v10
*emit
)
3033 VGPU10OpcodeToken0 opcode0
;
3034 VGPU10OperandToken0 operand0
;
3035 unsigned total_consts
, i
;
3038 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_CONSTANT_BUFFER
;
3039 opcode0
.accessPattern
= VGPU10_CB_IMMEDIATE_INDEXED
;
3040 /* XXX or, access pattern = VGPU10_CB_DYNAMIC_INDEXED */
3043 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
3044 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_2D
;
3045 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
3046 operand0
.index1Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
3047 operand0
.operandType
= VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
;
3048 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE
;
3049 operand0
.swizzleX
= 0;
3050 operand0
.swizzleY
= 1;
3051 operand0
.swizzleZ
= 2;
3052 operand0
.swizzleW
= 3;
3055 * Emit declaration for constant buffer [0]. We also allocate
3056 * room for the extra constants here.
3058 total_consts
= emit
->num_shader_consts
[0];
3060 /* Now, allocate constant slots for the "extra" constants.
3061 * Note: it's critical that these extra constant locations
3062 * exactly match what's emitted by the "extra" constants code
3063 * in svga_state_constants.c
3066 /* Vertex position scale/translation */
3067 if (emit
->vposition
.need_prescale
) {
3068 emit
->vposition
.prescale_scale_index
= total_consts
++;
3069 emit
->vposition
.prescale_trans_index
= total_consts
++;
3072 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
3073 if (emit
->key
.vs
.undo_viewport
) {
3074 emit
->vs
.viewport_index
= total_consts
++;
3078 /* user-defined clip planes */
3079 if (emit
->key
.clip_plane_enable
) {
3080 unsigned n
= util_bitcount(emit
->key
.clip_plane_enable
);
3081 assert(emit
->unit
== PIPE_SHADER_VERTEX
||
3082 emit
->unit
== PIPE_SHADER_GEOMETRY
);
3083 for (i
= 0; i
< n
; i
++) {
3084 emit
->clip_plane_const
[i
] = total_consts
++;
3088 for (i
= 0; i
< emit
->num_samplers
; i
++) {
3090 if (emit
->sampler_view
[i
]) {
3092 /* Texcoord scale factors for RECT textures */
3093 if (emit
->key
.tex
[i
].unnormalized
) {
3094 emit
->texcoord_scale_index
[i
] = total_consts
++;
3097 /* Texture buffer sizes */
3098 if (emit
->sampler_target
[i
] == TGSI_TEXTURE_BUFFER
) {
3099 emit
->texture_buffer_size_index
[i
] = total_consts
++;
3104 if (total_consts
> 0) {
3105 begin_emit_instruction(emit
);
3106 emit_dword(emit
, opcode0
.value
);
3107 emit_dword(emit
, operand0
.value
);
3108 emit_dword(emit
, 0); /* which const buffer slot */
3109 emit_dword(emit
, total_consts
);
3110 end_emit_instruction(emit
);
3113 /* Declare remaining constant buffers (UBOs) */
3114 for (i
= 1; i
< ARRAY_SIZE(emit
->num_shader_consts
); i
++) {
3115 if (emit
->num_shader_consts
[i
] > 0) {
3116 begin_emit_instruction(emit
);
3117 emit_dword(emit
, opcode0
.value
);
3118 emit_dword(emit
, operand0
.value
);
3119 emit_dword(emit
, i
); /* which const buffer slot */
3120 emit_dword(emit
, emit
->num_shader_consts
[i
]);
3121 end_emit_instruction(emit
);
3130 * Emit declarations for samplers.
3133 emit_sampler_declarations(struct svga_shader_emitter_v10
*emit
)
3137 for (i
= 0; i
< emit
->num_samplers
; i
++) {
3138 VGPU10OpcodeToken0 opcode0
;
3139 VGPU10OperandToken0 operand0
;
3142 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_SAMPLER
;
3143 opcode0
.samplerMode
= VGPU10_SAMPLER_MODE_DEFAULT
;
3146 operand0
.numComponents
= VGPU10_OPERAND_0_COMPONENT
;
3147 operand0
.operandType
= VGPU10_OPERAND_TYPE_SAMPLER
;
3148 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
3149 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
3151 begin_emit_instruction(emit
);
3152 emit_dword(emit
, opcode0
.value
);
3153 emit_dword(emit
, operand0
.value
);
3154 emit_dword(emit
, i
);
3155 end_emit_instruction(emit
);
3163 * Translate TGSI_TEXTURE_x to VGPU10_RESOURCE_DIMENSION_x.
3166 tgsi_texture_to_resource_dimension(enum tgsi_texture_type target
,
3167 unsigned num_samples
,
3170 if (target
== TGSI_TEXTURE_2D_MSAA
&& num_samples
< 2) {
3171 target
= TGSI_TEXTURE_2D
;
3173 else if (target
== TGSI_TEXTURE_2D_ARRAY_MSAA
&& num_samples
< 2) {
3174 target
= TGSI_TEXTURE_2D_ARRAY
;
3178 case TGSI_TEXTURE_BUFFER
:
3179 return VGPU10_RESOURCE_DIMENSION_BUFFER
;
3180 case TGSI_TEXTURE_1D
:
3181 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D
;
3182 case TGSI_TEXTURE_2D
:
3183 case TGSI_TEXTURE_RECT
:
3184 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D
;
3185 case TGSI_TEXTURE_3D
:
3186 return VGPU10_RESOURCE_DIMENSION_TEXTURE3D
;
3187 case TGSI_TEXTURE_CUBE
:
3188 case TGSI_TEXTURE_SHADOWCUBE
:
3189 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBE
;
3190 case TGSI_TEXTURE_SHADOW1D
:
3191 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D
;
3192 case TGSI_TEXTURE_SHADOW2D
:
3193 case TGSI_TEXTURE_SHADOWRECT
:
3194 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D
;
3195 case TGSI_TEXTURE_1D_ARRAY
:
3196 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
3197 return is_array
? VGPU10_RESOURCE_DIMENSION_TEXTURE1DARRAY
3198 : VGPU10_RESOURCE_DIMENSION_TEXTURE1D
;
3199 case TGSI_TEXTURE_2D_ARRAY
:
3200 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
3201 return is_array
? VGPU10_RESOURCE_DIMENSION_TEXTURE2DARRAY
3202 : VGPU10_RESOURCE_DIMENSION_TEXTURE2D
;
3203 case TGSI_TEXTURE_2D_MSAA
:
3204 return VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS
;
3205 case TGSI_TEXTURE_2D_ARRAY_MSAA
:
3206 return is_array
? VGPU10_RESOURCE_DIMENSION_TEXTURE2DMSARRAY
3207 : VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS
;
3208 case TGSI_TEXTURE_CUBE_ARRAY
:
3209 case TGSI_TEXTURE_SHADOWCUBE_ARRAY
:
3210 return is_array
? VGPU10_RESOURCE_DIMENSION_TEXTURECUBEARRAY
3211 : VGPU10_RESOURCE_DIMENSION_TEXTURECUBE
;
3213 assert(!"Unexpected resource type");
3214 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D
;
3220 * Given a tgsi_return_type, return true iff it is an integer type.
3223 is_integer_type(enum tgsi_return_type type
)
3226 case TGSI_RETURN_TYPE_SINT
:
3227 case TGSI_RETURN_TYPE_UINT
:
3229 case TGSI_RETURN_TYPE_FLOAT
:
3230 case TGSI_RETURN_TYPE_UNORM
:
3231 case TGSI_RETURN_TYPE_SNORM
:
3233 case TGSI_RETURN_TYPE_COUNT
:
3235 assert(!"is_integer_type: Unknown tgsi_return_type");
3242 * Emit declarations for resources.
3243 * XXX When we're sure that all TGSI shaders will be generated with
3244 * sampler view declarations (Ex: DCL SVIEW[n], 2D, UINT) we may
3248 emit_resource_declarations(struct svga_shader_emitter_v10
*emit
)
3252 /* Emit resource decl for each sampler */
3253 for (i
= 0; i
< emit
->num_samplers
; i
++) {
3254 VGPU10OpcodeToken0 opcode0
;
3255 VGPU10OperandToken0 operand0
;
3256 VGPU10ResourceReturnTypeToken return_type
;
3257 VGPU10_RESOURCE_RETURN_TYPE rt
;
3260 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_RESOURCE
;
3261 opcode0
.resourceDimension
=
3262 tgsi_texture_to_resource_dimension(emit
->sampler_target
[i
],
3263 emit
->key
.tex
[i
].num_samples
,
3264 emit
->key
.tex
[i
].is_array
);
3265 opcode0
.sampleCount
= emit
->key
.tex
[i
].num_samples
;
3267 operand0
.numComponents
= VGPU10_OPERAND_0_COMPONENT
;
3268 operand0
.operandType
= VGPU10_OPERAND_TYPE_RESOURCE
;
3269 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
3270 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
3273 /* convert TGSI_RETURN_TYPE_x to VGPU10_RETURN_TYPE_x */
3274 STATIC_ASSERT(VGPU10_RETURN_TYPE_UNORM
== TGSI_RETURN_TYPE_UNORM
+ 1);
3275 STATIC_ASSERT(VGPU10_RETURN_TYPE_SNORM
== TGSI_RETURN_TYPE_SNORM
+ 1);
3276 STATIC_ASSERT(VGPU10_RETURN_TYPE_SINT
== TGSI_RETURN_TYPE_SINT
+ 1);
3277 STATIC_ASSERT(VGPU10_RETURN_TYPE_UINT
== TGSI_RETURN_TYPE_UINT
+ 1);
3278 STATIC_ASSERT(VGPU10_RETURN_TYPE_FLOAT
== TGSI_RETURN_TYPE_FLOAT
+ 1);
3279 assert(emit
->sampler_return_type
[i
] <= TGSI_RETURN_TYPE_FLOAT
);
3280 rt
= emit
->sampler_return_type
[i
] + 1;
3282 switch (emit
->sampler_return_type
[i
]) {
3283 case TGSI_RETURN_TYPE_UNORM
: rt
= VGPU10_RETURN_TYPE_UNORM
; break;
3284 case TGSI_RETURN_TYPE_SNORM
: rt
= VGPU10_RETURN_TYPE_SNORM
; break;
3285 case TGSI_RETURN_TYPE_SINT
: rt
= VGPU10_RETURN_TYPE_SINT
; break;
3286 case TGSI_RETURN_TYPE_UINT
: rt
= VGPU10_RETURN_TYPE_UINT
; break;
3287 case TGSI_RETURN_TYPE_FLOAT
: rt
= VGPU10_RETURN_TYPE_FLOAT
; break;
3288 case TGSI_RETURN_TYPE_COUNT
:
3290 rt
= VGPU10_RETURN_TYPE_FLOAT
;
3291 assert(!"emit_resource_declarations: Unknown tgsi_return_type");
3295 return_type
.value
= 0;
3296 return_type
.component0
= rt
;
3297 return_type
.component1
= rt
;
3298 return_type
.component2
= rt
;
3299 return_type
.component3
= rt
;
3301 begin_emit_instruction(emit
);
3302 emit_dword(emit
, opcode0
.value
);
3303 emit_dword(emit
, operand0
.value
);
3304 emit_dword(emit
, i
);
3305 emit_dword(emit
, return_type
.value
);
3306 end_emit_instruction(emit
);
3313 emit_instruction_op1(struct svga_shader_emitter_v10
*emit
,
3314 VGPU10_OPCODE_TYPE opcode
,
3315 const struct tgsi_full_dst_register
*dst
,
3316 const struct tgsi_full_src_register
*src
,
3319 begin_emit_instruction(emit
);
3320 emit_opcode(emit
, opcode
, saturate
);
3321 emit_dst_register(emit
, dst
);
3322 emit_src_register(emit
, src
);
3323 end_emit_instruction(emit
);
3327 emit_instruction_op2(struct svga_shader_emitter_v10
*emit
,
3328 VGPU10_OPCODE_TYPE opcode
,
3329 const struct tgsi_full_dst_register
*dst
,
3330 const struct tgsi_full_src_register
*src1
,
3331 const struct tgsi_full_src_register
*src2
,
3334 begin_emit_instruction(emit
);
3335 emit_opcode(emit
, opcode
, saturate
);
3336 emit_dst_register(emit
, dst
);
3337 emit_src_register(emit
, src1
);
3338 emit_src_register(emit
, src2
);
3339 end_emit_instruction(emit
);
3343 emit_instruction_op3(struct svga_shader_emitter_v10
*emit
,
3344 VGPU10_OPCODE_TYPE opcode
,
3345 const struct tgsi_full_dst_register
*dst
,
3346 const struct tgsi_full_src_register
*src1
,
3347 const struct tgsi_full_src_register
*src2
,
3348 const struct tgsi_full_src_register
*src3
,
3351 begin_emit_instruction(emit
);
3352 emit_opcode(emit
, opcode
, saturate
);
3353 emit_dst_register(emit
, dst
);
3354 emit_src_register(emit
, src1
);
3355 emit_src_register(emit
, src2
);
3356 emit_src_register(emit
, src3
);
3357 end_emit_instruction(emit
);
3361 * Emit the actual clip distance instructions to be used for clipping
3362 * by copying the clip distance from the temporary registers to the
3363 * CLIPDIST registers written with the enabled planes mask.
3364 * Also copy the clip distance from the temporary to the clip distance
3365 * shadow copy register which will be referenced by the input shader
3368 emit_clip_distance_instructions(struct svga_shader_emitter_v10
*emit
)
3370 struct tgsi_full_src_register tmp_clip_dist_src
;
3371 struct tgsi_full_dst_register clip_dist_dst
;
3374 unsigned clip_plane_enable
= emit
->key
.clip_plane_enable
;
3375 unsigned clip_dist_tmp_index
= emit
->clip_dist_tmp_index
;
3376 int num_written_clipdist
= emit
->info
.num_written_clipdistance
;
3378 assert(emit
->clip_dist_out_index
!= INVALID_INDEX
);
3379 assert(emit
->clip_dist_tmp_index
!= INVALID_INDEX
);
3382 * Temporary reset the temporary clip dist register index so
3383 * that the copy to the real clip dist register will not
3384 * attempt to copy to the temporary register again
3386 emit
->clip_dist_tmp_index
= INVALID_INDEX
;
3388 for (i
= 0; i
< 2 && num_written_clipdist
> 0; i
++, num_written_clipdist
-=4) {
3390 tmp_clip_dist_src
= make_src_temp_reg(clip_dist_tmp_index
+ i
);
3393 * copy to the shadow copy for use by varying variable and
3394 * stream output. All clip distances
3395 * will be written regardless of the enabled clipping planes.
3397 clip_dist_dst
= make_dst_reg(TGSI_FILE_OUTPUT
,
3398 emit
->clip_dist_so_index
+ i
);
3400 /* MOV clip_dist_so, tmp_clip_dist */
3401 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &clip_dist_dst
,
3402 &tmp_clip_dist_src
, FALSE
);
3405 * copy those clip distances to enabled clipping planes
3406 * to CLIPDIST registers for clipping
3408 if (clip_plane_enable
& 0xf) {
3409 clip_dist_dst
= make_dst_reg(TGSI_FILE_OUTPUT
,
3410 emit
->clip_dist_out_index
+ i
);
3411 clip_dist_dst
= writemask_dst(&clip_dist_dst
, clip_plane_enable
& 0xf);
3413 /* MOV CLIPDIST, tmp_clip_dist */
3414 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &clip_dist_dst
,
3415 &tmp_clip_dist_src
, FALSE
);
3417 /* four clip planes per clip register */
3418 clip_plane_enable
>>= 4;
3421 * set the temporary clip dist register index back to the
3422 * temporary index for the next vertex
3424 emit
->clip_dist_tmp_index
= clip_dist_tmp_index
;
3427 /* Declare clip distance output registers for user-defined clip planes
3428 * or the TGSI_CLIPVERTEX output.
3431 emit_clip_distance_declarations(struct svga_shader_emitter_v10
*emit
)
3433 unsigned num_clip_planes
= util_bitcount(emit
->key
.clip_plane_enable
);
3434 unsigned index
= emit
->num_outputs
;
3435 unsigned plane_mask
;
3437 assert(emit
->unit
== PIPE_SHADER_VERTEX
||
3438 emit
->unit
== PIPE_SHADER_GEOMETRY
);
3439 assert(num_clip_planes
<= 8);
3441 if (emit
->clip_mode
!= CLIP_LEGACY
&&
3442 emit
->clip_mode
!= CLIP_VERTEX
) {
3446 if (num_clip_planes
== 0)
3449 /* Declare one or two clip output registers. The number of components
3450 * in the mask reflects the number of clip planes. For example, if 5
3451 * clip planes are needed, we'll declare outputs similar to:
3452 * dcl_output_siv o2.xyzw, clip_distance
3453 * dcl_output_siv o3.x, clip_distance
3455 emit
->clip_dist_out_index
= index
; /* save the starting clip dist reg index */
3457 plane_mask
= (1 << num_clip_planes
) - 1;
3458 if (plane_mask
& 0xf) {
3459 unsigned cmask
= plane_mask
& VGPU10_OPERAND_4_COMPONENT_MASK_ALL
;
3460 emit_output_declaration(emit
, VGPU10_OPCODE_DCL_OUTPUT_SIV
, index
,
3461 VGPU10_NAME_CLIP_DISTANCE
, cmask
);
3462 emit
->num_outputs
++;
3464 if (plane_mask
& 0xf0) {
3465 unsigned cmask
= (plane_mask
>> 4) & VGPU10_OPERAND_4_COMPONENT_MASK_ALL
;
3466 emit_output_declaration(emit
, VGPU10_OPCODE_DCL_OUTPUT_SIV
, index
+ 1,
3467 VGPU10_NAME_CLIP_DISTANCE
, cmask
);
3468 emit
->num_outputs
++;
3474 * Emit the instructions for writing to the clip distance registers
3475 * to handle legacy/automatic clip planes.
3476 * For each clip plane, the distance is the dot product of the vertex
3477 * position (found in TEMP[vpos_tmp_index]) and the clip plane coefficients.
3478 * This is not used when the shader has an explicit CLIPVERTEX or CLIPDISTANCE
3479 * output registers already declared.
3482 emit_clip_distance_from_vpos(struct svga_shader_emitter_v10
*emit
,
3483 unsigned vpos_tmp_index
)
3485 unsigned i
, num_clip_planes
= util_bitcount(emit
->key
.clip_plane_enable
);
3487 assert(emit
->clip_mode
== CLIP_LEGACY
);
3488 assert(num_clip_planes
<= 8);
3490 assert(emit
->unit
== PIPE_SHADER_VERTEX
||
3491 emit
->unit
== PIPE_SHADER_GEOMETRY
);
3493 for (i
= 0; i
< num_clip_planes
; i
++) {
3494 struct tgsi_full_dst_register dst
;
3495 struct tgsi_full_src_register plane_src
, vpos_src
;
3496 unsigned reg_index
= emit
->clip_dist_out_index
+ i
/ 4;
3497 unsigned comp
= i
% 4;
3498 unsigned writemask
= VGPU10_OPERAND_4_COMPONENT_MASK_X
<< comp
;
3500 /* create dst, src regs */
3501 dst
= make_dst_reg(TGSI_FILE_OUTPUT
, reg_index
);
3502 dst
= writemask_dst(&dst
, writemask
);
3504 plane_src
= make_src_const_reg(emit
->clip_plane_const
[i
]);
3505 vpos_src
= make_src_temp_reg(vpos_tmp_index
);
3507 /* DP4 clip_dist, plane, vpos */
3508 emit_instruction_op2(emit
, VGPU10_OPCODE_DP4
, &dst
,
3509 &plane_src
, &vpos_src
, FALSE
);
3515 * Emit the instructions for computing the clip distance results from
3516 * the clip vertex temporary.
3517 * For each clip plane, the distance is the dot product of the clip vertex
3518 * position (found in a temp reg) and the clip plane coefficients.
3521 emit_clip_vertex_instructions(struct svga_shader_emitter_v10
*emit
)
3523 const unsigned num_clip
= util_bitcount(emit
->key
.clip_plane_enable
);
3525 struct tgsi_full_dst_register dst
;
3526 struct tgsi_full_src_register clipvert_src
;
3527 const unsigned clip_vertex_tmp
= emit
->clip_vertex_tmp_index
;
3529 assert(emit
->unit
== PIPE_SHADER_VERTEX
||
3530 emit
->unit
== PIPE_SHADER_GEOMETRY
);
3532 assert(emit
->clip_mode
== CLIP_VERTEX
);
3534 clipvert_src
= make_src_temp_reg(clip_vertex_tmp
);
3536 for (i
= 0; i
< num_clip
; i
++) {
3537 struct tgsi_full_src_register plane_src
;
3538 unsigned reg_index
= emit
->clip_dist_out_index
+ i
/ 4;
3539 unsigned comp
= i
% 4;
3540 unsigned writemask
= VGPU10_OPERAND_4_COMPONENT_MASK_X
<< comp
;
3542 /* create dst, src regs */
3543 dst
= make_dst_reg(TGSI_FILE_OUTPUT
, reg_index
);
3544 dst
= writemask_dst(&dst
, writemask
);
3546 plane_src
= make_src_const_reg(emit
->clip_plane_const
[i
]);
3548 /* DP4 clip_dist, plane, vpos */
3549 emit_instruction_op2(emit
, VGPU10_OPCODE_DP4
, &dst
,
3550 &plane_src
, &clipvert_src
, FALSE
);
3553 /* copy temporary clip vertex register to the clip vertex register */
3555 assert(emit
->clip_vertex_out_index
!= INVALID_INDEX
);
3558 * temporary reset the temporary clip vertex register index so
3559 * that copy to the clip vertex register will not attempt
3560 * to copy to the temporary register again
3562 emit
->clip_vertex_tmp_index
= INVALID_INDEX
;
3564 /* MOV clip_vertex, clip_vertex_tmp */
3565 dst
= make_dst_reg(TGSI_FILE_OUTPUT
, emit
->clip_vertex_out_index
);
3566 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
3567 &dst
, &clipvert_src
, FALSE
);
3570 * set the temporary clip vertex register index back to the
3571 * temporary index for the next vertex
3573 emit
->clip_vertex_tmp_index
= clip_vertex_tmp
;
3577 * Emit code to convert RGBA to BGRA
3580 emit_swap_r_b(struct svga_shader_emitter_v10
*emit
,
3581 const struct tgsi_full_dst_register
*dst
,
3582 const struct tgsi_full_src_register
*src
)
3584 struct tgsi_full_src_register bgra_src
=
3585 swizzle_src(src
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_X
, TGSI_SWIZZLE_W
);
3587 begin_emit_instruction(emit
);
3588 emit_opcode(emit
, VGPU10_OPCODE_MOV
, FALSE
);
3589 emit_dst_register(emit
, dst
);
3590 emit_src_register(emit
, &bgra_src
);
3591 end_emit_instruction(emit
);
3595 /** Convert from 10_10_10_2 normalized to 10_10_10_2_snorm */
3597 emit_puint_to_snorm(struct svga_shader_emitter_v10
*emit
,
3598 const struct tgsi_full_dst_register
*dst
,
3599 const struct tgsi_full_src_register
*src
)
3601 struct tgsi_full_src_register half
= make_immediate_reg_float(emit
, 0.5f
);
3602 struct tgsi_full_src_register two
=
3603 make_immediate_reg_float4(emit
, 2.0f
, 2.0f
, 2.0f
, 3.0f
);
3604 struct tgsi_full_src_register neg_two
=
3605 make_immediate_reg_float4(emit
, -2.0f
, -2.0f
, -2.0f
, -1.66666f
);
3607 unsigned val_tmp
= get_temp_index(emit
);
3608 struct tgsi_full_dst_register val_dst
= make_dst_temp_reg(val_tmp
);
3609 struct tgsi_full_src_register val_src
= make_src_temp_reg(val_tmp
);
3611 unsigned bias_tmp
= get_temp_index(emit
);
3612 struct tgsi_full_dst_register bias_dst
= make_dst_temp_reg(bias_tmp
);
3613 struct tgsi_full_src_register bias_src
= make_src_temp_reg(bias_tmp
);
3615 /* val = src * 2.0 */
3616 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &val_dst
,
3619 /* bias = src > 0.5 */
3620 emit_instruction_op2(emit
, VGPU10_OPCODE_GE
, &bias_dst
,
3623 /* bias = bias & -2.0 */
3624 emit_instruction_op2(emit
, VGPU10_OPCODE_AND
, &bias_dst
,
3625 &bias_src
, &neg_two
, FALSE
);
3627 /* dst = val + bias */
3628 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, dst
,
3629 &val_src
, &bias_src
, FALSE
);
3631 free_temp_indexes(emit
);
3635 /** Convert from 10_10_10_2_unorm to 10_10_10_2_uscaled */
3637 emit_puint_to_uscaled(struct svga_shader_emitter_v10
*emit
,
3638 const struct tgsi_full_dst_register
*dst
,
3639 const struct tgsi_full_src_register
*src
)
3641 struct tgsi_full_src_register scale
=
3642 make_immediate_reg_float4(emit
, 1023.0f
, 1023.0f
, 1023.0f
, 3.0f
);
3644 /* dst = src * scale */
3645 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, dst
, src
, &scale
, FALSE
);
3649 /** Convert from R32_UINT to 10_10_10_2_sscaled */
3651 emit_puint_to_sscaled(struct svga_shader_emitter_v10
*emit
,
3652 const struct tgsi_full_dst_register
*dst
,
3653 const struct tgsi_full_src_register
*src
)
3655 struct tgsi_full_src_register lshift
=
3656 make_immediate_reg_int4(emit
, 22, 12, 2, 0);
3657 struct tgsi_full_src_register rshift
=
3658 make_immediate_reg_int4(emit
, 22, 22, 22, 30);
3660 struct tgsi_full_src_register src_xxxx
= scalar_src(src
, TGSI_SWIZZLE_X
);
3662 unsigned tmp
= get_temp_index(emit
);
3663 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3664 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3667 * r = (pixel << 22) >> 22; # signed int in [511, -512]
3668 * g = (pixel << 12) >> 22; # signed int in [511, -512]
3669 * b = (pixel << 2) >> 22; # signed int in [511, -512]
3670 * a = (pixel << 0) >> 30; # signed int in [1, -2]
3671 * dst = i_to_f(r,g,b,a); # convert to float
3673 emit_instruction_op2(emit
, VGPU10_OPCODE_ISHL
, &tmp_dst
,
3674 &src_xxxx
, &lshift
, FALSE
);
3675 emit_instruction_op2(emit
, VGPU10_OPCODE_ISHR
, &tmp_dst
,
3676 &tmp_src
, &rshift
, FALSE
);
3677 emit_instruction_op1(emit
, VGPU10_OPCODE_ITOF
, dst
, &tmp_src
, FALSE
);
3679 free_temp_indexes(emit
);
3684 * Emit code for TGSI_OPCODE_ARL or TGSI_OPCODE_UARL instruction.
3687 emit_arl_uarl(struct svga_shader_emitter_v10
*emit
,
3688 const struct tgsi_full_instruction
*inst
)
3690 unsigned index
= inst
->Dst
[0].Register
.Index
;
3691 struct tgsi_full_dst_register dst
;
3692 VGPU10_OPCODE_TYPE opcode
;
3694 assert(index
< MAX_VGPU10_ADDR_REGS
);
3695 dst
= make_dst_temp_reg(emit
->address_reg_index
[index
]);
3699 * FTOI address_tmp, s0
3703 * MOV address_tmp, s0
3705 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_ARL
)
3706 opcode
= VGPU10_OPCODE_FTOI
;
3708 opcode
= VGPU10_OPCODE_MOV
;
3710 emit_instruction_op1(emit
, opcode
, &dst
, &inst
->Src
[0], FALSE
);
3717 * Emit code for TGSI_OPCODE_CAL instruction.
3720 emit_cal(struct svga_shader_emitter_v10
*emit
,
3721 const struct tgsi_full_instruction
*inst
)
3723 unsigned label
= inst
->Label
.Label
;
3724 VGPU10OperandToken0 operand
;
3726 operand
.operandType
= VGPU10_OPERAND_TYPE_LABEL
;
3728 begin_emit_instruction(emit
);
3729 emit_dword(emit
, operand
.value
);
3730 emit_dword(emit
, label
);
3731 end_emit_instruction(emit
);
3738 * Emit code for TGSI_OPCODE_IABS instruction.
3741 emit_iabs(struct svga_shader_emitter_v10
*emit
,
3742 const struct tgsi_full_instruction
*inst
)
3744 /* dst.x = (src0.x < 0) ? -src0.x : src0.x
3745 * dst.y = (src0.y < 0) ? -src0.y : src0.y
3746 * dst.z = (src0.z < 0) ? -src0.z : src0.z
3747 * dst.w = (src0.w < 0) ? -src0.w : src0.w
3750 * IMAX dst, src, neg(src)
3752 struct tgsi_full_src_register neg_src
= negate_src(&inst
->Src
[0]);
3753 emit_instruction_op2(emit
, VGPU10_OPCODE_IMAX
, &inst
->Dst
[0],
3754 &inst
->Src
[0], &neg_src
, FALSE
);
3761 * Emit code for TGSI_OPCODE_CMP instruction.
3764 emit_cmp(struct svga_shader_emitter_v10
*emit
,
3765 const struct tgsi_full_instruction
*inst
)
3767 /* dst.x = (src0.x < 0) ? src1.x : src2.x
3768 * dst.y = (src0.y < 0) ? src1.y : src2.y
3769 * dst.z = (src0.z < 0) ? src1.z : src2.z
3770 * dst.w = (src0.w < 0) ? src1.w : src2.w
3774 * MOVC dst, tmp, src1, src2
3776 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
3777 unsigned tmp
= get_temp_index(emit
);
3778 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3779 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3781 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
,
3782 &inst
->Src
[0], &zero
, FALSE
);
3783 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0],
3784 &tmp_src
, &inst
->Src
[1], &inst
->Src
[2],
3785 inst
->Instruction
.Saturate
);
3787 free_temp_indexes(emit
);
3794 * Emit code for TGSI_OPCODE_DST instruction.
3797 emit_dst(struct svga_shader_emitter_v10
*emit
,
3798 const struct tgsi_full_instruction
*inst
)
3802 * dst.y = src0.y * src1.y
3807 struct tgsi_full_src_register s0_yyyy
=
3808 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Y
);
3809 struct tgsi_full_src_register s0_zzzz
=
3810 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Z
);
3811 struct tgsi_full_src_register s1_yyyy
=
3812 scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_Y
);
3813 struct tgsi_full_src_register s1_wwww
=
3814 scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_W
);
3817 * If dst and either src0 and src1 are the same we need
3818 * to create a temporary for it and insert a extra move.
3820 unsigned tmp_move
= get_temp_index(emit
);
3821 struct tgsi_full_src_register move_src
= make_src_temp_reg(tmp_move
);
3822 struct tgsi_full_dst_register move_dst
= make_dst_temp_reg(tmp_move
);
3824 /* MOV dst.x, 1.0 */
3825 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
3826 struct tgsi_full_dst_register dst_x
=
3827 writemask_dst(&move_dst
, TGSI_WRITEMASK_X
);
3828 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
3830 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_x
, &one
, FALSE
);
3833 /* MUL dst.y, s0.y, s1.y */
3834 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
3835 struct tgsi_full_dst_register dst_y
=
3836 writemask_dst(&move_dst
, TGSI_WRITEMASK_Y
);
3838 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &dst_y
, &s0_yyyy
,
3839 &s1_yyyy
, inst
->Instruction
.Saturate
);
3842 /* MOV dst.z, s0.z */
3843 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
3844 struct tgsi_full_dst_register dst_z
=
3845 writemask_dst(&move_dst
, TGSI_WRITEMASK_Z
);
3847 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_z
, &s0_zzzz
,
3848 inst
->Instruction
.Saturate
);
3851 /* MOV dst.w, s1.w */
3852 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
3853 struct tgsi_full_dst_register dst_w
=
3854 writemask_dst(&move_dst
, TGSI_WRITEMASK_W
);
3856 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &s1_wwww
,
3857 inst
->Instruction
.Saturate
);
3860 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &move_src
,
3862 free_temp_indexes(emit
);
3870 * Emit code for TGSI_OPCODE_ENDPRIM (GS only)
3873 emit_endprim(struct svga_shader_emitter_v10
*emit
,
3874 const struct tgsi_full_instruction
*inst
)
3876 assert(emit
->unit
== PIPE_SHADER_GEOMETRY
);
3878 /* We can't use emit_simple() because the TGSI instruction has one
3879 * operand (vertex stream number) which we must ignore for VGPU10.
3881 begin_emit_instruction(emit
);
3882 emit_opcode(emit
, VGPU10_OPCODE_CUT
, FALSE
);
3883 end_emit_instruction(emit
);
3889 * Emit code for TGSI_OPCODE_EX2 (2^x) instruction.
3892 emit_ex2(struct svga_shader_emitter_v10
*emit
,
3893 const struct tgsi_full_instruction
*inst
)
3895 /* Note that TGSI_OPCODE_EX2 computes only one value from src.x
3896 * while VGPU10 computes four values.
3899 * dst.xyzw = 2.0 ^ src.x
3902 struct tgsi_full_src_register src_xxxx
=
3903 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
3904 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
3906 /* EXP tmp, s0.xxxx */
3907 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &inst
->Dst
[0], &src_xxxx
,
3908 inst
->Instruction
.Saturate
);
3915 * Emit code for TGSI_OPCODE_EXP instruction.
3918 emit_exp(struct svga_shader_emitter_v10
*emit
,
3919 const struct tgsi_full_instruction
*inst
)
3922 * dst.x = 2 ^ floor(s0.x)
3923 * dst.y = s0.x - floor(s0.x)
3928 struct tgsi_full_src_register src_xxxx
=
3929 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_X
);
3930 unsigned tmp
= get_temp_index(emit
);
3931 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3932 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3935 * If dst and src are the same we need to create
3936 * a temporary for it and insert a extra move.
3938 unsigned tmp_move
= get_temp_index(emit
);
3939 struct tgsi_full_src_register move_src
= make_src_temp_reg(tmp_move
);
3940 struct tgsi_full_dst_register move_dst
= make_dst_temp_reg(tmp_move
);
3942 /* only use X component of temp reg */
3943 tmp_dst
= writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
3944 tmp_src
= scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
3946 /* ROUND_NI tmp.x, s0.x */
3947 emit_instruction_op1(emit
, VGPU10_OPCODE_ROUND_NI
, &tmp_dst
,
3948 &src_xxxx
, FALSE
); /* round to -infinity */
3950 /* EXP dst.x, tmp.x */
3951 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
3952 struct tgsi_full_dst_register dst_x
=
3953 writemask_dst(&move_dst
, TGSI_WRITEMASK_X
);
3955 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &dst_x
, &tmp_src
,
3956 inst
->Instruction
.Saturate
);
3959 /* ADD dst.y, s0.x, -tmp */
3960 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
3961 struct tgsi_full_dst_register dst_y
=
3962 writemask_dst(&move_dst
, TGSI_WRITEMASK_Y
);
3963 struct tgsi_full_src_register neg_tmp_src
= negate_src(&tmp_src
);
3965 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &dst_y
, &src_xxxx
,
3966 &neg_tmp_src
, inst
->Instruction
.Saturate
);
3969 /* EXP dst.z, s0.x */
3970 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
3971 struct tgsi_full_dst_register dst_z
=
3972 writemask_dst(&move_dst
, TGSI_WRITEMASK_Z
);
3974 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &dst_z
, &src_xxxx
,
3975 inst
->Instruction
.Saturate
);
3978 /* MOV dst.w, 1.0 */
3979 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
3980 struct tgsi_full_dst_register dst_w
=
3981 writemask_dst(&move_dst
, TGSI_WRITEMASK_W
);
3982 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
3984 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &one
,
3988 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &move_src
,
3991 free_temp_indexes(emit
);
3998 * Emit code for TGSI_OPCODE_IF instruction.
4001 emit_if(struct svga_shader_emitter_v10
*emit
,
4002 const struct tgsi_full_instruction
*inst
)
4004 VGPU10OpcodeToken0 opcode0
;
4006 /* The src register should be a scalar */
4007 assert(inst
->Src
[0].Register
.SwizzleX
== inst
->Src
[0].Register
.SwizzleY
&&
4008 inst
->Src
[0].Register
.SwizzleX
== inst
->Src
[0].Register
.SwizzleZ
&&
4009 inst
->Src
[0].Register
.SwizzleX
== inst
->Src
[0].Register
.SwizzleW
);
4011 /* The only special thing here is that we need to set the
4012 * VGPU10_INSTRUCTION_TEST_NONZERO flag since we want to test if
4013 * src.x is non-zero.
4016 opcode0
.opcodeType
= VGPU10_OPCODE_IF
;
4017 opcode0
.testBoolean
= VGPU10_INSTRUCTION_TEST_NONZERO
;
4019 begin_emit_instruction(emit
);
4020 emit_dword(emit
, opcode0
.value
);
4021 emit_src_register(emit
, &inst
->Src
[0]);
4022 end_emit_instruction(emit
);
4029 * Emit code for TGSI_OPCODE_KILL_IF instruction (kill fragment if any of
4030 * the register components are negative).
4033 emit_kill_if(struct svga_shader_emitter_v10
*emit
,
4034 const struct tgsi_full_instruction
*inst
)
4036 unsigned tmp
= get_temp_index(emit
);
4037 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4038 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4040 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4042 struct tgsi_full_dst_register tmp_dst_x
=
4043 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4044 struct tgsi_full_src_register tmp_src_xxxx
=
4045 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4047 /* tmp = src[0] < 0.0 */
4048 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
, &inst
->Src
[0],
4051 if (!same_swizzle_terms(&inst
->Src
[0])) {
4052 /* If the swizzle is not XXXX, YYYY, ZZZZ or WWWW we need to
4053 * logically OR the swizzle terms. Most uses of KILL_IF only
4054 * test one channel so it's good to avoid these extra steps.
4056 struct tgsi_full_src_register tmp_src_yyyy
=
4057 scalar_src(&tmp_src
, TGSI_SWIZZLE_Y
);
4058 struct tgsi_full_src_register tmp_src_zzzz
=
4059 scalar_src(&tmp_src
, TGSI_SWIZZLE_Z
);
4060 struct tgsi_full_src_register tmp_src_wwww
=
4061 scalar_src(&tmp_src
, TGSI_SWIZZLE_W
);
4063 emit_instruction_op2(emit
, VGPU10_OPCODE_OR
, &tmp_dst_x
, &tmp_src_xxxx
,
4064 &tmp_src_yyyy
, FALSE
);
4065 emit_instruction_op2(emit
, VGPU10_OPCODE_OR
, &tmp_dst_x
, &tmp_src_xxxx
,
4066 &tmp_src_zzzz
, FALSE
);
4067 emit_instruction_op2(emit
, VGPU10_OPCODE_OR
, &tmp_dst_x
, &tmp_src_xxxx
,
4068 &tmp_src_wwww
, FALSE
);
4071 begin_emit_instruction(emit
);
4072 emit_discard_opcode(emit
, TRUE
); /* discard if src0.x is non-zero */
4073 emit_src_register(emit
, &tmp_src_xxxx
);
4074 end_emit_instruction(emit
);
4076 free_temp_indexes(emit
);
4083 * Emit code for TGSI_OPCODE_KILL instruction (unconditional discard).
4086 emit_kill(struct svga_shader_emitter_v10
*emit
,
4087 const struct tgsi_full_instruction
*inst
)
4089 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4091 /* DISCARD if 0.0 is zero */
4092 begin_emit_instruction(emit
);
4093 emit_discard_opcode(emit
, FALSE
);
4094 emit_src_register(emit
, &zero
);
4095 end_emit_instruction(emit
);
4102 * Emit code for TGSI_OPCODE_LG2 instruction.
4105 emit_lg2(struct svga_shader_emitter_v10
*emit
,
4106 const struct tgsi_full_instruction
*inst
)
4108 /* Note that TGSI_OPCODE_LG2 computes only one value from src.x
4109 * while VGPU10 computes four values.
4112 * dst.xyzw = log2(src.x)
4115 struct tgsi_full_src_register src_xxxx
=
4116 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
4117 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
4119 /* LOG tmp, s0.xxxx */
4120 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &inst
->Dst
[0], &src_xxxx
,
4121 inst
->Instruction
.Saturate
);
4128 * Emit code for TGSI_OPCODE_LIT instruction.
4131 emit_lit(struct svga_shader_emitter_v10
*emit
,
4132 const struct tgsi_full_instruction
*inst
)
4134 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4137 * If dst and src are the same we need to create
4138 * a temporary for it and insert a extra move.
4140 unsigned tmp_move
= get_temp_index(emit
);
4141 struct tgsi_full_src_register move_src
= make_src_temp_reg(tmp_move
);
4142 struct tgsi_full_dst_register move_dst
= make_dst_temp_reg(tmp_move
);
4146 * dst.y = max(src.x, 0)
4147 * dst.z = (src.x > 0) ? max(src.y, 0)^{clamp(src.w, -128, 128))} : 0
4151 /* MOV dst.x, 1.0 */
4152 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
4153 struct tgsi_full_dst_register dst_x
=
4154 writemask_dst(&move_dst
, TGSI_WRITEMASK_X
);
4155 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_x
, &one
, FALSE
);
4158 /* MOV dst.w, 1.0 */
4159 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
4160 struct tgsi_full_dst_register dst_w
=
4161 writemask_dst(&move_dst
, TGSI_WRITEMASK_W
);
4162 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &one
, FALSE
);
4165 /* MAX dst.y, src.x, 0.0 */
4166 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
4167 struct tgsi_full_dst_register dst_y
=
4168 writemask_dst(&move_dst
, TGSI_WRITEMASK_Y
);
4169 struct tgsi_full_src_register zero
=
4170 make_immediate_reg_float(emit
, 0.0f
);
4171 struct tgsi_full_src_register src_xxxx
=
4172 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
4173 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
4175 emit_instruction_op2(emit
, VGPU10_OPCODE_MAX
, &dst_y
, &src_xxxx
,
4176 &zero
, inst
->Instruction
.Saturate
);
4180 * tmp1 = clamp(src.w, -128, 128);
4181 * MAX tmp1, src.w, -128
4182 * MIN tmp1, tmp1, 128
4184 * tmp2 = max(tmp2, 0);
4185 * MAX tmp2, src.y, 0
4187 * tmp1 = pow(tmp2, tmp1);
4189 * MUL tmp1, tmp2, tmp1
4192 * tmp1 = (src.w == 0) ? 1 : tmp1;
4194 * MOVC tmp1, tmp2, 1.0, tmp1
4196 * dst.z = (0 < src.x) ? tmp1 : 0;
4198 * MOVC dst.z, tmp2, tmp1, 0.0
4200 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
4201 struct tgsi_full_dst_register dst_z
=
4202 writemask_dst(&move_dst
, TGSI_WRITEMASK_Z
);
4204 unsigned tmp1
= get_temp_index(emit
);
4205 struct tgsi_full_src_register tmp1_src
= make_src_temp_reg(tmp1
);
4206 struct tgsi_full_dst_register tmp1_dst
= make_dst_temp_reg(tmp1
);
4207 unsigned tmp2
= get_temp_index(emit
);
4208 struct tgsi_full_src_register tmp2_src
= make_src_temp_reg(tmp2
);
4209 struct tgsi_full_dst_register tmp2_dst
= make_dst_temp_reg(tmp2
);
4211 struct tgsi_full_src_register src_xxxx
=
4212 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_X
);
4213 struct tgsi_full_src_register src_yyyy
=
4214 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Y
);
4215 struct tgsi_full_src_register src_wwww
=
4216 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_W
);
4218 struct tgsi_full_src_register zero
=
4219 make_immediate_reg_float(emit
, 0.0f
);
4220 struct tgsi_full_src_register lowerbound
=
4221 make_immediate_reg_float(emit
, -128.0f
);
4222 struct tgsi_full_src_register upperbound
=
4223 make_immediate_reg_float(emit
, 128.0f
);
4225 emit_instruction_op2(emit
, VGPU10_OPCODE_MAX
, &tmp1_dst
, &src_wwww
,
4226 &lowerbound
, FALSE
);
4227 emit_instruction_op2(emit
, VGPU10_OPCODE_MIN
, &tmp1_dst
, &tmp1_src
,
4228 &upperbound
, FALSE
);
4229 emit_instruction_op2(emit
, VGPU10_OPCODE_MAX
, &tmp2_dst
, &src_yyyy
,
4232 /* POW tmp1, tmp2, tmp1 */
4233 /* LOG tmp2, tmp2 */
4234 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &tmp2_dst
, &tmp2_src
,
4237 /* MUL tmp1, tmp2, tmp1 */
4238 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp1_dst
, &tmp2_src
,
4241 /* EXP tmp1, tmp1 */
4242 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &tmp1_dst
, &tmp1_src
,
4245 /* EQ tmp2, 0, src.w */
4246 emit_instruction_op2(emit
, VGPU10_OPCODE_EQ
, &tmp2_dst
, &zero
,
4248 /* MOVC tmp1.z, tmp2, tmp1, 1.0 */
4249 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &tmp1_dst
,
4250 &tmp2_src
, &one
, &tmp1_src
, FALSE
);
4252 /* LT tmp2, 0, src.x */
4253 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp2_dst
, &zero
,
4255 /* MOVC dst.z, tmp2, tmp1, 0.0 */
4256 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &dst_z
,
4257 &tmp2_src
, &tmp1_src
, &zero
, FALSE
);
4260 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &move_src
,
4262 free_temp_indexes(emit
);
4269 * Emit Level Of Detail Query (LODQ) instruction.
4272 emit_lodq(struct svga_shader_emitter_v10
*emit
,
4273 const struct tgsi_full_instruction
*inst
)
4275 const uint unit
= inst
->Src
[1].Register
.Index
;
4277 assert(emit
->version
>= 41);
4279 /* LOD dst, coord, resource, sampler */
4280 begin_emit_instruction(emit
);
4281 emit_opcode(emit
, VGPU10_OPCODE_LOD
, FALSE
);
4282 emit_dst_register(emit
, &inst
->Dst
[0]);
4283 emit_src_register(emit
, &inst
->Src
[0]); /* coord */
4284 emit_resource_register(emit
, unit
);
4285 emit_sampler_register(emit
, unit
);
4286 end_emit_instruction(emit
);
4293 * Emit code for TGSI_OPCODE_LOG instruction.
4296 emit_log(struct svga_shader_emitter_v10
*emit
,
4297 const struct tgsi_full_instruction
*inst
)
4300 * dst.x = floor(lg2(abs(s0.x)))
4301 * dst.y = abs(s0.x) / (2 ^ floor(lg2(abs(s0.x))))
4302 * dst.z = lg2(abs(s0.x))
4306 struct tgsi_full_src_register src_xxxx
=
4307 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_X
);
4308 unsigned tmp
= get_temp_index(emit
);
4309 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4310 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4311 struct tgsi_full_src_register abs_src_xxxx
= absolute_src(&src_xxxx
);
4313 /* only use X component of temp reg */
4314 tmp_dst
= writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4315 tmp_src
= scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4317 /* LOG tmp.x, abs(s0.x) */
4318 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_XYZ
) {
4319 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &tmp_dst
,
4320 &abs_src_xxxx
, FALSE
);
4323 /* MOV dst.z, tmp.x */
4324 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
4325 struct tgsi_full_dst_register dst_z
=
4326 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_Z
);
4328 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_z
,
4329 &tmp_src
, inst
->Instruction
.Saturate
);
4332 /* FLR tmp.x, tmp.x */
4333 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_XY
) {
4334 emit_instruction_op1(emit
, VGPU10_OPCODE_ROUND_NI
, &tmp_dst
,
4338 /* MOV dst.x, tmp.x */
4339 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
4340 struct tgsi_full_dst_register dst_x
=
4341 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_X
);
4343 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_x
, &tmp_src
,
4344 inst
->Instruction
.Saturate
);
4347 /* EXP tmp.x, tmp.x */
4348 /* DIV dst.y, abs(s0.x), tmp.x */
4349 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
4350 struct tgsi_full_dst_register dst_y
=
4351 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_Y
);
4353 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &tmp_dst
, &tmp_src
,
4355 emit_instruction_op2(emit
, VGPU10_OPCODE_DIV
, &dst_y
, &abs_src_xxxx
,
4356 &tmp_src
, inst
->Instruction
.Saturate
);
4359 /* MOV dst.w, 1.0 */
4360 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
4361 struct tgsi_full_dst_register dst_w
=
4362 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_W
);
4363 struct tgsi_full_src_register one
=
4364 make_immediate_reg_float(emit
, 1.0f
);
4366 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &one
, FALSE
);
4369 free_temp_indexes(emit
);
4376 * Emit code for TGSI_OPCODE_LRP instruction.
4379 emit_lrp(struct svga_shader_emitter_v10
*emit
,
4380 const struct tgsi_full_instruction
*inst
)
4382 /* dst = LRP(s0, s1, s2):
4383 * dst = s0 * (s1 - s2) + s2
4385 * SUB tmp, s1, s2; tmp = s1 - s2
4386 * MAD dst, s0, tmp, s2; dst = s0 * t1 + s2
4388 unsigned tmp
= get_temp_index(emit
);
4389 struct tgsi_full_src_register src_tmp
= make_src_temp_reg(tmp
);
4390 struct tgsi_full_dst_register dst_tmp
= make_dst_temp_reg(tmp
);
4391 struct tgsi_full_src_register neg_src2
= negate_src(&inst
->Src
[2]);
4393 /* ADD tmp, s1, -s2 */
4394 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &dst_tmp
,
4395 &inst
->Src
[1], &neg_src2
, FALSE
);
4397 /* MAD dst, s1, tmp, s3 */
4398 emit_instruction_op3(emit
, VGPU10_OPCODE_MAD
, &inst
->Dst
[0],
4399 &inst
->Src
[0], &src_tmp
, &inst
->Src
[2],
4400 inst
->Instruction
.Saturate
);
4402 free_temp_indexes(emit
);
4409 * Emit code for TGSI_OPCODE_POW instruction.
4412 emit_pow(struct svga_shader_emitter_v10
*emit
,
4413 const struct tgsi_full_instruction
*inst
)
4415 /* Note that TGSI_OPCODE_POW computes only one value from src0.x and
4416 * src1.x while VGPU10 computes four values.
4418 * dst = POW(src0, src1):
4419 * dst.xyzw = src0.x ^ src1.x
4421 unsigned tmp
= get_temp_index(emit
);
4422 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4423 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4424 struct tgsi_full_src_register src0_xxxx
=
4425 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
4426 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
4427 struct tgsi_full_src_register src1_xxxx
=
4428 swizzle_src(&inst
->Src
[1], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
4429 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
4431 /* LOG tmp, s0.xxxx */
4432 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &tmp_dst
, &src0_xxxx
,
4435 /* MUL tmp, tmp, s1.xxxx */
4436 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp_dst
, &tmp_src
,
4439 /* EXP tmp, s0.xxxx */
4440 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &inst
->Dst
[0],
4441 &tmp_src
, inst
->Instruction
.Saturate
);
4444 free_temp_indexes(emit
);
4451 * Emit code for TGSI_OPCODE_RCP (reciprocal) instruction.
4454 emit_rcp(struct svga_shader_emitter_v10
*emit
,
4455 const struct tgsi_full_instruction
*inst
)
4457 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4459 unsigned tmp
= get_temp_index(emit
);
4460 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4461 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4463 struct tgsi_full_dst_register tmp_dst_x
=
4464 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4465 struct tgsi_full_src_register tmp_src_xxxx
=
4466 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4468 /* DIV tmp.x, 1.0, s0 */
4469 emit_instruction_op2(emit
, VGPU10_OPCODE_DIV
, &tmp_dst_x
, &one
,
4470 &inst
->Src
[0], FALSE
);
4472 /* MOV dst, tmp.xxxx */
4473 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0],
4474 &tmp_src_xxxx
, inst
->Instruction
.Saturate
);
4476 free_temp_indexes(emit
);
4483 * Emit code for TGSI_OPCODE_RSQ instruction.
4486 emit_rsq(struct svga_shader_emitter_v10
*emit
,
4487 const struct tgsi_full_instruction
*inst
)
4490 * dst.xyzw = 1 / sqrt(src.x)
4496 unsigned tmp
= get_temp_index(emit
);
4497 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4498 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4500 struct tgsi_full_dst_register tmp_dst_x
=
4501 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4502 struct tgsi_full_src_register tmp_src_xxxx
=
4503 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4505 /* RSQ tmp, src.x */
4506 emit_instruction_op1(emit
, VGPU10_OPCODE_RSQ
, &tmp_dst_x
,
4507 &inst
->Src
[0], FALSE
);
4509 /* MOV dst, tmp.xxxx */
4510 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0],
4511 &tmp_src_xxxx
, inst
->Instruction
.Saturate
);
4514 free_temp_indexes(emit
);
4521 * Emit code for TGSI_OPCODE_SEQ (Set Equal) instruction.
4524 emit_seq(struct svga_shader_emitter_v10
*emit
,
4525 const struct tgsi_full_instruction
*inst
)
4527 /* dst = SEQ(s0, s1):
4528 * dst = s0 == s1 ? 1.0 : 0.0 (per component)
4530 * EQ tmp, s0, s1; tmp = s0 == s1 : 0xffffffff : 0 (per comp)
4531 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4533 unsigned tmp
= get_temp_index(emit
);
4534 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4535 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4536 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4537 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4539 /* EQ tmp, s0, s1 */
4540 emit_instruction_op2(emit
, VGPU10_OPCODE_EQ
, &tmp_dst
, &inst
->Src
[0],
4541 &inst
->Src
[1], FALSE
);
4543 /* MOVC dst, tmp, one, zero */
4544 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4545 &one
, &zero
, FALSE
);
4547 free_temp_indexes(emit
);
4554 * Emit code for TGSI_OPCODE_SGE (Set Greater than or Equal) instruction.
4557 emit_sge(struct svga_shader_emitter_v10
*emit
,
4558 const struct tgsi_full_instruction
*inst
)
4560 /* dst = SGE(s0, s1):
4561 * dst = s0 >= s1 ? 1.0 : 0.0 (per component)
4563 * GE tmp, s0, s1; tmp = s0 >= s1 : 0xffffffff : 0 (per comp)
4564 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4566 unsigned tmp
= get_temp_index(emit
);
4567 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4568 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4569 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4570 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4572 /* GE tmp, s0, s1 */
4573 emit_instruction_op2(emit
, VGPU10_OPCODE_GE
, &tmp_dst
, &inst
->Src
[0],
4574 &inst
->Src
[1], FALSE
);
4576 /* MOVC dst, tmp, one, zero */
4577 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4578 &one
, &zero
, FALSE
);
4580 free_temp_indexes(emit
);
4587 * Emit code for TGSI_OPCODE_SGT (Set Greater than) instruction.
4590 emit_sgt(struct svga_shader_emitter_v10
*emit
,
4591 const struct tgsi_full_instruction
*inst
)
4593 /* dst = SGT(s0, s1):
4594 * dst = s0 > s1 ? 1.0 : 0.0 (per component)
4596 * LT tmp, s1, s0; tmp = s1 < s0 ? 0xffffffff : 0 (per comp)
4597 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4599 unsigned tmp
= get_temp_index(emit
);
4600 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4601 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4602 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4603 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4605 /* LT tmp, s1, s0 */
4606 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
, &inst
->Src
[1],
4607 &inst
->Src
[0], FALSE
);
4609 /* MOVC dst, tmp, one, zero */
4610 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4611 &one
, &zero
, FALSE
);
4613 free_temp_indexes(emit
);
4620 * Emit code for TGSI_OPCODE_SIN and TGSI_OPCODE_COS instructions.
4623 emit_sincos(struct svga_shader_emitter_v10
*emit
,
4624 const struct tgsi_full_instruction
*inst
)
4626 unsigned tmp
= get_temp_index(emit
);
4627 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4628 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4630 struct tgsi_full_src_register tmp_src_xxxx
=
4631 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4632 struct tgsi_full_dst_register tmp_dst_x
=
4633 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4635 begin_emit_instruction(emit
);
4636 emit_opcode(emit
, VGPU10_OPCODE_SINCOS
, FALSE
);
4638 if(inst
->Instruction
.Opcode
== TGSI_OPCODE_SIN
)
4640 emit_dst_register(emit
, &tmp_dst_x
); /* first destination register */
4641 emit_null_dst_register(emit
); /* second destination register */
4644 emit_null_dst_register(emit
);
4645 emit_dst_register(emit
, &tmp_dst_x
);
4648 emit_src_register(emit
, &inst
->Src
[0]);
4649 end_emit_instruction(emit
);
4651 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0],
4652 &tmp_src_xxxx
, inst
->Instruction
.Saturate
);
4654 free_temp_indexes(emit
);
4661 * Emit code for TGSI_OPCODE_SLE (Set Less than or Equal) instruction.
4664 emit_sle(struct svga_shader_emitter_v10
*emit
,
4665 const struct tgsi_full_instruction
*inst
)
4667 /* dst = SLE(s0, s1):
4668 * dst = s0 <= s1 ? 1.0 : 0.0 (per component)
4670 * GE tmp, s1, s0; tmp = s1 >= s0 : 0xffffffff : 0 (per comp)
4671 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4673 unsigned tmp
= get_temp_index(emit
);
4674 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4675 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4676 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4677 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4679 /* GE tmp, s1, s0 */
4680 emit_instruction_op2(emit
, VGPU10_OPCODE_GE
, &tmp_dst
, &inst
->Src
[1],
4681 &inst
->Src
[0], FALSE
);
4683 /* MOVC dst, tmp, one, zero */
4684 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4685 &one
, &zero
, FALSE
);
4687 free_temp_indexes(emit
);
4694 * Emit code for TGSI_OPCODE_SLT (Set Less than) instruction.
4697 emit_slt(struct svga_shader_emitter_v10
*emit
,
4698 const struct tgsi_full_instruction
*inst
)
4700 /* dst = SLT(s0, s1):
4701 * dst = s0 < s1 ? 1.0 : 0.0 (per component)
4703 * LT tmp, s0, s1; tmp = s0 < s1 ? 0xffffffff : 0 (per comp)
4704 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4706 unsigned tmp
= get_temp_index(emit
);
4707 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4708 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4709 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4710 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4712 /* LT tmp, s0, s1 */
4713 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
, &inst
->Src
[0],
4714 &inst
->Src
[1], FALSE
);
4716 /* MOVC dst, tmp, one, zero */
4717 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4718 &one
, &zero
, FALSE
);
4720 free_temp_indexes(emit
);
4727 * Emit code for TGSI_OPCODE_SNE (Set Not Equal) instruction.
4730 emit_sne(struct svga_shader_emitter_v10
*emit
,
4731 const struct tgsi_full_instruction
*inst
)
4733 /* dst = SNE(s0, s1):
4734 * dst = s0 != s1 ? 1.0 : 0.0 (per component)
4736 * EQ tmp, s0, s1; tmp = s0 == s1 : 0xffffffff : 0 (per comp)
4737 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4739 unsigned tmp
= get_temp_index(emit
);
4740 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4741 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4742 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4743 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4745 /* NE tmp, s0, s1 */
4746 emit_instruction_op2(emit
, VGPU10_OPCODE_NE
, &tmp_dst
, &inst
->Src
[0],
4747 &inst
->Src
[1], FALSE
);
4749 /* MOVC dst, tmp, one, zero */
4750 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4751 &one
, &zero
, FALSE
);
4753 free_temp_indexes(emit
);
4760 * Emit code for TGSI_OPCODE_SSG (Set Sign) instruction.
4763 emit_ssg(struct svga_shader_emitter_v10
*emit
,
4764 const struct tgsi_full_instruction
*inst
)
4766 /* dst.x = (src.x > 0.0) ? 1.0 : (src.x < 0.0) ? -1.0 : 0.0
4767 * dst.y = (src.y > 0.0) ? 1.0 : (src.y < 0.0) ? -1.0 : 0.0
4768 * dst.z = (src.z > 0.0) ? 1.0 : (src.z < 0.0) ? -1.0 : 0.0
4769 * dst.w = (src.w > 0.0) ? 1.0 : (src.w < 0.0) ? -1.0 : 0.0
4771 * LT tmp1, src, zero; tmp1 = src < zero ? 0xffffffff : 0 (per comp)
4772 * MOVC tmp2, tmp1, -1.0, 0.0; tmp2 = tmp1 ? -1.0 : 0.0 (per component)
4773 * LT tmp1, zero, src; tmp1 = zero < src ? 0xffffffff : 0 (per comp)
4774 * MOVC dst, tmp1, 1.0, tmp2; dst = tmp1 ? 1.0 : tmp2 (per component)
4776 struct tgsi_full_src_register zero
=
4777 make_immediate_reg_float(emit
, 0.0f
);
4778 struct tgsi_full_src_register one
=
4779 make_immediate_reg_float(emit
, 1.0f
);
4780 struct tgsi_full_src_register neg_one
=
4781 make_immediate_reg_float(emit
, -1.0f
);
4783 unsigned tmp1
= get_temp_index(emit
);
4784 struct tgsi_full_src_register tmp1_src
= make_src_temp_reg(tmp1
);
4785 struct tgsi_full_dst_register tmp1_dst
= make_dst_temp_reg(tmp1
);
4787 unsigned tmp2
= get_temp_index(emit
);
4788 struct tgsi_full_src_register tmp2_src
= make_src_temp_reg(tmp2
);
4789 struct tgsi_full_dst_register tmp2_dst
= make_dst_temp_reg(tmp2
);
4791 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp1_dst
, &inst
->Src
[0],
4793 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &tmp2_dst
, &tmp1_src
,
4794 &neg_one
, &zero
, FALSE
);
4795 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp1_dst
, &zero
,
4796 &inst
->Src
[0], FALSE
);
4797 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp1_src
,
4798 &one
, &tmp2_src
, FALSE
);
4800 free_temp_indexes(emit
);
4807 * Emit code for TGSI_OPCODE_ISSG (Integer Set Sign) instruction.
4810 emit_issg(struct svga_shader_emitter_v10
*emit
,
4811 const struct tgsi_full_instruction
*inst
)
4813 /* dst.x = (src.x > 0) ? 1 : (src.x < 0) ? -1 : 0
4814 * dst.y = (src.y > 0) ? 1 : (src.y < 0) ? -1 : 0
4815 * dst.z = (src.z > 0) ? 1 : (src.z < 0) ? -1 : 0
4816 * dst.w = (src.w > 0) ? 1 : (src.w < 0) ? -1 : 0
4818 * ILT tmp1, src, 0 tmp1 = src < 0 ? -1 : 0 (per component)
4819 * ILT tmp2, 0, src tmp2 = 0 < src ? -1 : 0 (per component)
4820 * IADD dst, tmp1, neg(tmp2) dst = tmp1 - tmp2 (per component)
4822 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4824 unsigned tmp1
= get_temp_index(emit
);
4825 struct tgsi_full_src_register tmp1_src
= make_src_temp_reg(tmp1
);
4826 struct tgsi_full_dst_register tmp1_dst
= make_dst_temp_reg(tmp1
);
4828 unsigned tmp2
= get_temp_index(emit
);
4829 struct tgsi_full_src_register tmp2_src
= make_src_temp_reg(tmp2
);
4830 struct tgsi_full_dst_register tmp2_dst
= make_dst_temp_reg(tmp2
);
4832 struct tgsi_full_src_register neg_tmp2
= negate_src(&tmp2_src
);
4834 emit_instruction_op2(emit
, VGPU10_OPCODE_ILT
, &tmp1_dst
,
4835 &inst
->Src
[0], &zero
, FALSE
);
4836 emit_instruction_op2(emit
, VGPU10_OPCODE_ILT
, &tmp2_dst
,
4837 &zero
, &inst
->Src
[0], FALSE
);
4838 emit_instruction_op2(emit
, VGPU10_OPCODE_IADD
, &inst
->Dst
[0],
4839 &tmp1_src
, &neg_tmp2
, FALSE
);
4841 free_temp_indexes(emit
);
4848 * Emit a comparison instruction. The dest register will get
4849 * 0 or ~0 values depending on the outcome of comparing src0 to src1.
4852 emit_comparison(struct svga_shader_emitter_v10
*emit
,
4854 const struct tgsi_full_dst_register
*dst
,
4855 const struct tgsi_full_src_register
*src0
,
4856 const struct tgsi_full_src_register
*src1
)
4858 struct tgsi_full_src_register immediate
;
4859 VGPU10OpcodeToken0 opcode0
;
4860 boolean swapSrc
= FALSE
;
4862 /* Sanity checks for svga vs. gallium enums */
4863 STATIC_ASSERT(SVGA3D_CMP_LESS
== (PIPE_FUNC_LESS
+ 1));
4864 STATIC_ASSERT(SVGA3D_CMP_GREATEREQUAL
== (PIPE_FUNC_GEQUAL
+ 1));
4869 case SVGA3D_CMP_NEVER
:
4870 immediate
= make_immediate_reg_int(emit
, 0);
4872 begin_emit_instruction(emit
);
4873 emit_dword(emit
, VGPU10_OPCODE_MOV
);
4874 emit_dst_register(emit
, dst
);
4875 emit_src_register(emit
, &immediate
);
4876 end_emit_instruction(emit
);
4878 case SVGA3D_CMP_ALWAYS
:
4879 immediate
= make_immediate_reg_int(emit
, -1);
4881 begin_emit_instruction(emit
);
4882 emit_dword(emit
, VGPU10_OPCODE_MOV
);
4883 emit_dst_register(emit
, dst
);
4884 emit_src_register(emit
, &immediate
);
4885 end_emit_instruction(emit
);
4887 case SVGA3D_CMP_LESS
:
4888 opcode0
.opcodeType
= VGPU10_OPCODE_LT
;
4890 case SVGA3D_CMP_EQUAL
:
4891 opcode0
.opcodeType
= VGPU10_OPCODE_EQ
;
4893 case SVGA3D_CMP_LESSEQUAL
:
4894 opcode0
.opcodeType
= VGPU10_OPCODE_GE
;
4897 case SVGA3D_CMP_GREATER
:
4898 opcode0
.opcodeType
= VGPU10_OPCODE_LT
;
4901 case SVGA3D_CMP_NOTEQUAL
:
4902 opcode0
.opcodeType
= VGPU10_OPCODE_NE
;
4904 case SVGA3D_CMP_GREATEREQUAL
:
4905 opcode0
.opcodeType
= VGPU10_OPCODE_GE
;
4908 assert(!"Unexpected comparison mode");
4909 opcode0
.opcodeType
= VGPU10_OPCODE_EQ
;
4912 begin_emit_instruction(emit
);
4913 emit_dword(emit
, opcode0
.value
);
4914 emit_dst_register(emit
, dst
);
4916 emit_src_register(emit
, src1
);
4917 emit_src_register(emit
, src0
);
4920 emit_src_register(emit
, src0
);
4921 emit_src_register(emit
, src1
);
4923 end_emit_instruction(emit
);
4928 * Get texel/address offsets for a texture instruction.
4931 get_texel_offsets(const struct svga_shader_emitter_v10
*emit
,
4932 const struct tgsi_full_instruction
*inst
, int offsets
[3])
4934 if (inst
->Texture
.NumOffsets
== 1) {
4935 /* According to OpenGL Shader Language spec the offsets are only
4936 * fetched from a previously-declared immediate/literal.
4938 const struct tgsi_texture_offset
*off
= inst
->TexOffsets
;
4939 const unsigned index
= off
[0].Index
;
4940 const unsigned swizzleX
= off
[0].SwizzleX
;
4941 const unsigned swizzleY
= off
[0].SwizzleY
;
4942 const unsigned swizzleZ
= off
[0].SwizzleZ
;
4943 const union tgsi_immediate_data
*imm
= emit
->immediates
[index
];
4945 assert(inst
->TexOffsets
[0].File
== TGSI_FILE_IMMEDIATE
);
4947 offsets
[0] = imm
[swizzleX
].Int
;
4948 offsets
[1] = imm
[swizzleY
].Int
;
4949 offsets
[2] = imm
[swizzleZ
].Int
;
4952 offsets
[0] = offsets
[1] = offsets
[2] = 0;
4958 * Set up the coordinate register for texture sampling.
4959 * When we're sampling from a RECT texture we have to scale the
4960 * unnormalized coordinate to a normalized coordinate.
4961 * We do that by multiplying the coordinate by an "extra" constant.
4962 * An alternative would be to use the RESINFO instruction to query the
4965 static struct tgsi_full_src_register
4966 setup_texcoord(struct svga_shader_emitter_v10
*emit
,
4968 const struct tgsi_full_src_register
*coord
)
4970 if (emit
->sampler_view
[unit
] && emit
->key
.tex
[unit
].unnormalized
) {
4971 unsigned scale_index
= emit
->texcoord_scale_index
[unit
];
4972 unsigned tmp
= get_temp_index(emit
);
4973 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4974 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4975 struct tgsi_full_src_register scale_src
= make_src_const_reg(scale_index
);
4977 if (emit
->key
.tex
[unit
].texel_bias
) {
4978 /* to fix texture coordinate rounding issue, 0.0001 offset is
4979 * been added. This fixes piglit test fbo-blit-scaled-linear. */
4980 struct tgsi_full_src_register offset
=
4981 make_immediate_reg_float(emit
, 0.0001f
);
4983 /* ADD tmp, coord, offset */
4984 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &tmp_dst
,
4985 coord
, &offset
, FALSE
);
4986 /* MUL tmp, tmp, scale */
4987 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp_dst
,
4988 &tmp_src
, &scale_src
, FALSE
);
4991 /* MUL tmp, coord, const[] */
4992 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp_dst
,
4993 coord
, &scale_src
, FALSE
);
4998 /* use texcoord as-is */
5005 * For SAMPLE_C instructions, emit the extra src register which indicates
5006 * the reference/comparision value.
5009 emit_tex_compare_refcoord(struct svga_shader_emitter_v10
*emit
,
5010 enum tgsi_texture_type target
,
5011 const struct tgsi_full_src_register
*coord
)
5013 struct tgsi_full_src_register coord_src_ref
;
5016 assert(tgsi_is_shadow_target(target
));
5018 component
= tgsi_util_get_shadow_ref_src_index(target
) % 4;
5019 assert(component
>= 0);
5021 coord_src_ref
= scalar_src(coord
, component
);
5023 emit_src_register(emit
, &coord_src_ref
);
5028 * Info for implementing texture swizzles.
5029 * The begin_tex_swizzle(), get_tex_swizzle_dst() and end_tex_swizzle()
5030 * functions use this to encapsulate the extra steps needed to perform
5031 * a texture swizzle, or shadow/depth comparisons.
5032 * The shadow/depth comparison is only done here if for the cases where
5033 * there's no VGPU10 opcode (like texture bias lookup w/ shadow compare).
5035 struct tex_swizzle_info
5038 boolean shadow_compare
;
5040 enum tgsi_texture_type texture_target
; /**< TGSI_TEXTURE_x */
5041 struct tgsi_full_src_register tmp_src
;
5042 struct tgsi_full_dst_register tmp_dst
;
5043 const struct tgsi_full_dst_register
*inst_dst
;
5044 const struct tgsi_full_src_register
*coord_src
;
5049 * Do setup for handling texture swizzles or shadow compares.
5050 * \param unit the texture unit
5051 * \param inst the TGSI texture instruction
5052 * \param shadow_compare do shadow/depth comparison?
5053 * \param swz returns the swizzle info
5056 begin_tex_swizzle(struct svga_shader_emitter_v10
*emit
,
5058 const struct tgsi_full_instruction
*inst
,
5059 boolean shadow_compare
,
5060 struct tex_swizzle_info
*swz
)
5062 swz
->swizzled
= (emit
->key
.tex
[unit
].swizzle_r
!= TGSI_SWIZZLE_X
||
5063 emit
->key
.tex
[unit
].swizzle_g
!= TGSI_SWIZZLE_Y
||
5064 emit
->key
.tex
[unit
].swizzle_b
!= TGSI_SWIZZLE_Z
||
5065 emit
->key
.tex
[unit
].swizzle_a
!= TGSI_SWIZZLE_W
);
5067 swz
->shadow_compare
= shadow_compare
;
5068 swz
->texture_target
= inst
->Texture
.Texture
;
5070 if (swz
->swizzled
|| shadow_compare
) {
5071 /* Allocate temp register for the result of the SAMPLE instruction
5072 * and the source of the MOV/compare/swizzle instructions.
5074 unsigned tmp
= get_temp_index(emit
);
5075 swz
->tmp_src
= make_src_temp_reg(tmp
);
5076 swz
->tmp_dst
= make_dst_temp_reg(tmp
);
5080 swz
->inst_dst
= &inst
->Dst
[0];
5081 swz
->coord_src
= &inst
->Src
[0];
5083 emit
->fs
.shadow_compare_units
|= shadow_compare
<< unit
;
5088 * Returns the register to put the SAMPLE instruction results into.
5089 * This will either be the original instruction dst reg (if no swizzle
5090 * and no shadow comparison) or a temporary reg if there is a swizzle.
5092 static const struct tgsi_full_dst_register
*
5093 get_tex_swizzle_dst(const struct tex_swizzle_info
*swz
)
5095 return (swz
->swizzled
|| swz
->shadow_compare
)
5096 ? &swz
->tmp_dst
: swz
->inst_dst
;
5101 * This emits the MOV instruction that actually implements a texture swizzle
5102 * and/or shadow comparison.
5105 end_tex_swizzle(struct svga_shader_emitter_v10
*emit
,
5106 const struct tex_swizzle_info
*swz
)
5108 if (swz
->shadow_compare
) {
5109 /* Emit extra instructions to compare the fetched texel value against
5110 * a texture coordinate component. The result of the comparison
5113 struct tgsi_full_src_register coord_src
;
5114 struct tgsi_full_src_register texel_src
=
5115 scalar_src(&swz
->tmp_src
, TGSI_SWIZZLE_X
);
5116 struct tgsi_full_src_register one
=
5117 make_immediate_reg_float(emit
, 1.0f
);
5118 /* convert gallium comparison func to SVGA comparison func */
5119 SVGA3dCmpFunc compare_func
= emit
->key
.tex
[swz
->unit
].compare_func
+ 1;
5121 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
5124 tgsi_util_get_shadow_ref_src_index(swz
->texture_target
) % 4;
5125 assert(component
>= 0);
5126 coord_src
= scalar_src(swz
->coord_src
, component
);
5128 /* COMPARE tmp, coord, texel */
5129 emit_comparison(emit
, compare_func
,
5130 &swz
->tmp_dst
, &coord_src
, &texel_src
);
5132 /* AND dest, tmp, {1.0} */
5133 begin_emit_instruction(emit
);
5134 emit_opcode(emit
, VGPU10_OPCODE_AND
, FALSE
);
5135 if (swz
->swizzled
) {
5136 emit_dst_register(emit
, &swz
->tmp_dst
);
5139 emit_dst_register(emit
, swz
->inst_dst
);
5141 emit_src_register(emit
, &swz
->tmp_src
);
5142 emit_src_register(emit
, &one
);
5143 end_emit_instruction(emit
);
5146 if (swz
->swizzled
) {
5147 unsigned swz_r
= emit
->key
.tex
[swz
->unit
].swizzle_r
;
5148 unsigned swz_g
= emit
->key
.tex
[swz
->unit
].swizzle_g
;
5149 unsigned swz_b
= emit
->key
.tex
[swz
->unit
].swizzle_b
;
5150 unsigned swz_a
= emit
->key
.tex
[swz
->unit
].swizzle_a
;
5151 unsigned writemask_0
= 0, writemask_1
= 0;
5152 boolean int_tex
= is_integer_type(emit
->sampler_return_type
[swz
->unit
]);
5154 /* Swizzle w/out zero/one terms */
5155 struct tgsi_full_src_register src_swizzled
=
5156 swizzle_src(&swz
->tmp_src
,
5157 swz_r
< PIPE_SWIZZLE_0
? swz_r
: PIPE_SWIZZLE_X
,
5158 swz_g
< PIPE_SWIZZLE_0
? swz_g
: PIPE_SWIZZLE_Y
,
5159 swz_b
< PIPE_SWIZZLE_0
? swz_b
: PIPE_SWIZZLE_Z
,
5160 swz_a
< PIPE_SWIZZLE_0
? swz_a
: PIPE_SWIZZLE_W
);
5162 /* MOV dst, color(tmp).<swizzle> */
5163 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
5164 swz
->inst_dst
, &src_swizzled
, FALSE
);
5166 /* handle swizzle zero terms */
5167 writemask_0
= (((swz_r
== PIPE_SWIZZLE_0
) << 0) |
5168 ((swz_g
== PIPE_SWIZZLE_0
) << 1) |
5169 ((swz_b
== PIPE_SWIZZLE_0
) << 2) |
5170 ((swz_a
== PIPE_SWIZZLE_0
) << 3));
5171 writemask_0
&= swz
->inst_dst
->Register
.WriteMask
;
5174 struct tgsi_full_src_register zero
= int_tex
?
5175 make_immediate_reg_int(emit
, 0) :
5176 make_immediate_reg_float(emit
, 0.0f
);
5177 struct tgsi_full_dst_register dst
=
5178 writemask_dst(swz
->inst_dst
, writemask_0
);
5180 /* MOV dst.writemask_0, {0,0,0,0} */
5181 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
5182 &dst
, &zero
, FALSE
);
5185 /* handle swizzle one terms */
5186 writemask_1
= (((swz_r
== PIPE_SWIZZLE_1
) << 0) |
5187 ((swz_g
== PIPE_SWIZZLE_1
) << 1) |
5188 ((swz_b
== PIPE_SWIZZLE_1
) << 2) |
5189 ((swz_a
== PIPE_SWIZZLE_1
) << 3));
5190 writemask_1
&= swz
->inst_dst
->Register
.WriteMask
;
5193 struct tgsi_full_src_register one
= int_tex
?
5194 make_immediate_reg_int(emit
, 1) :
5195 make_immediate_reg_float(emit
, 1.0f
);
5196 struct tgsi_full_dst_register dst
=
5197 writemask_dst(swz
->inst_dst
, writemask_1
);
5199 /* MOV dst.writemask_1, {1,1,1,1} */
5200 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst
, &one
, FALSE
);
5207 * Emit code for TGSI_OPCODE_SAMPLE instruction.
5210 emit_sample(struct svga_shader_emitter_v10
*emit
,
5211 const struct tgsi_full_instruction
*inst
)
5213 const unsigned resource_unit
= inst
->Src
[1].Register
.Index
;
5214 const unsigned sampler_unit
= inst
->Src
[2].Register
.Index
;
5215 struct tgsi_full_src_register coord
;
5217 struct tex_swizzle_info swz_info
;
5219 begin_tex_swizzle(emit
, sampler_unit
, inst
, FALSE
, &swz_info
);
5221 get_texel_offsets(emit
, inst
, offsets
);
5223 coord
= setup_texcoord(emit
, resource_unit
, &inst
->Src
[0]);
5225 /* SAMPLE dst, coord(s0), resource, sampler */
5226 begin_emit_instruction(emit
);
5228 /* NOTE: for non-fragment shaders, we should use VGPU10_OPCODE_SAMPLE_L
5229 * with LOD=0. But our virtual GPU accepts this as-is.
5231 emit_sample_opcode(emit
, VGPU10_OPCODE_SAMPLE
,
5232 inst
->Instruction
.Saturate
, offsets
);
5233 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5234 emit_src_register(emit
, &coord
);
5235 emit_resource_register(emit
, resource_unit
);
5236 emit_sampler_register(emit
, sampler_unit
);
5237 end_emit_instruction(emit
);
5239 end_tex_swizzle(emit
, &swz_info
);
5241 free_temp_indexes(emit
);
5248 * Check if a texture instruction is valid.
5249 * An example of an invalid texture instruction is doing shadow comparison
5250 * with an integer-valued texture.
5251 * If we detect an invalid texture instruction, we replace it with:
5252 * MOV dst, {1,1,1,1};
5253 * \return TRUE if valid, FALSE if invalid.
5256 is_valid_tex_instruction(struct svga_shader_emitter_v10
*emit
,
5257 const struct tgsi_full_instruction
*inst
)
5259 const unsigned unit
= inst
->Src
[1].Register
.Index
;
5260 const enum tgsi_texture_type target
= inst
->Texture
.Texture
;
5261 boolean valid
= TRUE
;
5263 if (tgsi_is_shadow_target(target
) &&
5264 is_integer_type(emit
->sampler_return_type
[unit
])) {
5265 debug_printf("Invalid SAMPLE_C with an integer texture!\n");
5268 /* XXX might check for other conditions in the future here */
5271 /* emit a MOV dst, {1,1,1,1} instruction. */
5272 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
5273 begin_emit_instruction(emit
);
5274 emit_opcode(emit
, VGPU10_OPCODE_MOV
, FALSE
);
5275 emit_dst_register(emit
, &inst
->Dst
[0]);
5276 emit_src_register(emit
, &one
);
5277 end_emit_instruction(emit
);
5285 * Emit code for TGSI_OPCODE_TEX (simple texture lookup)
5288 emit_tex(struct svga_shader_emitter_v10
*emit
,
5289 const struct tgsi_full_instruction
*inst
)
5291 const uint unit
= inst
->Src
[1].Register
.Index
;
5292 const enum tgsi_texture_type target
= inst
->Texture
.Texture
;
5293 VGPU10_OPCODE_TYPE opcode
;
5294 struct tgsi_full_src_register coord
;
5296 struct tex_swizzle_info swz_info
;
5298 /* check that the sampler returns a float */
5299 if (!is_valid_tex_instruction(emit
, inst
))
5302 begin_tex_swizzle(emit
, unit
, inst
, FALSE
, &swz_info
);
5304 get_texel_offsets(emit
, inst
, offsets
);
5306 coord
= setup_texcoord(emit
, unit
, &inst
->Src
[0]);
5308 /* SAMPLE dst, coord(s0), resource, sampler */
5309 begin_emit_instruction(emit
);
5311 if (tgsi_is_shadow_target(target
))
5312 opcode
= VGPU10_OPCODE_SAMPLE_C
;
5314 opcode
= VGPU10_OPCODE_SAMPLE
;
5316 emit_sample_opcode(emit
, opcode
, inst
->Instruction
.Saturate
, offsets
);
5317 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5318 emit_src_register(emit
, &coord
);
5319 emit_resource_register(emit
, unit
);
5320 emit_sampler_register(emit
, unit
);
5321 if (opcode
== VGPU10_OPCODE_SAMPLE_C
) {
5322 emit_tex_compare_refcoord(emit
, target
, &coord
);
5324 end_emit_instruction(emit
);
5326 end_tex_swizzle(emit
, &swz_info
);
5328 free_temp_indexes(emit
);
5334 * Emit code for TGSI_OPCODE_TG4 (texture lookup for texture gather)
5337 emit_tg4(struct svga_shader_emitter_v10
*emit
,
5338 const struct tgsi_full_instruction
*inst
)
5340 const uint unit
= inst
->Src
[2].Register
.Index
;
5341 struct tgsi_full_src_register src
;
5344 /* check that the sampler returns a float */
5345 if (!is_valid_tex_instruction(emit
, inst
))
5348 /* Only a single channel is supported in SM4_1 and we report
5349 * PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS = 1.
5350 * Only the 0th component will be gathered.
5352 switch (emit
->key
.tex
[unit
].swizzle_r
) {
5353 case PIPE_SWIZZLE_X
:
5354 get_texel_offsets(emit
, inst
, offsets
);
5355 src
= setup_texcoord(emit
, unit
, &inst
->Src
[0]);
5357 /* Gather dst, coord, resource, sampler */
5358 begin_emit_instruction(emit
);
5359 emit_sample_opcode(emit
, VGPU10_OPCODE_GATHER4
,
5360 inst
->Instruction
.Saturate
, offsets
);
5361 emit_dst_register(emit
, &inst
->Dst
[0]);
5362 emit_src_register(emit
, &src
);
5363 emit_resource_register(emit
, unit
);
5364 emit_sampler_register(emit
, unit
);
5365 end_emit_instruction(emit
);
5367 case PIPE_SWIZZLE_W
:
5368 case PIPE_SWIZZLE_1
:
5369 src
= make_immediate_reg_float(emit
, 1.0);
5370 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
5371 &inst
->Dst
[0], &src
, FALSE
);
5373 case PIPE_SWIZZLE_Y
:
5374 case PIPE_SWIZZLE_Z
:
5375 case PIPE_SWIZZLE_0
:
5377 src
= make_immediate_reg_float(emit
, 0.0);
5378 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
5379 &inst
->Dst
[0], &src
, FALSE
);
5389 * Emit code for TGSI_OPCODE_TEX2 (texture lookup for shadow cube map arrays)
5392 emit_tex2(struct svga_shader_emitter_v10
*emit
,
5393 const struct tgsi_full_instruction
*inst
)
5395 const uint unit
= inst
->Src
[2].Register
.Index
;
5396 unsigned target
= inst
->Texture
.Texture
;
5397 struct tgsi_full_src_register coord
, ref
;
5399 struct tex_swizzle_info swz_info
;
5401 /* check that the sampler returns a float */
5402 if (!is_valid_tex_instruction(emit
, inst
))
5405 begin_tex_swizzle(emit
, unit
, inst
, FALSE
, &swz_info
);
5407 get_texel_offsets(emit
, inst
, offsets
);
5409 coord
= setup_texcoord(emit
, unit
, &inst
->Src
[0]);
5410 ref
= scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_X
);
5412 /* SAMPLE_C dst, coord, resource, sampler, ref */
5413 begin_emit_instruction(emit
);
5414 emit_sample_opcode(emit
, VGPU10_OPCODE_SAMPLE_C
,
5415 inst
->Instruction
.Saturate
, offsets
);
5416 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5417 emit_src_register(emit
, &coord
);
5418 emit_resource_register(emit
, unit
);
5419 emit_sampler_register(emit
, unit
);
5420 emit_tex_compare_refcoord(emit
, target
, &ref
);
5421 end_emit_instruction(emit
);
5423 end_tex_swizzle(emit
, &swz_info
);
5425 free_temp_indexes(emit
);
5432 * Emit code for TGSI_OPCODE_TXP (projective texture)
5435 emit_txp(struct svga_shader_emitter_v10
*emit
,
5436 const struct tgsi_full_instruction
*inst
)
5438 const uint unit
= inst
->Src
[1].Register
.Index
;
5439 const enum tgsi_texture_type target
= inst
->Texture
.Texture
;
5440 VGPU10_OPCODE_TYPE opcode
;
5442 unsigned tmp
= get_temp_index(emit
);
5443 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
5444 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
5445 struct tgsi_full_src_register src0_wwww
=
5446 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_W
);
5447 struct tgsi_full_src_register coord
;
5448 struct tex_swizzle_info swz_info
;
5450 /* check that the sampler returns a float */
5451 if (!is_valid_tex_instruction(emit
, inst
))
5454 begin_tex_swizzle(emit
, unit
, inst
, FALSE
, &swz_info
);
5456 get_texel_offsets(emit
, inst
, offsets
);
5458 coord
= setup_texcoord(emit
, unit
, &inst
->Src
[0]);
5460 /* DIV tmp, coord, coord.wwww */
5461 emit_instruction_op2(emit
, VGPU10_OPCODE_DIV
, &tmp_dst
,
5462 &coord
, &src0_wwww
, FALSE
);
5464 /* SAMPLE dst, coord(tmp), resource, sampler */
5465 begin_emit_instruction(emit
);
5467 if (tgsi_is_shadow_target(target
))
5468 /* NOTE: for non-fragment shaders, we should use
5469 * VGPU10_OPCODE_SAMPLE_C_LZ, but our virtual GPU accepts this as-is.
5471 opcode
= VGPU10_OPCODE_SAMPLE_C
;
5473 opcode
= VGPU10_OPCODE_SAMPLE
;
5475 emit_sample_opcode(emit
, opcode
, inst
->Instruction
.Saturate
, offsets
);
5476 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5477 emit_src_register(emit
, &tmp_src
); /* projected coord */
5478 emit_resource_register(emit
, unit
);
5479 emit_sampler_register(emit
, unit
);
5480 if (opcode
== VGPU10_OPCODE_SAMPLE_C
) {
5481 emit_tex_compare_refcoord(emit
, target
, &tmp_src
);
5483 end_emit_instruction(emit
);
5485 end_tex_swizzle(emit
, &swz_info
);
5487 free_temp_indexes(emit
);
5494 * Emit code for TGSI_OPCODE_TXD (explicit derivatives)
5497 emit_txd(struct svga_shader_emitter_v10
*emit
,
5498 const struct tgsi_full_instruction
*inst
)
5500 const uint unit
= inst
->Src
[3].Register
.Index
;
5501 const enum tgsi_texture_type target
= inst
->Texture
.Texture
;
5503 struct tgsi_full_src_register coord
;
5504 struct tex_swizzle_info swz_info
;
5506 begin_tex_swizzle(emit
, unit
, inst
, tgsi_is_shadow_target(target
),
5509 get_texel_offsets(emit
, inst
, offsets
);
5511 coord
= setup_texcoord(emit
, unit
, &inst
->Src
[0]);
5513 /* SAMPLE_D dst, coord(s0), resource, sampler, Xderiv(s1), Yderiv(s2) */
5514 begin_emit_instruction(emit
);
5515 emit_sample_opcode(emit
, VGPU10_OPCODE_SAMPLE_D
,
5516 inst
->Instruction
.Saturate
, offsets
);
5517 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5518 emit_src_register(emit
, &coord
);
5519 emit_resource_register(emit
, unit
);
5520 emit_sampler_register(emit
, unit
);
5521 emit_src_register(emit
, &inst
->Src
[1]); /* Xderiv */
5522 emit_src_register(emit
, &inst
->Src
[2]); /* Yderiv */
5523 end_emit_instruction(emit
);
5525 end_tex_swizzle(emit
, &swz_info
);
5527 free_temp_indexes(emit
);
5534 * Emit code for TGSI_OPCODE_TXF (texel fetch)
5537 emit_txf(struct svga_shader_emitter_v10
*emit
,
5538 const struct tgsi_full_instruction
*inst
)
5540 const uint unit
= inst
->Src
[1].Register
.Index
;
5541 const boolean msaa
= tgsi_is_msaa_target(inst
->Texture
.Texture
)
5542 && emit
->key
.tex
[unit
].num_samples
> 1;
5544 struct tex_swizzle_info swz_info
;
5546 begin_tex_swizzle(emit
, unit
, inst
, FALSE
, &swz_info
);
5548 get_texel_offsets(emit
, inst
, offsets
);
5551 assert(emit
->key
.tex
[unit
].num_samples
> 1);
5553 /* Fetch one sample from an MSAA texture */
5554 struct tgsi_full_src_register sampleIndex
=
5555 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_W
);
5556 /* LD_MS dst, coord(s0), resource, sampleIndex */
5557 begin_emit_instruction(emit
);
5558 emit_sample_opcode(emit
, VGPU10_OPCODE_LD_MS
,
5559 inst
->Instruction
.Saturate
, offsets
);
5560 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5561 emit_src_register(emit
, &inst
->Src
[0]);
5562 emit_resource_register(emit
, unit
);
5563 emit_src_register(emit
, &sampleIndex
);
5564 end_emit_instruction(emit
);
5567 /* Fetch one texel specified by integer coordinate */
5568 /* LD dst, coord(s0), resource */
5569 begin_emit_instruction(emit
);
5570 emit_sample_opcode(emit
, VGPU10_OPCODE_LD
,
5571 inst
->Instruction
.Saturate
, offsets
);
5572 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5573 emit_src_register(emit
, &inst
->Src
[0]);
5574 emit_resource_register(emit
, unit
);
5575 end_emit_instruction(emit
);
5578 end_tex_swizzle(emit
, &swz_info
);
5580 free_temp_indexes(emit
);
5587 * Emit code for TGSI_OPCODE_TXL (explicit LOD) or TGSI_OPCODE_TXB (LOD bias)
5588 * or TGSI_OPCODE_TXB2 (for cube shadow maps).
5591 emit_txl_txb(struct svga_shader_emitter_v10
*emit
,
5592 const struct tgsi_full_instruction
*inst
)
5594 const enum tgsi_texture_type target
= inst
->Texture
.Texture
;
5595 VGPU10_OPCODE_TYPE opcode
;
5598 struct tgsi_full_src_register coord
, lod_bias
;
5599 struct tex_swizzle_info swz_info
;
5601 assert(inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL
||
5602 inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB
||
5603 inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB2
);
5605 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB2
) {
5606 lod_bias
= scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_X
);
5607 unit
= inst
->Src
[2].Register
.Index
;
5610 lod_bias
= scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_W
);
5611 unit
= inst
->Src
[1].Register
.Index
;
5614 begin_tex_swizzle(emit
, unit
, inst
, tgsi_is_shadow_target(target
),
5617 get_texel_offsets(emit
, inst
, offsets
);
5619 coord
= setup_texcoord(emit
, unit
, &inst
->Src
[0]);
5621 /* SAMPLE_L/B dst, coord(s0), resource, sampler, lod(s3) */
5622 begin_emit_instruction(emit
);
5623 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL
) {
5624 opcode
= VGPU10_OPCODE_SAMPLE_L
;
5627 opcode
= VGPU10_OPCODE_SAMPLE_B
;
5629 emit_sample_opcode(emit
, opcode
, inst
->Instruction
.Saturate
, offsets
);
5630 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5631 emit_src_register(emit
, &coord
);
5632 emit_resource_register(emit
, unit
);
5633 emit_sampler_register(emit
, unit
);
5634 emit_src_register(emit
, &lod_bias
);
5635 end_emit_instruction(emit
);
5637 end_tex_swizzle(emit
, &swz_info
);
5639 free_temp_indexes(emit
);
5646 * Emit code for TGSI_OPCODE_TXL2 (explicit LOD) for cubemap array.
5649 emit_txl2(struct svga_shader_emitter_v10
*emit
,
5650 const struct tgsi_full_instruction
*inst
)
5652 unsigned target
= inst
->Texture
.Texture
;
5653 unsigned opcode
, unit
;
5655 struct tgsi_full_src_register coord
, lod
;
5656 struct tex_swizzle_info swz_info
;
5658 assert(inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL2
);
5660 lod
= scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_X
);
5661 unit
= inst
->Src
[2].Register
.Index
;
5663 begin_tex_swizzle(emit
, unit
, inst
, tgsi_is_shadow_target(target
),
5666 get_texel_offsets(emit
, inst
, offsets
);
5668 coord
= setup_texcoord(emit
, unit
, &inst
->Src
[0]);
5670 /* SAMPLE_L dst, coord(s0), resource, sampler, lod(s3) */
5671 begin_emit_instruction(emit
);
5672 opcode
= VGPU10_OPCODE_SAMPLE_L
;
5673 emit_sample_opcode(emit
, opcode
, inst
->Instruction
.Saturate
, offsets
);
5674 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5675 emit_src_register(emit
, &coord
);
5676 emit_resource_register(emit
, unit
);
5677 emit_sampler_register(emit
, unit
);
5678 emit_src_register(emit
, &lod
);
5679 end_emit_instruction(emit
);
5681 end_tex_swizzle(emit
, &swz_info
);
5683 free_temp_indexes(emit
);
5690 * Emit code for TGSI_OPCODE_TXQ (texture query) instruction.
5693 emit_txq(struct svga_shader_emitter_v10
*emit
,
5694 const struct tgsi_full_instruction
*inst
)
5696 const uint unit
= inst
->Src
[1].Register
.Index
;
5698 if (emit
->sampler_target
[unit
] == TGSI_TEXTURE_BUFFER
) {
5699 /* RESINFO does not support querying texture buffers, so we instead
5700 * store texture buffer sizes in shader constants, then copy them to
5701 * implement TXQ instead of emitting RESINFO.
5702 * MOV dst, const[texture_buffer_size_index[unit]]
5704 struct tgsi_full_src_register size_src
=
5705 make_src_const_reg(emit
->texture_buffer_size_index
[unit
]);
5706 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &size_src
,
5709 /* RESINFO dst, srcMipLevel, resource */
5710 begin_emit_instruction(emit
);
5711 emit_opcode_resinfo(emit
, VGPU10_RESINFO_RETURN_UINT
);
5712 emit_dst_register(emit
, &inst
->Dst
[0]);
5713 emit_src_register(emit
, &inst
->Src
[0]);
5714 emit_resource_register(emit
, unit
);
5715 end_emit_instruction(emit
);
5718 free_temp_indexes(emit
);
5725 * Emit a simple instruction (like ADD, MUL, MIN, etc).
5728 emit_simple(struct svga_shader_emitter_v10
*emit
,
5729 const struct tgsi_full_instruction
*inst
)
5731 const enum tgsi_opcode opcode
= inst
->Instruction
.Opcode
;
5732 const struct tgsi_opcode_info
*op
= tgsi_get_opcode_info(opcode
);
5735 begin_emit_instruction(emit
);
5736 emit_opcode(emit
, translate_opcode(opcode
), inst
->Instruction
.Saturate
);
5737 for (i
= 0; i
< op
->num_dst
; i
++) {
5738 emit_dst_register(emit
, &inst
->Dst
[i
]);
5740 for (i
= 0; i
< op
->num_src
; i
++) {
5741 emit_src_register(emit
, &inst
->Src
[i
]);
5743 end_emit_instruction(emit
);
5750 * We only special case the MOV instruction to try to detect constant
5751 * color writes in the fragment shader.
5754 emit_mov(struct svga_shader_emitter_v10
*emit
,
5755 const struct tgsi_full_instruction
*inst
)
5757 const struct tgsi_full_src_register
*src
= &inst
->Src
[0];
5758 const struct tgsi_full_dst_register
*dst
= &inst
->Dst
[0];
5760 if (emit
->unit
== PIPE_SHADER_FRAGMENT
&&
5761 dst
->Register
.File
== TGSI_FILE_OUTPUT
&&
5762 dst
->Register
.Index
== 0 &&
5763 src
->Register
.File
== TGSI_FILE_CONSTANT
&&
5764 !src
->Register
.Indirect
) {
5765 emit
->constant_color_output
= TRUE
;
5768 return emit_simple(emit
, inst
);
5773 * Emit a simple VGPU10 instruction which writes to multiple dest registers,
5774 * where TGSI only uses one dest register.
5777 emit_simple_1dst(struct svga_shader_emitter_v10
*emit
,
5778 const struct tgsi_full_instruction
*inst
,
5782 const enum tgsi_opcode opcode
= inst
->Instruction
.Opcode
;
5783 const struct tgsi_opcode_info
*op
= tgsi_get_opcode_info(opcode
);
5786 begin_emit_instruction(emit
);
5787 emit_opcode(emit
, translate_opcode(opcode
), inst
->Instruction
.Saturate
);
5789 for (i
= 0; i
< dst_count
; i
++) {
5790 if (i
== dst_index
) {
5791 emit_dst_register(emit
, &inst
->Dst
[0]);
5793 emit_null_dst_register(emit
);
5797 for (i
= 0; i
< op
->num_src
; i
++) {
5798 emit_src_register(emit
, &inst
->Src
[i
]);
5800 end_emit_instruction(emit
);
5807 * Translate a single TGSI instruction to VGPU10.
5810 emit_vgpu10_instruction(struct svga_shader_emitter_v10
*emit
,
5811 unsigned inst_number
,
5812 const struct tgsi_full_instruction
*inst
)
5814 const enum tgsi_opcode opcode
= inst
->Instruction
.Opcode
;
5817 case TGSI_OPCODE_ADD
:
5818 case TGSI_OPCODE_AND
:
5819 case TGSI_OPCODE_BGNLOOP
:
5820 case TGSI_OPCODE_BRK
:
5821 case TGSI_OPCODE_CEIL
:
5822 case TGSI_OPCODE_CONT
:
5823 case TGSI_OPCODE_DDX
:
5824 case TGSI_OPCODE_DDY
:
5825 case TGSI_OPCODE_DIV
:
5826 case TGSI_OPCODE_DP2
:
5827 case TGSI_OPCODE_DP3
:
5828 case TGSI_OPCODE_DP4
:
5829 case TGSI_OPCODE_ELSE
:
5830 case TGSI_OPCODE_ENDIF
:
5831 case TGSI_OPCODE_ENDLOOP
:
5832 case TGSI_OPCODE_ENDSUB
:
5833 case TGSI_OPCODE_F2I
:
5834 case TGSI_OPCODE_F2U
:
5835 case TGSI_OPCODE_FLR
:
5836 case TGSI_OPCODE_FRC
:
5837 case TGSI_OPCODE_FSEQ
:
5838 case TGSI_OPCODE_FSGE
:
5839 case TGSI_OPCODE_FSLT
:
5840 case TGSI_OPCODE_FSNE
:
5841 case TGSI_OPCODE_I2F
:
5842 case TGSI_OPCODE_IMAX
:
5843 case TGSI_OPCODE_IMIN
:
5844 case TGSI_OPCODE_INEG
:
5845 case TGSI_OPCODE_ISGE
:
5846 case TGSI_OPCODE_ISHR
:
5847 case TGSI_OPCODE_ISLT
:
5848 case TGSI_OPCODE_MAD
:
5849 case TGSI_OPCODE_MAX
:
5850 case TGSI_OPCODE_MIN
:
5851 case TGSI_OPCODE_MUL
:
5852 case TGSI_OPCODE_NOP
:
5853 case TGSI_OPCODE_NOT
:
5854 case TGSI_OPCODE_OR
:
5855 case TGSI_OPCODE_RET
:
5856 case TGSI_OPCODE_UADD
:
5857 case TGSI_OPCODE_USEQ
:
5858 case TGSI_OPCODE_USGE
:
5859 case TGSI_OPCODE_USLT
:
5860 case TGSI_OPCODE_UMIN
:
5861 case TGSI_OPCODE_UMAD
:
5862 case TGSI_OPCODE_UMAX
:
5863 case TGSI_OPCODE_ROUND
:
5864 case TGSI_OPCODE_SQRT
:
5865 case TGSI_OPCODE_SHL
:
5866 case TGSI_OPCODE_TRUNC
:
5867 case TGSI_OPCODE_U2F
:
5868 case TGSI_OPCODE_UCMP
:
5869 case TGSI_OPCODE_USHR
:
5870 case TGSI_OPCODE_USNE
:
5871 case TGSI_OPCODE_XOR
:
5872 /* simple instructions */
5873 return emit_simple(emit
, inst
);
5875 case TGSI_OPCODE_MOV
:
5876 return emit_mov(emit
, inst
);
5877 case TGSI_OPCODE_EMIT
:
5878 return emit_vertex(emit
, inst
);
5879 case TGSI_OPCODE_ENDPRIM
:
5880 return emit_endprim(emit
, inst
);
5881 case TGSI_OPCODE_IABS
:
5882 return emit_iabs(emit
, inst
);
5883 case TGSI_OPCODE_ARL
:
5885 case TGSI_OPCODE_UARL
:
5886 return emit_arl_uarl(emit
, inst
);
5887 case TGSI_OPCODE_BGNSUB
:
5890 case TGSI_OPCODE_CAL
:
5891 return emit_cal(emit
, inst
);
5892 case TGSI_OPCODE_CMP
:
5893 return emit_cmp(emit
, inst
);
5894 case TGSI_OPCODE_COS
:
5895 return emit_sincos(emit
, inst
);
5896 case TGSI_OPCODE_DST
:
5897 return emit_dst(emit
, inst
);
5898 case TGSI_OPCODE_EX2
:
5899 return emit_ex2(emit
, inst
);
5900 case TGSI_OPCODE_EXP
:
5901 return emit_exp(emit
, inst
);
5902 case TGSI_OPCODE_IF
:
5903 return emit_if(emit
, inst
);
5904 case TGSI_OPCODE_KILL
:
5905 return emit_kill(emit
, inst
);
5906 case TGSI_OPCODE_KILL_IF
:
5907 return emit_kill_if(emit
, inst
);
5908 case TGSI_OPCODE_LG2
:
5909 return emit_lg2(emit
, inst
);
5910 case TGSI_OPCODE_LIT
:
5911 return emit_lit(emit
, inst
);
5912 case TGSI_OPCODE_LODQ
:
5913 return emit_lodq(emit
, inst
);
5914 case TGSI_OPCODE_LOG
:
5915 return emit_log(emit
, inst
);
5916 case TGSI_OPCODE_LRP
:
5917 return emit_lrp(emit
, inst
);
5918 case TGSI_OPCODE_POW
:
5919 return emit_pow(emit
, inst
);
5920 case TGSI_OPCODE_RCP
:
5921 return emit_rcp(emit
, inst
);
5922 case TGSI_OPCODE_RSQ
:
5923 return emit_rsq(emit
, inst
);
5924 case TGSI_OPCODE_SAMPLE
:
5925 return emit_sample(emit
, inst
);
5926 case TGSI_OPCODE_SEQ
:
5927 return emit_seq(emit
, inst
);
5928 case TGSI_OPCODE_SGE
:
5929 return emit_sge(emit
, inst
);
5930 case TGSI_OPCODE_SGT
:
5931 return emit_sgt(emit
, inst
);
5932 case TGSI_OPCODE_SIN
:
5933 return emit_sincos(emit
, inst
);
5934 case TGSI_OPCODE_SLE
:
5935 return emit_sle(emit
, inst
);
5936 case TGSI_OPCODE_SLT
:
5937 return emit_slt(emit
, inst
);
5938 case TGSI_OPCODE_SNE
:
5939 return emit_sne(emit
, inst
);
5940 case TGSI_OPCODE_SSG
:
5941 return emit_ssg(emit
, inst
);
5942 case TGSI_OPCODE_ISSG
:
5943 return emit_issg(emit
, inst
);
5944 case TGSI_OPCODE_TEX
:
5945 return emit_tex(emit
, inst
);
5946 case TGSI_OPCODE_TG4
:
5947 return emit_tg4(emit
, inst
);
5948 case TGSI_OPCODE_TEX2
:
5949 return emit_tex2(emit
, inst
);
5950 case TGSI_OPCODE_TXP
:
5951 return emit_txp(emit
, inst
);
5952 case TGSI_OPCODE_TXB
:
5953 case TGSI_OPCODE_TXB2
:
5954 case TGSI_OPCODE_TXL
:
5955 return emit_txl_txb(emit
, inst
);
5956 case TGSI_OPCODE_TXD
:
5957 return emit_txd(emit
, inst
);
5958 case TGSI_OPCODE_TXF
:
5959 return emit_txf(emit
, inst
);
5960 case TGSI_OPCODE_TXL2
:
5961 return emit_txl2(emit
, inst
);
5962 case TGSI_OPCODE_TXQ
:
5963 return emit_txq(emit
, inst
);
5964 case TGSI_OPCODE_UIF
:
5965 return emit_if(emit
, inst
);
5966 case TGSI_OPCODE_UMUL_HI
:
5967 case TGSI_OPCODE_IMUL_HI
:
5968 case TGSI_OPCODE_UDIV
:
5969 case TGSI_OPCODE_IDIV
:
5970 /* These cases use only the FIRST of two destination registers */
5971 return emit_simple_1dst(emit
, inst
, 2, 0);
5972 case TGSI_OPCODE_UMUL
:
5973 case TGSI_OPCODE_UMOD
:
5974 case TGSI_OPCODE_MOD
:
5975 /* These cases use only the SECOND of two destination registers */
5976 return emit_simple_1dst(emit
, inst
, 2, 1);
5977 case TGSI_OPCODE_END
:
5978 if (!emit_post_helpers(emit
))
5980 return emit_simple(emit
, inst
);
5983 debug_printf("Unimplemented tgsi instruction %s\n",
5984 tgsi_get_opcode_name(opcode
));
5993 * Emit the extra instructions to adjust the vertex position.
5994 * There are two possible adjustments:
5995 * 1. Converting from Gallium to VGPU10 coordinate space by applying the
5996 * "prescale" and "pretranslate" values.
5997 * 2. Undoing the viewport transformation when we use the swtnl/draw path.
5998 * \param vs_pos_tmp_index which temporary register contains the vertex pos.
6001 emit_vpos_instructions(struct svga_shader_emitter_v10
*emit
,
6002 unsigned vs_pos_tmp_index
)
6004 struct tgsi_full_src_register tmp_pos_src
;
6005 struct tgsi_full_dst_register pos_dst
;
6007 /* Don't bother to emit any extra vertex instructions if vertex position is
6010 if (emit
->vposition
.out_index
== INVALID_INDEX
)
6013 tmp_pos_src
= make_src_temp_reg(vs_pos_tmp_index
);
6014 pos_dst
= make_dst_output_reg(emit
->vposition
.out_index
);
6016 /* If non-adjusted vertex position register index
6017 * is valid, copy the vertex position from the temporary
6018 * vertex position register before it is modified by the
6019 * prescale computation.
6021 if (emit
->vposition
.so_index
!= INVALID_INDEX
) {
6022 struct tgsi_full_dst_register pos_so_dst
=
6023 make_dst_output_reg(emit
->vposition
.so_index
);
6025 /* MOV pos_so, tmp_pos */
6026 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &pos_so_dst
,
6027 &tmp_pos_src
, FALSE
);
6030 if (emit
->vposition
.need_prescale
) {
6031 /* This code adjusts the vertex position to match the VGPU10 convention.
6032 * If p is the position computed by the shader (usually by applying the
6033 * modelview and projection matrices), the new position q is computed by:
6035 * q.x = p.w * trans.x + p.x * scale.x
6036 * q.y = p.w * trans.y + p.y * scale.y
6037 * q.z = p.w * trans.z + p.z * scale.z;
6038 * q.w = p.w * trans.w + p.w;
6040 struct tgsi_full_src_register tmp_pos_src_w
=
6041 scalar_src(&tmp_pos_src
, TGSI_SWIZZLE_W
);
6042 struct tgsi_full_dst_register tmp_pos_dst
=
6043 make_dst_temp_reg(vs_pos_tmp_index
);
6044 struct tgsi_full_dst_register tmp_pos_dst_xyz
=
6045 writemask_dst(&tmp_pos_dst
, TGSI_WRITEMASK_XYZ
);
6047 struct tgsi_full_src_register prescale_scale
=
6048 make_src_const_reg(emit
->vposition
.prescale_scale_index
);
6049 struct tgsi_full_src_register prescale_trans
=
6050 make_src_const_reg(emit
->vposition
.prescale_trans_index
);
6052 /* MUL tmp_pos.xyz, tmp_pos, prescale.scale */
6053 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp_pos_dst_xyz
,
6054 &tmp_pos_src
, &prescale_scale
, FALSE
);
6056 /* MAD pos, tmp_pos.wwww, prescale.trans, tmp_pos */
6057 emit_instruction_op3(emit
, VGPU10_OPCODE_MAD
, &pos_dst
, &tmp_pos_src_w
,
6058 &prescale_trans
, &tmp_pos_src
, FALSE
);
6060 else if (emit
->key
.vs
.undo_viewport
) {
6061 /* This code computes the final vertex position from the temporary
6062 * vertex position by undoing the viewport transformation and the
6063 * divide-by-W operation (we convert window coords back to clip coords).
6064 * This is needed when we use the 'draw' module for fallbacks.
6065 * If p is the temp pos in window coords, then the NDC coord q is:
6066 * q.x = (p.x - vp.x_trans) / vp.x_scale * p.w
6067 * q.y = (p.y - vp.y_trans) / vp.y_scale * p.w
6070 * CONST[vs_viewport_index] contains:
6071 * { 1/vp.x_scale, 1/vp.y_scale, -vp.x_trans, -vp.y_trans }
6073 struct tgsi_full_dst_register tmp_pos_dst
=
6074 make_dst_temp_reg(vs_pos_tmp_index
);
6075 struct tgsi_full_dst_register tmp_pos_dst_xy
=
6076 writemask_dst(&tmp_pos_dst
, TGSI_WRITEMASK_XY
);
6077 struct tgsi_full_src_register tmp_pos_src_wwww
=
6078 scalar_src(&tmp_pos_src
, TGSI_SWIZZLE_W
);
6080 struct tgsi_full_dst_register pos_dst_xyz
=
6081 writemask_dst(&pos_dst
, TGSI_WRITEMASK_XYZ
);
6082 struct tgsi_full_dst_register pos_dst_w
=
6083 writemask_dst(&pos_dst
, TGSI_WRITEMASK_W
);
6085 struct tgsi_full_src_register vp_xyzw
=
6086 make_src_const_reg(emit
->vs
.viewport_index
);
6087 struct tgsi_full_src_register vp_zwww
=
6088 swizzle_src(&vp_xyzw
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_W
,
6089 TGSI_SWIZZLE_W
, TGSI_SWIZZLE_W
);
6091 /* ADD tmp_pos.xy, tmp_pos.xy, viewport.zwww */
6092 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &tmp_pos_dst_xy
,
6093 &tmp_pos_src
, &vp_zwww
, FALSE
);
6095 /* MUL tmp_pos.xy, tmp_pos.xyzw, viewport.xyzy */
6096 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp_pos_dst_xy
,
6097 &tmp_pos_src
, &vp_xyzw
, FALSE
);
6099 /* MUL pos.xyz, tmp_pos.xyz, tmp_pos.www */
6100 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &pos_dst_xyz
,
6101 &tmp_pos_src
, &tmp_pos_src_wwww
, FALSE
);
6103 /* MOV pos.w, tmp_pos.w */
6104 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &pos_dst_w
,
6105 &tmp_pos_src
, FALSE
);
6107 else if (vs_pos_tmp_index
!= INVALID_INDEX
) {
6108 /* This code is to handle the case where the temporary vertex
6109 * position register is created when the vertex shader has stream
6110 * output and prescale is disabled because rasterization is to be
6113 struct tgsi_full_dst_register pos_dst
=
6114 make_dst_output_reg(emit
->vposition
.out_index
);
6116 /* MOV pos, tmp_pos */
6117 begin_emit_instruction(emit
);
6118 emit_opcode(emit
, VGPU10_OPCODE_MOV
, FALSE
);
6119 emit_dst_register(emit
, &pos_dst
);
6120 emit_src_register(emit
, &tmp_pos_src
);
6121 end_emit_instruction(emit
);
6126 emit_clipping_instructions(struct svga_shader_emitter_v10
*emit
)
6128 if (emit
->clip_mode
== CLIP_DISTANCE
) {
6129 /* Copy from copy distance temporary to CLIPDIST & the shadow copy */
6130 emit_clip_distance_instructions(emit
);
6132 } else if (emit
->clip_mode
== CLIP_VERTEX
) {
6133 /* Convert TGSI CLIPVERTEX to CLIPDIST */
6134 emit_clip_vertex_instructions(emit
);
6138 * Emit vertex position and take care of legacy user planes only if
6139 * there is a valid vertex position register index.
6140 * This is to take care of the case
6141 * where the shader doesn't output vertex position. Then in
6142 * this case, don't bother to emit more vertex instructions.
6144 if (emit
->vposition
.out_index
== INVALID_INDEX
)
6148 * Emit per-vertex clipping instructions for legacy user defined clip planes.
6149 * NOTE: we must emit the clip distance instructions before the
6150 * emit_vpos_instructions() call since the later function will change
6151 * the TEMP[vs_pos_tmp_index] value.
6153 if (emit
->clip_mode
== CLIP_LEGACY
) {
6154 /* Emit CLIPDIST for legacy user defined clip planes */
6155 emit_clip_distance_from_vpos(emit
, emit
->vposition
.tmp_index
);
6161 * Emit extra per-vertex instructions. This includes clip-coordinate
6162 * space conversion and computing clip distances. This is called for
6163 * each GS emit-vertex instruction and at the end of VS translation.
6166 emit_vertex_instructions(struct svga_shader_emitter_v10
*emit
)
6168 const unsigned vs_pos_tmp_index
= emit
->vposition
.tmp_index
;
6170 /* Emit clipping instructions based on clipping mode */
6171 emit_clipping_instructions(emit
);
6174 * Reset the temporary vertex position register index
6175 * so that emit_dst_register() will use the real vertex position output
6177 emit
->vposition
.tmp_index
= INVALID_INDEX
;
6179 /* Emit vertex position instructions */
6180 emit_vpos_instructions(emit
, vs_pos_tmp_index
);
6182 /* Restore original vposition.tmp_index value for the next GS vertex.
6183 * It doesn't matter for VS.
6185 emit
->vposition
.tmp_index
= vs_pos_tmp_index
;
6189 * Translate the TGSI_OPCODE_EMIT GS instruction.
6192 emit_vertex(struct svga_shader_emitter_v10
*emit
,
6193 const struct tgsi_full_instruction
*inst
)
6195 unsigned ret
= TRUE
;
6197 assert(emit
->unit
== PIPE_SHADER_GEOMETRY
);
6199 emit_vertex_instructions(emit
);
6201 /* We can't use emit_simple() because the TGSI instruction has one
6202 * operand (vertex stream number) which we must ignore for VGPU10.
6204 begin_emit_instruction(emit
);
6205 emit_opcode(emit
, VGPU10_OPCODE_EMIT
, FALSE
);
6206 end_emit_instruction(emit
);
6213 * Emit the extra code to convert from VGPU10's boolean front-face
6214 * register to TGSI's signed front-face register.
6216 * TODO: Make temporary front-face register a scalar.
6219 emit_frontface_instructions(struct svga_shader_emitter_v10
*emit
)
6221 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
6223 if (emit
->fs
.face_input_index
!= INVALID_INDEX
) {
6224 /* convert vgpu10 boolean face register to gallium +/-1 value */
6225 struct tgsi_full_dst_register tmp_dst
=
6226 make_dst_temp_reg(emit
->fs
.face_tmp_index
);
6227 struct tgsi_full_src_register one
=
6228 make_immediate_reg_float(emit
, 1.0f
);
6229 struct tgsi_full_src_register neg_one
=
6230 make_immediate_reg_float(emit
, -1.0f
);
6232 /* MOVC face_tmp, IS_FRONT_FACE.x, 1.0, -1.0 */
6233 begin_emit_instruction(emit
);
6234 emit_opcode(emit
, VGPU10_OPCODE_MOVC
, FALSE
);
6235 emit_dst_register(emit
, &tmp_dst
);
6236 emit_face_register(emit
);
6237 emit_src_register(emit
, &one
);
6238 emit_src_register(emit
, &neg_one
);
6239 end_emit_instruction(emit
);
6245 * Emit the extra code to convert from VGPU10's fragcoord.w value to 1/w.
6248 emit_fragcoord_instructions(struct svga_shader_emitter_v10
*emit
)
6250 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
6252 if (emit
->fs
.fragcoord_input_index
!= INVALID_INDEX
) {
6253 struct tgsi_full_dst_register tmp_dst
=
6254 make_dst_temp_reg(emit
->fs
.fragcoord_tmp_index
);
6255 struct tgsi_full_dst_register tmp_dst_xyz
=
6256 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_XYZ
);
6257 struct tgsi_full_dst_register tmp_dst_w
=
6258 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_W
);
6259 struct tgsi_full_src_register one
=
6260 make_immediate_reg_float(emit
, 1.0f
);
6261 struct tgsi_full_src_register fragcoord
=
6262 make_src_reg(TGSI_FILE_INPUT
, emit
->fs
.fragcoord_input_index
);
6264 /* save the input index */
6265 unsigned fragcoord_input_index
= emit
->fs
.fragcoord_input_index
;
6266 /* set to invalid to prevent substitution in emit_src_register() */
6267 emit
->fs
.fragcoord_input_index
= INVALID_INDEX
;
6269 /* MOV fragcoord_tmp.xyz, fragcoord.xyz */
6270 begin_emit_instruction(emit
);
6271 emit_opcode(emit
, VGPU10_OPCODE_MOV
, FALSE
);
6272 emit_dst_register(emit
, &tmp_dst_xyz
);
6273 emit_src_register(emit
, &fragcoord
);
6274 end_emit_instruction(emit
);
6276 /* DIV fragcoord_tmp.w, 1.0, fragcoord.w */
6277 begin_emit_instruction(emit
);
6278 emit_opcode(emit
, VGPU10_OPCODE_DIV
, FALSE
);
6279 emit_dst_register(emit
, &tmp_dst_w
);
6280 emit_src_register(emit
, &one
);
6281 emit_src_register(emit
, &fragcoord
);
6282 end_emit_instruction(emit
);
6284 /* restore saved value */
6285 emit
->fs
.fragcoord_input_index
= fragcoord_input_index
;
6291 * Emit the extra code to get the current sample position value and
6292 * put it into a temp register.
6295 emit_sample_position_instructions(struct svga_shader_emitter_v10
*emit
)
6297 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
6299 if (emit
->fs
.sample_pos_sys_index
!= INVALID_INDEX
) {
6300 assert(emit
->version
>= 41);
6302 struct tgsi_full_dst_register tmp_dst
=
6303 make_dst_temp_reg(emit
->fs
.sample_pos_tmp_index
);
6304 struct tgsi_full_src_register half
=
6305 make_immediate_reg_float4(emit
, 0.5, 0.5, 0.0, 0.0);
6307 struct tgsi_full_src_register tmp_src
=
6308 make_src_temp_reg(emit
->fs
.sample_pos_tmp_index
);
6309 struct tgsi_full_src_register sample_index_reg
=
6310 make_src_scalar_reg(TGSI_FILE_SYSTEM_VALUE
,
6311 emit
->fs
.sample_id_sys_index
, TGSI_SWIZZLE_X
);
6313 /* The first src register is a shader resource (if we want a
6314 * multisampled resource sample position) or the rasterizer register
6315 * (if we want the current sample position in the color buffer). We
6319 /* SAMPLE_POS dst, RASTERIZER, sampleIndex */
6320 begin_emit_instruction(emit
);
6321 emit_opcode(emit
, VGPU10_OPCODE_SAMPLE_POS
, FALSE
);
6322 emit_dst_register(emit
, &tmp_dst
);
6323 emit_rasterizer_register(emit
);
6324 emit_src_register(emit
, &sample_index_reg
);
6325 end_emit_instruction(emit
);
6327 /* Convert from D3D coords to GL coords by adding 0.5 bias */
6328 /* ADD dst, dst, half */
6329 begin_emit_instruction(emit
);
6330 emit_opcode(emit
, VGPU10_OPCODE_ADD
, FALSE
);
6331 emit_dst_register(emit
, &tmp_dst
);
6332 emit_src_register(emit
, &tmp_src
);
6333 emit_src_register(emit
, &half
);
6334 end_emit_instruction(emit
);
6340 * Emit extra instructions to adjust VS inputs/attributes. This can
6341 * mean casting a vertex attribute from int to float or setting the
6342 * W component to 1, or both.
6345 emit_vertex_attrib_instructions(struct svga_shader_emitter_v10
*emit
)
6347 const unsigned save_w_1_mask
= emit
->key
.vs
.adjust_attrib_w_1
;
6348 const unsigned save_itof_mask
= emit
->key
.vs
.adjust_attrib_itof
;
6349 const unsigned save_utof_mask
= emit
->key
.vs
.adjust_attrib_utof
;
6350 const unsigned save_is_bgra_mask
= emit
->key
.vs
.attrib_is_bgra
;
6351 const unsigned save_puint_to_snorm_mask
= emit
->key
.vs
.attrib_puint_to_snorm
;
6352 const unsigned save_puint_to_uscaled_mask
= emit
->key
.vs
.attrib_puint_to_uscaled
;
6353 const unsigned save_puint_to_sscaled_mask
= emit
->key
.vs
.attrib_puint_to_sscaled
;
6355 unsigned adjust_mask
= (save_w_1_mask
|
6359 save_puint_to_snorm_mask
|
6360 save_puint_to_uscaled_mask
|
6361 save_puint_to_sscaled_mask
);
6363 assert(emit
->unit
== PIPE_SHADER_VERTEX
);
6366 struct tgsi_full_src_register one
=
6367 make_immediate_reg_float(emit
, 1.0f
);
6369 struct tgsi_full_src_register one_int
=
6370 make_immediate_reg_int(emit
, 1);
6372 /* We need to turn off these bitmasks while emitting the
6373 * instructions below, then restore them afterward.
6375 emit
->key
.vs
.adjust_attrib_w_1
= 0;
6376 emit
->key
.vs
.adjust_attrib_itof
= 0;
6377 emit
->key
.vs
.adjust_attrib_utof
= 0;
6378 emit
->key
.vs
.attrib_is_bgra
= 0;
6379 emit
->key
.vs
.attrib_puint_to_snorm
= 0;
6380 emit
->key
.vs
.attrib_puint_to_uscaled
= 0;
6381 emit
->key
.vs
.attrib_puint_to_sscaled
= 0;
6383 while (adjust_mask
) {
6384 unsigned index
= u_bit_scan(&adjust_mask
);
6386 /* skip the instruction if this vertex attribute is not being used */
6387 if (emit
->info
.input_usage_mask
[index
] == 0)
6390 unsigned tmp
= emit
->vs
.adjusted_input
[index
];
6391 struct tgsi_full_src_register input_src
=
6392 make_src_reg(TGSI_FILE_INPUT
, index
);
6394 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
6395 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
6396 struct tgsi_full_dst_register tmp_dst_w
=
6397 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_W
);
6399 /* ITOF/UTOF/MOV tmp, input[index] */
6400 if (save_itof_mask
& (1 << index
)) {
6401 emit_instruction_op1(emit
, VGPU10_OPCODE_ITOF
,
6402 &tmp_dst
, &input_src
, FALSE
);
6404 else if (save_utof_mask
& (1 << index
)) {
6405 emit_instruction_op1(emit
, VGPU10_OPCODE_UTOF
,
6406 &tmp_dst
, &input_src
, FALSE
);
6408 else if (save_puint_to_snorm_mask
& (1 << index
)) {
6409 emit_puint_to_snorm(emit
, &tmp_dst
, &input_src
);
6411 else if (save_puint_to_uscaled_mask
& (1 << index
)) {
6412 emit_puint_to_uscaled(emit
, &tmp_dst
, &input_src
);
6414 else if (save_puint_to_sscaled_mask
& (1 << index
)) {
6415 emit_puint_to_sscaled(emit
, &tmp_dst
, &input_src
);
6418 assert((save_w_1_mask
| save_is_bgra_mask
) & (1 << index
));
6419 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
6420 &tmp_dst
, &input_src
, FALSE
);
6423 if (save_is_bgra_mask
& (1 << index
)) {
6424 emit_swap_r_b(emit
, &tmp_dst
, &tmp_src
);
6427 if (save_w_1_mask
& (1 << index
)) {
6428 /* MOV tmp.w, 1.0 */
6429 if (emit
->key
.vs
.attrib_is_pure_int
& (1 << index
)) {
6430 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
6431 &tmp_dst_w
, &one_int
, FALSE
);
6434 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
6435 &tmp_dst_w
, &one
, FALSE
);
6440 emit
->key
.vs
.adjust_attrib_w_1
= save_w_1_mask
;
6441 emit
->key
.vs
.adjust_attrib_itof
= save_itof_mask
;
6442 emit
->key
.vs
.adjust_attrib_utof
= save_utof_mask
;
6443 emit
->key
.vs
.attrib_is_bgra
= save_is_bgra_mask
;
6444 emit
->key
.vs
.attrib_puint_to_snorm
= save_puint_to_snorm_mask
;
6445 emit
->key
.vs
.attrib_puint_to_uscaled
= save_puint_to_uscaled_mask
;
6446 emit
->key
.vs
.attrib_puint_to_sscaled
= save_puint_to_sscaled_mask
;
6452 * Some common values like 0.0, 1.0, 0.5, etc. are frequently needed
6453 * to implement some instructions. We pre-allocate those values here
6454 * in the immediate constant buffer.
6457 alloc_common_immediates(struct svga_shader_emitter_v10
*emit
)
6461 emit
->common_immediate_pos
[n
++] =
6462 alloc_immediate_float4(emit
, 0.0f
, 1.0f
, 0.5f
, -1.0f
);
6464 if (emit
->info
.opcode_count
[TGSI_OPCODE_LIT
] > 0) {
6465 emit
->common_immediate_pos
[n
++] =
6466 alloc_immediate_float4(emit
, 128.0f
, -128.0f
, 0.0f
, 0.0f
);
6469 emit
->common_immediate_pos
[n
++] =
6470 alloc_immediate_int4(emit
, 0, 1, 0, -1);
6472 if (emit
->key
.vs
.attrib_puint_to_snorm
) {
6473 emit
->common_immediate_pos
[n
++] =
6474 alloc_immediate_float4(emit
, -2.0f
, 2.0f
, 3.0f
, -1.66666f
);
6477 if (emit
->key
.vs
.attrib_puint_to_uscaled
) {
6478 emit
->common_immediate_pos
[n
++] =
6479 alloc_immediate_float4(emit
, 1023.0f
, 3.0f
, 0.0f
, 0.0f
);
6482 if (emit
->key
.vs
.attrib_puint_to_sscaled
) {
6483 emit
->common_immediate_pos
[n
++] =
6484 alloc_immediate_int4(emit
, 22, 12, 2, 0);
6486 emit
->common_immediate_pos
[n
++] =
6487 alloc_immediate_int4(emit
, 22, 30, 0, 0);
6492 for (i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++) {
6493 if (emit
->key
.tex
[i
].texel_bias
) {
6494 /* Replace 0.0f if more immediate float value is needed */
6495 emit
->common_immediate_pos
[n
++] =
6496 alloc_immediate_float4(emit
, 0.0001f
, 0.0f
, 0.0f
, 0.0f
);
6501 assert(n
<= ARRAY_SIZE(emit
->common_immediate_pos
));
6502 emit
->num_common_immediates
= n
;
6507 * Emit any extra/helper declarations/code that we might need between
6508 * the declaration section and code section.
6511 emit_pre_helpers(struct svga_shader_emitter_v10
*emit
)
6514 if (emit
->unit
== PIPE_SHADER_GEOMETRY
)
6515 emit_property_instructions(emit
);
6517 /* Declare inputs */
6518 if (!emit_input_declarations(emit
))
6521 /* Declare outputs */
6522 if (!emit_output_declarations(emit
))
6525 /* Declare temporary registers */
6526 emit_temporaries_declaration(emit
);
6528 /* Declare constant registers */
6529 emit_constant_declaration(emit
);
6531 /* Declare samplers and resources */
6532 emit_sampler_declarations(emit
);
6533 emit_resource_declarations(emit
);
6535 /* Declare clip distance output registers */
6536 if (emit
->unit
== PIPE_SHADER_VERTEX
||
6537 emit
->unit
== PIPE_SHADER_GEOMETRY
) {
6538 emit_clip_distance_declarations(emit
);
6541 alloc_common_immediates(emit
);
6543 if (emit
->unit
== PIPE_SHADER_FRAGMENT
&&
6544 emit
->key
.fs
.alpha_func
!= SVGA3D_CMP_ALWAYS
) {
6545 float alpha
= emit
->key
.fs
.alpha_ref
;
6546 emit
->fs
.alpha_ref_index
=
6547 alloc_immediate_float4(emit
, alpha
, alpha
, alpha
, alpha
);
6550 /* Now, emit the constant block containing all the immediates
6551 * declared by shader, as well as the extra ones seen above.
6553 emit_vgpu10_immediates_block(emit
);
6555 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
6556 emit_frontface_instructions(emit
);
6557 emit_fragcoord_instructions(emit
);
6558 emit_sample_position_instructions(emit
);
6560 else if (emit
->unit
== PIPE_SHADER_VERTEX
) {
6561 emit_vertex_attrib_instructions(emit
);
6569 * The device has no direct support for the pipe_blend_state::alpha_to_one
6570 * option so we implement it here with shader code.
6572 * Note that this is kind of pointless, actually. Here we're clobbering
6573 * the alpha value with 1.0. So if alpha-to-coverage is enabled, we'll wind
6574 * up with 100% coverage. That's almost certainly not what the user wants.
6575 * The work-around is to add extra shader code to compute coverage from alpha
6576 * and write it to the coverage output register (if the user's shader doesn't
6577 * do so already). We'll probably do that in the future.
6580 emit_alpha_to_one_instructions(struct svga_shader_emitter_v10
*emit
,
6581 unsigned fs_color_tmp_index
)
6583 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
6586 /* Note: it's not 100% clear from the spec if we're supposed to clobber
6587 * the alpha for all render targets. But that's what NVIDIA does and
6588 * that's what Piglit tests.
6590 for (i
= 0; i
< emit
->fs
.num_color_outputs
; i
++) {
6591 struct tgsi_full_dst_register color_dst
;
6593 if (fs_color_tmp_index
!= INVALID_INDEX
&& i
== 0) {
6594 /* write to the temp color register */
6595 color_dst
= make_dst_temp_reg(fs_color_tmp_index
);
6598 /* write directly to the color[i] output */
6599 color_dst
= make_dst_output_reg(emit
->fs
.color_out_index
[i
]);
6602 color_dst
= writemask_dst(&color_dst
, TGSI_WRITEMASK_W
);
6604 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &color_dst
, &one
, FALSE
);
6610 * Emit alpha test code. This compares TEMP[fs_color_tmp_index].w
6611 * against the alpha reference value and discards the fragment if the
6615 emit_alpha_test_instructions(struct svga_shader_emitter_v10
*emit
,
6616 unsigned fs_color_tmp_index
)
6618 /* compare output color's alpha to alpha ref and kill */
6619 unsigned tmp
= get_temp_index(emit
);
6620 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
6621 struct tgsi_full_src_register tmp_src_x
=
6622 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
6623 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
6624 struct tgsi_full_src_register color_src
=
6625 make_src_temp_reg(fs_color_tmp_index
);
6626 struct tgsi_full_src_register color_src_w
=
6627 scalar_src(&color_src
, TGSI_SWIZZLE_W
);
6628 struct tgsi_full_src_register ref_src
=
6629 make_src_immediate_reg(emit
->fs
.alpha_ref_index
);
6630 struct tgsi_full_dst_register color_dst
=
6631 make_dst_output_reg(emit
->fs
.color_out_index
[0]);
6633 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
6635 /* dst = src0 'alpha_func' src1 */
6636 emit_comparison(emit
, emit
->key
.fs
.alpha_func
, &tmp_dst
,
6637 &color_src_w
, &ref_src
);
6639 /* DISCARD if dst.x == 0 */
6640 begin_emit_instruction(emit
);
6641 emit_discard_opcode(emit
, FALSE
); /* discard if src0.x is zero */
6642 emit_src_register(emit
, &tmp_src_x
);
6643 end_emit_instruction(emit
);
6645 /* If we don't need to broadcast the color below, emit the final color here.
6647 if (emit
->key
.fs
.write_color0_to_n_cbufs
<= 1) {
6648 /* MOV output.color, tempcolor */
6649 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &color_dst
,
6650 &color_src
, FALSE
); /* XXX saturate? */
6653 free_temp_indexes(emit
);
6658 * Emit instructions for writing a single color output to multiple
6660 * This is used when the TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS (or
6661 * when key.fs.white_fragments is true).
6662 * property is set and the number of render targets is greater than one.
6663 * \param fs_color_tmp_index index of the temp register that holds the
6664 * color to broadcast.
6667 emit_broadcast_color_instructions(struct svga_shader_emitter_v10
*emit
,
6668 unsigned fs_color_tmp_index
)
6670 const unsigned n
= emit
->key
.fs
.write_color0_to_n_cbufs
;
6672 struct tgsi_full_src_register color_src
;
6674 if (emit
->key
.fs
.white_fragments
) {
6675 /* set all color outputs to white */
6676 color_src
= make_immediate_reg_float(emit
, 1.0f
);
6679 /* set all color outputs to TEMP[fs_color_tmp_index] */
6680 assert(fs_color_tmp_index
!= INVALID_INDEX
);
6681 color_src
= make_src_temp_reg(fs_color_tmp_index
);
6684 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
6686 for (i
= 0; i
< n
; i
++) {
6687 unsigned output_reg
= emit
->fs
.color_out_index
[i
];
6688 struct tgsi_full_dst_register color_dst
=
6689 make_dst_output_reg(output_reg
);
6691 /* Fill in this semantic here since we'll use it later in
6692 * emit_dst_register().
6694 emit
->info
.output_semantic_name
[output_reg
] = TGSI_SEMANTIC_COLOR
;
6696 /* MOV output.color[i], tempcolor */
6697 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &color_dst
,
6698 &color_src
, FALSE
); /* XXX saturate? */
6704 * Emit extra helper code after the original shader code, but before the
6705 * last END/RET instruction.
6706 * For vertex shaders this means emitting the extra code to apply the
6707 * prescale scale/translation.
6710 emit_post_helpers(struct svga_shader_emitter_v10
*emit
)
6712 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
6713 emit_vertex_instructions(emit
);
6715 else if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
6716 const unsigned fs_color_tmp_index
= emit
->fs
.color_tmp_index
;
6718 assert(!(emit
->key
.fs
.white_fragments
&&
6719 emit
->key
.fs
.write_color0_to_n_cbufs
== 0));
6721 /* We no longer want emit_dst_register() to substitute the
6722 * temporary fragment color register for the real color output.
6724 emit
->fs
.color_tmp_index
= INVALID_INDEX
;
6726 if (emit
->key
.fs
.alpha_to_one
) {
6727 emit_alpha_to_one_instructions(emit
, fs_color_tmp_index
);
6729 if (emit
->key
.fs
.alpha_func
!= SVGA3D_CMP_ALWAYS
) {
6730 emit_alpha_test_instructions(emit
, fs_color_tmp_index
);
6732 if (emit
->key
.fs
.write_color0_to_n_cbufs
> 1 ||
6733 emit
->key
.fs
.white_fragments
) {
6734 emit_broadcast_color_instructions(emit
, fs_color_tmp_index
);
6743 * Translate the TGSI tokens into VGPU10 tokens.
6746 emit_vgpu10_instructions(struct svga_shader_emitter_v10
*emit
,
6747 const struct tgsi_token
*tokens
)
6749 struct tgsi_parse_context parse
;
6751 boolean pre_helpers_emitted
= FALSE
;
6752 unsigned inst_number
= 0;
6754 tgsi_parse_init(&parse
, tokens
);
6756 while (!tgsi_parse_end_of_tokens(&parse
)) {
6757 tgsi_parse_token(&parse
);
6759 switch (parse
.FullToken
.Token
.Type
) {
6760 case TGSI_TOKEN_TYPE_IMMEDIATE
:
6761 ret
= emit_vgpu10_immediate(emit
, &parse
.FullToken
.FullImmediate
);
6766 case TGSI_TOKEN_TYPE_DECLARATION
:
6767 ret
= emit_vgpu10_declaration(emit
, &parse
.FullToken
.FullDeclaration
);
6772 case TGSI_TOKEN_TYPE_INSTRUCTION
:
6773 if (!pre_helpers_emitted
) {
6774 ret
= emit_pre_helpers(emit
);
6777 pre_helpers_emitted
= TRUE
;
6779 ret
= emit_vgpu10_instruction(emit
, inst_number
++,
6780 &parse
.FullToken
.FullInstruction
);
6785 case TGSI_TOKEN_TYPE_PROPERTY
:
6786 ret
= emit_vgpu10_property(emit
, &parse
.FullToken
.FullProperty
);
6797 tgsi_parse_free(&parse
);
6803 * Emit the first VGPU10 shader tokens.
6806 emit_vgpu10_header(struct svga_shader_emitter_v10
*emit
)
6808 VGPU10ProgramToken ptoken
;
6810 /* First token: VGPU10ProgramToken (version info, program type (VS,GS,PS)) */
6811 ptoken
.majorVersion
= emit
->version
/ 10;
6812 ptoken
.minorVersion
= emit
->version
% 10;
6813 ptoken
.programType
= translate_shader_type(emit
->unit
);
6814 if (!emit_dword(emit
, ptoken
.value
))
6817 /* Second token: total length of shader, in tokens. We can't fill this
6818 * in until we're all done. Emit zero for now.
6820 return emit_dword(emit
, 0);
6825 emit_vgpu10_tail(struct svga_shader_emitter_v10
*emit
)
6827 VGPU10ProgramToken
*tokens
;
6829 /* Replace the second token with total shader length */
6830 tokens
= (VGPU10ProgramToken
*) emit
->buf
;
6831 tokens
[1].value
= emit_get_num_tokens(emit
);
/**
 * Modify the FS to read the BCOLORs and use the FACE register
 * to choose between the front/back colors.
 */
static const struct tgsi_token *
transform_fs_twoside(const struct tgsi_token *tokens)
{
   if (0) {
      debug_printf("Before tgsi_add_two_side ------------------\n");
      tgsi_dump(tokens,0);
   }
   tokens = tgsi_add_two_side(tokens);
   if (0) {
      debug_printf("After tgsi_add_two_side ------------------\n");
      tgsi_dump(tokens, 0);
   }
   return tokens;
}
6858 * Modify the FS to do polygon stipple.
6860 static const struct tgsi_token
*
6861 transform_fs_pstipple(struct svga_shader_emitter_v10
*emit
,
6862 const struct tgsi_token
*tokens
)
6864 const struct tgsi_token
*new_tokens
;
6868 debug_printf("Before pstipple ------------------\n");
6869 tgsi_dump(tokens
,0);
6872 new_tokens
= util_pstipple_create_fragment_shader(tokens
, &unit
, 0,
6875 emit
->fs
.pstipple_sampler_unit
= unit
;
6877 /* Setup texture state for stipple */
6878 emit
->sampler_target
[unit
] = TGSI_TEXTURE_2D
;
6879 emit
->key
.tex
[unit
].swizzle_r
= TGSI_SWIZZLE_X
;
6880 emit
->key
.tex
[unit
].swizzle_g
= TGSI_SWIZZLE_Y
;
6881 emit
->key
.tex
[unit
].swizzle_b
= TGSI_SWIZZLE_Z
;
6882 emit
->key
.tex
[unit
].swizzle_a
= TGSI_SWIZZLE_W
;
6885 debug_printf("After pstipple ------------------\n");
6886 tgsi_dump(new_tokens
, 0);
/**
 * Modify the FS to support anti-aliasing point.
 */
static const struct tgsi_token *
transform_fs_aapoint(const struct tgsi_token *tokens,
                     int aa_coord_index)
{
   if (0) {
      debug_printf("Before tgsi_add_aa_point ------------------\n");
      tgsi_dump(tokens,0);
   }
   tokens = tgsi_add_aa_point(tokens, aa_coord_index);
   if (0) {
      debug_printf("After tgsi_add_aa_point ------------------\n");
      tgsi_dump(tokens, 0);
   }
   return tokens;
}
6912 * This is the main entrypoint for the TGSI -> VPGU10 translator.
6914 struct svga_shader_variant
*
6915 svga_tgsi_vgpu10_translate(struct svga_context
*svga
,
6916 const struct svga_shader
*shader
,
6917 const struct svga_compile_key
*key
,
6918 enum pipe_shader_type unit
)
6920 struct svga_shader_variant
*variant
= NULL
;
6921 struct svga_shader_emitter_v10
*emit
;
6922 const struct tgsi_token
*tokens
= shader
->tokens
;
6923 struct svga_vertex_shader
*vs
= svga
->curr
.vs
;
6924 struct svga_geometry_shader
*gs
= svga
->curr
.gs
;
6926 assert(unit
== PIPE_SHADER_VERTEX
||
6927 unit
== PIPE_SHADER_GEOMETRY
||
6928 unit
== PIPE_SHADER_FRAGMENT
);
6930 /* These two flags cannot be used together */
6931 assert(key
->vs
.need_prescale
+ key
->vs
.undo_viewport
<= 1);
6933 SVGA_STATS_TIME_PUSH(svga_sws(svga
), SVGA_STATS_TIME_TGSIVGPU10TRANSLATE
);
6935 * Setup the code emitter
6937 emit
= alloc_emitter();
6942 emit
->version
= svga_have_sm4_1(svga
) ? 41 : 40;
6946 emit
->vposition
.need_prescale
= (emit
->key
.vs
.need_prescale
||
6947 emit
->key
.gs
.need_prescale
);
6948 emit
->vposition
.tmp_index
= INVALID_INDEX
;
6949 emit
->vposition
.so_index
= INVALID_INDEX
;
6950 emit
->vposition
.out_index
= INVALID_INDEX
;
6952 emit
->fs
.color_tmp_index
= INVALID_INDEX
;
6953 emit
->fs
.face_input_index
= INVALID_INDEX
;
6954 emit
->fs
.fragcoord_input_index
= INVALID_INDEX
;
6955 emit
->fs
.sample_id_sys_index
= INVALID_INDEX
;
6956 emit
->fs
.sample_pos_sys_index
= INVALID_INDEX
;
6958 emit
->gs
.prim_id_index
= INVALID_INDEX
;
6960 emit
->clip_dist_out_index
= INVALID_INDEX
;
6961 emit
->clip_dist_tmp_index
= INVALID_INDEX
;
6962 emit
->clip_dist_so_index
= INVALID_INDEX
;
6963 emit
->clip_vertex_out_index
= INVALID_INDEX
;
6965 if (emit
->key
.fs
.alpha_func
== SVGA3D_CMP_INVALID
) {
6966 emit
->key
.fs
.alpha_func
= SVGA3D_CMP_ALWAYS
;
6969 if (unit
== PIPE_SHADER_FRAGMENT
) {
6970 if (key
->fs
.light_twoside
) {
6971 tokens
= transform_fs_twoside(tokens
);
6973 if (key
->fs
.pstipple
) {
6974 const struct tgsi_token
*new_tokens
=
6975 transform_fs_pstipple(emit
, tokens
);
6976 if (tokens
!= shader
->tokens
) {
6977 /* free the two-sided shader tokens */
6978 tgsi_free_tokens(tokens
);
6980 tokens
= new_tokens
;
6982 if (key
->fs
.aa_point
) {
6983 tokens
= transform_fs_aapoint(tokens
, key
->fs
.aa_point_coord_index
);
6987 if (SVGA_DEBUG
& DEBUG_TGSI
) {
6988 debug_printf("#####################################\n");
6989 debug_printf("### TGSI Shader %u\n", shader
->id
);
6990 tgsi_dump(tokens
, 0);
6994 * Rescan the header if the token string is different from the one
6995 * included in the shader; otherwise, the header info is already up-to-date
6997 if (tokens
!= shader
->tokens
) {
6998 tgsi_scan_shader(tokens
, &emit
->info
);
7000 emit
->info
= shader
->info
;
7003 emit
->num_outputs
= emit
->info
.num_outputs
;
7005 if (unit
== PIPE_SHADER_FRAGMENT
) {
7006 /* Compute FS input remapping to match the output from VS/GS */
7008 svga_link_shaders(&gs
->base
.info
, &emit
->info
, &emit
->linkage
);
7011 svga_link_shaders(&vs
->base
.info
, &emit
->info
, &emit
->linkage
);
7013 } else if (unit
== PIPE_SHADER_GEOMETRY
) {
7015 svga_link_shaders(&vs
->base
.info
, &emit
->info
, &emit
->linkage
);
7018 /* Since vertex shader does not need to go through the linker to
7019 * establish the input map, we need to make sure the highest index
7020 * of input registers is set properly here.
7022 emit
->linkage
.input_map_max
= MAX2((int)emit
->linkage
.input_map_max
,
7023 emit
->info
.file_max
[TGSI_FILE_INPUT
]);
7025 determine_clipping_mode(emit
);
7027 if (unit
== PIPE_SHADER_GEOMETRY
|| unit
== PIPE_SHADER_VERTEX
) {
7028 if (shader
->stream_output
!= NULL
|| emit
->clip_mode
== CLIP_DISTANCE
) {
7029 /* if there is stream output declarations associated
7030 * with this shader or the shader writes to ClipDistance
7031 * then reserve extra registers for the non-adjusted vertex position
7032 * and the ClipDistance shadow copy
7034 emit
->vposition
.so_index
= emit
->num_outputs
++;
7036 if (emit
->clip_mode
== CLIP_DISTANCE
) {
7037 emit
->clip_dist_so_index
= emit
->num_outputs
++;
7038 if (emit
->info
.num_written_clipdistance
> 4)
7039 emit
->num_outputs
++;
7045 * Do actual shader translation.
7047 if (!emit_vgpu10_header(emit
)) {
7048 debug_printf("svga: emit VGPU10 header failed\n");
7052 if (!emit_vgpu10_instructions(emit
, tokens
)) {
7053 debug_printf("svga: emit VGPU10 instructions failed\n");
7057 if (!emit_vgpu10_tail(emit
)) {
7058 debug_printf("svga: emit VGPU10 tail failed\n");
7062 if (emit
->register_overflow
) {
7067 * Create, initialize the 'variant' object.
7069 variant
= svga_new_shader_variant(svga
, unit
);
7073 variant
->shader
= shader
;
7074 variant
->nr_tokens
= emit_get_num_tokens(emit
);
7075 variant
->tokens
= (const unsigned *)emit
->buf
;
7076 emit
->buf
= NULL
; /* buffer is no longer owed by emitter context */
7077 memcpy(&variant
->key
, key
, sizeof(*key
));
7078 variant
->id
= UTIL_BITMASK_INVALID_INDEX
;
7080 /* The extra constant starting offset starts with the number of
7081 * shader constants declared in the shader.
7083 variant
->extra_const_start
= emit
->num_shader_consts
[0];
7084 if (key
->gs
.wide_point
) {
7086 * The extra constant added in the transformed shader
7087 * for inverse viewport scale is to be supplied by the driver.
7088 * So the extra constant starting offset needs to be reduced by 1.
7090 assert(variant
->extra_const_start
> 0);
7091 variant
->extra_const_start
--;
7094 variant
->pstipple_sampler_unit
= emit
->fs
.pstipple_sampler_unit
;
7096 /* If there was exactly one write to a fragment shader output register
7097 * and it came from a constant buffer, we know all fragments will have
7098 * the same color (except for blending).
7100 variant
->constant_color_output
=
7101 emit
->constant_color_output
&& emit
->num_output_writes
== 1;
7103 /** keep track in the variant if flat interpolation is used
7104 * for any of the varyings.
7106 variant
->uses_flat_interp
= emit
->uses_flat_interp
;
7108 variant
->fs_shadow_compare_units
= emit
->fs
.shadow_compare_units
;
7110 variant
->fs_shadow_compare_units
= emit
->fs
.shadow_compare_units
;
7112 if (tokens
!= shader
->tokens
) {
7113 tgsi_free_tokens(tokens
);
7120 SVGA_STATS_TIME_POP(svga_sws(svga
));