1 /**********************************************************
2 * Copyright 1998-2013 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 **********************************************************/
27 * @file svga_tgsi_vgpu10.c
29 * TGSI -> VGPU10 shader translation.
31 * \author Mingcheng Chen
35 #include "pipe/p_compiler.h"
36 #include "pipe/p_shader_tokens.h"
37 #include "pipe/p_defines.h"
38 #include "tgsi/tgsi_build.h"
39 #include "tgsi/tgsi_dump.h"
40 #include "tgsi/tgsi_info.h"
41 #include "tgsi/tgsi_parse.h"
42 #include "tgsi/tgsi_scan.h"
43 #include "tgsi/tgsi_two_side.h"
44 #include "tgsi/tgsi_aa_point.h"
45 #include "tgsi/tgsi_util.h"
46 #include "util/u_math.h"
47 #include "util/u_memory.h"
48 #include "util/u_bitmask.h"
49 #include "util/u_debug.h"
50 #include "util/u_pstipple.h"
52 #include "svga_context.h"
53 #include "svga_debug.h"
54 #include "svga_link.h"
55 #include "svga_shader.h"
56 #include "svga_tgsi.h"
58 #include "VGPU10ShaderTokens.h"
/* Sentinel for "no register assigned yet" (never a valid register index) */
#define INVALID_INDEX 99999

/* Max internal scratch temps an instruction translation may allocate */
#define MAX_INTERNAL_TEMPS 3

/* Max TGSI system values (e.g. instance/vertex id) we can remap */
#define MAX_SYSTEM_VALUES 4

/* Device limit is expressed in dwords; four dwords per immediate vector */
#define MAX_IMMEDIATE_COUNT \
        (VGPU10_MAX_IMMEDIATE_CONSTANT_BUFFER_ELEMENT_COUNT/4)

#define MAX_TEMP_ARRAYS 64  /* Enough? */
/**
 * Clipping is complicated.  There's four different cases which we
 * handle during VS/GS shader translation:
 */
enum clipping_mode
{
   CLIP_NONE,     /**< No clipping enabled */
   CLIP_LEGACY,   /**< The shader has no clipping declarations or code but
                   * one or more user-defined clip planes are enabled.  We
                   * generate extra code to emit clip distances.
                   */
   CLIP_DISTANCE, /**< The shader already declares clip distance output
                   * registers and has code to write to them.
                   */
   CLIP_VERTEX    /**< The shader declares a clip vertex output register and
                   * has code that writes to the register.  We convert the
                   * clipvertex position into one or more clip distances.
                   */
};
90 struct svga_shader_emitter_v10
92 /* The token output buffer */
97 /* Information about the shader and state (does not change) */
98 struct svga_compile_key key
;
99 struct tgsi_shader_info info
;
102 unsigned inst_start_token
;
103 boolean discard_instruction
; /**< throw away current instruction? */
105 union tgsi_immediate_data immediates
[MAX_IMMEDIATE_COUNT
][4];
106 unsigned num_immediates
; /**< Number of immediates emitted */
107 unsigned common_immediate_pos
[8]; /**< literals for common immediates */
108 unsigned num_common_immediates
;
109 boolean immediates_emitted
;
111 unsigned num_outputs
; /**< include any extra outputs */
112 /** The first extra output is reserved for
113 * non-adjusted vertex position for
114 * stream output purpose
117 /* Temporary Registers */
118 unsigned num_shader_temps
; /**< num of temps used by original shader */
119 unsigned internal_temp_count
; /**< currently allocated internal temps */
121 unsigned start
, size
;
122 } temp_arrays
[MAX_TEMP_ARRAYS
];
123 unsigned num_temp_arrays
;
125 /** Map TGSI temp registers to VGPU10 temp array IDs and indexes */
127 unsigned arrayId
, index
;
128 } temp_map
[VGPU10_MAX_TEMPS
]; /**< arrayId, element */
130 /** Number of constants used by original shader for each constant buffer.
131 * The size should probably always match with that of svga_state.constbufs.
133 unsigned num_shader_consts
[SVGA_MAX_CONST_BUFS
];
136 unsigned num_samplers
;
138 /* Address regs (really implemented with temps) */
139 unsigned num_address_regs
;
140 unsigned address_reg_index
[MAX_VGPU10_ADDR_REGS
];
142 /* Output register usage masks */
143 ubyte output_usage_mask
[PIPE_MAX_SHADER_OUTPUTS
];
145 /* To map TGSI system value index to VGPU shader input indexes */
146 ubyte system_value_indexes
[MAX_SYSTEM_VALUES
];
149 /* vertex position scale/translation */
150 unsigned out_index
; /**< the real position output reg */
151 unsigned tmp_index
; /**< the fake/temp position output reg */
152 unsigned so_index
; /**< the non-adjusted position output reg */
153 unsigned prescale_scale_index
, prescale_trans_index
;
154 boolean need_prescale
;
157 /* For vertex shaders only */
159 /* viewport constant */
160 unsigned viewport_index
;
162 /* temp index of adjusted vertex attributes */
163 unsigned adjusted_input
[PIPE_MAX_SHADER_INPUTS
];
166 /* For fragment shaders only */
169 unsigned color_out_index
[PIPE_MAX_COLOR_BUFS
]; /**< the real color output regs */
170 unsigned color_tmp_index
; /**< fake/temp color output reg */
171 unsigned alpha_ref_index
; /**< immediate constant for alpha ref */
174 unsigned face_input_index
; /**< real fragment shader face reg (bool) */
175 unsigned face_tmp_index
; /**< temp face reg converted to -1 / +1 */
177 unsigned pstipple_sampler_unit
;
179 unsigned fragcoord_input_index
; /**< real fragment position input reg */
180 unsigned fragcoord_tmp_index
; /**< 1/w modified position temp reg */
183 /* For geometry shaders only */
185 VGPU10_PRIMITIVE prim_type
;/**< VGPU10 primitive type */
186 VGPU10_PRIMITIVE_TOPOLOGY prim_topology
; /**< VGPU10 primitive topology */
187 unsigned input_size
; /**< size of input arrays */
188 unsigned prim_id_index
; /**< primitive id register index */
189 unsigned max_out_vertices
; /**< maximum number of output vertices */
192 /* For vertex or geometry shaders */
193 enum clipping_mode clip_mode
;
194 unsigned clip_dist_out_index
; /**< clip distance output register index */
195 unsigned clip_dist_tmp_index
; /**< clip distance temporary register */
196 unsigned clip_dist_so_index
; /**< clip distance shadow copy */
198 /** Index of temporary holding the clipvertex coordinate */
199 unsigned clip_vertex_out_index
; /**< clip vertex output register index */
200 unsigned clip_vertex_tmp_index
; /**< clip vertex temporary index */
202 /* user clip plane constant slot indexes */
203 unsigned clip_plane_const
[PIPE_MAX_CLIP_PLANES
];
205 boolean uses_flat_interp
;
207 /* For all shaders: const reg index for RECT coord scaling */
208 unsigned texcoord_scale_index
[PIPE_MAX_SAMPLERS
];
210 /* For all shaders: const reg index for texture buffer size */
211 unsigned texture_buffer_size_index
[PIPE_MAX_SAMPLERS
];
213 /* VS/GS/FS Linkage info */
214 struct shader_linkage linkage
;
216 bool register_overflow
; /**< Set if we exceed a VGPU10 register limit */
221 emit_post_helpers(struct svga_shader_emitter_v10
*emit
);
224 emit_vertex(struct svga_shader_emitter_v10
*emit
,
225 const struct tgsi_full_instruction
*inst
);
227 static char err_buf
[128];
230 expand(struct svga_shader_emitter_v10
*emit
)
233 unsigned newsize
= emit
->size
* 2;
235 if (emit
->buf
!= err_buf
)
236 new_buf
= REALLOC(emit
->buf
, emit
->size
, newsize
);
240 if (new_buf
== NULL
) {
243 emit
->size
= sizeof(err_buf
);
247 emit
->size
= newsize
;
248 emit
->ptr
= new_buf
+ (emit
->ptr
- emit
->buf
);
254 * Create and initialize a new svga_shader_emitter_v10 object.
256 static struct svga_shader_emitter_v10
*
259 struct svga_shader_emitter_v10
*emit
= CALLOC(1, sizeof(*emit
));
264 /* to initialize the output buffer */
274 * Free an svga_shader_emitter_v10 object.
277 free_emitter(struct svga_shader_emitter_v10
*emit
)
280 FREE(emit
->buf
); /* will be NULL if translation succeeded */
284 static inline boolean
285 reserve(struct svga_shader_emitter_v10
*emit
,
288 while (emit
->ptr
- emit
->buf
+ nr_dwords
* sizeof(uint32
) >= emit
->size
) {
297 emit_dword(struct svga_shader_emitter_v10
*emit
, uint32 dword
)
299 if (!reserve(emit
, 1))
302 *(uint32
*)emit
->ptr
= dword
;
303 emit
->ptr
+= sizeof dword
;
308 emit_dwords(struct svga_shader_emitter_v10
*emit
,
309 const uint32
*dwords
,
312 if (!reserve(emit
, nr
))
315 memcpy(emit
->ptr
, dwords
, nr
* sizeof *dwords
);
316 emit
->ptr
+= nr
* sizeof *dwords
;
320 /** Return the number of tokens in the emitter's buffer */
322 emit_get_num_tokens(const struct svga_shader_emitter_v10
*emit
)
324 return (emit
->ptr
- emit
->buf
) / sizeof(unsigned);
329 * Check for register overflow. If we overflow we'll set an
330 * error flag. This function can be called for register declarations
331 * or use as src/dst instruction operands.
332 * \param type register type. One of VGPU10_OPERAND_TYPE_x
333 or VGPU10_OPCODE_DCL_x
334 * \param index the register index
337 check_register_index(struct svga_shader_emitter_v10
*emit
,
338 unsigned operandType
, unsigned index
)
340 bool overflow_before
= emit
->register_overflow
;
342 switch (operandType
) {
343 case VGPU10_OPERAND_TYPE_TEMP
:
344 case VGPU10_OPERAND_TYPE_INDEXABLE_TEMP
:
345 case VGPU10_OPCODE_DCL_TEMPS
:
346 if (index
>= VGPU10_MAX_TEMPS
) {
347 emit
->register_overflow
= TRUE
;
350 case VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
:
351 case VGPU10_OPCODE_DCL_CONSTANT_BUFFER
:
352 if (index
>= VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT
) {
353 emit
->register_overflow
= TRUE
;
356 case VGPU10_OPERAND_TYPE_INPUT
:
357 case VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
:
358 case VGPU10_OPCODE_DCL_INPUT
:
359 case VGPU10_OPCODE_DCL_INPUT_SGV
:
360 case VGPU10_OPCODE_DCL_INPUT_SIV
:
361 case VGPU10_OPCODE_DCL_INPUT_PS
:
362 case VGPU10_OPCODE_DCL_INPUT_PS_SGV
:
363 case VGPU10_OPCODE_DCL_INPUT_PS_SIV
:
364 if ((emit
->unit
== PIPE_SHADER_VERTEX
&&
365 index
>= VGPU10_MAX_VS_INPUTS
) ||
366 (emit
->unit
== PIPE_SHADER_GEOMETRY
&&
367 index
>= VGPU10_MAX_GS_INPUTS
) ||
368 (emit
->unit
== PIPE_SHADER_FRAGMENT
&&
369 index
>= VGPU10_MAX_FS_INPUTS
)) {
370 emit
->register_overflow
= TRUE
;
373 case VGPU10_OPERAND_TYPE_OUTPUT
:
374 case VGPU10_OPCODE_DCL_OUTPUT
:
375 case VGPU10_OPCODE_DCL_OUTPUT_SGV
:
376 case VGPU10_OPCODE_DCL_OUTPUT_SIV
:
377 if ((emit
->unit
== PIPE_SHADER_VERTEX
&&
378 index
>= VGPU10_MAX_VS_OUTPUTS
) ||
379 (emit
->unit
== PIPE_SHADER_GEOMETRY
&&
380 index
>= VGPU10_MAX_GS_OUTPUTS
) ||
381 (emit
->unit
== PIPE_SHADER_FRAGMENT
&&
382 index
>= VGPU10_MAX_FS_OUTPUTS
)) {
383 emit
->register_overflow
= TRUE
;
386 case VGPU10_OPERAND_TYPE_SAMPLER
:
387 case VGPU10_OPCODE_DCL_SAMPLER
:
388 if (index
>= VGPU10_MAX_SAMPLERS
) {
389 emit
->register_overflow
= TRUE
;
392 case VGPU10_OPERAND_TYPE_RESOURCE
:
393 case VGPU10_OPCODE_DCL_RESOURCE
:
394 if (index
>= VGPU10_MAX_RESOURCES
) {
395 emit
->register_overflow
= TRUE
;
398 case VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER
:
399 if (index
>= MAX_IMMEDIATE_COUNT
) {
400 emit
->register_overflow
= TRUE
;
408 if (emit
->register_overflow
&& !overflow_before
) {
409 debug_printf("svga: vgpu10 register overflow (reg %u, index %u)\n",
416 * Examine misc state to determine the clipping mode.
419 determine_clipping_mode(struct svga_shader_emitter_v10
*emit
)
421 if (emit
->info
.num_written_clipdistance
> 0) {
422 emit
->clip_mode
= CLIP_DISTANCE
;
424 else if (emit
->info
.writes_clipvertex
) {
425 emit
->clip_mode
= CLIP_VERTEX
;
427 else if (emit
->key
.clip_plane_enable
) {
428 emit
->clip_mode
= CLIP_LEGACY
;
431 emit
->clip_mode
= CLIP_NONE
;
437 * For clip distance register declarations and clip distance register
438 * writes we need to mask the declaration usage or instruction writemask
439 * (respectively) against the set of the really-enabled clipping planes.
441 * The piglit test spec/glsl-1.30/execution/clipping/vs-clip-distance-enables
442 * has a VS that writes to all 8 clip distance registers, but the plane enable
443 * flags are a subset of that.
445 * This function is used to apply the plane enable flags to the register
446 * declaration or instruction writemask.
448 * \param writemask the declaration usage mask or instruction writemask
449 * \param clip_reg_index which clip plane register is being declared/written.
450 * The legal values are 0 and 1 (two clip planes per
451 * register, for a total of 8 clip planes)
454 apply_clip_plane_mask(struct svga_shader_emitter_v10
*emit
,
455 unsigned writemask
, unsigned clip_reg_index
)
459 assert(clip_reg_index
< 2);
461 /* four clip planes per clip register: */
462 shift
= clip_reg_index
* 4;
463 writemask
&= ((emit
->key
.clip_plane_enable
>> shift
) & 0xf);
470 * Translate gallium shader type into VGPU10 type.
472 static VGPU10_PROGRAM_TYPE
473 translate_shader_type(unsigned type
)
476 case PIPE_SHADER_VERTEX
:
477 return VGPU10_VERTEX_SHADER
;
478 case PIPE_SHADER_GEOMETRY
:
479 return VGPU10_GEOMETRY_SHADER
;
480 case PIPE_SHADER_FRAGMENT
:
481 return VGPU10_PIXEL_SHADER
;
483 assert(!"Unexpected shader type");
484 return VGPU10_VERTEX_SHADER
;
490 * Translate a TGSI_OPCODE_x into a VGPU10_OPCODE_x
491 * Note: we only need to translate the opcodes for "simple" instructions,
492 * as seen below. All other opcodes are handled/translated specially.
494 static VGPU10_OPCODE_TYPE
495 translate_opcode(unsigned opcode
)
498 case TGSI_OPCODE_MOV
:
499 return VGPU10_OPCODE_MOV
;
500 case TGSI_OPCODE_MUL
:
501 return VGPU10_OPCODE_MUL
;
502 case TGSI_OPCODE_ADD
:
503 return VGPU10_OPCODE_ADD
;
504 case TGSI_OPCODE_DP3
:
505 return VGPU10_OPCODE_DP3
;
506 case TGSI_OPCODE_DP4
:
507 return VGPU10_OPCODE_DP4
;
508 case TGSI_OPCODE_MIN
:
509 return VGPU10_OPCODE_MIN
;
510 case TGSI_OPCODE_MAX
:
511 return VGPU10_OPCODE_MAX
;
512 case TGSI_OPCODE_MAD
:
513 return VGPU10_OPCODE_MAD
;
514 case TGSI_OPCODE_SQRT
:
515 return VGPU10_OPCODE_SQRT
;
516 case TGSI_OPCODE_FRC
:
517 return VGPU10_OPCODE_FRC
;
518 case TGSI_OPCODE_FLR
:
519 return VGPU10_OPCODE_ROUND_NI
;
520 case TGSI_OPCODE_FSEQ
:
521 return VGPU10_OPCODE_EQ
;
522 case TGSI_OPCODE_FSGE
:
523 return VGPU10_OPCODE_GE
;
524 case TGSI_OPCODE_FSNE
:
525 return VGPU10_OPCODE_NE
;
526 case TGSI_OPCODE_DDX
:
527 return VGPU10_OPCODE_DERIV_RTX
;
528 case TGSI_OPCODE_DDY
:
529 return VGPU10_OPCODE_DERIV_RTY
;
530 case TGSI_OPCODE_RET
:
531 return VGPU10_OPCODE_RET
;
532 case TGSI_OPCODE_DIV
:
533 return VGPU10_OPCODE_DIV
;
534 case TGSI_OPCODE_IDIV
:
535 return VGPU10_OPCODE_IDIV
;
536 case TGSI_OPCODE_DP2
:
537 return VGPU10_OPCODE_DP2
;
538 case TGSI_OPCODE_BRK
:
539 return VGPU10_OPCODE_BREAK
;
541 return VGPU10_OPCODE_IF
;
542 case TGSI_OPCODE_ELSE
:
543 return VGPU10_OPCODE_ELSE
;
544 case TGSI_OPCODE_ENDIF
:
545 return VGPU10_OPCODE_ENDIF
;
546 case TGSI_OPCODE_CEIL
:
547 return VGPU10_OPCODE_ROUND_PI
;
548 case TGSI_OPCODE_I2F
:
549 return VGPU10_OPCODE_ITOF
;
550 case TGSI_OPCODE_NOT
:
551 return VGPU10_OPCODE_NOT
;
552 case TGSI_OPCODE_TRUNC
:
553 return VGPU10_OPCODE_ROUND_Z
;
554 case TGSI_OPCODE_SHL
:
555 return VGPU10_OPCODE_ISHL
;
556 case TGSI_OPCODE_AND
:
557 return VGPU10_OPCODE_AND
;
559 return VGPU10_OPCODE_OR
;
560 case TGSI_OPCODE_XOR
:
561 return VGPU10_OPCODE_XOR
;
562 case TGSI_OPCODE_CONT
:
563 return VGPU10_OPCODE_CONTINUE
;
564 case TGSI_OPCODE_EMIT
:
565 return VGPU10_OPCODE_EMIT
;
566 case TGSI_OPCODE_ENDPRIM
:
567 return VGPU10_OPCODE_CUT
;
568 case TGSI_OPCODE_BGNLOOP
:
569 return VGPU10_OPCODE_LOOP
;
570 case TGSI_OPCODE_ENDLOOP
:
571 return VGPU10_OPCODE_ENDLOOP
;
572 case TGSI_OPCODE_ENDSUB
:
573 return VGPU10_OPCODE_RET
;
574 case TGSI_OPCODE_NOP
:
575 return VGPU10_OPCODE_NOP
;
576 case TGSI_OPCODE_BREAKC
:
577 return VGPU10_OPCODE_BREAKC
;
578 case TGSI_OPCODE_END
:
579 return VGPU10_OPCODE_RET
;
580 case TGSI_OPCODE_F2I
:
581 return VGPU10_OPCODE_FTOI
;
582 case TGSI_OPCODE_IMAX
:
583 return VGPU10_OPCODE_IMAX
;
584 case TGSI_OPCODE_IMIN
:
585 return VGPU10_OPCODE_IMIN
;
586 case TGSI_OPCODE_UDIV
:
587 case TGSI_OPCODE_UMOD
:
588 case TGSI_OPCODE_MOD
:
589 return VGPU10_OPCODE_UDIV
;
590 case TGSI_OPCODE_IMUL_HI
:
591 return VGPU10_OPCODE_IMUL
;
592 case TGSI_OPCODE_INEG
:
593 return VGPU10_OPCODE_INEG
;
594 case TGSI_OPCODE_ISHR
:
595 return VGPU10_OPCODE_ISHR
;
596 case TGSI_OPCODE_ISGE
:
597 return VGPU10_OPCODE_IGE
;
598 case TGSI_OPCODE_ISLT
:
599 return VGPU10_OPCODE_ILT
;
600 case TGSI_OPCODE_F2U
:
601 return VGPU10_OPCODE_FTOU
;
602 case TGSI_OPCODE_UADD
:
603 return VGPU10_OPCODE_IADD
;
604 case TGSI_OPCODE_U2F
:
605 return VGPU10_OPCODE_UTOF
;
606 case TGSI_OPCODE_UCMP
:
607 return VGPU10_OPCODE_MOVC
;
608 case TGSI_OPCODE_UMAD
:
609 return VGPU10_OPCODE_UMAD
;
610 case TGSI_OPCODE_UMAX
:
611 return VGPU10_OPCODE_UMAX
;
612 case TGSI_OPCODE_UMIN
:
613 return VGPU10_OPCODE_UMIN
;
614 case TGSI_OPCODE_UMUL
:
615 case TGSI_OPCODE_UMUL_HI
:
616 return VGPU10_OPCODE_UMUL
;
617 case TGSI_OPCODE_USEQ
:
618 return VGPU10_OPCODE_IEQ
;
619 case TGSI_OPCODE_USGE
:
620 return VGPU10_OPCODE_UGE
;
621 case TGSI_OPCODE_USHR
:
622 return VGPU10_OPCODE_USHR
;
623 case TGSI_OPCODE_USLT
:
624 return VGPU10_OPCODE_ULT
;
625 case TGSI_OPCODE_USNE
:
626 return VGPU10_OPCODE_INE
;
627 case TGSI_OPCODE_SWITCH
:
628 return VGPU10_OPCODE_SWITCH
;
629 case TGSI_OPCODE_CASE
:
630 return VGPU10_OPCODE_CASE
;
631 case TGSI_OPCODE_DEFAULT
:
632 return VGPU10_OPCODE_DEFAULT
;
633 case TGSI_OPCODE_ENDSWITCH
:
634 return VGPU10_OPCODE_ENDSWITCH
;
635 case TGSI_OPCODE_FSLT
:
636 return VGPU10_OPCODE_LT
;
637 case TGSI_OPCODE_ROUND
:
638 return VGPU10_OPCODE_ROUND_NE
;
640 assert(!"Unexpected TGSI opcode in translate_opcode()");
641 return VGPU10_OPCODE_NOP
;
647 * Translate a TGSI register file type into a VGPU10 operand type.
648 * \param array is the TGSI_FILE_TEMPORARY register an array?
650 static VGPU10_OPERAND_TYPE
651 translate_register_file(enum tgsi_file_type file
, boolean array
)
654 case TGSI_FILE_CONSTANT
:
655 return VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
;
656 case TGSI_FILE_INPUT
:
657 return VGPU10_OPERAND_TYPE_INPUT
;
658 case TGSI_FILE_OUTPUT
:
659 return VGPU10_OPERAND_TYPE_OUTPUT
;
660 case TGSI_FILE_TEMPORARY
:
661 return array
? VGPU10_OPERAND_TYPE_INDEXABLE_TEMP
662 : VGPU10_OPERAND_TYPE_TEMP
;
663 case TGSI_FILE_IMMEDIATE
:
664 /* all immediates are 32-bit values at this time so
665 * VGPU10_OPERAND_TYPE_IMMEDIATE64 is not possible at this time.
667 return VGPU10_OPERAND_TYPE_IMMEDIATE_CONSTANT_BUFFER
;
668 case TGSI_FILE_SAMPLER
:
669 return VGPU10_OPERAND_TYPE_SAMPLER
;
670 case TGSI_FILE_SYSTEM_VALUE
:
671 return VGPU10_OPERAND_TYPE_INPUT
;
673 /* XXX TODO more cases to finish */
676 assert(!"Bad tgsi register file!");
677 return VGPU10_OPERAND_TYPE_NULL
;
683 * Emit a null dst register
686 emit_null_dst_register(struct svga_shader_emitter_v10
*emit
)
688 VGPU10OperandToken0 operand
;
691 operand
.operandType
= VGPU10_OPERAND_TYPE_NULL
;
692 operand
.numComponents
= VGPU10_OPERAND_0_COMPONENT
;
694 emit_dword(emit
, operand
.value
);
699 * If the given register is a temporary, return the array ID.
703 get_temp_array_id(const struct svga_shader_emitter_v10
*emit
,
704 unsigned file
, unsigned index
)
706 if (file
== TGSI_FILE_TEMPORARY
) {
707 return emit
->temp_map
[index
].arrayId
;
716 * If the given register is a temporary, convert the index from a TGSI
717 * TEMPORARY index to a VGPU10 temp index.
720 remap_temp_index(const struct svga_shader_emitter_v10
*emit
,
721 unsigned file
, unsigned index
)
723 if (file
== TGSI_FILE_TEMPORARY
) {
724 return emit
->temp_map
[index
].index
;
733 * Setup the operand0 fields related to indexing (1D, 2D, relative, etc).
734 * Note: the operandType field must already be initialized.
736 static VGPU10OperandToken0
737 setup_operand0_indexing(struct svga_shader_emitter_v10
*emit
,
738 VGPU10OperandToken0 operand0
,
740 boolean indirect
, boolean index2D
,
741 unsigned tempArrayID
)
743 unsigned indexDim
, index0Rep
, index1Rep
= VGPU10_OPERAND_INDEX_0D
;
746 * Compute index dimensions
748 if (operand0
.operandType
== VGPU10_OPERAND_TYPE_IMMEDIATE32
||
749 operand0
.operandType
== VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
) {
750 /* there's no swizzle for in-line immediates */
751 indexDim
= VGPU10_OPERAND_INDEX_0D
;
752 assert(operand0
.selectionMode
== 0);
757 operand0
.operandType
== VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
) {
758 indexDim
= VGPU10_OPERAND_INDEX_2D
;
761 indexDim
= VGPU10_OPERAND_INDEX_1D
;
766 * Compute index representations (immediate, relative, etc).
768 if (tempArrayID
> 0) {
769 assert(file
== TGSI_FILE_TEMPORARY
);
770 /* First index is the array ID, second index is the array element */
771 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
773 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE
;
776 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
780 if (file
== TGSI_FILE_CONSTANT
) {
781 /* index[0] indicates which constant buffer while index[1] indicates
782 * the position in the constant buffer.
784 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
785 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE
;
788 /* All other register files are 1-dimensional */
789 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32_PLUS_RELATIVE
;
793 index0Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
794 index1Rep
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
797 operand0
.indexDimension
= indexDim
;
798 operand0
.index0Representation
= index0Rep
;
799 operand0
.index1Representation
= index1Rep
;
806 * Emit the operand for expressing an address register for indirect indexing.
807 * Note that the address register is really just a temp register.
808 * \param addr_reg_index which address register to use
811 emit_indirect_register(struct svga_shader_emitter_v10
*emit
,
812 unsigned addr_reg_index
)
814 unsigned tmp_reg_index
;
815 VGPU10OperandToken0 operand0
;
817 assert(addr_reg_index
< MAX_VGPU10_ADDR_REGS
);
819 tmp_reg_index
= emit
->address_reg_index
[addr_reg_index
];
821 /* operand0 is a simple temporary register, selecting one component */
823 operand0
.operandType
= VGPU10_OPERAND_TYPE_TEMP
;
824 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
825 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
826 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
827 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE
;
828 operand0
.swizzleX
= 0;
829 operand0
.swizzleY
= 1;
830 operand0
.swizzleZ
= 2;
831 operand0
.swizzleW
= 3;
833 emit_dword(emit
, operand0
.value
);
834 emit_dword(emit
, remap_temp_index(emit
, TGSI_FILE_TEMPORARY
, tmp_reg_index
));
839 * Translate the dst register of a TGSI instruction and emit VGPU10 tokens.
840 * \param emit the emitter context
841 * \param reg the TGSI dst register to translate
844 emit_dst_register(struct svga_shader_emitter_v10
*emit
,
845 const struct tgsi_full_dst_register
*reg
)
847 unsigned file
= reg
->Register
.File
;
848 unsigned index
= reg
->Register
.Index
;
849 const unsigned sem_name
= emit
->info
.output_semantic_name
[index
];
850 const unsigned sem_index
= emit
->info
.output_semantic_index
[index
];
851 unsigned writemask
= reg
->Register
.WriteMask
;
852 const unsigned indirect
= reg
->Register
.Indirect
;
853 const unsigned tempArrayId
= get_temp_array_id(emit
, file
, index
);
854 const unsigned index2d
= reg
->Register
.Dimension
;
855 VGPU10OperandToken0 operand0
;
857 if (file
== TGSI_FILE_OUTPUT
) {
858 if (emit
->unit
== PIPE_SHADER_VERTEX
||
859 emit
->unit
== PIPE_SHADER_GEOMETRY
) {
860 if (index
== emit
->vposition
.out_index
&&
861 emit
->vposition
.tmp_index
!= INVALID_INDEX
) {
862 /* replace OUTPUT[POS] with TEMP[POS]. We need to store the
863 * vertex position result in a temporary so that we can modify
864 * it in the post_helper() code.
866 file
= TGSI_FILE_TEMPORARY
;
867 index
= emit
->vposition
.tmp_index
;
869 else if (sem_name
== TGSI_SEMANTIC_CLIPDIST
&&
870 emit
->clip_dist_tmp_index
!= INVALID_INDEX
) {
871 /* replace OUTPUT[CLIPDIST] with TEMP[CLIPDIST].
872 * We store the clip distance in a temporary first, then
873 * we'll copy it to the shadow copy and to CLIPDIST with the
874 * enabled planes mask in emit_clip_distance_instructions().
876 file
= TGSI_FILE_TEMPORARY
;
877 index
= emit
->clip_dist_tmp_index
+ sem_index
;
879 else if (sem_name
== TGSI_SEMANTIC_CLIPVERTEX
&&
880 emit
->clip_vertex_tmp_index
!= INVALID_INDEX
) {
881 /* replace the CLIPVERTEX output register with a temporary */
882 assert(emit
->clip_mode
== CLIP_VERTEX
);
883 assert(sem_index
== 0);
884 file
= TGSI_FILE_TEMPORARY
;
885 index
= emit
->clip_vertex_tmp_index
;
888 else if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
889 if (sem_name
== TGSI_SEMANTIC_POSITION
) {
890 /* Fragment depth output register */
892 operand0
.operandType
= VGPU10_OPERAND_TYPE_OUTPUT_DEPTH
;
893 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_0D
;
894 operand0
.numComponents
= VGPU10_OPERAND_1_COMPONENT
;
895 emit_dword(emit
, operand0
.value
);
898 else if (index
== emit
->fs
.color_out_index
[0] &&
899 emit
->fs
.color_tmp_index
!= INVALID_INDEX
) {
900 /* replace OUTPUT[COLOR] with TEMP[COLOR]. We need to store the
901 * fragment color result in a temporary so that we can read it
902 * it in the post_helper() code.
904 file
= TGSI_FILE_TEMPORARY
;
905 index
= emit
->fs
.color_tmp_index
;
908 /* Typically, for fragment shaders, the output register index
909 * matches the color semantic index. But not when we write to
910 * the fragment depth register. In that case, OUT[0] will be
911 * fragdepth and OUT[1] will be the 0th color output. We need
912 * to use the semantic index for color outputs.
914 assert(sem_name
== TGSI_SEMANTIC_COLOR
);
915 index
= emit
->info
.output_semantic_index
[index
];
920 /* init operand tokens to all zero */
923 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
925 /* the operand has a writemask */
926 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_MASK_MODE
;
928 /* Which of the four dest components to write to. Note that we can use a
929 * simple assignment here since TGSI writemasks match VGPU10 writemasks.
931 STATIC_ASSERT(TGSI_WRITEMASK_X
== VGPU10_OPERAND_4_COMPONENT_MASK_X
);
932 operand0
.mask
= writemask
;
934 /* translate TGSI register file type to VGPU10 operand type */
935 operand0
.operandType
= translate_register_file(file
, tempArrayId
> 0);
937 check_register_index(emit
, operand0
.operandType
, index
);
939 operand0
= setup_operand0_indexing(emit
, operand0
, file
, indirect
,
940 index2d
, tempArrayId
);
943 emit_dword(emit
, operand0
.value
);
944 if (tempArrayId
> 0) {
945 emit_dword(emit
, tempArrayId
);
948 emit_dword(emit
, remap_temp_index(emit
, file
, index
));
951 emit_indirect_register(emit
, reg
->Indirect
.Index
);
957 * Translate a src register of a TGSI instruction and emit VGPU10 tokens.
960 emit_src_register(struct svga_shader_emitter_v10
*emit
,
961 const struct tgsi_full_src_register
*reg
)
963 unsigned file
= reg
->Register
.File
;
964 unsigned index
= reg
->Register
.Index
;
965 const unsigned indirect
= reg
->Register
.Indirect
;
966 const unsigned tempArrayId
= get_temp_array_id(emit
, file
, index
);
967 const unsigned index2d
= reg
->Register
.Dimension
;
968 const unsigned swizzleX
= reg
->Register
.SwizzleX
;
969 const unsigned swizzleY
= reg
->Register
.SwizzleY
;
970 const unsigned swizzleZ
= reg
->Register
.SwizzleZ
;
971 const unsigned swizzleW
= reg
->Register
.SwizzleW
;
972 const unsigned absolute
= reg
->Register
.Absolute
;
973 const unsigned negate
= reg
->Register
.Negate
;
974 bool is_prim_id
= FALSE
;
976 VGPU10OperandToken0 operand0
;
977 VGPU10OperandToken1 operand1
;
979 if (emit
->unit
== PIPE_SHADER_FRAGMENT
&&
980 file
== TGSI_FILE_INPUT
) {
981 if (index
== emit
->fs
.face_input_index
) {
982 /* Replace INPUT[FACE] with TEMP[FACE] */
983 file
= TGSI_FILE_TEMPORARY
;
984 index
= emit
->fs
.face_tmp_index
;
986 else if (index
== emit
->fs
.fragcoord_input_index
) {
987 /* Replace INPUT[POSITION] with TEMP[POSITION] */
988 file
= TGSI_FILE_TEMPORARY
;
989 index
= emit
->fs
.fragcoord_tmp_index
;
992 /* We remap fragment shader inputs to that FS input indexes
993 * match up with VS/GS output indexes.
995 index
= emit
->linkage
.input_map
[index
];
998 else if (emit
->unit
== PIPE_SHADER_GEOMETRY
&&
999 file
== TGSI_FILE_INPUT
) {
1000 is_prim_id
= (index
== emit
->gs
.prim_id_index
);
1001 index
= emit
->linkage
.input_map
[index
];
1003 else if (emit
->unit
== PIPE_SHADER_VERTEX
) {
1004 if (file
== TGSI_FILE_INPUT
) {
1005 /* if input is adjusted... */
1006 if ((emit
->key
.vs
.adjust_attrib_w_1
|
1007 emit
->key
.vs
.adjust_attrib_itof
|
1008 emit
->key
.vs
.adjust_attrib_utof
|
1009 emit
->key
.vs
.attrib_is_bgra
|
1010 emit
->key
.vs
.attrib_puint_to_snorm
|
1011 emit
->key
.vs
.attrib_puint_to_uscaled
|
1012 emit
->key
.vs
.attrib_puint_to_sscaled
) & (1 << index
)) {
1013 file
= TGSI_FILE_TEMPORARY
;
1014 index
= emit
->vs
.adjusted_input
[index
];
1017 else if (file
== TGSI_FILE_SYSTEM_VALUE
) {
1018 assert(index
< Elements(emit
->system_value_indexes
));
1019 index
= emit
->system_value_indexes
[index
];
1023 operand0
.value
= operand1
.value
= 0;
1026 operand0
.numComponents
= VGPU10_OPERAND_0_COMPONENT
;
1027 operand0
.operandType
= VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
;
1030 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
1031 operand0
.operandType
= translate_register_file(file
, tempArrayId
> 0);
1034 operand0
= setup_operand0_indexing(emit
, operand0
, file
, indirect
,
1035 index2d
, tempArrayId
);
1037 if (operand0
.operandType
!= VGPU10_OPERAND_TYPE_IMMEDIATE32
&&
1038 operand0
.operandType
!= VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
) {
1039 /* there's no swizzle for in-line immediates */
1040 if (swizzleX
== swizzleY
&&
1041 swizzleX
== swizzleZ
&&
1042 swizzleX
== swizzleW
) {
1043 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE
;
1046 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE
;
1049 operand0
.swizzleX
= swizzleX
;
1050 operand0
.swizzleY
= swizzleY
;
1051 operand0
.swizzleZ
= swizzleZ
;
1052 operand0
.swizzleW
= swizzleW
;
1054 if (absolute
|| negate
) {
1055 operand0
.extended
= 1;
1056 operand1
.extendedOperandType
= VGPU10_EXTENDED_OPERAND_MODIFIER
;
1057 if (absolute
&& !negate
)
1058 operand1
.operandModifier
= VGPU10_OPERAND_MODIFIER_ABS
;
1059 if (!absolute
&& negate
)
1060 operand1
.operandModifier
= VGPU10_OPERAND_MODIFIER_NEG
;
1061 if (absolute
&& negate
)
1062 operand1
.operandModifier
= VGPU10_OPERAND_MODIFIER_ABSNEG
;
1066 /* Emit the operand tokens */
1067 emit_dword(emit
, operand0
.value
);
1068 if (operand0
.extended
)
1069 emit_dword(emit
, operand1
.value
);
1071 if (operand0
.operandType
== VGPU10_OPERAND_TYPE_IMMEDIATE32
) {
1072 /* Emit the four float/int in-line immediate values */
1074 assert(index
< Elements(emit
->immediates
));
1075 assert(file
== TGSI_FILE_IMMEDIATE
);
1076 assert(swizzleX
< 4);
1077 assert(swizzleY
< 4);
1078 assert(swizzleZ
< 4);
1079 assert(swizzleW
< 4);
1080 c
= (unsigned *) emit
->immediates
[index
];
1081 emit_dword(emit
, c
[swizzleX
]);
1082 emit_dword(emit
, c
[swizzleY
]);
1083 emit_dword(emit
, c
[swizzleZ
]);
1084 emit_dword(emit
, c
[swizzleW
]);
1086 else if (operand0
.indexDimension
>= VGPU10_OPERAND_INDEX_1D
) {
1087 /* Emit the register index(es) */
1089 operand0
.operandType
== VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
) {
1090 emit_dword(emit
, reg
->Dimension
.Index
);
1093 if (tempArrayId
> 0) {
1094 emit_dword(emit
, tempArrayId
);
1097 emit_dword(emit
, remap_temp_index(emit
, file
, index
));
1100 emit_indirect_register(emit
, reg
->Indirect
.Index
);
1107 * Emit a resource operand (for use with a SAMPLE instruction).
1110 emit_resource_register(struct svga_shader_emitter_v10
*emit
,
1111 unsigned resource_number
)
1113 VGPU10OperandToken0 operand0
;
1115 check_register_index(emit
, VGPU10_OPERAND_TYPE_RESOURCE
, resource_number
);
1120 operand0
.operandType
= VGPU10_OPERAND_TYPE_RESOURCE
;
1121 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
1122 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
1123 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE
;
1124 operand0
.swizzleX
= VGPU10_COMPONENT_X
;
1125 operand0
.swizzleY
= VGPU10_COMPONENT_Y
;
1126 operand0
.swizzleZ
= VGPU10_COMPONENT_Z
;
1127 operand0
.swizzleW
= VGPU10_COMPONENT_W
;
1129 emit_dword(emit
, operand0
.value
);
1130 emit_dword(emit
, resource_number
);
1135 * Emit a sampler operand (for use with a SAMPLE instruction).
1138 emit_sampler_register(struct svga_shader_emitter_v10
*emit
,
1139 unsigned sampler_number
)
1141 VGPU10OperandToken0 operand0
;
1143 check_register_index(emit
, VGPU10_OPERAND_TYPE_SAMPLER
, sampler_number
);
1148 operand0
.operandType
= VGPU10_OPERAND_TYPE_SAMPLER
;
1149 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
1151 emit_dword(emit
, operand0
.value
);
1152 emit_dword(emit
, sampler_number
);
1157 * Emit an operand which reads the IS_FRONT_FACING register.
1160 emit_face_register(struct svga_shader_emitter_v10
*emit
)
1162 VGPU10OperandToken0 operand0
;
1163 unsigned index
= emit
->linkage
.input_map
[emit
->fs
.face_input_index
];
1168 operand0
.operandType
= VGPU10_OPERAND_TYPE_INPUT
;
1169 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
1170 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SELECT_1_MODE
;
1171 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
1173 operand0
.swizzleX
= VGPU10_COMPONENT_X
;
1174 operand0
.swizzleY
= VGPU10_COMPONENT_X
;
1175 operand0
.swizzleZ
= VGPU10_COMPONENT_X
;
1176 operand0
.swizzleW
= VGPU10_COMPONENT_X
;
1178 emit_dword(emit
, operand0
.value
);
1179 emit_dword(emit
, index
);
1184 * Emit the token for a VGPU10 opcode.
1185 * \param saturate clamp result to [0,1]?
1188 emit_opcode(struct svga_shader_emitter_v10
*emit
,
1189 unsigned vgpu10_opcode
, boolean saturate
)
1191 VGPU10OpcodeToken0 token0
;
1193 token0
.value
= 0; /* init all fields to zero */
1194 token0
.opcodeType
= vgpu10_opcode
;
1195 token0
.instructionLength
= 0; /* Filled in by end_emit_instruction() */
1196 token0
.saturate
= saturate
;
1198 emit_dword(emit
, token0
.value
);
1203 * Emit the token for a VGPU10 resinfo instruction.
1204 * \param modifier return type modifier, _uint or _rcpFloat.
1205 * TODO: We may want to remove this parameter if it will
1206 * only ever be used as _uint.
1209 emit_opcode_resinfo(struct svga_shader_emitter_v10
*emit
,
1210 VGPU10_RESINFO_RETURN_TYPE modifier
)
1212 VGPU10OpcodeToken0 token0
;
1214 token0
.value
= 0; /* init all fields to zero */
1215 token0
.opcodeType
= VGPU10_OPCODE_RESINFO
;
1216 token0
.instructionLength
= 0; /* Filled in by end_emit_instruction() */
1217 token0
.resinfoReturnType
= modifier
;
1219 emit_dword(emit
, token0
.value
);
1224 * Emit opcode tokens for a texture sample instruction. Texture instructions
1225 * can be rather complicated (texel offsets, etc) so we have this specialized
1229 emit_sample_opcode(struct svga_shader_emitter_v10
*emit
,
1230 unsigned vgpu10_opcode
, boolean saturate
,
1231 const int offsets
[3])
1233 VGPU10OpcodeToken0 token0
;
1234 VGPU10OpcodeToken1 token1
;
1236 token0
.value
= 0; /* init all fields to zero */
1237 token0
.opcodeType
= vgpu10_opcode
;
1238 token0
.instructionLength
= 0; /* Filled in by end_emit_instruction() */
1239 token0
.saturate
= saturate
;
1241 if (offsets
[0] || offsets
[1] || offsets
[2]) {
1242 assert(offsets
[0] >= VGPU10_MIN_TEXEL_FETCH_OFFSET
);
1243 assert(offsets
[1] >= VGPU10_MIN_TEXEL_FETCH_OFFSET
);
1244 assert(offsets
[2] >= VGPU10_MIN_TEXEL_FETCH_OFFSET
);
1245 assert(offsets
[0] <= VGPU10_MAX_TEXEL_FETCH_OFFSET
);
1246 assert(offsets
[1] <= VGPU10_MAX_TEXEL_FETCH_OFFSET
);
1247 assert(offsets
[2] <= VGPU10_MAX_TEXEL_FETCH_OFFSET
);
1249 token0
.extended
= 1;
1251 token1
.opcodeType
= VGPU10_EXTENDED_OPCODE_SAMPLE_CONTROLS
;
1252 token1
.offsetU
= offsets
[0];
1253 token1
.offsetV
= offsets
[1];
1254 token1
.offsetW
= offsets
[2];
1257 emit_dword(emit
, token0
.value
);
1258 if (token0
.extended
) {
1259 emit_dword(emit
, token1
.value
);
1265 * Emit a DISCARD opcode token.
1266 * If nonzero is set, we'll discard the fragment if the X component is not 0.
1267 * Otherwise, we'll discard the fragment if the X component is 0.
1270 emit_discard_opcode(struct svga_shader_emitter_v10
*emit
, boolean nonzero
)
1272 VGPU10OpcodeToken0 opcode0
;
1275 opcode0
.opcodeType
= VGPU10_OPCODE_DISCARD
;
1277 opcode0
.testBoolean
= VGPU10_INSTRUCTION_TEST_NONZERO
;
1279 emit_dword(emit
, opcode0
.value
);
1284 * We need to call this before we begin emitting a VGPU10 instruction.
1287 begin_emit_instruction(struct svga_shader_emitter_v10
*emit
)
1289 assert(emit
->inst_start_token
== 0);
1290 /* Save location of the instruction's VGPU10OpcodeToken0 token.
1291 * Note, we can't save a pointer because it would become invalid if
1292 * we have to realloc the output buffer.
1294 emit
->inst_start_token
= emit_get_num_tokens(emit
);
1299 * We need to call this after we emit the last token of a VGPU10 instruction.
1300 * This function patches in the opcode token's instructionLength field.
1303 end_emit_instruction(struct svga_shader_emitter_v10
*emit
)
1305 VGPU10OpcodeToken0
*tokens
= (VGPU10OpcodeToken0
*) emit
->buf
;
1306 unsigned inst_length
;
1308 assert(emit
->inst_start_token
> 0);
1310 if (emit
->discard_instruction
) {
1311 /* Back up the emit->ptr to where this instruction started so
1312 * that we discard the current instruction.
1314 emit
->ptr
= (char *) (tokens
+ emit
->inst_start_token
);
1317 /* Compute instruction length and patch that into the start of
1320 inst_length
= emit_get_num_tokens(emit
) - emit
->inst_start_token
;
1322 assert(inst_length
> 0);
1324 tokens
[emit
->inst_start_token
].instructionLength
= inst_length
;
1327 emit
->inst_start_token
= 0; /* reset to zero for error checking */
1328 emit
->discard_instruction
= FALSE
;
1333 * Return index for a free temporary register.
1336 get_temp_index(struct svga_shader_emitter_v10
*emit
)
1338 assert(emit
->internal_temp_count
< MAX_INTERNAL_TEMPS
);
1339 return emit
->num_shader_temps
+ emit
->internal_temp_count
++;
1344 * Release the temporaries which were generated by get_temp_index().
1347 free_temp_indexes(struct svga_shader_emitter_v10
*emit
)
1349 emit
->internal_temp_count
= 0;
1354 * Create a tgsi_full_src_register.
1356 static struct tgsi_full_src_register
1357 make_src_reg(unsigned file
, unsigned index
)
1359 struct tgsi_full_src_register reg
;
1361 memset(®
, 0, sizeof(reg
));
1362 reg
.Register
.File
= file
;
1363 reg
.Register
.Index
= index
;
1364 reg
.Register
.SwizzleX
= TGSI_SWIZZLE_X
;
1365 reg
.Register
.SwizzleY
= TGSI_SWIZZLE_Y
;
1366 reg
.Register
.SwizzleZ
= TGSI_SWIZZLE_Z
;
1367 reg
.Register
.SwizzleW
= TGSI_SWIZZLE_W
;
1373 * Create a tgsi_full_src_register for a temporary.
1375 static struct tgsi_full_src_register
1376 make_src_temp_reg(unsigned index
)
1378 return make_src_reg(TGSI_FILE_TEMPORARY
, index
);
1383 * Create a tgsi_full_src_register for a constant.
1385 static struct tgsi_full_src_register
1386 make_src_const_reg(unsigned index
)
1388 return make_src_reg(TGSI_FILE_CONSTANT
, index
);
1393 * Create a tgsi_full_src_register for an immediate constant.
1395 static struct tgsi_full_src_register
1396 make_src_immediate_reg(unsigned index
)
1398 return make_src_reg(TGSI_FILE_IMMEDIATE
, index
);
1403 * Create a tgsi_full_dst_register.
1405 static struct tgsi_full_dst_register
1406 make_dst_reg(unsigned file
, unsigned index
)
1408 struct tgsi_full_dst_register reg
;
1410 memset(®
, 0, sizeof(reg
));
1411 reg
.Register
.File
= file
;
1412 reg
.Register
.Index
= index
;
1413 reg
.Register
.WriteMask
= TGSI_WRITEMASK_XYZW
;
1419 * Create a tgsi_full_dst_register for a temporary.
1421 static struct tgsi_full_dst_register
1422 make_dst_temp_reg(unsigned index
)
1424 return make_dst_reg(TGSI_FILE_TEMPORARY
, index
);
1429 * Create a tgsi_full_dst_register for an output.
1431 static struct tgsi_full_dst_register
1432 make_dst_output_reg(unsigned index
)
1434 return make_dst_reg(TGSI_FILE_OUTPUT
, index
);
1439 * Create negated tgsi_full_src_register.
1441 static struct tgsi_full_src_register
1442 negate_src(const struct tgsi_full_src_register
*reg
)
1444 struct tgsi_full_src_register neg
= *reg
;
1445 neg
.Register
.Negate
= !reg
->Register
.Negate
;
1450 * Create absolute value of a tgsi_full_src_register.
1452 static struct tgsi_full_src_register
1453 absolute_src(const struct tgsi_full_src_register
*reg
)
1455 struct tgsi_full_src_register absolute
= *reg
;
1456 absolute
.Register
.Absolute
= 1;
1461 /** Return the named swizzle term from the src register */
1462 static inline unsigned
1463 get_swizzle(const struct tgsi_full_src_register
*reg
, unsigned term
)
1466 case TGSI_SWIZZLE_X
:
1467 return reg
->Register
.SwizzleX
;
1468 case TGSI_SWIZZLE_Y
:
1469 return reg
->Register
.SwizzleY
;
1470 case TGSI_SWIZZLE_Z
:
1471 return reg
->Register
.SwizzleZ
;
1472 case TGSI_SWIZZLE_W
:
1473 return reg
->Register
.SwizzleW
;
1475 assert(!"Bad swizzle");
1476 return TGSI_SWIZZLE_X
;
1482 * Create swizzled tgsi_full_src_register.
1484 static struct tgsi_full_src_register
1485 swizzle_src(const struct tgsi_full_src_register
*reg
,
1486 unsigned swizzleX
, unsigned swizzleY
,
1487 unsigned swizzleZ
, unsigned swizzleW
)
1489 struct tgsi_full_src_register swizzled
= *reg
;
1490 /* Note: we swizzle the current swizzle */
1491 swizzled
.Register
.SwizzleX
= get_swizzle(reg
, swizzleX
);
1492 swizzled
.Register
.SwizzleY
= get_swizzle(reg
, swizzleY
);
1493 swizzled
.Register
.SwizzleZ
= get_swizzle(reg
, swizzleZ
);
1494 swizzled
.Register
.SwizzleW
= get_swizzle(reg
, swizzleW
);
1500 * Create swizzled tgsi_full_src_register where all the swizzle
1501 * terms are the same.
1503 static struct tgsi_full_src_register
1504 scalar_src(const struct tgsi_full_src_register
*reg
, unsigned swizzle
)
1506 struct tgsi_full_src_register swizzled
= *reg
;
1507 /* Note: we swizzle the current swizzle */
1508 swizzled
.Register
.SwizzleX
=
1509 swizzled
.Register
.SwizzleY
=
1510 swizzled
.Register
.SwizzleZ
=
1511 swizzled
.Register
.SwizzleW
= get_swizzle(reg
, swizzle
);
1517 * Create new tgsi_full_dst_register with writemask.
1518 * \param mask bitmask of TGSI_WRITEMASK_[XYZW]
1520 static struct tgsi_full_dst_register
1521 writemask_dst(const struct tgsi_full_dst_register
*reg
, unsigned mask
)
1523 struct tgsi_full_dst_register masked
= *reg
;
1524 masked
.Register
.WriteMask
= mask
;
1530 * Check if the register's swizzle is XXXX, YYYY, ZZZZ, or WWWW.
1533 same_swizzle_terms(const struct tgsi_full_src_register
*reg
)
1535 return (reg
->Register
.SwizzleX
== reg
->Register
.SwizzleY
&&
1536 reg
->Register
.SwizzleY
== reg
->Register
.SwizzleZ
&&
1537 reg
->Register
.SwizzleZ
== reg
->Register
.SwizzleW
);
1542 * Search the vector for the value 'x' and return its position.
1545 find_imm_in_vec4(const union tgsi_immediate_data vec
[4],
1546 union tgsi_immediate_data x
)
1549 for (i
= 0; i
< 4; i
++) {
1550 if (vec
[i
].Int
== x
.Int
)
1558 * Helper used by make_immediate_reg(), make_immediate_reg_4().
1561 find_immediate(struct svga_shader_emitter_v10
*emit
,
1562 union tgsi_immediate_data x
, unsigned startIndex
)
1564 const unsigned endIndex
= emit
->num_immediates
;
1567 assert(emit
->immediates_emitted
);
1569 /* Search immediates for x, y, z, w */
1570 for (i
= startIndex
; i
< endIndex
; i
++) {
1571 if (x
.Int
== emit
->immediates
[i
][0].Int
||
1572 x
.Int
== emit
->immediates
[i
][1].Int
||
1573 x
.Int
== emit
->immediates
[i
][2].Int
||
1574 x
.Int
== emit
->immediates
[i
][3].Int
) {
1578 /* Should never try to use an immediate value that wasn't pre-declared */
1579 assert(!"find_immediate() failed!");
1585 * Return a tgsi_full_src_register for an immediate/literal
1586 * union tgsi_immediate_data[4] value.
1587 * Note: the values must have been previously declared/allocated in
1588 * emit_pre_helpers(). And, all of x,y,z,w must be located in the same
1591 static struct tgsi_full_src_register
1592 make_immediate_reg_4(struct svga_shader_emitter_v10
*emit
,
1593 const union tgsi_immediate_data imm
[4])
1595 struct tgsi_full_src_register reg
;
1598 for (i
= 0; i
< emit
->num_common_immediates
; i
++) {
1599 /* search for first component value */
1600 int immpos
= find_immediate(emit
, imm
[0], i
);
1603 assert(immpos
>= 0);
1605 /* find remaining components within the immediate vector */
1606 x
= find_imm_in_vec4(emit
->immediates
[immpos
], imm
[0]);
1607 y
= find_imm_in_vec4(emit
->immediates
[immpos
], imm
[1]);
1608 z
= find_imm_in_vec4(emit
->immediates
[immpos
], imm
[2]);
1609 w
= find_imm_in_vec4(emit
->immediates
[immpos
], imm
[3]);
1611 if (x
>=0 && y
>= 0 && z
>= 0 && w
>= 0) {
1612 /* found them all */
1613 memset(®
, 0, sizeof(reg
));
1614 reg
.Register
.File
= TGSI_FILE_IMMEDIATE
;
1615 reg
.Register
.Index
= immpos
;
1616 reg
.Register
.SwizzleX
= x
;
1617 reg
.Register
.SwizzleY
= y
;
1618 reg
.Register
.SwizzleZ
= z
;
1619 reg
.Register
.SwizzleW
= w
;
1622 /* else, keep searching */
1625 assert(!"Failed to find immediate register!");
1627 /* Just return IMM[0].xxxx */
1628 memset(®
, 0, sizeof(reg
));
1629 reg
.Register
.File
= TGSI_FILE_IMMEDIATE
;
1635 * Return a tgsi_full_src_register for an immediate/literal
1636 * union tgsi_immediate_data value of the form {value, value, value, value}.
1637 * \sa make_immediate_reg_4() regarding allowed values.
1639 static struct tgsi_full_src_register
1640 make_immediate_reg(struct svga_shader_emitter_v10
*emit
,
1641 union tgsi_immediate_data value
)
1643 struct tgsi_full_src_register reg
;
1644 int immpos
= find_immediate(emit
, value
, 0);
1646 assert(immpos
>= 0);
1648 memset(®
, 0, sizeof(reg
));
1649 reg
.Register
.File
= TGSI_FILE_IMMEDIATE
;
1650 reg
.Register
.Index
= immpos
;
1651 reg
.Register
.SwizzleX
=
1652 reg
.Register
.SwizzleY
=
1653 reg
.Register
.SwizzleZ
=
1654 reg
.Register
.SwizzleW
= find_imm_in_vec4(emit
->immediates
[immpos
], value
);
1661 * Return a tgsi_full_src_register for an immediate/literal float[4] value.
1662 * \sa make_immediate_reg_4() regarding allowed values.
1664 static struct tgsi_full_src_register
1665 make_immediate_reg_float4(struct svga_shader_emitter_v10
*emit
,
1666 float x
, float y
, float z
, float w
)
1668 union tgsi_immediate_data imm
[4];
1673 return make_immediate_reg_4(emit
, imm
);
1678 * Return a tgsi_full_src_register for an immediate/literal float value
1679 * of the form {value, value, value, value}.
1680 * \sa make_immediate_reg_4() regarding allowed values.
1682 static struct tgsi_full_src_register
1683 make_immediate_reg_float(struct svga_shader_emitter_v10
*emit
, float value
)
1685 union tgsi_immediate_data imm
;
1687 return make_immediate_reg(emit
, imm
);
1692 * Return a tgsi_full_src_register for an immediate/literal int[4] vector.
1694 static struct tgsi_full_src_register
1695 make_immediate_reg_int4(struct svga_shader_emitter_v10
*emit
,
1696 int x
, int y
, int z
, int w
)
1698 union tgsi_immediate_data imm
[4];
1703 return make_immediate_reg_4(emit
, imm
);
1708 * Return a tgsi_full_src_register for an immediate/literal int value
1709 * of the form {value, value, value, value}.
1710 * \sa make_immediate_reg_4() regarding allowed values.
1712 static struct tgsi_full_src_register
1713 make_immediate_reg_int(struct svga_shader_emitter_v10
*emit
, int value
)
1715 union tgsi_immediate_data imm
;
1717 return make_immediate_reg(emit
, imm
);
1722 * Allocate space for a union tgsi_immediate_data[4] immediate.
1723 * \return the index/position of the immediate.
1726 alloc_immediate_4(struct svga_shader_emitter_v10
*emit
,
1727 const union tgsi_immediate_data imm
[4])
1729 unsigned n
= emit
->num_immediates
++;
1730 assert(!emit
->immediates_emitted
);
1731 assert(n
< Elements(emit
->immediates
));
1732 emit
->immediates
[n
][0] = imm
[0];
1733 emit
->immediates
[n
][1] = imm
[1];
1734 emit
->immediates
[n
][2] = imm
[2];
1735 emit
->immediates
[n
][3] = imm
[3];
1741 * Allocate space for a float[4] immediate.
1742 * \return the index/position of the immediate.
1745 alloc_immediate_float4(struct svga_shader_emitter_v10
*emit
,
1746 float x
, float y
, float z
, float w
)
1748 union tgsi_immediate_data imm
[4];
1753 return alloc_immediate_4(emit
, imm
);
1758 * Allocate space for a int[4] immediate.
1759 * \return the index/position of the immediate.
1762 alloc_immediate_int4(struct svga_shader_emitter_v10
*emit
,
1763 int x
, int y
, int z
, int w
)
1765 union tgsi_immediate_data imm
[4];
1770 return alloc_immediate_4(emit
, imm
);
1775 * Allocate a shader input to store a system value.
1778 alloc_system_value_index(struct svga_shader_emitter_v10
*emit
, unsigned index
)
1780 const unsigned n
= emit
->info
.num_inputs
+ index
;
1781 assert(index
< Elements(emit
->system_value_indexes
));
1782 emit
->system_value_indexes
[index
] = n
;
1788 * Translate a TGSI immediate value (union tgsi_immediate_data[4]) to VGPU10.
1791 emit_vgpu10_immediate(struct svga_shader_emitter_v10
*emit
,
1792 const struct tgsi_full_immediate
*imm
)
1794 /* We don't actually emit any code here. We just save the
1795 * immediate values and emit them later.
1797 alloc_immediate_4(emit
, imm
->u
);
1803 * Emit a VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER block
1804 * containing all the immediate values previously allocated
1805 * with alloc_immediate_4().
1808 emit_vgpu10_immediates_block(struct svga_shader_emitter_v10
*emit
)
1810 VGPU10OpcodeToken0 token
;
1812 assert(!emit
->immediates_emitted
);
1815 token
.opcodeType
= VGPU10_OPCODE_CUSTOMDATA
;
1816 token
.customDataClass
= VGPU10_CUSTOMDATA_DCL_IMMEDIATE_CONSTANT_BUFFER
;
1818 /* Note: no begin/end_emit_instruction() calls */
1819 emit_dword(emit
, token
.value
);
1820 emit_dword(emit
, 2 + 4 * emit
->num_immediates
);
1821 emit_dwords(emit
, (unsigned *) emit
->immediates
, 4 * emit
->num_immediates
);
1823 emit
->immediates_emitted
= TRUE
;
1830 * Translate a fragment shader's TGSI_INTERPOLATE_x mode to a vgpu10
1831 * interpolation mode.
1832 * \return a VGPU10_INTERPOLATION_x value
1835 translate_interpolation(const struct svga_shader_emitter_v10
*emit
,
1836 unsigned interp
, unsigned interpolate_loc
)
1838 if (interp
== TGSI_INTERPOLATE_COLOR
) {
1839 interp
= emit
->key
.fs
.flatshade
?
1840 TGSI_INTERPOLATE_CONSTANT
: TGSI_INTERPOLATE_PERSPECTIVE
;
1844 case TGSI_INTERPOLATE_CONSTANT
:
1845 return VGPU10_INTERPOLATION_CONSTANT
;
1846 case TGSI_INTERPOLATE_LINEAR
:
1847 return interpolate_loc
== TGSI_INTERPOLATE_LOC_CENTROID
?
1848 VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID
:
1849 VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE
;
1850 case TGSI_INTERPOLATE_PERSPECTIVE
:
1851 return interpolate_loc
== TGSI_INTERPOLATE_LOC_CENTROID
?
1852 VGPU10_INTERPOLATION_LINEAR_CENTROID
:
1853 VGPU10_INTERPOLATION_LINEAR
;
1855 assert(!"Unexpected interpolation mode");
1856 return VGPU10_INTERPOLATION_CONSTANT
;
1862 * Translate a TGSI property to VGPU10.
1863 * Don't emit any instructions yet, only need to gather the primitive property information.
1864 * The output primitive topology might be changed later. The final property instructions
1865 * will be emitted as part of the pre-helper code.
1868 emit_vgpu10_property(struct svga_shader_emitter_v10
*emit
,
1869 const struct tgsi_full_property
*prop
)
1871 static const VGPU10_PRIMITIVE primType
[] = {
1872 VGPU10_PRIMITIVE_POINT
, /* PIPE_PRIM_POINTS */
1873 VGPU10_PRIMITIVE_LINE
, /* PIPE_PRIM_LINES */
1874 VGPU10_PRIMITIVE_LINE
, /* PIPE_PRIM_LINE_LOOP */
1875 VGPU10_PRIMITIVE_LINE
, /* PIPE_PRIM_LINE_STRIP */
1876 VGPU10_PRIMITIVE_TRIANGLE
, /* PIPE_PRIM_TRIANGLES */
1877 VGPU10_PRIMITIVE_TRIANGLE
, /* PIPE_PRIM_TRIANGLE_STRIP */
1878 VGPU10_PRIMITIVE_TRIANGLE
, /* PIPE_PRIM_TRIANGLE_FAN */
1879 VGPU10_PRIMITIVE_UNDEFINED
, /* PIPE_PRIM_QUADS */
1880 VGPU10_PRIMITIVE_UNDEFINED
, /* PIPE_PRIM_QUAD_STRIP */
1881 VGPU10_PRIMITIVE_UNDEFINED
, /* PIPE_PRIM_POLYGON */
1882 VGPU10_PRIMITIVE_LINE_ADJ
, /* PIPE_PRIM_LINES_ADJACENCY */
1883 VGPU10_PRIMITIVE_LINE_ADJ
, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
1884 VGPU10_PRIMITIVE_TRIANGLE_ADJ
, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
1885 VGPU10_PRIMITIVE_TRIANGLE_ADJ
/* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
1888 static const VGPU10_PRIMITIVE_TOPOLOGY primTopology
[] = {
1889 VGPU10_PRIMITIVE_TOPOLOGY_POINTLIST
, /* PIPE_PRIM_POINTS */
1890 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST
, /* PIPE_PRIM_LINES */
1891 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST
, /* PIPE_PRIM_LINE_LOOP */
1892 VGPU10_PRIMITIVE_TOPOLOGY_LINESTRIP
, /* PIPE_PRIM_LINE_STRIP */
1893 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST
, /* PIPE_PRIM_TRIANGLES */
1894 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP
, /* PIPE_PRIM_TRIANGLE_STRIP */
1895 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP
, /* PIPE_PRIM_TRIANGLE_FAN */
1896 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED
, /* PIPE_PRIM_QUADS */
1897 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED
, /* PIPE_PRIM_QUAD_STRIP */
1898 VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED
, /* PIPE_PRIM_POLYGON */
1899 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ
, /* PIPE_PRIM_LINES_ADJACENCY */
1900 VGPU10_PRIMITIVE_TOPOLOGY_LINELIST_ADJ
, /* PIPE_PRIM_LINE_STRIP_ADJACENCY */
1901 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ
, /* PIPE_PRIM_TRIANGLES_ADJACENCY */
1902 VGPU10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ
/* PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY */
1905 static const unsigned inputArraySize
[] = {
1906 0, /* VGPU10_PRIMITIVE_UNDEFINED */
1907 1, /* VGPU10_PRIMITIVE_POINT */
1908 2, /* VGPU10_PRIMITIVE_LINE */
1909 3, /* VGPU10_PRIMITIVE_TRIANGLE */
1912 4, /* VGPU10_PRIMITIVE_LINE_ADJ */
1913 6 /* VGPU10_PRIMITIVE_TRIANGLE_ADJ */
1916 switch (prop
->Property
.PropertyName
) {
1917 case TGSI_PROPERTY_GS_INPUT_PRIM
:
1918 assert(prop
->u
[0].Data
< Elements(primType
));
1919 emit
->gs
.prim_type
= primType
[prop
->u
[0].Data
];
1920 assert(emit
->gs
.prim_type
!= VGPU10_PRIMITIVE_UNDEFINED
);
1921 emit
->gs
.input_size
= inputArraySize
[emit
->gs
.prim_type
];
1924 case TGSI_PROPERTY_GS_OUTPUT_PRIM
:
1925 assert(prop
->u
[0].Data
< Elements(primTopology
));
1926 emit
->gs
.prim_topology
= primTopology
[prop
->u
[0].Data
];
1927 assert(emit
->gs
.prim_topology
!= VGPU10_PRIMITIVE_TOPOLOGY_UNDEFINED
);
1930 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES
:
1931 emit
->gs
.max_out_vertices
= prop
->u
[0].Data
;
1943 emit_property_instruction(struct svga_shader_emitter_v10
*emit
,
1944 VGPU10OpcodeToken0 opcode0
, unsigned nData
,
1947 begin_emit_instruction(emit
);
1948 emit_dword(emit
, opcode0
.value
);
1950 emit_dword(emit
, data
);
1951 end_emit_instruction(emit
);
1956 * Emit property instructions
1959 emit_property_instructions(struct svga_shader_emitter_v10
*emit
)
1961 VGPU10OpcodeToken0 opcode0
;
1963 assert(emit
->unit
== PIPE_SHADER_GEOMETRY
);
1965 /* emit input primitive type declaration */
1967 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_GS_INPUT_PRIMITIVE
;
1968 opcode0
.primitive
= emit
->gs
.prim_type
;
1969 emit_property_instruction(emit
, opcode0
, 0, 0);
1971 /* emit output primitive topology declaration */
1973 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_GS_OUTPUT_PRIMITIVE_TOPOLOGY
;
1974 opcode0
.primitiveTopology
= emit
->gs
.prim_topology
;
1975 emit_property_instruction(emit
, opcode0
, 0, 0);
1977 /* emit max output vertices */
1979 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_MAX_OUTPUT_VERTEX_COUNT
;
1980 emit_property_instruction(emit
, opcode0
, 1, emit
->gs
.max_out_vertices
);
1985 * Emit a vgpu10 declaration "instruction".
1986 * \param index the register index
1987 * \param size array size of the operand. In most cases, it is 1,
1988 * but for inputs to geometry shader, the array size varies
1989 * depending on the primitive type.
1992 emit_decl_instruction(struct svga_shader_emitter_v10
*emit
,
1993 VGPU10OpcodeToken0 opcode0
,
1994 VGPU10OperandToken0 operand0
,
1995 VGPU10NameToken name_token
,
1996 unsigned index
, unsigned size
)
1998 assert(opcode0
.opcodeType
);
1999 assert(operand0
.mask
);
2001 begin_emit_instruction(emit
);
2002 emit_dword(emit
, opcode0
.value
);
2004 emit_dword(emit
, operand0
.value
);
2006 if (operand0
.indexDimension
== VGPU10_OPERAND_INDEX_1D
) {
2007 /* Next token is the index of the register to declare */
2008 emit_dword(emit
, index
);
2010 else if (operand0
.indexDimension
>= VGPU10_OPERAND_INDEX_2D
) {
2011 /* Next token is the size of the register */
2012 emit_dword(emit
, size
);
2014 /* Followed by the index of the register */
2015 emit_dword(emit
, index
);
2018 if (name_token
.value
) {
2019 emit_dword(emit
, name_token
.value
);
2022 end_emit_instruction(emit
);
2027 * Emit the declaration for a shader input.
2028 * \param opcodeType opcode type, one of VGPU10_OPCODE_DCL_INPUTx
2029 * \param operandType operand type, one of VGPU10_OPERAND_TYPE_INPUT_x
2030 * \param dim index dimension
2031 * \param index the input register index
2032 * \param size array size of the operand. In most cases, it is 1,
2033 * but for inputs to geometry shader, the array size varies
2034 * depending on the primitive type.
2035 * \param name one of VGPU10_NAME_x
2036 * \parma numComp number of components
2037 * \param selMode component selection mode
2038 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2039 * \param interpMode interpolation mode
2042 emit_input_declaration(struct svga_shader_emitter_v10
*emit
,
2043 unsigned opcodeType
, unsigned operandType
,
2044 unsigned dim
, unsigned index
, unsigned size
,
2045 unsigned name
, unsigned numComp
,
2046 unsigned selMode
, unsigned usageMask
,
2047 unsigned interpMode
)
2049 VGPU10OpcodeToken0 opcode0
;
2050 VGPU10OperandToken0 operand0
;
2051 VGPU10NameToken name_token
;
2053 assert(usageMask
<= VGPU10_OPERAND_4_COMPONENT_MASK_ALL
);
2054 assert(opcodeType
== VGPU10_OPCODE_DCL_INPUT
||
2055 opcodeType
== VGPU10_OPCODE_DCL_INPUT_SIV
||
2056 opcodeType
== VGPU10_OPCODE_DCL_INPUT_PS
||
2057 opcodeType
== VGPU10_OPCODE_DCL_INPUT_PS_SGV
);
2058 assert(operandType
== VGPU10_OPERAND_TYPE_INPUT
||
2059 operandType
== VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
);
2060 assert(numComp
<= VGPU10_OPERAND_4_COMPONENT
);
2061 assert(selMode
<= VGPU10_OPERAND_4_COMPONENT_MASK_MODE
);
2062 assert(dim
<= VGPU10_OPERAND_INDEX_3D
);
2063 assert(name
== VGPU10_NAME_UNDEFINED
||
2064 name
== VGPU10_NAME_POSITION
||
2065 name
== VGPU10_NAME_INSTANCE_ID
||
2066 name
== VGPU10_NAME_VERTEX_ID
||
2067 name
== VGPU10_NAME_PRIMITIVE_ID
||
2068 name
== VGPU10_NAME_IS_FRONT_FACE
);
2069 assert(interpMode
== VGPU10_INTERPOLATION_UNDEFINED
||
2070 interpMode
== VGPU10_INTERPOLATION_CONSTANT
||
2071 interpMode
== VGPU10_INTERPOLATION_LINEAR
||
2072 interpMode
== VGPU10_INTERPOLATION_LINEAR_CENTROID
||
2073 interpMode
== VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE
||
2074 interpMode
== VGPU10_INTERPOLATION_LINEAR_NOPERSPECTIVE_CENTROID
);
2076 check_register_index(emit
, opcodeType
, index
);
2078 opcode0
.value
= operand0
.value
= name_token
.value
= 0;
2080 opcode0
.opcodeType
= opcodeType
;
2081 opcode0
.interpolationMode
= interpMode
;
2083 operand0
.operandType
= operandType
;
2084 operand0
.numComponents
= numComp
;
2085 operand0
.selectionMode
= selMode
;
2086 operand0
.mask
= usageMask
;
2087 operand0
.indexDimension
= dim
;
2088 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
2089 if (dim
== VGPU10_OPERAND_INDEX_2D
)
2090 operand0
.index1Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
2092 name_token
.name
= name
;
2094 emit_decl_instruction(emit
, opcode0
, operand0
, name_token
, index
, size
);
2099 * Emit the declaration for a shader output.
2100 * \param type one of VGPU10_OPCODE_DCL_OUTPUTx
2101 * \param index the output register index
2102 * \param name one of VGPU10_NAME_x
2103 * \param usageMask bitfield of VGPU10_OPERAND_4_COMPONENT_MASK_x values
2106 emit_output_declaration(struct svga_shader_emitter_v10
*emit
,
2107 unsigned type
, unsigned index
,
2108 unsigned name
, unsigned usageMask
)
2110 VGPU10OpcodeToken0 opcode0
;
2111 VGPU10OperandToken0 operand0
;
2112 VGPU10NameToken name_token
;
2114 assert(usageMask
<= VGPU10_OPERAND_4_COMPONENT_MASK_ALL
);
2115 assert(type
== VGPU10_OPCODE_DCL_OUTPUT
||
2116 type
== VGPU10_OPCODE_DCL_OUTPUT_SGV
||
2117 type
== VGPU10_OPCODE_DCL_OUTPUT_SIV
);
2118 assert(name
== VGPU10_NAME_UNDEFINED
||
2119 name
== VGPU10_NAME_POSITION
||
2120 name
== VGPU10_NAME_PRIMITIVE_ID
||
2121 name
== VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX
||
2122 name
== VGPU10_NAME_CLIP_DISTANCE
);
2124 check_register_index(emit
, type
, index
);
2126 opcode0
.value
= operand0
.value
= name_token
.value
= 0;
2128 opcode0
.opcodeType
= type
;
2129 operand0
.operandType
= VGPU10_OPERAND_TYPE_OUTPUT
;
2130 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
2131 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_MASK_MODE
;
2132 operand0
.mask
= usageMask
;
2133 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
2134 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
2136 name_token
.name
= name
;
2138 emit_decl_instruction(emit
, opcode0
, operand0
, name_token
, index
, 1);
2143 * Emit the declaration for the fragment depth output.
2146 emit_fragdepth_output_declaration(struct svga_shader_emitter_v10
*emit
)
2148 VGPU10OpcodeToken0 opcode0
;
2149 VGPU10OperandToken0 operand0
;
2150 VGPU10NameToken name_token
;
2152 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
2154 opcode0
.value
= operand0
.value
= name_token
.value
= 0;
2156 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_OUTPUT
;
2157 operand0
.operandType
= VGPU10_OPERAND_TYPE_OUTPUT_DEPTH
;
2158 operand0
.numComponents
= VGPU10_OPERAND_1_COMPONENT
;
2159 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_0D
;
2160 operand0
.mask
= VGPU10_OPERAND_4_COMPONENT_MASK_ALL
;
2162 emit_decl_instruction(emit
, opcode0
, operand0
, name_token
, 0, 1);
2167 * Emit the declaration for a system value input/output.
2170 emit_system_value_declaration(struct svga_shader_emitter_v10
*emit
,
2171 unsigned semantic_name
, unsigned index
)
2173 switch (semantic_name
) {
2174 case TGSI_SEMANTIC_INSTANCEID
:
2175 index
= alloc_system_value_index(emit
, index
);
2176 emit_input_declaration(emit
, VGPU10_OPCODE_DCL_INPUT_SIV
,
2177 VGPU10_OPERAND_TYPE_INPUT
,
2178 VGPU10_OPERAND_INDEX_1D
,
2180 VGPU10_NAME_INSTANCE_ID
,
2181 VGPU10_OPERAND_4_COMPONENT
,
2182 VGPU10_OPERAND_4_COMPONENT_MASK_MODE
,
2183 VGPU10_OPERAND_4_COMPONENT_MASK_X
,
2184 VGPU10_INTERPOLATION_UNDEFINED
);
2186 case TGSI_SEMANTIC_VERTEXID
:
2187 index
= alloc_system_value_index(emit
, index
);
2188 emit_input_declaration(emit
, VGPU10_OPCODE_DCL_INPUT_SIV
,
2189 VGPU10_OPERAND_TYPE_INPUT
,
2190 VGPU10_OPERAND_INDEX_1D
,
2192 VGPU10_NAME_VERTEX_ID
,
2193 VGPU10_OPERAND_4_COMPONENT
,
2194 VGPU10_OPERAND_4_COMPONENT_MASK_MODE
,
2195 VGPU10_OPERAND_4_COMPONENT_MASK_X
,
2196 VGPU10_INTERPOLATION_UNDEFINED
);
2204 * Translate a TGSI declaration to VGPU10.
2207 emit_vgpu10_declaration(struct svga_shader_emitter_v10
*emit
,
2208 const struct tgsi_full_declaration
*decl
)
2210 switch (decl
->Declaration
.File
) {
2211 case TGSI_FILE_INPUT
:
2212 /* do nothing - see emit_input_declarations() */
2215 case TGSI_FILE_OUTPUT
:
2216 assert(decl
->Range
.First
== decl
->Range
.Last
);
2217 emit
->output_usage_mask
[decl
->Range
.First
] = decl
->Declaration
.UsageMask
;
2220 case TGSI_FILE_TEMPORARY
:
2221 /* Don't declare the temps here. Just keep track of how many
2222 * and emit the declaration later.
2224 if (decl
->Declaration
.Array
) {
2225 /* Indexed temporary array. Save the start index of the array
2226 * and the size of the array.
2228 const unsigned arrayID
= MIN2(decl
->Array
.ArrayID
, MAX_TEMP_ARRAYS
);
2231 assert(arrayID
< ARRAY_SIZE(emit
->temp_arrays
));
2233 /* Save this array so we can emit the declaration for it later */
2234 emit
->temp_arrays
[arrayID
].start
= decl
->Range
.First
;
2235 emit
->temp_arrays
[arrayID
].size
=
2236 decl
->Range
.Last
- decl
->Range
.First
+ 1;
2238 emit
->num_temp_arrays
= MAX2(emit
->num_temp_arrays
, arrayID
+ 1);
2239 assert(emit
->num_temp_arrays
<= MAX_TEMP_ARRAYS
);
2240 emit
->num_temp_arrays
= MIN2(emit
->num_temp_arrays
, MAX_TEMP_ARRAYS
);
2242 /* Fill in the temp_map entries for this array */
2243 for (i
= decl
->Range
.First
; i
<= decl
->Range
.Last
; i
++) {
2244 emit
->temp_map
[i
].arrayId
= arrayID
;
2245 emit
->temp_map
[i
].index
= i
- decl
->Range
.First
;
2249 /* for all temps, indexed or not, keep track of highest index */
2250 emit
->num_shader_temps
= MAX2(emit
->num_shader_temps
,
2251 decl
->Range
.Last
+ 1);
2254 case TGSI_FILE_CONSTANT
:
2255 /* Don't declare constants here. Just keep track and emit later. */
2257 unsigned constbuf
= 0, num_consts
;
2258 if (decl
->Declaration
.Dimension
) {
2259 constbuf
= decl
->Dim
.Index2D
;
2261 /* We throw an assertion here when, in fact, the shader should never
2262 * have linked due to constbuf index out of bounds, so we shouldn't
2263 * have reached here.
2265 assert(constbuf
< Elements(emit
->num_shader_consts
));
2267 num_consts
= MAX2(emit
->num_shader_consts
[constbuf
],
2268 decl
->Range
.Last
+ 1);
2270 if (num_consts
> VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT
) {
2271 debug_printf("Warning: constant buffer is declared to size [%u]"
2272 " but [%u] is the limit.\n",
2274 VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT
);
2276 /* The linker doesn't enforce the max UBO size so we clamp here */
2277 emit
->num_shader_consts
[constbuf
] =
2278 MIN2(num_consts
, VGPU10_MAX_CONSTANT_BUFFER_ELEMENT_COUNT
);
2282 case TGSI_FILE_IMMEDIATE
:
2283 assert(!"TGSI_FILE_IMMEDIATE not handled yet!");
2286 case TGSI_FILE_SYSTEM_VALUE
:
2287 emit_system_value_declaration(emit
, decl
->Semantic
.Name
,
2291 case TGSI_FILE_SAMPLER
:
2292 /* Don't declare samplers here. Just keep track and emit later. */
2293 emit
->num_samplers
= MAX2(emit
->num_samplers
, decl
->Range
.Last
+ 1);
2296 case TGSI_FILE_RESOURCE
:
2297 /*opcode0.opcodeType = VGPU10_OPCODE_DCL_RESOURCE;*/
2298 /* XXX more, VGPU10_RETURN_TYPE_FLOAT */
2299 assert(!"TGSI_FILE_RESOURCE not handled yet");
2302 case TGSI_FILE_ADDRESS
:
2303 emit
->num_address_regs
= MAX2(emit
->num_address_regs
,
2304 decl
->Range
.Last
+ 1);
2307 case TGSI_FILE_SAMPLER_VIEW
:
2308 /* Not used at this time, but maybe in the future.
2309 * See emit_resource_declarations().
2314 assert(!"Unexpected type of declaration");
2322 * Emit all input declarations.
2325 emit_input_declarations(struct svga_shader_emitter_v10
*emit
)
2329 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
2331 for (i
= 0; i
< emit
->linkage
.num_inputs
; i
++) {
2332 unsigned semantic_name
= emit
->info
.input_semantic_name
[i
];
2333 unsigned usage_mask
= emit
->info
.input_usage_mask
[i
];
2334 unsigned index
= emit
->linkage
.input_map
[i
];
2335 unsigned type
, interpolationMode
, name
;
2337 if (usage_mask
== 0)
2338 continue; /* register is not actually used */
2340 if (semantic_name
== TGSI_SEMANTIC_POSITION
) {
2341 /* fragment position input */
2342 type
= VGPU10_OPCODE_DCL_INPUT_PS_SGV
;
2343 interpolationMode
= VGPU10_INTERPOLATION_LINEAR
;
2344 name
= VGPU10_NAME_POSITION
;
2345 if (usage_mask
& TGSI_WRITEMASK_W
) {
2346 /* we need to replace use of 'w' with '1/w' */
2347 emit
->fs
.fragcoord_input_index
= i
;
2350 else if (semantic_name
== TGSI_SEMANTIC_FACE
) {
2351 /* fragment front-facing input */
2352 type
= VGPU10_OPCODE_DCL_INPUT_PS_SGV
;
2353 interpolationMode
= VGPU10_INTERPOLATION_CONSTANT
;
2354 name
= VGPU10_NAME_IS_FRONT_FACE
;
2355 emit
->fs
.face_input_index
= i
;
2357 else if (semantic_name
== TGSI_SEMANTIC_PRIMID
) {
2359 type
= VGPU10_OPCODE_DCL_INPUT_PS_SGV
;
2360 interpolationMode
= VGPU10_INTERPOLATION_CONSTANT
;
2361 name
= VGPU10_NAME_PRIMITIVE_ID
;
2364 /* general fragment input */
2365 type
= VGPU10_OPCODE_DCL_INPUT_PS
;
2367 translate_interpolation(emit
,
2368 emit
->info
.input_interpolate
[i
],
2369 emit
->info
.input_interpolate_loc
[i
]);
2371 /* keeps track if flat interpolation mode is being used */
2372 emit
->uses_flat_interp
= emit
->uses_flat_interp
||
2373 (interpolationMode
== VGPU10_INTERPOLATION_CONSTANT
);
2375 name
= VGPU10_NAME_UNDEFINED
;
2378 emit_input_declaration(emit
, type
,
2379 VGPU10_OPERAND_TYPE_INPUT
,
2380 VGPU10_OPERAND_INDEX_1D
, index
, 1,
2382 VGPU10_OPERAND_4_COMPONENT
,
2383 VGPU10_OPERAND_4_COMPONENT_MASK_MODE
,
2384 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
,
2388 else if (emit
->unit
== PIPE_SHADER_GEOMETRY
) {
2390 for (i
= 0; i
< emit
->info
.num_inputs
; i
++) {
2391 unsigned semantic_name
= emit
->info
.input_semantic_name
[i
];
2392 unsigned usage_mask
= emit
->info
.input_usage_mask
[i
];
2393 unsigned index
= emit
->linkage
.input_map
[i
];
2394 unsigned opcodeType
, operandType
;
2395 unsigned numComp
, selMode
;
2399 if (usage_mask
== 0)
2400 continue; /* register is not actually used */
2402 opcodeType
= VGPU10_OPCODE_DCL_INPUT
;
2403 operandType
= VGPU10_OPERAND_TYPE_INPUT
;
2404 numComp
= VGPU10_OPERAND_4_COMPONENT
;
2405 selMode
= VGPU10_OPERAND_4_COMPONENT_MASK_MODE
;
2406 name
= VGPU10_NAME_UNDEFINED
;
2408 /* all geometry shader inputs are two dimensional except gl_PrimitiveID */
2409 dim
= VGPU10_OPERAND_INDEX_2D
;
2411 if (semantic_name
== TGSI_SEMANTIC_PRIMID
) {
2413 operandType
= VGPU10_OPERAND_TYPE_INPUT_PRIMITIVEID
;
2414 dim
= VGPU10_OPERAND_INDEX_0D
;
2415 numComp
= VGPU10_OPERAND_0_COMPONENT
;
2418 /* also save the register index so we can check for
2419 * primitive id when emit src register. We need to modify the
2420 * operand type, index dimension when emit primitive id src reg.
2422 emit
->gs
.prim_id_index
= i
;
2424 else if (semantic_name
== TGSI_SEMANTIC_POSITION
) {
2425 /* vertex position input */
2426 opcodeType
= VGPU10_OPCODE_DCL_INPUT_SIV
;
2427 name
= VGPU10_NAME_POSITION
;
2430 emit_input_declaration(emit
, opcodeType
, operandType
,
2432 emit
->gs
.input_size
,
2435 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
,
2436 VGPU10_INTERPOLATION_UNDEFINED
);
2440 assert(emit
->unit
== PIPE_SHADER_VERTEX
);
2442 for (i
= 0; i
< emit
->info
.num_inputs
; i
++) {
2443 unsigned usage_mask
= emit
->info
.input_usage_mask
[i
];
2446 if (usage_mask
== 0)
2447 continue; /* register is not actually used */
2449 emit_input_declaration(emit
, VGPU10_OPCODE_DCL_INPUT
,
2450 VGPU10_OPERAND_TYPE_INPUT
,
2451 VGPU10_OPERAND_INDEX_1D
, index
, 1,
2452 VGPU10_NAME_UNDEFINED
,
2453 VGPU10_OPERAND_4_COMPONENT
,
2454 VGPU10_OPERAND_4_COMPONENT_MASK_MODE
,
2455 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
,
2456 VGPU10_INTERPOLATION_UNDEFINED
);
2465 * Emit all output declarations.
2468 emit_output_declarations(struct svga_shader_emitter_v10
*emit
)
2472 for (i
= 0; i
< emit
->info
.num_outputs
; i
++) {
2473 /*const unsigned usage_mask = emit->info.output_usage_mask[i];*/
2474 const unsigned semantic_name
= emit
->info
.output_semantic_name
[i
];
2475 const unsigned semantic_index
= emit
->info
.output_semantic_index
[i
];
2478 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
2479 if (semantic_name
== TGSI_SEMANTIC_COLOR
) {
2480 assert(semantic_index
< Elements(emit
->fs
.color_out_index
));
2482 emit
->fs
.color_out_index
[semantic_index
] = index
;
2484 /* The semantic index is the shader's color output/buffer index */
2485 emit_output_declaration(emit
,
2486 VGPU10_OPCODE_DCL_OUTPUT
, semantic_index
,
2487 VGPU10_NAME_UNDEFINED
,
2488 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
);
2490 if (semantic_index
== 0) {
2491 if (emit
->key
.fs
.write_color0_to_n_cbufs
> 1) {
2492 /* Emit declarations for the additional color outputs
2496 for (j
= 1; j
< emit
->key
.fs
.write_color0_to_n_cbufs
; j
++) {
2497 /* Allocate a new output index */
2498 unsigned idx
= emit
->info
.num_outputs
+ j
- 1;
2499 emit
->fs
.color_out_index
[j
] = idx
;
2500 emit_output_declaration(emit
,
2501 VGPU10_OPCODE_DCL_OUTPUT
, idx
,
2502 VGPU10_NAME_UNDEFINED
,
2503 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
);
2504 emit
->info
.output_semantic_index
[idx
] = j
;
2509 assert(!emit
->key
.fs
.write_color0_to_n_cbufs
);
2512 else if (semantic_name
== TGSI_SEMANTIC_POSITION
) {
2513 /* Fragment depth output */
2514 emit_fragdepth_output_declaration(emit
);
2517 assert(!"Bad output semantic name");
2522 unsigned name
, type
;
2523 unsigned writemask
= VGPU10_OPERAND_4_COMPONENT_MASK_ALL
;
2525 switch (semantic_name
) {
2526 case TGSI_SEMANTIC_POSITION
:
2527 assert(emit
->unit
!= PIPE_SHADER_FRAGMENT
);
2528 type
= VGPU10_OPCODE_DCL_OUTPUT_SIV
;
2529 name
= VGPU10_NAME_POSITION
;
2530 /* Save the index of the vertex position output register */
2531 emit
->vposition
.out_index
= index
;
2533 case TGSI_SEMANTIC_CLIPDIST
:
2534 type
= VGPU10_OPCODE_DCL_OUTPUT_SIV
;
2535 name
= VGPU10_NAME_CLIP_DISTANCE
;
2536 /* save the starting index of the clip distance output register */
2537 if (semantic_index
== 0)
2538 emit
->clip_dist_out_index
= index
;
2539 writemask
= emit
->output_usage_mask
[index
];
2540 writemask
= apply_clip_plane_mask(emit
, writemask
, semantic_index
);
2541 if (writemask
== 0x0) {
2542 continue; /* discard this do-nothing declaration */
2545 case TGSI_SEMANTIC_PRIMID
:
2546 assert(emit
->unit
== PIPE_SHADER_GEOMETRY
);
2547 type
= VGPU10_OPCODE_DCL_OUTPUT_SGV
;
2548 name
= VGPU10_NAME_PRIMITIVE_ID
;
2550 case TGSI_SEMANTIC_LAYER
:
2551 assert(emit
->unit
== PIPE_SHADER_GEOMETRY
);
2552 type
= VGPU10_OPCODE_DCL_OUTPUT_SGV
;
2553 name
= VGPU10_NAME_RENDER_TARGET_ARRAY_INDEX
;
2555 case TGSI_SEMANTIC_CLIPVERTEX
:
2556 type
= VGPU10_OPCODE_DCL_OUTPUT
;
2557 name
= VGPU10_NAME_UNDEFINED
;
2558 emit
->clip_vertex_out_index
= index
;
2561 /* generic output */
2562 type
= VGPU10_OPCODE_DCL_OUTPUT
;
2563 name
= VGPU10_NAME_UNDEFINED
;
2566 emit_output_declaration(emit
, type
, index
, name
, writemask
);
2570 if (emit
->vposition
.so_index
!= INVALID_INDEX
&&
2571 emit
->vposition
.out_index
!= INVALID_INDEX
) {
2573 assert(emit
->unit
!= PIPE_SHADER_FRAGMENT
);
2575 /* Emit the declaration for the non-adjusted vertex position
2576 * for stream output purpose
2578 emit_output_declaration(emit
, VGPU10_OPCODE_DCL_OUTPUT
,
2579 emit
->vposition
.so_index
,
2580 VGPU10_NAME_UNDEFINED
,
2581 VGPU10_OPERAND_4_COMPONENT_MASK_ALL
);
2584 if (emit
->clip_dist_so_index
!= INVALID_INDEX
&&
2585 emit
->clip_dist_out_index
!= INVALID_INDEX
) {
2587 assert(emit
->unit
!= PIPE_SHADER_FRAGMENT
);
2589 /* Emit the declaration for the clip distance shadow copy which
2590 * will be used for stream output purpose and for clip distance
2593 emit_output_declaration(emit
, VGPU10_OPCODE_DCL_OUTPUT
,
2594 emit
->clip_dist_so_index
,
2595 VGPU10_NAME_UNDEFINED
,
2596 emit
->output_usage_mask
[emit
->clip_dist_out_index
]);
2598 if (emit
->info
.num_written_clipdistance
> 4) {
2599 /* for the second clip distance register, each handles 4 planes */
2600 emit_output_declaration(emit
, VGPU10_OPCODE_DCL_OUTPUT
,
2601 emit
->clip_dist_so_index
+ 1,
2602 VGPU10_NAME_UNDEFINED
,
2603 emit
->output_usage_mask
[emit
->clip_dist_out_index
+1]);
2612 * Emit the declaration for the temporary registers.
2615 emit_temporaries_declaration(struct svga_shader_emitter_v10
*emit
)
2617 unsigned total_temps
, reg
, i
;
2619 total_temps
= emit
->num_shader_temps
;
2621 /* Allocate extra temps for specially-implemented instructions,
2624 total_temps
+= MAX_INTERNAL_TEMPS
;
2626 if (emit
->unit
== PIPE_SHADER_VERTEX
|| emit
->unit
== PIPE_SHADER_GEOMETRY
) {
2627 if (emit
->vposition
.need_prescale
|| emit
->key
.vs
.undo_viewport
||
2628 emit
->key
.clip_plane_enable
||
2629 emit
->vposition
.so_index
!= INVALID_INDEX
) {
2630 emit
->vposition
.tmp_index
= total_temps
;
2634 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
2635 unsigned attrib_mask
= (emit
->key
.vs
.adjust_attrib_w_1
|
2636 emit
->key
.vs
.adjust_attrib_itof
|
2637 emit
->key
.vs
.adjust_attrib_utof
|
2638 emit
->key
.vs
.attrib_is_bgra
|
2639 emit
->key
.vs
.attrib_puint_to_snorm
|
2640 emit
->key
.vs
.attrib_puint_to_uscaled
|
2641 emit
->key
.vs
.attrib_puint_to_sscaled
);
2642 while (attrib_mask
) {
2643 unsigned index
= u_bit_scan(&attrib_mask
);
2644 emit
->vs
.adjusted_input
[index
] = total_temps
++;
2648 if (emit
->clip_mode
== CLIP_DISTANCE
) {
2649 /* We need to write the clip distance to a temporary register
2650 * first. Then it will be copied to the shadow copy for
2651 * the clip distance varying variable and stream output purpose.
2652 * It will also be copied to the actual CLIPDIST register
2653 * according to the enabled clip planes
2655 emit
->clip_dist_tmp_index
= total_temps
++;
2656 if (emit
->info
.num_written_clipdistance
> 4)
2657 total_temps
++; /* second clip register */
2659 else if (emit
->clip_mode
== CLIP_VERTEX
) {
2660 /* We need to convert the TGSI CLIPVERTEX output to one or more
2661 * clip distances. Allocate a temp reg for the clipvertex here.
2663 assert(emit
->info
.writes_clipvertex
> 0);
2664 emit
->clip_vertex_tmp_index
= total_temps
;
2668 else if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
2669 if (emit
->key
.fs
.alpha_func
!= SVGA3D_CMP_ALWAYS
||
2670 emit
->key
.fs
.write_color0_to_n_cbufs
> 1) {
2671 /* Allocate a temp to hold the output color */
2672 emit
->fs
.color_tmp_index
= total_temps
;
2676 if (emit
->fs
.face_input_index
!= INVALID_INDEX
) {
2677 /* Allocate a temp for the +/-1 face register */
2678 emit
->fs
.face_tmp_index
= total_temps
;
2682 if (emit
->fs
.fragcoord_input_index
!= INVALID_INDEX
) {
2683 /* Allocate a temp for modified fragment position register */
2684 emit
->fs
.fragcoord_tmp_index
= total_temps
;
2689 for (i
= 0; i
< emit
->num_address_regs
; i
++) {
2690 emit
->address_reg_index
[i
] = total_temps
++;
2693 /* Initialize the temp_map array which maps TGSI temp indexes to VGPU10
2694 * temp indexes. Basically, we compact all the non-array temp register
2695 * indexes into a consecutive series.
2697 * Before, we may have some TGSI declarations like:
2698 * DCL TEMP[0..1], LOCAL
2699 * DCL TEMP[2..4], ARRAY(1), LOCAL
2700 * DCL TEMP[5..7], ARRAY(2), LOCAL
2701 * plus, some extra temps, like TEMP[8], TEMP[9] for misc things
2703 * After, we'll have a map like this:
2704 * temp_map[0] = { array 0, index 0 }
2705 * temp_map[1] = { array 0, index 1 }
2706 * temp_map[2] = { array 1, index 0 }
2707 * temp_map[3] = { array 1, index 1 }
2708 * temp_map[4] = { array 1, index 2 }
2709 * temp_map[5] = { array 2, index 0 }
2710 * temp_map[6] = { array 2, index 1 }
2711 * temp_map[7] = { array 2, index 2 }
2712 * temp_map[8] = { array 0, index 2 }
2713 * temp_map[9] = { array 0, index 3 }
2715 * We'll declare two arrays of 3 elements, plus a set of four non-indexed
2716 * temps numbered 0..3
2718 * Any time we emit a temporary register index, we'll have to use the
2719 * temp_map[] table to convert the TGSI index to the VGPU10 index.
2721 * Finally, we recompute the total_temps value here.
2724 for (i
= 0; i
< total_temps
; i
++) {
2725 if (emit
->temp_map
[i
].arrayId
== 0) {
2726 emit
->temp_map
[i
].index
= reg
++;
2732 debug_printf("total_temps %u\n", total_temps
);
2733 for (i
= 0; i
< 30; i
++) {
2734 debug_printf("temp %u -> array %u index %u\n",
2735 i
, emit
->temp_map
[i
].arrayId
, emit
->temp_map
[i
].index
);
2739 /* Emit declaration of ordinary temp registers */
2740 if (total_temps
> 0) {
2741 VGPU10OpcodeToken0 opcode0
;
2744 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_TEMPS
;
2746 begin_emit_instruction(emit
);
2747 emit_dword(emit
, opcode0
.value
);
2748 emit_dword(emit
, total_temps
);
2749 end_emit_instruction(emit
);
2752 /* Emit declarations for indexable temp arrays. Skip 0th entry since
2755 for (i
= 1; i
< emit
->num_temp_arrays
; i
++) {
2756 unsigned num_temps
= emit
->temp_arrays
[i
].size
;
2758 if (num_temps
> 0) {
2759 VGPU10OpcodeToken0 opcode0
;
2762 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_INDEXABLE_TEMP
;
2764 begin_emit_instruction(emit
);
2765 emit_dword(emit
, opcode0
.value
);
2766 emit_dword(emit
, i
); /* which array */
2767 emit_dword(emit
, num_temps
);
2768 emit_dword(emit
, 4); /* num components */
2769 end_emit_instruction(emit
);
2771 total_temps
+= num_temps
;
2775 /* Check that the grand total of all regular and indexed temps is
2778 check_register_index(emit
, VGPU10_OPCODE_DCL_TEMPS
, total_temps
- 1);
2785 emit_constant_declaration(struct svga_shader_emitter_v10
*emit
)
2787 VGPU10OpcodeToken0 opcode0
;
2788 VGPU10OperandToken0 operand0
;
2789 unsigned total_consts
, i
;
2792 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_CONSTANT_BUFFER
;
2793 opcode0
.accessPattern
= VGPU10_CB_IMMEDIATE_INDEXED
;
2794 /* XXX or, access pattern = VGPU10_CB_DYNAMIC_INDEXED */
2797 operand0
.numComponents
= VGPU10_OPERAND_4_COMPONENT
;
2798 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_2D
;
2799 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
2800 operand0
.index1Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
2801 operand0
.operandType
= VGPU10_OPERAND_TYPE_CONSTANT_BUFFER
;
2802 operand0
.selectionMode
= VGPU10_OPERAND_4_COMPONENT_SWIZZLE_MODE
;
2803 operand0
.swizzleX
= 0;
2804 operand0
.swizzleY
= 1;
2805 operand0
.swizzleZ
= 2;
2806 operand0
.swizzleW
= 3;
2809 * Emit declaration for constant buffer [0]. We also allocate
2810 * room for the extra constants here.
2812 total_consts
= emit
->num_shader_consts
[0];
2814 /* Now, allocate constant slots for the "extra" constants */
2816 /* Vertex position scale/translation */
2817 if (emit
->vposition
.need_prescale
) {
2818 emit
->vposition
.prescale_scale_index
= total_consts
++;
2819 emit
->vposition
.prescale_trans_index
= total_consts
++;
2822 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
2823 if (emit
->key
.vs
.undo_viewport
) {
2824 emit
->vs
.viewport_index
= total_consts
++;
2828 /* user-defined clip planes */
2829 if (emit
->key
.clip_plane_enable
) {
2830 unsigned n
= util_bitcount(emit
->key
.clip_plane_enable
);
2831 assert(emit
->unit
== PIPE_SHADER_VERTEX
||
2832 emit
->unit
== PIPE_SHADER_GEOMETRY
);
2833 for (i
= 0; i
< n
; i
++) {
2834 emit
->clip_plane_const
[i
] = total_consts
++;
2838 /* Texcoord scale factors for RECT textures */
2840 for (i
= 0; i
< emit
->num_samplers
; i
++) {
2841 if (emit
->key
.tex
[i
].unnormalized
) {
2842 emit
->texcoord_scale_index
[i
] = total_consts
++;
2847 /* Texture buffer sizes */
2848 for (i
= 0; i
< emit
->num_samplers
; i
++) {
2849 if (emit
->key
.tex
[i
].texture_target
== PIPE_BUFFER
) {
2850 emit
->texture_buffer_size_index
[i
] = total_consts
++;
2854 if (total_consts
> 0) {
2855 begin_emit_instruction(emit
);
2856 emit_dword(emit
, opcode0
.value
);
2857 emit_dword(emit
, operand0
.value
);
2858 emit_dword(emit
, 0); /* which const buffer slot */
2859 emit_dword(emit
, total_consts
);
2860 end_emit_instruction(emit
);
2863 /* Declare remaining constant buffers (UBOs) */
2864 for (i
= 1; i
< Elements(emit
->num_shader_consts
); i
++) {
2865 if (emit
->num_shader_consts
[i
] > 0) {
2866 begin_emit_instruction(emit
);
2867 emit_dword(emit
, opcode0
.value
);
2868 emit_dword(emit
, operand0
.value
);
2869 emit_dword(emit
, i
); /* which const buffer slot */
2870 emit_dword(emit
, emit
->num_shader_consts
[i
]);
2871 end_emit_instruction(emit
);
2880 * Emit declarations for samplers.
2883 emit_sampler_declarations(struct svga_shader_emitter_v10
*emit
)
2887 for (i
= 0; i
< emit
->num_samplers
; i
++) {
2888 VGPU10OpcodeToken0 opcode0
;
2889 VGPU10OperandToken0 operand0
;
2892 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_SAMPLER
;
2893 opcode0
.samplerMode
= VGPU10_SAMPLER_MODE_DEFAULT
;
2896 operand0
.numComponents
= VGPU10_OPERAND_0_COMPONENT
;
2897 operand0
.operandType
= VGPU10_OPERAND_TYPE_SAMPLER
;
2898 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
2899 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
2901 begin_emit_instruction(emit
);
2902 emit_dword(emit
, opcode0
.value
);
2903 emit_dword(emit
, operand0
.value
);
2904 emit_dword(emit
, i
);
2905 end_emit_instruction(emit
);
2913 * Translate PIPE_TEXTURE_x to VGAPU10_RESOURCE_DIMENSION_x.
2916 pipe_texture_to_resource_dimension(unsigned target
, bool msaa
)
2920 return VGPU10_RESOURCE_DIMENSION_BUFFER
;
2921 case PIPE_TEXTURE_1D
:
2922 return VGPU10_RESOURCE_DIMENSION_TEXTURE1D
;
2923 case PIPE_TEXTURE_2D
:
2924 case PIPE_TEXTURE_RECT
:
2925 return msaa
? VGPU10_RESOURCE_DIMENSION_TEXTURE2DMS
2926 : VGPU10_RESOURCE_DIMENSION_TEXTURE2D
;
2927 case PIPE_TEXTURE_3D
:
2928 return VGPU10_RESOURCE_DIMENSION_TEXTURE3D
;
2929 case PIPE_TEXTURE_CUBE
:
2930 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBE
;
2931 case PIPE_TEXTURE_1D_ARRAY
:
2932 return VGPU10_RESOURCE_DIMENSION_TEXTURE1DARRAY
;
2933 case PIPE_TEXTURE_2D_ARRAY
:
2934 return msaa
? VGPU10_RESOURCE_DIMENSION_TEXTURE2DMSARRAY
2935 : VGPU10_RESOURCE_DIMENSION_TEXTURE2DARRAY
;
2936 case PIPE_TEXTURE_CUBE_ARRAY
:
2937 return VGPU10_RESOURCE_DIMENSION_TEXTURECUBEARRAY
;
2939 assert(!"Unexpected resource type");
2940 return VGPU10_RESOURCE_DIMENSION_TEXTURE2D
;
2946 * Given a tgsi_return_type, return true iff it is an integer type.
2949 is_integer_type(enum tgsi_return_type type
)
2952 case TGSI_RETURN_TYPE_SINT
:
2953 case TGSI_RETURN_TYPE_UINT
:
2955 case TGSI_RETURN_TYPE_FLOAT
:
2956 case TGSI_RETURN_TYPE_UNORM
:
2957 case TGSI_RETURN_TYPE_SNORM
:
2959 case TGSI_RETURN_TYPE_COUNT
:
2961 assert(!"is_integer_type: Unknown tgsi_return_type");
2968 * Emit declarations for resources.
2969 * XXX When we're sure that all TGSI shaders will be generated with
2970 * sampler view declarations (Ex: DCL SVIEW[n], 2D, UINT) we may
2974 emit_resource_declarations(struct svga_shader_emitter_v10
*emit
)
2978 /* Emit resource decl for each sampler */
2979 for (i
= 0; i
< emit
->num_samplers
; i
++) {
2980 VGPU10OpcodeToken0 opcode0
;
2981 VGPU10OperandToken0 operand0
;
2982 VGPU10ResourceReturnTypeToken return_type
;
2983 VGPU10_RESOURCE_RETURN_TYPE rt
;
2986 opcode0
.opcodeType
= VGPU10_OPCODE_DCL_RESOURCE
;
2987 opcode0
.resourceDimension
=
2988 pipe_texture_to_resource_dimension(emit
->key
.tex
[i
].texture_target
,
2989 emit
->key
.tex
[i
].texture_msaa
);
2991 operand0
.numComponents
= VGPU10_OPERAND_0_COMPONENT
;
2992 operand0
.operandType
= VGPU10_OPERAND_TYPE_RESOURCE
;
2993 operand0
.indexDimension
= VGPU10_OPERAND_INDEX_1D
;
2994 operand0
.index0Representation
= VGPU10_OPERAND_INDEX_IMMEDIATE32
;
2997 /* convert TGSI_RETURN_TYPE_x to VGPU10_RETURN_TYPE_x */
2998 STATIC_ASSERT(VGPU10_RETURN_TYPE_UNORM
== TGSI_RETURN_TYPE_UNORM
+ 1);
2999 STATIC_ASSERT(VGPU10_RETURN_TYPE_SNORM
== TGSI_RETURN_TYPE_SNORM
+ 1);
3000 STATIC_ASSERT(VGPU10_RETURN_TYPE_SINT
== TGSI_RETURN_TYPE_SINT
+ 1);
3001 STATIC_ASSERT(VGPU10_RETURN_TYPE_UINT
== TGSI_RETURN_TYPE_UINT
+ 1);
3002 STATIC_ASSERT(VGPU10_RETURN_TYPE_FLOAT
== TGSI_RETURN_TYPE_FLOAT
+ 1);
3003 assert(emit
->key
.tex
[i
].return_type
<= TGSI_RETURN_TYPE_FLOAT
);
3004 rt
= emit
->key
.tex
[i
].return_type
+ 1;
3006 switch (emit
->key
.tex
[i
].return_type
) {
3007 case TGSI_RETURN_TYPE_UNORM
: rt
= VGPU10_RETURN_TYPE_UNORM
; break;
3008 case TGSI_RETURN_TYPE_SNORM
: rt
= VGPU10_RETURN_TYPE_SNORM
; break;
3009 case TGSI_RETURN_TYPE_SINT
: rt
= VGPU10_RETURN_TYPE_SINT
; break;
3010 case TGSI_RETURN_TYPE_UINT
: rt
= VGPU10_RETURN_TYPE_UINT
; break;
3011 case TGSI_RETURN_TYPE_FLOAT
: rt
= VGPU10_RETURN_TYPE_FLOAT
; break;
3012 case TGSI_RETURN_TYPE_COUNT
:
3014 rt
= VGPU10_RETURN_TYPE_FLOAT
;
3015 assert(!"emit_resource_declarations: Unknown tgsi_return_type");
3019 return_type
.value
= 0;
3020 return_type
.component0
= rt
;
3021 return_type
.component1
= rt
;
3022 return_type
.component2
= rt
;
3023 return_type
.component3
= rt
;
3025 begin_emit_instruction(emit
);
3026 emit_dword(emit
, opcode0
.value
);
3027 emit_dword(emit
, operand0
.value
);
3028 emit_dword(emit
, i
);
3029 emit_dword(emit
, return_type
.value
);
3030 end_emit_instruction(emit
);
3037 emit_instruction_op1(struct svga_shader_emitter_v10
*emit
,
3039 const struct tgsi_full_dst_register
*dst
,
3040 const struct tgsi_full_src_register
*src
,
3043 begin_emit_instruction(emit
);
3044 emit_opcode(emit
, opcode
, saturate
);
3045 emit_dst_register(emit
, dst
);
3046 emit_src_register(emit
, src
);
3047 end_emit_instruction(emit
);
3051 emit_instruction_op2(struct svga_shader_emitter_v10
*emit
,
3053 const struct tgsi_full_dst_register
*dst
,
3054 const struct tgsi_full_src_register
*src1
,
3055 const struct tgsi_full_src_register
*src2
,
3058 begin_emit_instruction(emit
);
3059 emit_opcode(emit
, opcode
, saturate
);
3060 emit_dst_register(emit
, dst
);
3061 emit_src_register(emit
, src1
);
3062 emit_src_register(emit
, src2
);
3063 end_emit_instruction(emit
);
3067 emit_instruction_op3(struct svga_shader_emitter_v10
*emit
,
3069 const struct tgsi_full_dst_register
*dst
,
3070 const struct tgsi_full_src_register
*src1
,
3071 const struct tgsi_full_src_register
*src2
,
3072 const struct tgsi_full_src_register
*src3
,
3075 begin_emit_instruction(emit
);
3076 emit_opcode(emit
, opcode
, saturate
);
3077 emit_dst_register(emit
, dst
);
3078 emit_src_register(emit
, src1
);
3079 emit_src_register(emit
, src2
);
3080 emit_src_register(emit
, src3
);
3081 end_emit_instruction(emit
);
3085 * Emit the actual clip distance instructions to be used for clipping
3086 * by copying the clip distance from the temporary registers to the
3087 * CLIPDIST registers written with the enabled planes mask.
3088 * Also copy the clip distance from the temporary to the clip distance
3089 * shadow copy register which will be referenced by the input shader
3092 emit_clip_distance_instructions(struct svga_shader_emitter_v10
*emit
)
3094 struct tgsi_full_src_register tmp_clip_dist_src
;
3095 struct tgsi_full_dst_register clip_dist_dst
;
3098 unsigned clip_plane_enable
= emit
->key
.clip_plane_enable
;
3099 unsigned clip_dist_tmp_index
= emit
->clip_dist_tmp_index
;
3100 unsigned num_written_clipdist
= emit
->info
.num_written_clipdistance
;
3102 assert(emit
->clip_dist_out_index
!= INVALID_INDEX
);
3103 assert(emit
->clip_dist_tmp_index
!= INVALID_INDEX
);
3106 * Temporary reset the temporary clip dist register index so
3107 * that the copy to the real clip dist register will not
3108 * attempt to copy to the temporary register again
3110 emit
->clip_dist_tmp_index
= INVALID_INDEX
;
3112 for (i
= 0; i
< 2 && num_written_clipdist
; i
++, num_written_clipdist
-=4) {
3114 tmp_clip_dist_src
= make_src_temp_reg(clip_dist_tmp_index
+ i
);
3117 * copy to the shadow copy for use by varying variable and
3118 * stream output. All clip distances
3119 * will be written regardless of the enabled clipping planes.
3121 clip_dist_dst
= make_dst_reg(TGSI_FILE_OUTPUT
,
3122 emit
->clip_dist_so_index
+ i
);
3124 /* MOV clip_dist_so, tmp_clip_dist */
3125 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &clip_dist_dst
,
3126 &tmp_clip_dist_src
, FALSE
);
3129 * copy those clip distances to enabled clipping planes
3130 * to CLIPDIST registers for clipping
3132 if (clip_plane_enable
& 0xf) {
3133 clip_dist_dst
= make_dst_reg(TGSI_FILE_OUTPUT
,
3134 emit
->clip_dist_out_index
+ i
);
3135 clip_dist_dst
= writemask_dst(&clip_dist_dst
, clip_plane_enable
& 0xf);
3137 /* MOV CLIPDIST, tmp_clip_dist */
3138 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &clip_dist_dst
,
3139 &tmp_clip_dist_src
, FALSE
);
3141 /* four clip planes per clip register */
3142 clip_plane_enable
>>= 4;
3145 * set the temporary clip dist register index back to the
3146 * temporary index for the next vertex
3148 emit
->clip_dist_tmp_index
= clip_dist_tmp_index
;
3151 /* Declare clip distance output registers for user-defined clip planes
3152 * or the TGSI_CLIPVERTEX output.
3155 emit_clip_distance_declarations(struct svga_shader_emitter_v10
*emit
)
3157 unsigned num_clip_planes
= util_bitcount(emit
->key
.clip_plane_enable
);
3158 unsigned index
= emit
->num_outputs
;
3159 unsigned plane_mask
;
3161 assert(emit
->unit
== PIPE_SHADER_VERTEX
||
3162 emit
->unit
== PIPE_SHADER_GEOMETRY
);
3163 assert(num_clip_planes
<= 8);
3165 if (emit
->clip_mode
!= CLIP_LEGACY
&&
3166 emit
->clip_mode
!= CLIP_VERTEX
) {
3170 if (num_clip_planes
== 0)
3173 /* Declare one or two clip output registers. The number of components
3174 * in the mask reflects the number of clip planes. For example, if 5
3175 * clip planes are needed, we'll declare outputs similar to:
3176 * dcl_output_siv o2.xyzw, clip_distance
3177 * dcl_output_siv o3.x, clip_distance
3179 emit
->clip_dist_out_index
= index
; /* save the starting clip dist reg index */
3181 plane_mask
= (1 << num_clip_planes
) - 1;
3182 if (plane_mask
& 0xf) {
3183 unsigned cmask
= plane_mask
& VGPU10_OPERAND_4_COMPONENT_MASK_ALL
;
3184 emit_output_declaration(emit
, VGPU10_OPCODE_DCL_OUTPUT_SIV
, index
,
3185 VGPU10_NAME_CLIP_DISTANCE
, cmask
);
3186 emit
->num_outputs
++;
3188 if (plane_mask
& 0xf0) {
3189 unsigned cmask
= (plane_mask
>> 4) & VGPU10_OPERAND_4_COMPONENT_MASK_ALL
;
3190 emit_output_declaration(emit
, VGPU10_OPCODE_DCL_OUTPUT_SIV
, index
+ 1,
3191 VGPU10_NAME_CLIP_DISTANCE
, cmask
);
3192 emit
->num_outputs
++;
3198 * Emit the instructions for writing to the clip distance registers
3199 * to handle legacy/automatic clip planes.
3200 * For each clip plane, the distance is the dot product of the vertex
3201 * position (found in TEMP[vpos_tmp_index]) and the clip plane coefficients.
3202 * This is not used when the shader has an explicit CLIPVERTEX or CLIPDISTANCE
3203 * output registers already declared.
3206 emit_clip_distance_from_vpos(struct svga_shader_emitter_v10
*emit
,
3207 unsigned vpos_tmp_index
)
3209 unsigned i
, num_clip_planes
= util_bitcount(emit
->key
.clip_plane_enable
);
3211 assert(emit
->clip_mode
== CLIP_LEGACY
);
3212 assert(num_clip_planes
<= 8);
3214 assert(emit
->unit
== PIPE_SHADER_VERTEX
||
3215 emit
->unit
== PIPE_SHADER_GEOMETRY
);
3217 for (i
= 0; i
< num_clip_planes
; i
++) {
3218 struct tgsi_full_dst_register dst
;
3219 struct tgsi_full_src_register plane_src
, vpos_src
;
3220 unsigned reg_index
= emit
->clip_dist_out_index
+ i
/ 4;
3221 unsigned comp
= i
% 4;
3222 unsigned writemask
= VGPU10_OPERAND_4_COMPONENT_MASK_X
<< comp
;
3224 /* create dst, src regs */
3225 dst
= make_dst_reg(TGSI_FILE_OUTPUT
, reg_index
);
3226 dst
= writemask_dst(&dst
, writemask
);
3228 plane_src
= make_src_const_reg(emit
->clip_plane_const
[i
]);
3229 vpos_src
= make_src_temp_reg(vpos_tmp_index
);
3231 /* DP4 clip_dist, plane, vpos */
3232 emit_instruction_op2(emit
, VGPU10_OPCODE_DP4
, &dst
,
3233 &plane_src
, &vpos_src
, FALSE
);
3239 * Emit the instructions for computing the clip distance results from
3240 * the clip vertex temporary.
3241 * For each clip plane, the distance is the dot product of the clip vertex
3242 * position (found in a temp reg) and the clip plane coefficients.
3245 emit_clip_vertex_instructions(struct svga_shader_emitter_v10
*emit
)
3247 const unsigned num_clip
= util_bitcount(emit
->key
.clip_plane_enable
);
3249 struct tgsi_full_dst_register dst
;
3250 struct tgsi_full_src_register clipvert_src
;
3251 const unsigned clip_vertex_tmp
= emit
->clip_vertex_tmp_index
;
3253 assert(emit
->unit
== PIPE_SHADER_VERTEX
||
3254 emit
->unit
== PIPE_SHADER_GEOMETRY
);
3256 assert(emit
->clip_mode
== CLIP_VERTEX
);
3258 clipvert_src
= make_src_temp_reg(clip_vertex_tmp
);
3260 for (i
= 0; i
< num_clip
; i
++) {
3261 struct tgsi_full_src_register plane_src
;
3262 unsigned reg_index
= emit
->clip_dist_out_index
+ i
/ 4;
3263 unsigned comp
= i
% 4;
3264 unsigned writemask
= VGPU10_OPERAND_4_COMPONENT_MASK_X
<< comp
;
3266 /* create dst, src regs */
3267 dst
= make_dst_reg(TGSI_FILE_OUTPUT
, reg_index
);
3268 dst
= writemask_dst(&dst
, writemask
);
3270 plane_src
= make_src_const_reg(emit
->clip_plane_const
[i
]);
3272 /* DP4 clip_dist, plane, vpos */
3273 emit_instruction_op2(emit
, VGPU10_OPCODE_DP4
, &dst
,
3274 &plane_src
, &clipvert_src
, FALSE
);
3277 /* copy temporary clip vertex register to the clip vertex register */
3279 assert(emit
->clip_vertex_out_index
!= INVALID_INDEX
);
3282 * temporary reset the temporary clip vertex register index so
3283 * that copy to the clip vertex register will not attempt
3284 * to copy to the temporary register again
3286 emit
->clip_vertex_tmp_index
= INVALID_INDEX
;
3288 /* MOV clip_vertex, clip_vertex_tmp */
3289 dst
= make_dst_reg(TGSI_FILE_OUTPUT
, emit
->clip_vertex_out_index
);
3290 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
3291 &dst
, &clipvert_src
, FALSE
);
3294 * set the temporary clip vertex register index back to the
3295 * temporary index for the next vertex
3297 emit
->clip_vertex_tmp_index
= clip_vertex_tmp
;
3301 * Emit code to convert RGBA to BGRA
3304 emit_swap_r_b(struct svga_shader_emitter_v10
*emit
,
3305 const struct tgsi_full_dst_register
*dst
,
3306 const struct tgsi_full_src_register
*src
)
3308 struct tgsi_full_src_register bgra_src
=
3309 swizzle_src(src
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_X
, TGSI_SWIZZLE_W
);
3311 begin_emit_instruction(emit
);
3312 emit_opcode(emit
, VGPU10_OPCODE_MOV
, FALSE
);
3313 emit_dst_register(emit
, dst
);
3314 emit_src_register(emit
, &bgra_src
);
3315 end_emit_instruction(emit
);
3319 /** Convert from 10_10_10_2 normalized to 10_10_10_2_snorm */
3321 emit_puint_to_snorm(struct svga_shader_emitter_v10
*emit
,
3322 const struct tgsi_full_dst_register
*dst
,
3323 const struct tgsi_full_src_register
*src
)
3325 struct tgsi_full_src_register half
= make_immediate_reg_float(emit
, 0.5f
);
3326 struct tgsi_full_src_register two
=
3327 make_immediate_reg_float4(emit
, 2.0f
, 2.0f
, 2.0f
, 3.0f
);
3328 struct tgsi_full_src_register neg_two
=
3329 make_immediate_reg_float4(emit
, -2.0f
, -2.0f
, -2.0f
, -1.66666f
);
3331 unsigned val_tmp
= get_temp_index(emit
);
3332 struct tgsi_full_dst_register val_dst
= make_dst_temp_reg(val_tmp
);
3333 struct tgsi_full_src_register val_src
= make_src_temp_reg(val_tmp
);
3335 unsigned bias_tmp
= get_temp_index(emit
);
3336 struct tgsi_full_dst_register bias_dst
= make_dst_temp_reg(bias_tmp
);
3337 struct tgsi_full_src_register bias_src
= make_src_temp_reg(bias_tmp
);
3339 /* val = src * 2.0 */
3340 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &val_dst
,
3343 /* bias = src > 0.5 */
3344 emit_instruction_op2(emit
, VGPU10_OPCODE_GE
, &bias_dst
,
3347 /* bias = bias & -2.0 */
3348 emit_instruction_op2(emit
, VGPU10_OPCODE_AND
, &bias_dst
,
3349 &bias_src
, &neg_two
, FALSE
);
3351 /* dst = val + bias */
3352 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, dst
,
3353 &val_src
, &bias_src
, FALSE
);
3355 free_temp_indexes(emit
);
3359 /** Convert from 10_10_10_2_unorm to 10_10_10_2_uscaled */
3361 emit_puint_to_uscaled(struct svga_shader_emitter_v10
*emit
,
3362 const struct tgsi_full_dst_register
*dst
,
3363 const struct tgsi_full_src_register
*src
)
3365 struct tgsi_full_src_register scale
=
3366 make_immediate_reg_float4(emit
, 1023.0f
, 1023.0f
, 1023.0f
, 3.0f
);
3368 /* dst = src * scale */
3369 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, dst
, src
, &scale
, FALSE
);
3373 /** Convert from R32_UINT to 10_10_10_2_sscaled */
3375 emit_puint_to_sscaled(struct svga_shader_emitter_v10
*emit
,
3376 const struct tgsi_full_dst_register
*dst
,
3377 const struct tgsi_full_src_register
*src
)
3379 struct tgsi_full_src_register lshift
=
3380 make_immediate_reg_int4(emit
, 22, 12, 2, 0);
3381 struct tgsi_full_src_register rshift
=
3382 make_immediate_reg_int4(emit
, 22, 22, 22, 30);
3384 struct tgsi_full_src_register src_xxxx
= scalar_src(src
, TGSI_SWIZZLE_X
);
3386 unsigned tmp
= get_temp_index(emit
);
3387 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3388 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3391 * r = (pixel << 22) >> 22; # signed int in [511, -512]
3392 * g = (pixel << 12) >> 22; # signed int in [511, -512]
3393 * b = (pixel << 2) >> 22; # signed int in [511, -512]
3394 * a = (pixel << 0) >> 30; # signed int in [1, -2]
3395 * dst = i_to_f(r,g,b,a); # convert to float
3397 emit_instruction_op2(emit
, VGPU10_OPCODE_ISHL
, &tmp_dst
,
3398 &src_xxxx
, &lshift
, FALSE
);
3399 emit_instruction_op2(emit
, VGPU10_OPCODE_ISHR
, &tmp_dst
,
3400 &tmp_src
, &rshift
, FALSE
);
3401 emit_instruction_op1(emit
, VGPU10_OPCODE_ITOF
, dst
, &tmp_src
, FALSE
);
3403 free_temp_indexes(emit
);
3408 * Emit code for TGSI_OPCODE_ABS instruction.
3411 emit_abs(struct svga_shader_emitter_v10
*emit
,
3412 const struct tgsi_full_instruction
*inst
)
3419 struct tgsi_full_src_register abs_src0
= absolute_src(&inst
->Src
[0]);
3421 /* MOV dst, abs(s0) */
3422 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0],
3423 &abs_src0
, inst
->Instruction
.Saturate
);
3430 * Emit code for TGSI_OPCODE_ARL or TGSI_OPCODE_UARL instruction.
3433 emit_arl_uarl(struct svga_shader_emitter_v10
*emit
,
3434 const struct tgsi_full_instruction
*inst
)
3436 unsigned index
= inst
->Dst
[0].Register
.Index
;
3437 struct tgsi_full_dst_register dst
;
3440 assert(index
< MAX_VGPU10_ADDR_REGS
);
3441 dst
= make_dst_temp_reg(emit
->address_reg_index
[index
]);
3445 * FTOI address_tmp, s0
3449 * MOV address_tmp, s0
3451 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_ARL
)
3452 opcode
= VGPU10_OPCODE_FTOI
;
3454 opcode
= VGPU10_OPCODE_MOV
;
3456 emit_instruction_op1(emit
, opcode
, &dst
, &inst
->Src
[0], FALSE
);
3463 * Emit code for TGSI_OPCODE_CAL instruction.
3466 emit_cal(struct svga_shader_emitter_v10
*emit
,
3467 const struct tgsi_full_instruction
*inst
)
3469 unsigned label
= inst
->Label
.Label
;
3470 VGPU10OperandToken0 operand
;
3472 operand
.operandType
= VGPU10_OPERAND_TYPE_LABEL
;
3474 begin_emit_instruction(emit
);
3475 emit_dword(emit
, operand
.value
);
3476 emit_dword(emit
, label
);
3477 end_emit_instruction(emit
);
3484 * Emit code for TGSI_OPCODE_IABS instruction.
3487 emit_iabs(struct svga_shader_emitter_v10
*emit
,
3488 const struct tgsi_full_instruction
*inst
)
3490 /* dst.x = (src0.x < 0) ? -src0.x : src0.x
3491 * dst.y = (src0.y < 0) ? -src0.y : src0.y
3492 * dst.z = (src0.z < 0) ? -src0.z : src0.z
3493 * dst.w = (src0.w < 0) ? -src0.w : src0.w
3496 * IMAX dst, src, neg(src)
3498 struct tgsi_full_src_register neg_src
= negate_src(&inst
->Src
[0]);
3499 emit_instruction_op2(emit
, VGPU10_OPCODE_IMAX
, &inst
->Dst
[0],
3500 &inst
->Src
[0], &neg_src
, FALSE
);
3507 * Emit code for TGSI_OPCODE_CMP instruction.
3510 emit_cmp(struct svga_shader_emitter_v10
*emit
,
3511 const struct tgsi_full_instruction
*inst
)
3513 /* dst.x = (src0.x < 0) ? src1.x : src2.x
3514 * dst.y = (src0.y < 0) ? src1.y : src2.y
3515 * dst.z = (src0.z < 0) ? src1.z : src2.z
3516 * dst.w = (src0.w < 0) ? src1.w : src2.w
3520 * MOVC dst, tmp, src1, src2
3522 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
3523 unsigned tmp
= get_temp_index(emit
);
3524 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3525 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3527 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
,
3528 &inst
->Src
[0], &zero
, FALSE
);
3529 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0],
3530 &tmp_src
, &inst
->Src
[1], &inst
->Src
[2],
3531 inst
->Instruction
.Saturate
);
3533 free_temp_indexes(emit
);
3540 * Emit code for TGSI_OPCODE_DP2A instruction.
3543 emit_dp2a(struct svga_shader_emitter_v10
*emit
,
3544 const struct tgsi_full_instruction
*inst
)
3546 /* dst.x = src0.x * src1.x + src0.y * src1.y + src2.x
3547 * dst.y = src0.x * src1.x + src0.y * src1.y + src2.x
3548 * dst.z = src0.x * src1.x + src0.y * src1.y + src2.x
3549 * dst.w = src0.x * src1.x + src0.y * src1.y + src2.x
3551 * MAD tmp.x, s0.y, s1.y, s2.x
3552 * MAD tmp.x, s0.x, s1.x, tmp.x
3553 * MOV dst.xyzw, tmp.xxxx
3555 unsigned tmp
= get_temp_index(emit
);
3556 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3557 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3559 struct tgsi_full_src_register tmp_src_xxxx
=
3560 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
3561 struct tgsi_full_dst_register tmp_dst_x
=
3562 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
3564 struct tgsi_full_src_register src0_xxxx
=
3565 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_X
);
3566 struct tgsi_full_src_register src0_yyyy
=
3567 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Y
);
3568 struct tgsi_full_src_register src1_xxxx
=
3569 scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_X
);
3570 struct tgsi_full_src_register src1_yyyy
=
3571 scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_Y
);
3572 struct tgsi_full_src_register src2_xxxx
=
3573 scalar_src(&inst
->Src
[2], TGSI_SWIZZLE_X
);
3575 emit_instruction_op3(emit
, VGPU10_OPCODE_MAD
, &tmp_dst_x
, &src0_yyyy
,
3576 &src1_yyyy
, &src2_xxxx
, FALSE
);
3577 emit_instruction_op3(emit
, VGPU10_OPCODE_MAD
, &tmp_dst_x
, &src0_xxxx
,
3578 &src1_xxxx
, &tmp_src_xxxx
, FALSE
);
3579 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0],
3580 &tmp_src_xxxx
, inst
->Instruction
.Saturate
);
3582 free_temp_indexes(emit
);
3589 * Emit code for TGSI_OPCODE_DPH instruction.
3592 emit_dph(struct svga_shader_emitter_v10
*emit
,
3593 const struct tgsi_full_instruction
*inst
)
3597 * ADD dst, tmp, s1.wwww
3600 struct tgsi_full_src_register s1_wwww
=
3601 swizzle_src(&inst
->Src
[1], TGSI_SWIZZLE_W
, TGSI_SWIZZLE_W
,
3602 TGSI_SWIZZLE_W
, TGSI_SWIZZLE_W
);
3604 unsigned tmp
= get_temp_index(emit
);
3605 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3606 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3608 /* DP3 tmp, s0, s1 */
3609 emit_instruction_op2(emit
, VGPU10_OPCODE_DP3
, &tmp_dst
, &inst
->Src
[0],
3610 &inst
->Src
[1], FALSE
);
3612 /* ADD dst, tmp, s1.wwww */
3613 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &inst
->Dst
[0], &tmp_src
,
3614 &s1_wwww
, inst
->Instruction
.Saturate
);
3616 free_temp_indexes(emit
);
3623 * Emit code for TGSI_OPCODE_DST instruction.
3626 emit_dst(struct svga_shader_emitter_v10
*emit
,
3627 const struct tgsi_full_instruction
*inst
)
3631 * dst.y = src0.y * src1.y
3636 struct tgsi_full_src_register s0_yyyy
=
3637 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Y
);
3638 struct tgsi_full_src_register s0_zzzz
=
3639 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Z
);
3640 struct tgsi_full_src_register s1_yyyy
=
3641 scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_Y
);
3642 struct tgsi_full_src_register s1_wwww
=
3643 scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_W
);
3646 * If dst and either src0 and src1 are the same we need
3647 * to create a temporary for it and insert a extra move.
3649 unsigned tmp_move
= get_temp_index(emit
);
3650 struct tgsi_full_src_register move_src
= make_src_temp_reg(tmp_move
);
3651 struct tgsi_full_dst_register move_dst
= make_dst_temp_reg(tmp_move
);
3653 /* MOV dst.x, 1.0 */
3654 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
3655 struct tgsi_full_dst_register dst_x
=
3656 writemask_dst(&move_dst
, TGSI_WRITEMASK_X
);
3657 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
3659 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_x
, &one
, FALSE
);
3662 /* MUL dst.y, s0.y, s1.y */
3663 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
3664 struct tgsi_full_dst_register dst_y
=
3665 writemask_dst(&move_dst
, TGSI_WRITEMASK_Y
);
3667 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &dst_y
, &s0_yyyy
,
3668 &s1_yyyy
, inst
->Instruction
.Saturate
);
3671 /* MOV dst.z, s0.z */
3672 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
3673 struct tgsi_full_dst_register dst_z
=
3674 writemask_dst(&move_dst
, TGSI_WRITEMASK_Z
);
3676 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_z
, &s0_zzzz
,
3677 inst
->Instruction
.Saturate
);
3680 /* MOV dst.w, s1.w */
3681 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
3682 struct tgsi_full_dst_register dst_w
=
3683 writemask_dst(&move_dst
, TGSI_WRITEMASK_W
);
3685 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &s1_wwww
,
3686 inst
->Instruction
.Saturate
);
3689 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &move_src
,
3691 free_temp_indexes(emit
);
3699 * Emit code for TGSI_OPCODE_ENDPRIM (GS only)
3702 emit_endprim(struct svga_shader_emitter_v10
*emit
,
3703 const struct tgsi_full_instruction
*inst
)
3705 assert(emit
->unit
== PIPE_SHADER_GEOMETRY
);
3707 /* We can't use emit_simple() because the TGSI instruction has one
3708 * operand (vertex stream number) which we must ignore for VGPU10.
3710 begin_emit_instruction(emit
);
3711 emit_opcode(emit
, VGPU10_OPCODE_CUT
, FALSE
);
3712 end_emit_instruction(emit
);
3718 * Emit code for TGSI_OPCODE_EX2 (2^x) instruction.
3721 emit_ex2(struct svga_shader_emitter_v10
*emit
,
3722 const struct tgsi_full_instruction
*inst
)
3724 /* Note that TGSI_OPCODE_EX2 computes only one value from src.x
3725 * while VGPU10 computes four values.
3728 * dst.xyzw = 2.0 ^ src.x
3731 struct tgsi_full_src_register src_xxxx
=
3732 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
3733 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
3735 /* EXP tmp, s0.xxxx */
3736 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &inst
->Dst
[0], &src_xxxx
,
3737 inst
->Instruction
.Saturate
);
3744 * Emit code for TGSI_OPCODE_EXP instruction.
3747 emit_exp(struct svga_shader_emitter_v10
*emit
,
3748 const struct tgsi_full_instruction
*inst
)
3751 * dst.x = 2 ^ floor(s0.x)
3752 * dst.y = s0.x - floor(s0.x)
3757 struct tgsi_full_src_register src_xxxx
=
3758 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_X
);
3759 unsigned tmp
= get_temp_index(emit
);
3760 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3761 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3764 * If dst and src are the same we need to create
3765 * a temporary for it and insert a extra move.
3767 unsigned tmp_move
= get_temp_index(emit
);
3768 struct tgsi_full_src_register move_src
= make_src_temp_reg(tmp_move
);
3769 struct tgsi_full_dst_register move_dst
= make_dst_temp_reg(tmp_move
);
3771 /* only use X component of temp reg */
3772 tmp_dst
= writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
3773 tmp_src
= scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
3775 /* ROUND_NI tmp.x, s0.x */
3776 emit_instruction_op1(emit
, VGPU10_OPCODE_ROUND_NI
, &tmp_dst
,
3777 &src_xxxx
, FALSE
); /* round to -infinity */
3779 /* EXP dst.x, tmp.x */
3780 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
3781 struct tgsi_full_dst_register dst_x
=
3782 writemask_dst(&move_dst
, TGSI_WRITEMASK_X
);
3784 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &dst_x
, &tmp_src
,
3785 inst
->Instruction
.Saturate
);
3788 /* ADD dst.y, s0.x, -tmp */
3789 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
3790 struct tgsi_full_dst_register dst_y
=
3791 writemask_dst(&move_dst
, TGSI_WRITEMASK_Y
);
3792 struct tgsi_full_src_register neg_tmp_src
= negate_src(&tmp_src
);
3794 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &dst_y
, &src_xxxx
,
3795 &neg_tmp_src
, inst
->Instruction
.Saturate
);
3798 /* EXP dst.z, s0.x */
3799 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
3800 struct tgsi_full_dst_register dst_z
=
3801 writemask_dst(&move_dst
, TGSI_WRITEMASK_Z
);
3803 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &dst_z
, &src_xxxx
,
3804 inst
->Instruction
.Saturate
);
3807 /* MOV dst.w, 1.0 */
3808 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
3809 struct tgsi_full_dst_register dst_w
=
3810 writemask_dst(&move_dst
, TGSI_WRITEMASK_W
);
3811 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
3813 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &one
,
3817 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &move_src
,
3820 free_temp_indexes(emit
);
3827 * Emit code for TGSI_OPCODE_IF instruction.
3830 emit_if(struct svga_shader_emitter_v10
*emit
,
3831 const struct tgsi_full_instruction
*inst
)
3833 VGPU10OpcodeToken0 opcode0
;
3835 /* The src register should be a scalar */
3836 assert(inst
->Src
[0].Register
.SwizzleX
== inst
->Src
[0].Register
.SwizzleY
&&
3837 inst
->Src
[0].Register
.SwizzleX
== inst
->Src
[0].Register
.SwizzleZ
&&
3838 inst
->Src
[0].Register
.SwizzleX
== inst
->Src
[0].Register
.SwizzleW
);
3840 /* The only special thing here is that we need to set the
3841 * VGPU10_INSTRUCTION_TEST_NONZERO flag since we want to test if
3842 * src.x is non-zero.
3845 opcode0
.opcodeType
= VGPU10_OPCODE_IF
;
3846 opcode0
.testBoolean
= VGPU10_INSTRUCTION_TEST_NONZERO
;
3848 begin_emit_instruction(emit
);
3849 emit_dword(emit
, opcode0
.value
);
3850 emit_src_register(emit
, &inst
->Src
[0]);
3851 end_emit_instruction(emit
);
3858 * Emit code for TGSI_OPCODE_KILL_IF instruction (kill fragment if any of
3859 * the register components are negative).
3862 emit_kill_if(struct svga_shader_emitter_v10
*emit
,
3863 const struct tgsi_full_instruction
*inst
)
3865 unsigned tmp
= get_temp_index(emit
);
3866 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
3867 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
3869 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
3871 struct tgsi_full_dst_register tmp_dst_x
=
3872 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
3873 struct tgsi_full_src_register tmp_src_xxxx
=
3874 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
3876 /* tmp = src[0] < 0.0 */
3877 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
, &inst
->Src
[0],
3880 if (!same_swizzle_terms(&inst
->Src
[0])) {
3881 /* If the swizzle is not XXXX, YYYY, ZZZZ or WWWW we need to
3882 * logically OR the swizzle terms. Most uses of KILL_IF only
3883 * test one channel so it's good to avoid these extra steps.
3885 struct tgsi_full_src_register tmp_src_yyyy
=
3886 scalar_src(&tmp_src
, TGSI_SWIZZLE_Y
);
3887 struct tgsi_full_src_register tmp_src_zzzz
=
3888 scalar_src(&tmp_src
, TGSI_SWIZZLE_Z
);
3889 struct tgsi_full_src_register tmp_src_wwww
=
3890 scalar_src(&tmp_src
, TGSI_SWIZZLE_W
);
3892 emit_instruction_op2(emit
, VGPU10_OPCODE_OR
, &tmp_dst_x
, &tmp_src_xxxx
,
3893 &tmp_src_yyyy
, FALSE
);
3894 emit_instruction_op2(emit
, VGPU10_OPCODE_OR
, &tmp_dst_x
, &tmp_src_xxxx
,
3895 &tmp_src_zzzz
, FALSE
);
3896 emit_instruction_op2(emit
, VGPU10_OPCODE_OR
, &tmp_dst_x
, &tmp_src_xxxx
,
3897 &tmp_src_wwww
, FALSE
);
3900 begin_emit_instruction(emit
);
3901 emit_discard_opcode(emit
, TRUE
); /* discard if src0.x is non-zero */
3902 emit_src_register(emit
, &tmp_src_xxxx
);
3903 end_emit_instruction(emit
);
3905 free_temp_indexes(emit
);
3912 * Emit code for TGSI_OPCODE_KILL instruction (unconditional discard).
3915 emit_kill(struct svga_shader_emitter_v10
*emit
,
3916 const struct tgsi_full_instruction
*inst
)
3918 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
3920 /* DISCARD if 0.0 is zero */
3921 begin_emit_instruction(emit
);
3922 emit_discard_opcode(emit
, FALSE
);
3923 emit_src_register(emit
, &zero
);
3924 end_emit_instruction(emit
);
3931 * Emit code for TGSI_OPCODE_LG2 instruction.
3934 emit_lg2(struct svga_shader_emitter_v10
*emit
,
3935 const struct tgsi_full_instruction
*inst
)
3937 /* Note that TGSI_OPCODE_LG2 computes only one value from src.x
3938 * while VGPU10 computes four values.
3941 * dst.xyzw = log2(src.x)
3944 struct tgsi_full_src_register src_xxxx
=
3945 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
3946 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
3948 /* LOG tmp, s0.xxxx */
3949 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &inst
->Dst
[0], &src_xxxx
,
3950 inst
->Instruction
.Saturate
);
3957 * Emit code for TGSI_OPCODE_LIT instruction.
3960 emit_lit(struct svga_shader_emitter_v10
*emit
,
3961 const struct tgsi_full_instruction
*inst
)
3963 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
3966 * If dst and src are the same we need to create
3967 * a temporary for it and insert a extra move.
3969 unsigned tmp_move
= get_temp_index(emit
);
3970 struct tgsi_full_src_register move_src
= make_src_temp_reg(tmp_move
);
3971 struct tgsi_full_dst_register move_dst
= make_dst_temp_reg(tmp_move
);
3975 * dst.y = max(src.x, 0)
3976 * dst.z = (src.x > 0) ? max(src.y, 0)^{clamp(src.w, -128, 128))} : 0
3980 /* MOV dst.x, 1.0 */
3981 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
3982 struct tgsi_full_dst_register dst_x
=
3983 writemask_dst(&move_dst
, TGSI_WRITEMASK_X
);
3984 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_x
, &one
, FALSE
);
3987 /* MOV dst.w, 1.0 */
3988 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
3989 struct tgsi_full_dst_register dst_w
=
3990 writemask_dst(&move_dst
, TGSI_WRITEMASK_W
);
3991 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &one
, FALSE
);
3994 /* MAX dst.y, src.x, 0.0 */
3995 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
3996 struct tgsi_full_dst_register dst_y
=
3997 writemask_dst(&move_dst
, TGSI_WRITEMASK_Y
);
3998 struct tgsi_full_src_register zero
=
3999 make_immediate_reg_float(emit
, 0.0f
);
4000 struct tgsi_full_src_register src_xxxx
=
4001 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
4002 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
4004 emit_instruction_op2(emit
, VGPU10_OPCODE_MAX
, &dst_y
, &src_xxxx
,
4005 &zero
, inst
->Instruction
.Saturate
);
4009 * tmp1 = clamp(src.w, -128, 128);
4010 * MAX tmp1, src.w, -128
4011 * MIN tmp1, tmp1, 128
4013 * tmp2 = max(tmp2, 0);
4014 * MAX tmp2, src.y, 0
4016 * tmp1 = pow(tmp2, tmp1);
4018 * MUL tmp1, tmp2, tmp1
4021 * tmp1 = (src.w == 0) ? 1 : tmp1;
4023 * MOVC tmp1, tmp2, 1.0, tmp1
4025 * dst.z = (0 < src.x) ? tmp1 : 0;
4027 * MOVC dst.z, tmp2, tmp1, 0.0
4029 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
4030 struct tgsi_full_dst_register dst_z
=
4031 writemask_dst(&move_dst
, TGSI_WRITEMASK_Z
);
4033 unsigned tmp1
= get_temp_index(emit
);
4034 struct tgsi_full_src_register tmp1_src
= make_src_temp_reg(tmp1
);
4035 struct tgsi_full_dst_register tmp1_dst
= make_dst_temp_reg(tmp1
);
4036 unsigned tmp2
= get_temp_index(emit
);
4037 struct tgsi_full_src_register tmp2_src
= make_src_temp_reg(tmp2
);
4038 struct tgsi_full_dst_register tmp2_dst
= make_dst_temp_reg(tmp2
);
4040 struct tgsi_full_src_register src_xxxx
=
4041 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_X
);
4042 struct tgsi_full_src_register src_yyyy
=
4043 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Y
);
4044 struct tgsi_full_src_register src_wwww
=
4045 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_W
);
4047 struct tgsi_full_src_register zero
=
4048 make_immediate_reg_float(emit
, 0.0f
);
4049 struct tgsi_full_src_register lowerbound
=
4050 make_immediate_reg_float(emit
, -128.0f
);
4051 struct tgsi_full_src_register upperbound
=
4052 make_immediate_reg_float(emit
, 128.0f
);
4054 emit_instruction_op2(emit
, VGPU10_OPCODE_MAX
, &tmp1_dst
, &src_wwww
,
4055 &lowerbound
, FALSE
);
4056 emit_instruction_op2(emit
, VGPU10_OPCODE_MIN
, &tmp1_dst
, &tmp1_src
,
4057 &upperbound
, FALSE
);
4058 emit_instruction_op2(emit
, VGPU10_OPCODE_MAX
, &tmp2_dst
, &src_yyyy
,
4061 /* POW tmp1, tmp2, tmp1 */
4062 /* LOG tmp2, tmp2 */
4063 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &tmp2_dst
, &tmp2_src
,
4066 /* MUL tmp1, tmp2, tmp1 */
4067 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp1_dst
, &tmp2_src
,
4070 /* EXP tmp1, tmp1 */
4071 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &tmp1_dst
, &tmp1_src
,
4074 /* EQ tmp2, 0, src.w */
4075 emit_instruction_op2(emit
, VGPU10_OPCODE_EQ
, &tmp2_dst
, &zero
,
4077 /* MOVC tmp1.z, tmp2, tmp1, 1.0 */
4078 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &tmp1_dst
,
4079 &tmp2_src
, &one
, &tmp1_src
, FALSE
);
4081 /* LT tmp2, 0, src.x */
4082 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp2_dst
, &zero
,
4084 /* MOVC dst.z, tmp2, tmp1, 0.0 */
4085 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &dst_z
,
4086 &tmp2_src
, &tmp1_src
, &zero
, FALSE
);
4089 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &move_src
,
4091 free_temp_indexes(emit
);
4098 * Emit code for TGSI_OPCODE_LOG instruction.
4101 emit_log(struct svga_shader_emitter_v10
*emit
,
4102 const struct tgsi_full_instruction
*inst
)
4105 * dst.x = floor(lg2(abs(s0.x)))
4106 * dst.y = abs(s0.x) / (2 ^ floor(lg2(abs(s0.x))))
4107 * dst.z = lg2(abs(s0.x))
4111 struct tgsi_full_src_register src_xxxx
=
4112 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_X
);
4113 unsigned tmp
= get_temp_index(emit
);
4114 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4115 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4116 struct tgsi_full_src_register abs_src_xxxx
= absolute_src(&src_xxxx
);
4118 /* only use X component of temp reg */
4119 tmp_dst
= writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4120 tmp_src
= scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4122 /* LOG tmp.x, abs(s0.x) */
4123 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_XYZ
) {
4124 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &tmp_dst
,
4125 &abs_src_xxxx
, FALSE
);
4128 /* MOV dst.z, tmp.x */
4129 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
4130 struct tgsi_full_dst_register dst_z
=
4131 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_Z
);
4133 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_z
,
4134 &tmp_src
, inst
->Instruction
.Saturate
);
4137 /* FLR tmp.x, tmp.x */
4138 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_XY
) {
4139 emit_instruction_op1(emit
, VGPU10_OPCODE_ROUND_NI
, &tmp_dst
,
4143 /* MOV dst.x, tmp.x */
4144 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
4145 struct tgsi_full_dst_register dst_x
=
4146 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_X
);
4148 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_x
, &tmp_src
,
4149 inst
->Instruction
.Saturate
);
4152 /* EXP tmp.x, tmp.x */
4153 /* DIV dst.y, abs(s0.x), tmp.x */
4154 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
4155 struct tgsi_full_dst_register dst_y
=
4156 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_Y
);
4158 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &tmp_dst
, &tmp_src
,
4160 emit_instruction_op2(emit
, VGPU10_OPCODE_DIV
, &dst_y
, &abs_src_xxxx
,
4161 &tmp_src
, inst
->Instruction
.Saturate
);
4164 /* MOV dst.w, 1.0 */
4165 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
4166 struct tgsi_full_dst_register dst_w
=
4167 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_W
);
4168 struct tgsi_full_src_register one
=
4169 make_immediate_reg_float(emit
, 1.0f
);
4171 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst_w
, &one
, FALSE
);
4174 free_temp_indexes(emit
);
4181 * Emit code for TGSI_OPCODE_LRP instruction.
4184 emit_lrp(struct svga_shader_emitter_v10
*emit
,
4185 const struct tgsi_full_instruction
*inst
)
4187 /* dst = LRP(s0, s1, s2):
4188 * dst = s0 * (s1 - s2) + s2
4190 * SUB tmp, s1, s2; tmp = s1 - s2
4191 * MAD dst, s0, tmp, s2; dst = s0 * t1 + s2
4193 unsigned tmp
= get_temp_index(emit
);
4194 struct tgsi_full_src_register src_tmp
= make_src_temp_reg(tmp
);
4195 struct tgsi_full_dst_register dst_tmp
= make_dst_temp_reg(tmp
);
4196 struct tgsi_full_src_register neg_src2
= negate_src(&inst
->Src
[2]);
4198 /* ADD tmp, s1, -s2 */
4199 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &dst_tmp
,
4200 &inst
->Src
[1], &neg_src2
, FALSE
);
4202 /* MAD dst, s1, tmp, s3 */
4203 emit_instruction_op3(emit
, VGPU10_OPCODE_MAD
, &inst
->Dst
[0],
4204 &inst
->Src
[0], &src_tmp
, &inst
->Src
[2],
4205 inst
->Instruction
.Saturate
);
4207 free_temp_indexes(emit
);
4214 * Emit code for TGSI_OPCODE_POW instruction.
4217 emit_pow(struct svga_shader_emitter_v10
*emit
,
4218 const struct tgsi_full_instruction
*inst
)
4220 /* Note that TGSI_OPCODE_POW computes only one value from src0.x and
4221 * src1.x while VGPU10 computes four values.
4223 * dst = POW(src0, src1):
4224 * dst.xyzw = src0.x ^ src1.x
4226 unsigned tmp
= get_temp_index(emit
);
4227 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4228 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4229 struct tgsi_full_src_register src0_xxxx
=
4230 swizzle_src(&inst
->Src
[0], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
4231 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
4232 struct tgsi_full_src_register src1_xxxx
=
4233 swizzle_src(&inst
->Src
[1], TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
,
4234 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_X
);
4236 /* LOG tmp, s0.xxxx */
4237 emit_instruction_op1(emit
, VGPU10_OPCODE_LOG
, &tmp_dst
, &src0_xxxx
,
4240 /* MUL tmp, tmp, s1.xxxx */
4241 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp_dst
, &tmp_src
,
4244 /* EXP tmp, s0.xxxx */
4245 emit_instruction_op1(emit
, VGPU10_OPCODE_EXP
, &inst
->Dst
[0],
4246 &tmp_src
, inst
->Instruction
.Saturate
);
4249 free_temp_indexes(emit
);
4256 * Emit code for TGSI_OPCODE_RCP (reciprocal) instruction.
4259 emit_rcp(struct svga_shader_emitter_v10
*emit
,
4260 const struct tgsi_full_instruction
*inst
)
4262 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4264 unsigned tmp
= get_temp_index(emit
);
4265 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4266 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4268 struct tgsi_full_dst_register tmp_dst_x
=
4269 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4270 struct tgsi_full_src_register tmp_src_xxxx
=
4271 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4273 /* DIV tmp.x, 1.0, s0 */
4274 emit_instruction_op2(emit
, VGPU10_OPCODE_DIV
, &tmp_dst_x
, &one
,
4275 &inst
->Src
[0], FALSE
);
4277 /* MOV dst, tmp.xxxx */
4278 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0],
4279 &tmp_src_xxxx
, inst
->Instruction
.Saturate
);
4281 free_temp_indexes(emit
);
4288 * Emit code for TGSI_OPCODE_RSQ instruction.
4291 emit_rsq(struct svga_shader_emitter_v10
*emit
,
4292 const struct tgsi_full_instruction
*inst
)
4295 * dst.xyzw = 1 / sqrt(src.x)
4301 unsigned tmp
= get_temp_index(emit
);
4302 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4303 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4305 struct tgsi_full_dst_register tmp_dst_x
=
4306 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4307 struct tgsi_full_src_register tmp_src_xxxx
=
4308 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4310 /* RSQ tmp, src.x */
4311 emit_instruction_op1(emit
, VGPU10_OPCODE_RSQ
, &tmp_dst_x
,
4312 &inst
->Src
[0], FALSE
);
4314 /* MOV dst, tmp.xxxx */
4315 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0],
4316 &tmp_src_xxxx
, inst
->Instruction
.Saturate
);
4319 free_temp_indexes(emit
);
4326 * Emit code for TGSI_OPCODE_SCS instruction.
4329 emit_scs(struct svga_shader_emitter_v10
*emit
,
4330 const struct tgsi_full_instruction
*inst
)
4332 /* dst.x = cos(src.x)
4333 * dst.y = sin(src.x)
4337 struct tgsi_full_dst_register dst_x
=
4338 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_X
);
4339 struct tgsi_full_dst_register dst_y
=
4340 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_Y
);
4341 struct tgsi_full_dst_register dst_zw
=
4342 writemask_dst(&inst
->Dst
[0], TGSI_WRITEMASK_ZW
);
4344 struct tgsi_full_src_register zero_one
=
4345 make_immediate_reg_float4(emit
, 0.0f
, 0.0f
, 0.0f
, 1.0f
);
4347 begin_emit_instruction(emit
);
4348 emit_opcode(emit
, VGPU10_OPCODE_SINCOS
, inst
->Instruction
.Saturate
);
4349 emit_dst_register(emit
, &dst_y
);
4350 emit_dst_register(emit
, &dst_x
);
4351 emit_src_register(emit
, &inst
->Src
[0]);
4352 end_emit_instruction(emit
);
4354 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
4355 &dst_zw
, &zero_one
, inst
->Instruction
.Saturate
);
4362 * Emit code for TGSI_OPCODE_SEQ (Set Equal) instruction.
4365 emit_seq(struct svga_shader_emitter_v10
*emit
,
4366 const struct tgsi_full_instruction
*inst
)
4368 /* dst = SEQ(s0, s1):
4369 * dst = s0 == s1 ? 1.0 : 0.0 (per component)
4371 * EQ tmp, s0, s1; tmp = s0 == s1 : 0xffffffff : 0 (per comp)
4372 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4374 unsigned tmp
= get_temp_index(emit
);
4375 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4376 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4377 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4378 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4380 /* EQ tmp, s0, s1 */
4381 emit_instruction_op2(emit
, VGPU10_OPCODE_EQ
, &tmp_dst
, &inst
->Src
[0],
4382 &inst
->Src
[1], FALSE
);
4384 /* MOVC dst, tmp, one, zero */
4385 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4386 &one
, &zero
, FALSE
);
4388 free_temp_indexes(emit
);
4395 * Emit code for TGSI_OPCODE_SGE (Set Greater than or Equal) instruction.
4398 emit_sge(struct svga_shader_emitter_v10
*emit
,
4399 const struct tgsi_full_instruction
*inst
)
4401 /* dst = SGE(s0, s1):
4402 * dst = s0 >= s1 ? 1.0 : 0.0 (per component)
4404 * GE tmp, s0, s1; tmp = s0 >= s1 : 0xffffffff : 0 (per comp)
4405 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4407 unsigned tmp
= get_temp_index(emit
);
4408 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4409 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4410 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4411 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4413 /* GE tmp, s0, s1 */
4414 emit_instruction_op2(emit
, VGPU10_OPCODE_GE
, &tmp_dst
, &inst
->Src
[0],
4415 &inst
->Src
[1], FALSE
);
4417 /* MOVC dst, tmp, one, zero */
4418 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4419 &one
, &zero
, FALSE
);
4421 free_temp_indexes(emit
);
4428 * Emit code for TGSI_OPCODE_SGT (Set Greater than) instruction.
4431 emit_sgt(struct svga_shader_emitter_v10
*emit
,
4432 const struct tgsi_full_instruction
*inst
)
4434 /* dst = SGT(s0, s1):
4435 * dst = s0 > s1 ? 1.0 : 0.0 (per component)
4437 * LT tmp, s1, s0; tmp = s1 < s0 ? 0xffffffff : 0 (per comp)
4438 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4440 unsigned tmp
= get_temp_index(emit
);
4441 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4442 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4443 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4444 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4446 /* LT tmp, s1, s0 */
4447 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
, &inst
->Src
[1],
4448 &inst
->Src
[0], FALSE
);
4450 /* MOVC dst, tmp, one, zero */
4451 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4452 &one
, &zero
, FALSE
);
4454 free_temp_indexes(emit
);
4461 * Emit code for TGSI_OPCODE_SIN and TGSI_OPCODE_COS instructions.
4464 emit_sincos(struct svga_shader_emitter_v10
*emit
,
4465 const struct tgsi_full_instruction
*inst
)
4467 unsigned tmp
= get_temp_index(emit
);
4468 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4469 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4471 struct tgsi_full_src_register tmp_src_xxxx
=
4472 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
4473 struct tgsi_full_dst_register tmp_dst_x
=
4474 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_X
);
4476 begin_emit_instruction(emit
);
4477 emit_opcode(emit
, VGPU10_OPCODE_SINCOS
, FALSE
);
4479 if(inst
->Instruction
.Opcode
== TGSI_OPCODE_SIN
)
4481 emit_dst_register(emit
, &tmp_dst_x
); /* first destination register */
4482 emit_null_dst_register(emit
); /* second destination register */
4485 emit_null_dst_register(emit
);
4486 emit_dst_register(emit
, &tmp_dst_x
);
4489 emit_src_register(emit
, &inst
->Src
[0]);
4490 end_emit_instruction(emit
);
4492 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0],
4493 &tmp_src_xxxx
, inst
->Instruction
.Saturate
);
4495 free_temp_indexes(emit
);
4502 * Emit code for TGSI_OPCODE_SLE (Set Less than or Equal) instruction.
4505 emit_sle(struct svga_shader_emitter_v10
*emit
,
4506 const struct tgsi_full_instruction
*inst
)
4508 /* dst = SLE(s0, s1):
4509 * dst = s0 <= s1 ? 1.0 : 0.0 (per component)
4511 * GE tmp, s1, s0; tmp = s1 >= s0 : 0xffffffff : 0 (per comp)
4512 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4514 unsigned tmp
= get_temp_index(emit
);
4515 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4516 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4517 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4518 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4520 /* GE tmp, s1, s0 */
4521 emit_instruction_op2(emit
, VGPU10_OPCODE_GE
, &tmp_dst
, &inst
->Src
[1],
4522 &inst
->Src
[0], FALSE
);
4524 /* MOVC dst, tmp, one, zero */
4525 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4526 &one
, &zero
, FALSE
);
4528 free_temp_indexes(emit
);
4535 * Emit code for TGSI_OPCODE_SLT (Set Less than) instruction.
4538 emit_slt(struct svga_shader_emitter_v10
*emit
,
4539 const struct tgsi_full_instruction
*inst
)
4541 /* dst = SLT(s0, s1):
4542 * dst = s0 < s1 ? 1.0 : 0.0 (per component)
4544 * LT tmp, s0, s1; tmp = s0 < s1 ? 0xffffffff : 0 (per comp)
4545 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4547 unsigned tmp
= get_temp_index(emit
);
4548 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4549 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4550 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4551 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4553 /* LT tmp, s0, s1 */
4554 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp_dst
, &inst
->Src
[0],
4555 &inst
->Src
[1], FALSE
);
4557 /* MOVC dst, tmp, one, zero */
4558 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4559 &one
, &zero
, FALSE
);
4561 free_temp_indexes(emit
);
4568 * Emit code for TGSI_OPCODE_SNE (Set Not Equal) instruction.
4571 emit_sne(struct svga_shader_emitter_v10
*emit
,
4572 const struct tgsi_full_instruction
*inst
)
4574 /* dst = SNE(s0, s1):
4575 * dst = s0 != s1 ? 1.0 : 0.0 (per component)
4577 * EQ tmp, s0, s1; tmp = s0 == s1 : 0xffffffff : 0 (per comp)
4578 * MOVC dst, tmp, 1.0, 0.0; dst = tmp ? 1.0 : 0.0 (per component)
4580 unsigned tmp
= get_temp_index(emit
);
4581 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4582 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4583 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4584 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
4586 /* NE tmp, s0, s1 */
4587 emit_instruction_op2(emit
, VGPU10_OPCODE_NE
, &tmp_dst
, &inst
->Src
[0],
4588 &inst
->Src
[1], FALSE
);
4590 /* MOVC dst, tmp, one, zero */
4591 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp_src
,
4592 &one
, &zero
, FALSE
);
4594 free_temp_indexes(emit
);
4601 * Emit code for TGSI_OPCODE_SSG (Set Sign) instruction.
4604 emit_ssg(struct svga_shader_emitter_v10
*emit
,
4605 const struct tgsi_full_instruction
*inst
)
4607 /* dst.x = (src.x > 0.0) ? 1.0 : (src.x < 0.0) ? -1.0 : 0.0
4608 * dst.y = (src.y > 0.0) ? 1.0 : (src.y < 0.0) ? -1.0 : 0.0
4609 * dst.z = (src.z > 0.0) ? 1.0 : (src.z < 0.0) ? -1.0 : 0.0
4610 * dst.w = (src.w > 0.0) ? 1.0 : (src.w < 0.0) ? -1.0 : 0.0
4612 * LT tmp1, src, zero; tmp1 = src < zero ? 0xffffffff : 0 (per comp)
4613 * MOVC tmp2, tmp1, -1.0, 0.0; tmp2 = tmp1 ? -1.0 : 0.0 (per component)
4614 * LT tmp1, zero, src; tmp1 = zero < src ? 0xffffffff : 0 (per comp)
4615 * MOVC dst, tmp1, 1.0, tmp2; dst = tmp1 ? 1.0 : tmp2 (per component)
4617 struct tgsi_full_src_register zero
=
4618 make_immediate_reg_float(emit
, 0.0f
);
4619 struct tgsi_full_src_register one
=
4620 make_immediate_reg_float(emit
, 1.0f
);
4621 struct tgsi_full_src_register neg_one
=
4622 make_immediate_reg_float(emit
, -1.0f
);
4624 unsigned tmp1
= get_temp_index(emit
);
4625 struct tgsi_full_src_register tmp1_src
= make_src_temp_reg(tmp1
);
4626 struct tgsi_full_dst_register tmp1_dst
= make_dst_temp_reg(tmp1
);
4628 unsigned tmp2
= get_temp_index(emit
);
4629 struct tgsi_full_src_register tmp2_src
= make_src_temp_reg(tmp2
);
4630 struct tgsi_full_dst_register tmp2_dst
= make_dst_temp_reg(tmp2
);
4632 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp1_dst
, &inst
->Src
[0],
4634 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &tmp2_dst
, &tmp1_src
,
4635 &neg_one
, &zero
, FALSE
);
4636 emit_instruction_op2(emit
, VGPU10_OPCODE_LT
, &tmp1_dst
, &zero
,
4637 &inst
->Src
[0], FALSE
);
4638 emit_instruction_op3(emit
, VGPU10_OPCODE_MOVC
, &inst
->Dst
[0], &tmp1_src
,
4639 &one
, &tmp2_src
, FALSE
);
4641 free_temp_indexes(emit
);
4648 * Emit code for TGSI_OPCODE_ISSG (Integer Set Sign) instruction.
4651 emit_issg(struct svga_shader_emitter_v10
*emit
,
4652 const struct tgsi_full_instruction
*inst
)
4654 /* dst.x = (src.x > 0) ? 1 : (src.x < 0) ? -1 : 0
4655 * dst.y = (src.y > 0) ? 1 : (src.y < 0) ? -1 : 0
4656 * dst.z = (src.z > 0) ? 1 : (src.z < 0) ? -1 : 0
4657 * dst.w = (src.w > 0) ? 1 : (src.w < 0) ? -1 : 0
4659 * ILT tmp1, src, 0 tmp1 = src < 0 ? -1 : 0 (per component)
4660 * ILT tmp2, 0, src tmp2 = 0 < src ? -1 : 0 (per component)
4661 * IADD dst, tmp1, neg(tmp2) dst = tmp1 - tmp2 (per component)
4663 struct tgsi_full_src_register zero
= make_immediate_reg_float(emit
, 0.0f
);
4665 unsigned tmp1
= get_temp_index(emit
);
4666 struct tgsi_full_src_register tmp1_src
= make_src_temp_reg(tmp1
);
4667 struct tgsi_full_dst_register tmp1_dst
= make_dst_temp_reg(tmp1
);
4669 unsigned tmp2
= get_temp_index(emit
);
4670 struct tgsi_full_src_register tmp2_src
= make_src_temp_reg(tmp2
);
4671 struct tgsi_full_dst_register tmp2_dst
= make_dst_temp_reg(tmp2
);
4673 struct tgsi_full_src_register neg_tmp2
= negate_src(&tmp2_src
);
4675 emit_instruction_op2(emit
, VGPU10_OPCODE_ILT
, &tmp1_dst
,
4676 &inst
->Src
[0], &zero
, FALSE
);
4677 emit_instruction_op2(emit
, VGPU10_OPCODE_ILT
, &tmp2_dst
,
4678 &zero
, &inst
->Src
[0], FALSE
);
4679 emit_instruction_op2(emit
, VGPU10_OPCODE_IADD
, &inst
->Dst
[0],
4680 &tmp1_src
, &neg_tmp2
, FALSE
);
4682 free_temp_indexes(emit
);
4689 * Emit code for TGSI_OPCODE_SUB instruction.
4692 emit_sub(struct svga_shader_emitter_v10
*emit
,
4693 const struct tgsi_full_instruction
*inst
)
4695 /* dst = SUB(s0, s1):
4698 * ADD dst, s0, neg(s1)
4700 struct tgsi_full_src_register neg_src1
= negate_src(&inst
->Src
[1]);
4702 /* ADD dst, s0, neg(s1) */
4703 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &inst
->Dst
[0],
4704 &inst
->Src
[0], &neg_src1
,
4705 inst
->Instruction
.Saturate
);
4712 * Emit a comparison instruction. The dest register will get
4713 * 0 or ~0 values depending on the outcome of comparing src0 to src1.
4716 emit_comparison(struct svga_shader_emitter_v10
*emit
,
4718 const struct tgsi_full_dst_register
*dst
,
4719 const struct tgsi_full_src_register
*src0
,
4720 const struct tgsi_full_src_register
*src1
)
4722 struct tgsi_full_src_register immediate
;
4723 VGPU10OpcodeToken0 opcode0
;
4724 boolean swapSrc
= FALSE
;
4726 /* Sanity checks for svga vs. gallium enums */
4727 STATIC_ASSERT(SVGA3D_CMP_LESS
== (PIPE_FUNC_LESS
+ 1));
4728 STATIC_ASSERT(SVGA3D_CMP_GREATEREQUAL
== (PIPE_FUNC_GEQUAL
+ 1));
4733 case SVGA3D_CMP_NEVER
:
4734 immediate
= make_immediate_reg_int(emit
, 0);
4736 begin_emit_instruction(emit
);
4737 emit_dword(emit
, VGPU10_OPCODE_MOV
);
4738 emit_dst_register(emit
, dst
);
4739 emit_src_register(emit
, &immediate
);
4740 end_emit_instruction(emit
);
4742 case SVGA3D_CMP_ALWAYS
:
4743 immediate
= make_immediate_reg_int(emit
, -1);
4745 begin_emit_instruction(emit
);
4746 emit_dword(emit
, VGPU10_OPCODE_MOV
);
4747 emit_dst_register(emit
, dst
);
4748 emit_src_register(emit
, &immediate
);
4749 end_emit_instruction(emit
);
4751 case SVGA3D_CMP_LESS
:
4752 opcode0
.opcodeType
= VGPU10_OPCODE_LT
;
4754 case SVGA3D_CMP_EQUAL
:
4755 opcode0
.opcodeType
= VGPU10_OPCODE_EQ
;
4757 case SVGA3D_CMP_LESSEQUAL
:
4758 opcode0
.opcodeType
= VGPU10_OPCODE_GE
;
4761 case SVGA3D_CMP_GREATER
:
4762 opcode0
.opcodeType
= VGPU10_OPCODE_LT
;
4765 case SVGA3D_CMP_NOTEQUAL
:
4766 opcode0
.opcodeType
= VGPU10_OPCODE_NE
;
4768 case SVGA3D_CMP_GREATEREQUAL
:
4769 opcode0
.opcodeType
= VGPU10_OPCODE_GE
;
4772 assert(!"Unexpected comparison mode");
4773 opcode0
.opcodeType
= VGPU10_OPCODE_EQ
;
4776 begin_emit_instruction(emit
);
4777 emit_dword(emit
, opcode0
.value
);
4778 emit_dst_register(emit
, dst
);
4780 emit_src_register(emit
, src1
);
4781 emit_src_register(emit
, src0
);
4784 emit_src_register(emit
, src0
);
4785 emit_src_register(emit
, src1
);
4787 end_emit_instruction(emit
);
4792 * Get texel/address offsets for a texture instruction.
4795 get_texel_offsets(const struct svga_shader_emitter_v10
*emit
,
4796 const struct tgsi_full_instruction
*inst
, int offsets
[3])
4798 if (inst
->Texture
.NumOffsets
== 1) {
4799 /* According to OpenGL Shader Language spec the offsets are only
4800 * fetched from a previously-declared immediate/literal.
4802 const struct tgsi_texture_offset
*off
= inst
->TexOffsets
;
4803 const unsigned index
= off
[0].Index
;
4804 const unsigned swizzleX
= off
[0].SwizzleX
;
4805 const unsigned swizzleY
= off
[0].SwizzleY
;
4806 const unsigned swizzleZ
= off
[0].SwizzleZ
;
4807 const union tgsi_immediate_data
*imm
= emit
->immediates
[index
];
4809 assert(inst
->TexOffsets
[0].File
== TGSI_FILE_IMMEDIATE
);
4811 offsets
[0] = imm
[swizzleX
].Int
;
4812 offsets
[1] = imm
[swizzleY
].Int
;
4813 offsets
[2] = imm
[swizzleZ
].Int
;
4816 offsets
[0] = offsets
[1] = offsets
[2] = 0;
4822 * Set up the coordinate register for texture sampling.
4823 * When we're sampling from a RECT texture we have to scale the
4824 * unnormalized coordinate to a normalized coordinate.
4825 * We do that by multiplying the coordinate by an "extra" constant.
4826 * An alternative would be to use the RESINFO instruction to query the
4829 static struct tgsi_full_src_register
4830 setup_texcoord(struct svga_shader_emitter_v10
*emit
,
4832 const struct tgsi_full_src_register
*coord
)
4834 if (emit
->key
.tex
[unit
].unnormalized
) {
4835 unsigned scale_index
= emit
->texcoord_scale_index
[unit
];
4836 unsigned tmp
= get_temp_index(emit
);
4837 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
4838 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
4839 struct tgsi_full_src_register scale_src
= make_src_const_reg(scale_index
);
4841 /* MUL tmp, coord, const[] */
4842 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp_dst
,
4843 coord
, &scale_src
, FALSE
);
4847 /* use texcoord as-is */
4854 * For SAMPLE_C instructions, emit the extra src register which indicates
4855 * the reference/comparision value.
4858 emit_tex_compare_refcoord(struct svga_shader_emitter_v10
*emit
,
4860 const struct tgsi_full_src_register
*coord
)
4862 struct tgsi_full_src_register coord_src_ref
;
4865 assert(tgsi_is_shadow_target(target
));
4867 assert(target
!= TGSI_TEXTURE_SHADOWCUBE_ARRAY
); /* XXX not implemented */
4868 if (target
== TGSI_TEXTURE_SHADOW2D_ARRAY
||
4869 target
== TGSI_TEXTURE_SHADOWCUBE
)
4870 component
= TGSI_SWIZZLE_W
;
4872 component
= TGSI_SWIZZLE_Z
;
4874 coord_src_ref
= scalar_src(coord
, component
);
4876 emit_src_register(emit
, &coord_src_ref
);
4881 * Info for implementing texture swizzles.
4882 * The begin_tex_swizzle(), get_tex_swizzle_dst() and end_tex_swizzle()
4883 * functions use this to encapsulate the extra steps needed to perform
4884 * a texture swizzle, or shadow/depth comparisons.
4885 * The shadow/depth comparison is only done here if for the cases where
4886 * there's no VGPU10 opcode (like texture bias lookup w/ shadow compare).
4888 struct tex_swizzle_info
4891 boolean shadow_compare
;
4893 unsigned texture_target
; /**< TGSI_TEXTURE_x */
4894 struct tgsi_full_src_register tmp_src
;
4895 struct tgsi_full_dst_register tmp_dst
;
4896 const struct tgsi_full_dst_register
*inst_dst
;
4897 const struct tgsi_full_src_register
*coord_src
;
4902 * Do setup for handling texture swizzles or shadow compares.
4903 * \param unit the texture unit
4904 * \param inst the TGSI texture instruction
4905 * \param shadow_compare do shadow/depth comparison?
4906 * \param swz returns the swizzle info
4909 begin_tex_swizzle(struct svga_shader_emitter_v10
*emit
,
4911 const struct tgsi_full_instruction
*inst
,
4912 boolean shadow_compare
,
4913 struct tex_swizzle_info
*swz
)
4915 swz
->swizzled
= (emit
->key
.tex
[unit
].swizzle_r
!= TGSI_SWIZZLE_X
||
4916 emit
->key
.tex
[unit
].swizzle_g
!= TGSI_SWIZZLE_Y
||
4917 emit
->key
.tex
[unit
].swizzle_b
!= TGSI_SWIZZLE_Z
||
4918 emit
->key
.tex
[unit
].swizzle_a
!= TGSI_SWIZZLE_W
);
4920 swz
->shadow_compare
= shadow_compare
;
4921 swz
->texture_target
= inst
->Texture
.Texture
;
4923 if (swz
->swizzled
|| shadow_compare
) {
4924 /* Allocate temp register for the result of the SAMPLE instruction
4925 * and the source of the MOV/compare/swizzle instructions.
4927 unsigned tmp
= get_temp_index(emit
);
4928 swz
->tmp_src
= make_src_temp_reg(tmp
);
4929 swz
->tmp_dst
= make_dst_temp_reg(tmp
);
4933 swz
->inst_dst
= &inst
->Dst
[0];
4934 swz
->coord_src
= &inst
->Src
[0];
4939 * Returns the register to put the SAMPLE instruction results into.
4940 * This will either be the original instruction dst reg (if no swizzle
4941 * and no shadow comparison) or a temporary reg if there is a swizzle.
4943 static const struct tgsi_full_dst_register
*
4944 get_tex_swizzle_dst(const struct tex_swizzle_info
*swz
)
4946 return (swz
->swizzled
|| swz
->shadow_compare
)
4947 ? &swz
->tmp_dst
: swz
->inst_dst
;
4952 * This emits the MOV instruction that actually implements a texture swizzle
4953 * and/or shadow comparison.
4956 end_tex_swizzle(struct svga_shader_emitter_v10
*emit
,
4957 const struct tex_swizzle_info
*swz
)
4959 if (swz
->shadow_compare
) {
4960 /* Emit extra instructions to compare the fetched texel value against
4961 * a texture coordinate component. The result of the comparison
4964 struct tgsi_full_src_register coord_src
;
4965 struct tgsi_full_src_register texel_src
=
4966 scalar_src(&swz
->tmp_src
, TGSI_SWIZZLE_X
);
4967 struct tgsi_full_src_register one
=
4968 make_immediate_reg_float(emit
, 1.0f
);
4969 /* convert gallium comparison func to SVGA comparison func */
4970 SVGA3dCmpFunc compare_func
= emit
->key
.tex
[swz
->unit
].compare_func
+ 1;
4972 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
4974 switch (swz
->texture_target
) {
4975 case TGSI_TEXTURE_SHADOW2D
:
4976 case TGSI_TEXTURE_SHADOWRECT
:
4977 case TGSI_TEXTURE_SHADOW1D_ARRAY
:
4978 coord_src
= scalar_src(swz
->coord_src
, TGSI_SWIZZLE_Z
);
4980 case TGSI_TEXTURE_SHADOW1D
:
4981 coord_src
= scalar_src(swz
->coord_src
, TGSI_SWIZZLE_Y
);
4983 case TGSI_TEXTURE_SHADOWCUBE
:
4984 case TGSI_TEXTURE_SHADOW2D_ARRAY
:
4985 coord_src
= scalar_src(swz
->coord_src
, TGSI_SWIZZLE_W
);
4988 assert(!"Unexpected texture target in end_tex_swizzle()");
4989 coord_src
= scalar_src(swz
->coord_src
, TGSI_SWIZZLE_Z
);
4992 /* COMPARE tmp, coord, texel */
4993 /* XXX it would seem that the texel and coord arguments should
4994 * be transposed here, but piglit tests indicate otherwise.
4996 emit_comparison(emit
, compare_func
,
4997 &swz
->tmp_dst
, &texel_src
, &coord_src
);
4999 /* AND dest, tmp, {1.0} */
5000 begin_emit_instruction(emit
);
5001 emit_opcode(emit
, VGPU10_OPCODE_AND
, FALSE
);
5002 if (swz
->swizzled
) {
5003 emit_dst_register(emit
, &swz
->tmp_dst
);
5006 emit_dst_register(emit
, swz
->inst_dst
);
5008 emit_src_register(emit
, &swz
->tmp_src
);
5009 emit_src_register(emit
, &one
);
5010 end_emit_instruction(emit
);
5013 if (swz
->swizzled
) {
5014 unsigned swz_r
= emit
->key
.tex
[swz
->unit
].swizzle_r
;
5015 unsigned swz_g
= emit
->key
.tex
[swz
->unit
].swizzle_g
;
5016 unsigned swz_b
= emit
->key
.tex
[swz
->unit
].swizzle_b
;
5017 unsigned swz_a
= emit
->key
.tex
[swz
->unit
].swizzle_a
;
5018 unsigned writemask_0
= 0, writemask_1
= 0;
5019 boolean int_tex
= is_integer_type(emit
->key
.tex
[swz
->unit
].return_type
);
5021 /* Swizzle w/out zero/one terms */
5022 struct tgsi_full_src_register src_swizzled
=
5023 swizzle_src(&swz
->tmp_src
,
5024 swz_r
< PIPE_SWIZZLE_ZERO
? swz_r
: PIPE_SWIZZLE_RED
,
5025 swz_g
< PIPE_SWIZZLE_ZERO
? swz_g
: PIPE_SWIZZLE_GREEN
,
5026 swz_b
< PIPE_SWIZZLE_ZERO
? swz_b
: PIPE_SWIZZLE_BLUE
,
5027 swz_a
< PIPE_SWIZZLE_ZERO
? swz_a
: PIPE_SWIZZLE_ALPHA
);
5029 /* MOV dst, color(tmp).<swizzle> */
5030 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
5031 swz
->inst_dst
, &src_swizzled
, FALSE
);
5033 /* handle swizzle zero terms */
5034 writemask_0
= (((swz_r
== PIPE_SWIZZLE_ZERO
) << 0) |
5035 ((swz_g
== PIPE_SWIZZLE_ZERO
) << 1) |
5036 ((swz_b
== PIPE_SWIZZLE_ZERO
) << 2) |
5037 ((swz_a
== PIPE_SWIZZLE_ZERO
) << 3));
5040 struct tgsi_full_src_register zero
= int_tex
?
5041 make_immediate_reg_int(emit
, 0) :
5042 make_immediate_reg_float(emit
, 0.0f
);
5043 struct tgsi_full_dst_register dst
=
5044 writemask_dst(swz
->inst_dst
, writemask_0
);
5046 /* MOV dst.writemask_0, {0,0,0,0} */
5047 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
5048 &dst
, &zero
, FALSE
);
5051 /* handle swizzle one terms */
5052 writemask_1
= (((swz_r
== PIPE_SWIZZLE_ONE
) << 0) |
5053 ((swz_g
== PIPE_SWIZZLE_ONE
) << 1) |
5054 ((swz_b
== PIPE_SWIZZLE_ONE
) << 2) |
5055 ((swz_a
== PIPE_SWIZZLE_ONE
) << 3));
5058 struct tgsi_full_src_register one
= int_tex
?
5059 make_immediate_reg_int(emit
, 1) :
5060 make_immediate_reg_float(emit
, 1.0f
);
5061 struct tgsi_full_dst_register dst
=
5062 writemask_dst(swz
->inst_dst
, writemask_1
);
5064 /* MOV dst.writemask_1, {1,1,1,1} */
5065 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &dst
, &one
, FALSE
);
5072 * Emit code for TGSI_OPCODE_SAMPLE instruction.
5075 emit_sample(struct svga_shader_emitter_v10
*emit
,
5076 const struct tgsi_full_instruction
*inst
)
5078 const unsigned resource_unit
= inst
->Src
[1].Register
.Index
;
5079 const unsigned sampler_unit
= inst
->Src
[2].Register
.Index
;
5080 struct tgsi_full_src_register coord
;
5082 struct tex_swizzle_info swz_info
;
5084 begin_tex_swizzle(emit
, sampler_unit
, inst
, FALSE
, &swz_info
);
5086 get_texel_offsets(emit
, inst
, offsets
);
5088 coord
= setup_texcoord(emit
, resource_unit
, &inst
->Src
[0]);
5090 /* SAMPLE dst, coord(s0), resource, sampler */
5091 begin_emit_instruction(emit
);
5093 emit_sample_opcode(emit
, VGPU10_OPCODE_SAMPLE
,
5094 inst
->Instruction
.Saturate
, offsets
);
5095 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5096 emit_src_register(emit
, &coord
);
5097 emit_resource_register(emit
, resource_unit
);
5098 emit_sampler_register(emit
, sampler_unit
);
5099 end_emit_instruction(emit
);
5101 end_tex_swizzle(emit
, &swz_info
);
5103 free_temp_indexes(emit
);
5110 * Check if a texture instruction is valid.
5111 * An example of an invalid texture instruction is doing shadow comparison
5112 * with an integer-valued texture.
5113 * If we detect an invalid texture instruction, we replace it with:
5114 * MOV dst, {1,1,1,1};
5115 * \return TRUE if valid, FALSE if invalid.
5118 is_valid_tex_instruction(struct svga_shader_emitter_v10
*emit
,
5119 const struct tgsi_full_instruction
*inst
)
5121 const unsigned unit
= inst
->Src
[1].Register
.Index
;
5122 const unsigned target
= inst
->Texture
.Texture
;
5123 boolean valid
= TRUE
;
5125 if (tgsi_is_shadow_target(target
) &&
5126 is_integer_type(emit
->key
.tex
[unit
].return_type
)) {
5127 debug_printf("Invalid SAMPLE_C with an integer texture!\n");
5130 /* XXX might check for other conditions in the future here */
5133 /* emit a MOV dst, {1,1,1,1} instruction. */
5134 struct tgsi_full_src_register one
= make_immediate_reg_float(emit
, 1.0f
);
5135 begin_emit_instruction(emit
);
5136 emit_opcode(emit
, VGPU10_OPCODE_MOV
, FALSE
);
5137 emit_dst_register(emit
, &inst
->Dst
[0]);
5138 emit_src_register(emit
, &one
);
5139 end_emit_instruction(emit
);
5147 * Emit code for TGSI_OPCODE_TEX (simple texture lookup)
5150 emit_tex(struct svga_shader_emitter_v10
*emit
,
5151 const struct tgsi_full_instruction
*inst
)
5153 const uint unit
= inst
->Src
[1].Register
.Index
;
5154 unsigned target
= inst
->Texture
.Texture
;
5156 struct tgsi_full_src_register coord
;
5158 struct tex_swizzle_info swz_info
;
5160 /* check that the sampler returns a float */
5161 if (!is_valid_tex_instruction(emit
, inst
))
5164 begin_tex_swizzle(emit
, unit
, inst
, FALSE
, &swz_info
);
5166 get_texel_offsets(emit
, inst
, offsets
);
5168 coord
= setup_texcoord(emit
, unit
, &inst
->Src
[0]);
5170 /* SAMPLE dst, coord(s0), resource, sampler */
5171 begin_emit_instruction(emit
);
5173 if (tgsi_is_shadow_target(target
))
5174 opcode
= VGPU10_OPCODE_SAMPLE_C
;
5176 opcode
= VGPU10_OPCODE_SAMPLE
;
5178 emit_sample_opcode(emit
, opcode
, inst
->Instruction
.Saturate
, offsets
);
5179 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5180 emit_src_register(emit
, &coord
);
5181 emit_resource_register(emit
, unit
);
5182 emit_sampler_register(emit
, unit
);
5183 if (opcode
== VGPU10_OPCODE_SAMPLE_C
) {
5184 emit_tex_compare_refcoord(emit
, target
, &coord
);
5186 end_emit_instruction(emit
);
5188 end_tex_swizzle(emit
, &swz_info
);
5190 free_temp_indexes(emit
);
5197 * Emit code for TGSI_OPCODE_TXP (projective texture)
5200 emit_txp(struct svga_shader_emitter_v10
*emit
,
5201 const struct tgsi_full_instruction
*inst
)
5203 const uint unit
= inst
->Src
[1].Register
.Index
;
5204 unsigned target
= inst
->Texture
.Texture
;
5207 unsigned tmp
= get_temp_index(emit
);
5208 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
5209 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
5210 struct tgsi_full_src_register src0_wwww
=
5211 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_W
);
5212 struct tgsi_full_src_register coord
;
5213 struct tex_swizzle_info swz_info
;
5215 /* check that the sampler returns a float */
5216 if (!is_valid_tex_instruction(emit
, inst
))
5219 begin_tex_swizzle(emit
, unit
, inst
, FALSE
, &swz_info
);
5221 get_texel_offsets(emit
, inst
, offsets
);
5223 coord
= setup_texcoord(emit
, unit
, &inst
->Src
[0]);
5225 /* DIV tmp, coord, coord.wwww */
5226 emit_instruction_op2(emit
, VGPU10_OPCODE_DIV
, &tmp_dst
,
5227 &coord
, &src0_wwww
, FALSE
);
5229 /* SAMPLE dst, coord(tmp), resource, sampler */
5230 begin_emit_instruction(emit
);
5232 if (tgsi_is_shadow_target(target
))
5233 opcode
= VGPU10_OPCODE_SAMPLE_C
;
5235 opcode
= VGPU10_OPCODE_SAMPLE
;
5237 emit_sample_opcode(emit
, opcode
, inst
->Instruction
.Saturate
, offsets
);
5238 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5239 emit_src_register(emit
, &tmp_src
); /* projected coord */
5240 emit_resource_register(emit
, unit
);
5241 emit_sampler_register(emit
, unit
);
5242 if (opcode
== VGPU10_OPCODE_SAMPLE_C
) {
5243 emit_tex_compare_refcoord(emit
, target
, &tmp_src
);
5245 end_emit_instruction(emit
);
5247 end_tex_swizzle(emit
, &swz_info
);
5249 free_temp_indexes(emit
);
5256 * Emit code for TGSI_OPCODE_XPD instruction.
5259 emit_xpd(struct svga_shader_emitter_v10
*emit
,
5260 const struct tgsi_full_instruction
*inst
)
5262 /* dst.x = src0.y * src1.z - src1.y * src0.z
5263 * dst.y = src0.z * src1.x - src1.z * src0.x
5264 * dst.z = src0.x * src1.y - src1.x * src0.y
5267 struct tgsi_full_src_register s0_xxxx
=
5268 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_X
);
5269 struct tgsi_full_src_register s0_yyyy
=
5270 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Y
);
5271 struct tgsi_full_src_register s0_zzzz
=
5272 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_Z
);
5274 struct tgsi_full_src_register s1_xxxx
=
5275 scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_X
);
5276 struct tgsi_full_src_register s1_yyyy
=
5277 scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_Y
);
5278 struct tgsi_full_src_register s1_zzzz
=
5279 scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_Z
);
5281 unsigned tmp1
= get_temp_index(emit
);
5282 struct tgsi_full_src_register tmp1_src
= make_src_temp_reg(tmp1
);
5283 struct tgsi_full_dst_register tmp1_dst
= make_dst_temp_reg(tmp1
);
5285 unsigned tmp2
= get_temp_index(emit
);
5286 struct tgsi_full_src_register tmp2_src
= make_src_temp_reg(tmp2
);
5287 struct tgsi_full_dst_register tmp2_dst
= make_dst_temp_reg(tmp2
);
5288 struct tgsi_full_src_register neg_tmp2_src
= negate_src(&tmp2_src
);
5290 unsigned tmp3
= get_temp_index(emit
);
5291 struct tgsi_full_src_register tmp3_src
= make_src_temp_reg(tmp3
);
5292 struct tgsi_full_dst_register tmp3_dst
= make_dst_temp_reg(tmp3
);
5293 struct tgsi_full_dst_register tmp3_dst_x
=
5294 writemask_dst(&tmp3_dst
, TGSI_WRITEMASK_X
);
5295 struct tgsi_full_dst_register tmp3_dst_y
=
5296 writemask_dst(&tmp3_dst
, TGSI_WRITEMASK_Y
);
5297 struct tgsi_full_dst_register tmp3_dst_z
=
5298 writemask_dst(&tmp3_dst
, TGSI_WRITEMASK_Z
);
5299 struct tgsi_full_dst_register tmp3_dst_w
=
5300 writemask_dst(&tmp3_dst
, TGSI_WRITEMASK_W
);
5302 /* Note: we put all the intermediate computations into tmp3 in case
5303 * the XPD dest register is that same as one of the src regs (in which
5304 * case we could clobber a src reg before we're done with it) .
5306 * Note: we could get by with just one temp register instead of three
5307 * since we're doing scalar operations and there's enough room in one
5308 * temp for everything.
5311 /* MUL tmp1, src0.y, src1.z */
5312 /* MUL tmp2, src1.y, src0.z */
5313 /* ADD tmp3.x, tmp1, -tmp2 */
5314 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_X
) {
5315 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp1_dst
,
5316 &s0_yyyy
, &s1_zzzz
, FALSE
);
5317 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp2_dst
,
5318 &s1_yyyy
, &s0_zzzz
, FALSE
);
5319 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &tmp3_dst_x
,
5320 &tmp1_src
, &neg_tmp2_src
, FALSE
);
5323 /* MUL tmp1, src0.z, src1.x */
5324 /* MUL tmp2, src1.z, src0.x */
5325 /* ADD tmp3.y, tmp1, -tmp2 */
5326 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Y
) {
5327 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp1_dst
, &s0_zzzz
,
5329 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp2_dst
, &s1_zzzz
,
5331 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &tmp3_dst_y
,
5332 &tmp1_src
, &neg_tmp2_src
, FALSE
);
5335 /* MUL tmp1, src0.x, src1.y */
5336 /* MUL tmp2, src1.x, src0.y */
5337 /* ADD tmp3.z, tmp1, -tmp2 */
5338 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_Z
) {
5339 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp1_dst
, &s0_xxxx
,
5341 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp2_dst
, &s1_xxxx
,
5343 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &tmp3_dst_z
,
5344 &tmp1_src
, &neg_tmp2_src
, FALSE
);
5347 /* MOV tmp3.w, 1.0 */
5348 if (inst
->Dst
[0].Register
.WriteMask
& TGSI_WRITEMASK_W
) {
5349 struct tgsi_full_src_register one
=
5350 make_immediate_reg_float(emit
, 1.0f
);
5352 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &tmp3_dst_w
, &one
, FALSE
);
5356 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &tmp3_src
,
5357 inst
->Instruction
.Saturate
);
5360 free_temp_indexes(emit
);
5367 * Emit code for TGSI_OPCODE_TXD (explicit derivatives)
5370 emit_txd(struct svga_shader_emitter_v10
*emit
,
5371 const struct tgsi_full_instruction
*inst
)
5373 const uint unit
= inst
->Src
[3].Register
.Index
;
5374 unsigned target
= inst
->Texture
.Texture
;
5376 struct tgsi_full_src_register coord
;
5377 struct tex_swizzle_info swz_info
;
5379 begin_tex_swizzle(emit
, unit
, inst
, tgsi_is_shadow_target(target
),
5382 get_texel_offsets(emit
, inst
, offsets
);
5384 coord
= setup_texcoord(emit
, unit
, &inst
->Src
[0]);
5386 /* SAMPLE_D dst, coord(s0), resource, sampler, Xderiv(s1), Yderiv(s2) */
5387 begin_emit_instruction(emit
);
5388 emit_sample_opcode(emit
, VGPU10_OPCODE_SAMPLE_D
,
5389 inst
->Instruction
.Saturate
, offsets
);
5390 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5391 emit_src_register(emit
, &coord
);
5392 emit_resource_register(emit
, unit
);
5393 emit_sampler_register(emit
, unit
);
5394 emit_src_register(emit
, &inst
->Src
[1]); /* Xderiv */
5395 emit_src_register(emit
, &inst
->Src
[2]); /* Yderiv */
5396 end_emit_instruction(emit
);
5398 end_tex_swizzle(emit
, &swz_info
);
5400 free_temp_indexes(emit
);
5407 * Emit code for TGSI_OPCODE_TXF (texel fetch)
5410 emit_txf(struct svga_shader_emitter_v10
*emit
,
5411 const struct tgsi_full_instruction
*inst
)
5413 const uint unit
= inst
->Src
[1].Register
.Index
;
5414 const unsigned msaa
= emit
->key
.tex
[unit
].texture_msaa
;
5416 struct tex_swizzle_info swz_info
;
5418 begin_tex_swizzle(emit
, unit
, inst
, FALSE
, &swz_info
);
5420 get_texel_offsets(emit
, inst
, offsets
);
5423 /* Fetch one sample from an MSAA texture */
5424 struct tgsi_full_src_register sampleIndex
=
5425 scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_W
);
5426 /* LD_MS dst, coord(s0), resource, sampleIndex */
5427 begin_emit_instruction(emit
);
5428 emit_sample_opcode(emit
, VGPU10_OPCODE_LD_MS
,
5429 inst
->Instruction
.Saturate
, offsets
);
5430 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5431 emit_src_register(emit
, &inst
->Src
[0]);
5432 emit_resource_register(emit
, unit
);
5433 emit_src_register(emit
, &sampleIndex
);
5434 end_emit_instruction(emit
);
5437 /* Fetch one texel specified by integer coordinate */
5438 /* LD dst, coord(s0), resource */
5439 begin_emit_instruction(emit
);
5440 emit_sample_opcode(emit
, VGPU10_OPCODE_LD
,
5441 inst
->Instruction
.Saturate
, offsets
);
5442 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5443 emit_src_register(emit
, &inst
->Src
[0]);
5444 emit_resource_register(emit
, unit
);
5445 end_emit_instruction(emit
);
5448 end_tex_swizzle(emit
, &swz_info
);
5450 free_temp_indexes(emit
);
5457 * Emit code for TGSI_OPCODE_TXL (explicit LOD) or TGSI_OPCODE_TXB (LOD bias)
5458 * or TGSI_OPCODE_TXB2 (for cube shadow maps).
5461 emit_txl_txb(struct svga_shader_emitter_v10
*emit
,
5462 const struct tgsi_full_instruction
*inst
)
5464 unsigned target
= inst
->Texture
.Texture
;
5465 unsigned opcode
, unit
;
5467 struct tgsi_full_src_register coord
, lod_bias
;
5468 struct tex_swizzle_info swz_info
;
5470 assert(inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL
||
5471 inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB
||
5472 inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB2
);
5474 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB2
) {
5475 lod_bias
= scalar_src(&inst
->Src
[1], TGSI_SWIZZLE_X
);
5476 unit
= inst
->Src
[2].Register
.Index
;
5479 lod_bias
= scalar_src(&inst
->Src
[0], TGSI_SWIZZLE_W
);
5480 unit
= inst
->Src
[1].Register
.Index
;
5483 begin_tex_swizzle(emit
, unit
, inst
, tgsi_is_shadow_target(target
),
5486 get_texel_offsets(emit
, inst
, offsets
);
5488 coord
= setup_texcoord(emit
, unit
, &inst
->Src
[0]);
5490 /* SAMPLE_L/B dst, coord(s0), resource, sampler, lod(s3) */
5491 begin_emit_instruction(emit
);
5492 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL
) {
5493 opcode
= VGPU10_OPCODE_SAMPLE_L
;
5496 opcode
= VGPU10_OPCODE_SAMPLE_B
;
5498 emit_sample_opcode(emit
, opcode
, inst
->Instruction
.Saturate
, offsets
);
5499 emit_dst_register(emit
, get_tex_swizzle_dst(&swz_info
));
5500 emit_src_register(emit
, &coord
);
5501 emit_resource_register(emit
, unit
);
5502 emit_sampler_register(emit
, unit
);
5503 emit_src_register(emit
, &lod_bias
);
5504 end_emit_instruction(emit
);
5506 end_tex_swizzle(emit
, &swz_info
);
5508 free_temp_indexes(emit
);
5515 * Emit code for TGSI_OPCODE_TXQ (texture query) instruction.
5518 emit_txq(struct svga_shader_emitter_v10
*emit
,
5519 const struct tgsi_full_instruction
*inst
)
5521 const uint unit
= inst
->Src
[1].Register
.Index
;
5523 if (emit
->key
.tex
[unit
].texture_target
== PIPE_BUFFER
) {
5524 /* RESINFO does not support querying texture buffers, so we instead
5525 * store texture buffer sizes in shader constants, then copy them to
5526 * implement TXQ instead of emitting RESINFO.
5527 * MOV dst, const[texture_buffer_size_index[unit]]
5529 struct tgsi_full_src_register size_src
=
5530 make_src_const_reg(emit
->texture_buffer_size_index
[unit
]);
5531 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &inst
->Dst
[0], &size_src
,
5534 /* RESINFO dst, srcMipLevel, resource */
5535 begin_emit_instruction(emit
);
5536 emit_opcode_resinfo(emit
, VGPU10_RESINFO_RETURN_UINT
);
5537 emit_dst_register(emit
, &inst
->Dst
[0]);
5538 emit_src_register(emit
, &inst
->Src
[0]);
5539 emit_resource_register(emit
, unit
);
5540 end_emit_instruction(emit
);
5543 free_temp_indexes(emit
);
5550 * Emit a simple instruction (like ADD, MUL, MIN, etc).
5553 emit_simple(struct svga_shader_emitter_v10
*emit
,
5554 const struct tgsi_full_instruction
*inst
)
5556 const unsigned opcode
= inst
->Instruction
.Opcode
;
5557 const struct tgsi_opcode_info
*op
= tgsi_get_opcode_info(opcode
);
5560 begin_emit_instruction(emit
);
5561 emit_opcode(emit
, translate_opcode(inst
->Instruction
.Opcode
),
5562 inst
->Instruction
.Saturate
);
5563 for (i
= 0; i
< op
->num_dst
; i
++) {
5564 emit_dst_register(emit
, &inst
->Dst
[i
]);
5566 for (i
= 0; i
< op
->num_src
; i
++) {
5567 emit_src_register(emit
, &inst
->Src
[i
]);
5569 end_emit_instruction(emit
);
5576 * Emit a simple VGPU10 instruction which writes to multiple dest registers,
5577 * where TGSI only uses one dest register.
5580 emit_simple_1dst(struct svga_shader_emitter_v10
*emit
,
5581 const struct tgsi_full_instruction
*inst
,
5585 const unsigned opcode
= inst
->Instruction
.Opcode
;
5586 const struct tgsi_opcode_info
*op
= tgsi_get_opcode_info(opcode
);
5589 begin_emit_instruction(emit
);
5590 emit_opcode(emit
, translate_opcode(inst
->Instruction
.Opcode
),
5591 inst
->Instruction
.Saturate
);
5593 for (i
= 0; i
< dst_count
; i
++) {
5594 if (i
== dst_index
) {
5595 emit_dst_register(emit
, &inst
->Dst
[0]);
5597 emit_null_dst_register(emit
);
5601 for (i
= 0; i
< op
->num_src
; i
++) {
5602 emit_src_register(emit
, &inst
->Src
[i
]);
5604 end_emit_instruction(emit
);
5611 * Translate a single TGSI instruction to VGPU10.
5614 emit_vgpu10_instruction(struct svga_shader_emitter_v10
*emit
,
5615 unsigned inst_number
,
5616 const struct tgsi_full_instruction
*inst
)
5618 const unsigned opcode
= inst
->Instruction
.Opcode
;
5621 case TGSI_OPCODE_ADD
:
5622 case TGSI_OPCODE_AND
:
5623 case TGSI_OPCODE_BGNLOOP
:
5624 case TGSI_OPCODE_BRK
:
5625 case TGSI_OPCODE_CEIL
:
5626 case TGSI_OPCODE_CONT
:
5627 case TGSI_OPCODE_DDX
:
5628 case TGSI_OPCODE_DDY
:
5629 case TGSI_OPCODE_DIV
:
5630 case TGSI_OPCODE_DP2
:
5631 case TGSI_OPCODE_DP3
:
5632 case TGSI_OPCODE_DP4
:
5633 case TGSI_OPCODE_ELSE
:
5634 case TGSI_OPCODE_ENDIF
:
5635 case TGSI_OPCODE_ENDLOOP
:
5636 case TGSI_OPCODE_ENDSUB
:
5637 case TGSI_OPCODE_F2I
:
5638 case TGSI_OPCODE_F2U
:
5639 case TGSI_OPCODE_FLR
:
5640 case TGSI_OPCODE_FRC
:
5641 case TGSI_OPCODE_FSEQ
:
5642 case TGSI_OPCODE_FSGE
:
5643 case TGSI_OPCODE_FSLT
:
5644 case TGSI_OPCODE_FSNE
:
5645 case TGSI_OPCODE_I2F
:
5646 case TGSI_OPCODE_IMAX
:
5647 case TGSI_OPCODE_IMIN
:
5648 case TGSI_OPCODE_INEG
:
5649 case TGSI_OPCODE_ISGE
:
5650 case TGSI_OPCODE_ISHR
:
5651 case TGSI_OPCODE_ISLT
:
5652 case TGSI_OPCODE_MAD
:
5653 case TGSI_OPCODE_MAX
:
5654 case TGSI_OPCODE_MIN
:
5655 case TGSI_OPCODE_MOV
:
5656 case TGSI_OPCODE_MUL
:
5657 case TGSI_OPCODE_NOP
:
5658 case TGSI_OPCODE_NOT
:
5659 case TGSI_OPCODE_OR
:
5660 case TGSI_OPCODE_RET
:
5661 case TGSI_OPCODE_UADD
:
5662 case TGSI_OPCODE_USEQ
:
5663 case TGSI_OPCODE_USGE
:
5664 case TGSI_OPCODE_USLT
:
5665 case TGSI_OPCODE_UMIN
:
5666 case TGSI_OPCODE_UMAD
:
5667 case TGSI_OPCODE_UMAX
:
5668 case TGSI_OPCODE_ROUND
:
5669 case TGSI_OPCODE_SQRT
:
5670 case TGSI_OPCODE_SHL
:
5671 case TGSI_OPCODE_TRUNC
:
5672 case TGSI_OPCODE_U2F
:
5673 case TGSI_OPCODE_UCMP
:
5674 case TGSI_OPCODE_USHR
:
5675 case TGSI_OPCODE_USNE
:
5676 case TGSI_OPCODE_XOR
:
5677 /* simple instructions */
5678 return emit_simple(emit
, inst
);
5681 case TGSI_OPCODE_EMIT
:
5682 return emit_vertex(emit
, inst
);
5683 case TGSI_OPCODE_ENDPRIM
:
5684 return emit_endprim(emit
, inst
);
5685 case TGSI_OPCODE_ABS
:
5686 return emit_abs(emit
, inst
);
5687 case TGSI_OPCODE_IABS
:
5688 return emit_iabs(emit
, inst
);
5689 case TGSI_OPCODE_ARL
:
5691 case TGSI_OPCODE_UARL
:
5692 return emit_arl_uarl(emit
, inst
);
5693 case TGSI_OPCODE_BGNSUB
:
5696 case TGSI_OPCODE_CAL
:
5697 return emit_cal(emit
, inst
);
5698 case TGSI_OPCODE_CMP
:
5699 return emit_cmp(emit
, inst
);
5700 case TGSI_OPCODE_COS
:
5701 return emit_sincos(emit
, inst
);
5702 case TGSI_OPCODE_DP2A
:
5703 return emit_dp2a(emit
, inst
);
5704 case TGSI_OPCODE_DPH
:
5705 return emit_dph(emit
, inst
);
5706 case TGSI_OPCODE_DST
:
5707 return emit_dst(emit
, inst
);
5708 case TGSI_OPCODE_EX2
:
5709 return emit_ex2(emit
, inst
);
5710 case TGSI_OPCODE_EXP
:
5711 return emit_exp(emit
, inst
);
5712 case TGSI_OPCODE_IF
:
5713 return emit_if(emit
, inst
);
5714 case TGSI_OPCODE_KILL
:
5715 return emit_kill(emit
, inst
);
5716 case TGSI_OPCODE_KILL_IF
:
5717 return emit_kill_if(emit
, inst
);
5718 case TGSI_OPCODE_LG2
:
5719 return emit_lg2(emit
, inst
);
5720 case TGSI_OPCODE_LIT
:
5721 return emit_lit(emit
, inst
);
5722 case TGSI_OPCODE_LOG
:
5723 return emit_log(emit
, inst
);
5724 case TGSI_OPCODE_LRP
:
5725 return emit_lrp(emit
, inst
);
5726 case TGSI_OPCODE_POW
:
5727 return emit_pow(emit
, inst
);
5728 case TGSI_OPCODE_RCP
:
5729 return emit_rcp(emit
, inst
);
5730 case TGSI_OPCODE_RSQ
:
5731 return emit_rsq(emit
, inst
);
5732 case TGSI_OPCODE_SAMPLE
:
5733 return emit_sample(emit
, inst
);
5734 case TGSI_OPCODE_SCS
:
5735 return emit_scs(emit
, inst
);
5736 case TGSI_OPCODE_SEQ
:
5737 return emit_seq(emit
, inst
);
5738 case TGSI_OPCODE_SGE
:
5739 return emit_sge(emit
, inst
);
5740 case TGSI_OPCODE_SGT
:
5741 return emit_sgt(emit
, inst
);
5742 case TGSI_OPCODE_SIN
:
5743 return emit_sincos(emit
, inst
);
5744 case TGSI_OPCODE_SLE
:
5745 return emit_sle(emit
, inst
);
5746 case TGSI_OPCODE_SLT
:
5747 return emit_slt(emit
, inst
);
5748 case TGSI_OPCODE_SNE
:
5749 return emit_sne(emit
, inst
);
5750 case TGSI_OPCODE_SSG
:
5751 return emit_ssg(emit
, inst
);
5752 case TGSI_OPCODE_ISSG
:
5753 return emit_issg(emit
, inst
);
5754 case TGSI_OPCODE_SUB
:
5755 return emit_sub(emit
, inst
);
5756 case TGSI_OPCODE_TEX
:
5757 return emit_tex(emit
, inst
);
5758 case TGSI_OPCODE_TXP
:
5759 return emit_txp(emit
, inst
);
5760 case TGSI_OPCODE_TXB
:
5761 case TGSI_OPCODE_TXB2
:
5762 case TGSI_OPCODE_TXL
:
5763 return emit_txl_txb(emit
, inst
);
5764 case TGSI_OPCODE_TXD
:
5765 return emit_txd(emit
, inst
);
5766 case TGSI_OPCODE_TXF
:
5767 return emit_txf(emit
, inst
);
5768 case TGSI_OPCODE_TXQ
:
5769 return emit_txq(emit
, inst
);
5770 case TGSI_OPCODE_UIF
:
5771 return emit_if(emit
, inst
);
5772 case TGSI_OPCODE_XPD
:
5773 return emit_xpd(emit
, inst
);
5774 case TGSI_OPCODE_UMUL_HI
:
5775 case TGSI_OPCODE_IMUL_HI
:
5776 case TGSI_OPCODE_UDIV
:
5777 case TGSI_OPCODE_IDIV
:
5778 /* These cases use only the FIRST of two destination registers */
5779 return emit_simple_1dst(emit
, inst
, 2, 0);
5780 case TGSI_OPCODE_UMUL
:
5781 case TGSI_OPCODE_UMOD
:
5782 case TGSI_OPCODE_MOD
:
5783 /* These cases use only the SECOND of two destination registers */
5784 return emit_simple_1dst(emit
, inst
, 2, 1);
5785 case TGSI_OPCODE_END
:
5786 if (!emit_post_helpers(emit
))
5788 return emit_simple(emit
, inst
);
5791 debug_printf("Unimplemented tgsi instruction %s\n",
5792 tgsi_get_opcode_name(opcode
));
5801 * Emit the extra instructions to adjust the vertex position.
5802 * There are two possible adjustments:
5803 * 1. Converting from Gallium to VGPU10 coordinate space by applying the
5804 * "prescale" and "pretranslate" values.
5805 * 2. Undoing the viewport transformation when we use the swtnl/draw path.
5806 * \param vs_pos_tmp_index which temporary register contains the vertex pos.
5809 emit_vpos_instructions(struct svga_shader_emitter_v10
*emit
,
5810 unsigned vs_pos_tmp_index
)
5812 struct tgsi_full_src_register tmp_pos_src
;
5813 struct tgsi_full_dst_register pos_dst
;
5815 /* Don't bother to emit any extra vertex instructions if vertex position is
5818 if (emit
->vposition
.out_index
== INVALID_INDEX
)
5821 tmp_pos_src
= make_src_temp_reg(vs_pos_tmp_index
);
5822 pos_dst
= make_dst_output_reg(emit
->vposition
.out_index
);
5824 /* If non-adjusted vertex position register index
5825 * is valid, copy the vertex position from the temporary
5826 * vertex position register before it is modified by the
5827 * prescale computation.
5829 if (emit
->vposition
.so_index
!= INVALID_INDEX
) {
5830 struct tgsi_full_dst_register pos_so_dst
=
5831 make_dst_output_reg(emit
->vposition
.so_index
);
5833 /* MOV pos_so, tmp_pos */
5834 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &pos_so_dst
,
5835 &tmp_pos_src
, FALSE
);
5838 if (emit
->vposition
.need_prescale
) {
5839 /* This code adjusts the vertex position to match the VGPU10 convention.
5840 * If p is the position computed by the shader (usually by applying the
5841 * modelview and projection matrices), the new position q is computed by:
5843 * q.x = p.w * trans.x + p.x * scale.x
5844 * q.y = p.w * trans.y + p.y * scale.y
5845 * q.z = p.w * trans.z + p.z * scale.z;
5846 * q.w = p.w * trans.w + p.w;
5848 struct tgsi_full_src_register tmp_pos_src_w
=
5849 scalar_src(&tmp_pos_src
, TGSI_SWIZZLE_W
);
5850 struct tgsi_full_dst_register tmp_pos_dst
=
5851 make_dst_temp_reg(vs_pos_tmp_index
);
5852 struct tgsi_full_dst_register tmp_pos_dst_xyz
=
5853 writemask_dst(&tmp_pos_dst
, TGSI_WRITEMASK_XYZ
);
5855 struct tgsi_full_src_register prescale_scale
=
5856 make_src_const_reg(emit
->vposition
.prescale_scale_index
);
5857 struct tgsi_full_src_register prescale_trans
=
5858 make_src_const_reg(emit
->vposition
.prescale_trans_index
);
5860 /* MUL tmp_pos.xyz, tmp_pos, prescale.scale */
5861 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp_pos_dst_xyz
,
5862 &tmp_pos_src
, &prescale_scale
, FALSE
);
5864 /* MAD pos, tmp_pos.wwww, prescale.trans, tmp_pos */
5865 emit_instruction_op3(emit
, VGPU10_OPCODE_MAD
, &pos_dst
, &tmp_pos_src_w
,
5866 &prescale_trans
, &tmp_pos_src
, FALSE
);
5868 else if (emit
->key
.vs
.undo_viewport
) {
5869 /* This code computes the final vertex position from the temporary
5870 * vertex position by undoing the viewport transformation and the
5871 * divide-by-W operation (we convert window coords back to clip coords).
5872 * This is needed when we use the 'draw' module for fallbacks.
5873 * If p is the temp pos in window coords, then the NDC coord q is:
5874 * q.x = (p.x - vp.x_trans) / vp.x_scale * p.w
5875 * q.y = (p.y - vp.y_trans) / vp.y_scale * p.w
5878 * CONST[vs_viewport_index] contains:
5879 * { 1/vp.x_scale, 1/vp.y_scale, -vp.x_trans, -vp.y_trans }
5881 struct tgsi_full_dst_register tmp_pos_dst
=
5882 make_dst_temp_reg(vs_pos_tmp_index
);
5883 struct tgsi_full_dst_register tmp_pos_dst_xy
=
5884 writemask_dst(&tmp_pos_dst
, TGSI_WRITEMASK_XY
);
5885 struct tgsi_full_src_register tmp_pos_src_wwww
=
5886 scalar_src(&tmp_pos_src
, TGSI_SWIZZLE_W
);
5888 struct tgsi_full_dst_register pos_dst_xyz
=
5889 writemask_dst(&pos_dst
, TGSI_WRITEMASK_XYZ
);
5890 struct tgsi_full_dst_register pos_dst_w
=
5891 writemask_dst(&pos_dst
, TGSI_WRITEMASK_W
);
5893 struct tgsi_full_src_register vp_xyzw
=
5894 make_src_const_reg(emit
->vs
.viewport_index
);
5895 struct tgsi_full_src_register vp_zwww
=
5896 swizzle_src(&vp_xyzw
, TGSI_SWIZZLE_Z
, TGSI_SWIZZLE_W
,
5897 TGSI_SWIZZLE_W
, TGSI_SWIZZLE_W
);
5899 /* ADD tmp_pos.xy, tmp_pos.xy, viewport.zwww */
5900 emit_instruction_op2(emit
, VGPU10_OPCODE_ADD
, &tmp_pos_dst_xy
,
5901 &tmp_pos_src
, &vp_zwww
, FALSE
);
5903 /* MUL tmp_pos.xy, tmp_pos.xyzw, viewport.xyzy */
5904 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &tmp_pos_dst_xy
,
5905 &tmp_pos_src
, &vp_xyzw
, FALSE
);
5907 /* MUL pos.xyz, tmp_pos.xyz, tmp_pos.www */
5908 emit_instruction_op2(emit
, VGPU10_OPCODE_MUL
, &pos_dst_xyz
,
5909 &tmp_pos_src
, &tmp_pos_src_wwww
, FALSE
);
5911 /* MOV pos.w, tmp_pos.w */
5912 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &pos_dst_w
,
5913 &tmp_pos_src
, FALSE
);
5915 else if (vs_pos_tmp_index
!= INVALID_INDEX
) {
5916 /* This code is to handle the case where the temporary vertex
5917 * position register is created when the vertex shader has stream
5918 * output and prescale is disabled because rasterization is to be
5921 struct tgsi_full_dst_register pos_dst
=
5922 make_dst_output_reg(emit
->vposition
.out_index
);
5924 /* MOV pos, tmp_pos */
5925 begin_emit_instruction(emit
);
5926 emit_opcode(emit
, VGPU10_OPCODE_MOV
, FALSE
);
5927 emit_dst_register(emit
, &pos_dst
);
5928 emit_src_register(emit
, &tmp_pos_src
);
5929 end_emit_instruction(emit
);
5934 emit_clipping_instructions(struct svga_shader_emitter_v10
*emit
)
5936 if (emit
->clip_mode
== CLIP_DISTANCE
) {
5937 /* Copy from copy distance temporary to CLIPDIST & the shadow copy */
5938 emit_clip_distance_instructions(emit
);
5940 } else if (emit
->clip_mode
== CLIP_VERTEX
) {
5941 /* Convert TGSI CLIPVERTEX to CLIPDIST */
5942 emit_clip_vertex_instructions(emit
);
5946 * Emit vertex position and take care of legacy user planes only if
5947 * there is a valid vertex position register index.
5948 * This is to take care of the case
5949 * where the shader doesn't output vertex position. Then in
5950 * this case, don't bother to emit more vertex instructions.
5952 if (emit
->vposition
.out_index
== INVALID_INDEX
)
5956 * Emit per-vertex clipping instructions for legacy user defined clip planes.
5957 * NOTE: we must emit the clip distance instructions before the
5958 * emit_vpos_instructions() call since the later function will change
5959 * the TEMP[vs_pos_tmp_index] value.
5961 if (emit
->clip_mode
== CLIP_LEGACY
) {
5962 /* Emit CLIPDIST for legacy user defined clip planes */
5963 emit_clip_distance_from_vpos(emit
, emit
->vposition
.tmp_index
);
5969 * Emit extra per-vertex instructions. This includes clip-coordinate
5970 * space conversion and computing clip distances. This is called for
5971 * each GS emit-vertex instruction and at the end of VS translation.
5974 emit_vertex_instructions(struct svga_shader_emitter_v10
*emit
)
5976 const unsigned vs_pos_tmp_index
= emit
->vposition
.tmp_index
;
5978 /* Emit clipping instructions based on clipping mode */
5979 emit_clipping_instructions(emit
);
5982 * Reset the temporary vertex position register index
5983 * so that emit_dst_register() will use the real vertex position output
5985 emit
->vposition
.tmp_index
= INVALID_INDEX
;
5987 /* Emit vertex position instructions */
5988 emit_vpos_instructions(emit
, vs_pos_tmp_index
);
5990 /* Restore original vposition.tmp_index value for the next GS vertex.
5991 * It doesn't matter for VS.
5993 emit
->vposition
.tmp_index
= vs_pos_tmp_index
;
5997 * Translate the TGSI_OPCODE_EMIT GS instruction.
6000 emit_vertex(struct svga_shader_emitter_v10
*emit
,
6001 const struct tgsi_full_instruction
*inst
)
6003 unsigned ret
= TRUE
;
6005 assert(emit
->unit
== PIPE_SHADER_GEOMETRY
);
6007 emit_vertex_instructions(emit
);
6009 /* We can't use emit_simple() because the TGSI instruction has one
6010 * operand (vertex stream number) which we must ignore for VGPU10.
6012 begin_emit_instruction(emit
);
6013 emit_opcode(emit
, VGPU10_OPCODE_EMIT
, FALSE
);
6014 end_emit_instruction(emit
);
6021 * Emit the extra code to convert from VGPU10's boolean front-face
6022 * register to TGSI's signed front-face register.
6024 * TODO: Make temporary front-face register a scalar.
6027 emit_frontface_instructions(struct svga_shader_emitter_v10
*emit
)
6029 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
6031 if (emit
->fs
.face_input_index
!= INVALID_INDEX
) {
6032 /* convert vgpu10 boolean face register to gallium +/-1 value */
6033 struct tgsi_full_dst_register tmp_dst
=
6034 make_dst_temp_reg(emit
->fs
.face_tmp_index
);
6035 struct tgsi_full_src_register one
=
6036 make_immediate_reg_float(emit
, 1.0f
);
6037 struct tgsi_full_src_register neg_one
=
6038 make_immediate_reg_float(emit
, -1.0f
);
6040 /* MOVC face_tmp, IS_FRONT_FACE.x, 1.0, -1.0 */
6041 begin_emit_instruction(emit
);
6042 emit_opcode(emit
, VGPU10_OPCODE_MOVC
, FALSE
);
6043 emit_dst_register(emit
, &tmp_dst
);
6044 emit_face_register(emit
);
6045 emit_src_register(emit
, &one
);
6046 emit_src_register(emit
, &neg_one
);
6047 end_emit_instruction(emit
);
6053 * Emit the extra code to convert from VGPU10's fragcoord.w value to 1/w.
6056 emit_fragcoord_instructions(struct svga_shader_emitter_v10
*emit
)
6058 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
6060 if (emit
->fs
.fragcoord_input_index
!= INVALID_INDEX
) {
6061 struct tgsi_full_dst_register tmp_dst
=
6062 make_dst_temp_reg(emit
->fs
.fragcoord_tmp_index
);
6063 struct tgsi_full_dst_register tmp_dst_xyz
=
6064 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_XYZ
);
6065 struct tgsi_full_dst_register tmp_dst_w
=
6066 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_W
);
6067 struct tgsi_full_src_register one
=
6068 make_immediate_reg_float(emit
, 1.0f
);
6069 struct tgsi_full_src_register fragcoord
=
6070 make_src_reg(TGSI_FILE_INPUT
, emit
->fs
.fragcoord_input_index
);
6072 /* save the input index */
6073 unsigned fragcoord_input_index
= emit
->fs
.fragcoord_input_index
;
6074 /* set to invalid to prevent substitution in emit_src_register() */
6075 emit
->fs
.fragcoord_input_index
= INVALID_INDEX
;
6077 /* MOV fragcoord_tmp.xyz, fragcoord.xyz */
6078 begin_emit_instruction(emit
);
6079 emit_opcode(emit
, VGPU10_OPCODE_MOV
, FALSE
);
6080 emit_dst_register(emit
, &tmp_dst_xyz
);
6081 emit_src_register(emit
, &fragcoord
);
6082 end_emit_instruction(emit
);
6084 /* DIV fragcoord_tmp.w, 1.0, fragcoord.w */
6085 begin_emit_instruction(emit
);
6086 emit_opcode(emit
, VGPU10_OPCODE_DIV
, FALSE
);
6087 emit_dst_register(emit
, &tmp_dst_w
);
6088 emit_src_register(emit
, &one
);
6089 emit_src_register(emit
, &fragcoord
);
6090 end_emit_instruction(emit
);
6092 /* restore saved value */
6093 emit
->fs
.fragcoord_input_index
= fragcoord_input_index
;
6099 * Emit extra instructions to adjust VS inputs/attributes. This can
6100 * mean casting a vertex attribute from int to float or setting the
6101 * W component to 1, or both.
6104 emit_vertex_attrib_instructions(struct svga_shader_emitter_v10
*emit
)
6106 const unsigned save_w_1_mask
= emit
->key
.vs
.adjust_attrib_w_1
;
6107 const unsigned save_itof_mask
= emit
->key
.vs
.adjust_attrib_itof
;
6108 const unsigned save_utof_mask
= emit
->key
.vs
.adjust_attrib_utof
;
6109 const unsigned save_is_bgra_mask
= emit
->key
.vs
.attrib_is_bgra
;
6110 const unsigned save_puint_to_snorm_mask
= emit
->key
.vs
.attrib_puint_to_snorm
;
6111 const unsigned save_puint_to_uscaled_mask
= emit
->key
.vs
.attrib_puint_to_uscaled
;
6112 const unsigned save_puint_to_sscaled_mask
= emit
->key
.vs
.attrib_puint_to_sscaled
;
6114 unsigned adjust_mask
= (save_w_1_mask
|
6118 save_puint_to_snorm_mask
|
6119 save_puint_to_uscaled_mask
|
6120 save_puint_to_sscaled_mask
);
6122 assert(emit
->unit
== PIPE_SHADER_VERTEX
);
6125 struct tgsi_full_src_register one
=
6126 make_immediate_reg_float(emit
, 1.0f
);
6128 struct tgsi_full_src_register one_int
=
6129 make_immediate_reg_int(emit
, 1);
6131 /* We need to turn off these bitmasks while emitting the
6132 * instructions below, then restore them afterward.
6134 emit
->key
.vs
.adjust_attrib_w_1
= 0;
6135 emit
->key
.vs
.adjust_attrib_itof
= 0;
6136 emit
->key
.vs
.adjust_attrib_utof
= 0;
6137 emit
->key
.vs
.attrib_is_bgra
= 0;
6138 emit
->key
.vs
.attrib_puint_to_snorm
= 0;
6139 emit
->key
.vs
.attrib_puint_to_uscaled
= 0;
6140 emit
->key
.vs
.attrib_puint_to_sscaled
= 0;
6142 while (adjust_mask
) {
6143 unsigned index
= u_bit_scan(&adjust_mask
);
6144 unsigned tmp
= emit
->vs
.adjusted_input
[index
];
6145 struct tgsi_full_src_register input_src
=
6146 make_src_reg(TGSI_FILE_INPUT
, index
);
6148 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
6149 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
6150 struct tgsi_full_dst_register tmp_dst_w
=
6151 writemask_dst(&tmp_dst
, TGSI_WRITEMASK_W
);
6153 /* ITOF/UTOF/MOV tmp, input[index] */
6154 if (save_itof_mask
& (1 << index
)) {
6155 emit_instruction_op1(emit
, VGPU10_OPCODE_ITOF
,
6156 &tmp_dst
, &input_src
, FALSE
);
6158 else if (save_utof_mask
& (1 << index
)) {
6159 emit_instruction_op1(emit
, VGPU10_OPCODE_UTOF
,
6160 &tmp_dst
, &input_src
, FALSE
);
6162 else if (save_puint_to_snorm_mask
& (1 << index
)) {
6163 emit_puint_to_snorm(emit
, &tmp_dst
, &input_src
);
6165 else if (save_puint_to_uscaled_mask
& (1 << index
)) {
6166 emit_puint_to_uscaled(emit
, &tmp_dst
, &input_src
);
6168 else if (save_puint_to_sscaled_mask
& (1 << index
)) {
6169 emit_puint_to_sscaled(emit
, &tmp_dst
, &input_src
);
6172 assert((save_w_1_mask
| save_is_bgra_mask
) & (1 << index
));
6173 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
6174 &tmp_dst
, &input_src
, FALSE
);
6177 if (save_is_bgra_mask
& (1 << index
)) {
6178 emit_swap_r_b(emit
, &tmp_dst
, &tmp_src
);
6181 if (save_w_1_mask
& (1 << index
)) {
6182 /* MOV tmp.w, 1.0 */
6183 if (emit
->key
.vs
.attrib_is_pure_int
& (1 << index
)) {
6184 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
6185 &tmp_dst_w
, &one_int
, FALSE
);
6188 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
,
6189 &tmp_dst_w
, &one
, FALSE
);
6194 emit
->key
.vs
.adjust_attrib_w_1
= save_w_1_mask
;
6195 emit
->key
.vs
.adjust_attrib_itof
= save_itof_mask
;
6196 emit
->key
.vs
.adjust_attrib_utof
= save_utof_mask
;
6197 emit
->key
.vs
.attrib_is_bgra
= save_is_bgra_mask
;
6198 emit
->key
.vs
.attrib_puint_to_snorm
= save_puint_to_snorm_mask
;
6199 emit
->key
.vs
.attrib_puint_to_uscaled
= save_puint_to_uscaled_mask
;
6200 emit
->key
.vs
.attrib_puint_to_sscaled
= save_puint_to_sscaled_mask
;
6206 * Some common values like 0.0, 1.0, 0.5, etc. are frequently needed
6207 * to implement some instructions. We pre-allocate those values here
6208 * in the immediate constant buffer.
6211 alloc_common_immediates(struct svga_shader_emitter_v10
*emit
)
6215 emit
->common_immediate_pos
[n
++] =
6216 alloc_immediate_float4(emit
, 0.0f
, 1.0f
, 0.5f
, -1.0f
);
6218 emit
->common_immediate_pos
[n
++] =
6219 alloc_immediate_float4(emit
, 128.0f
, -128.0f
, 2.0f
, 3.0f
);
6221 emit
->common_immediate_pos
[n
++] =
6222 alloc_immediate_int4(emit
, 0, 1, 0, -1);
6224 if (emit
->key
.vs
.attrib_puint_to_snorm
) {
6225 emit
->common_immediate_pos
[n
++] =
6226 alloc_immediate_float4(emit
, -2.0f
, -2.0f
, -2.0f
, -1.66666f
);
6229 if (emit
->key
.vs
.attrib_puint_to_uscaled
) {
6230 emit
->common_immediate_pos
[n
++] =
6231 alloc_immediate_float4(emit
, 1023.0f
, 3.0f
, 0.0f
, 0.0f
);
6234 if (emit
->key
.vs
.attrib_puint_to_sscaled
) {
6235 emit
->common_immediate_pos
[n
++] =
6236 alloc_immediate_int4(emit
, 22, 12, 2, 0);
6238 emit
->common_immediate_pos
[n
++] =
6239 alloc_immediate_int4(emit
, 22, 30, 0, 0);
6242 assert(n
<= Elements(emit
->common_immediate_pos
));
6243 emit
->num_common_immediates
= n
;
6248 * Emit any extra/helper declarations/code that we might need between
6249 * the declaration section and code section.
6252 emit_pre_helpers(struct svga_shader_emitter_v10
*emit
)
6255 if (emit
->unit
== PIPE_SHADER_GEOMETRY
)
6256 emit_property_instructions(emit
);
6258 /* Declare inputs */
6259 if (!emit_input_declarations(emit
))
6262 /* Declare outputs */
6263 if (!emit_output_declarations(emit
))
6266 /* Declare temporary registers */
6267 emit_temporaries_declaration(emit
);
6269 /* Declare constant registers */
6270 emit_constant_declaration(emit
);
6272 /* Declare samplers and resources */
6273 emit_sampler_declarations(emit
);
6274 emit_resource_declarations(emit
);
6276 /* Declare clip distance output registers */
6277 if (emit
->unit
== PIPE_SHADER_VERTEX
||
6278 emit
->unit
== PIPE_SHADER_GEOMETRY
) {
6279 emit_clip_distance_declarations(emit
);
6282 alloc_common_immediates(emit
);
6284 if (emit
->unit
== PIPE_SHADER_FRAGMENT
&&
6285 emit
->key
.fs
.alpha_func
!= SVGA3D_CMP_ALWAYS
) {
6286 float alpha
= emit
->key
.fs
.alpha_ref
;
6287 emit
->fs
.alpha_ref_index
=
6288 alloc_immediate_float4(emit
, alpha
, alpha
, alpha
, alpha
);
6291 /* Now, emit the constant block containing all the immediates
6292 * declared by shader, as well as the extra ones seen above.
6294 emit_vgpu10_immediates_block(emit
);
6296 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
6297 emit_frontface_instructions(emit
);
6298 emit_fragcoord_instructions(emit
);
6300 else if (emit
->unit
== PIPE_SHADER_VERTEX
) {
6301 emit_vertex_attrib_instructions(emit
);
6309 * Emit alpha test code. This compares TEMP[fs_color_tmp_index].w
6310 * against the alpha reference value and discards the fragment if the
6314 emit_alpha_test_instructions(struct svga_shader_emitter_v10
*emit
,
6315 unsigned fs_color_tmp_index
)
6317 /* compare output color's alpha to alpha ref and kill */
6318 unsigned tmp
= get_temp_index(emit
);
6319 struct tgsi_full_src_register tmp_src
= make_src_temp_reg(tmp
);
6320 struct tgsi_full_src_register tmp_src_x
=
6321 scalar_src(&tmp_src
, TGSI_SWIZZLE_X
);
6322 struct tgsi_full_dst_register tmp_dst
= make_dst_temp_reg(tmp
);
6323 struct tgsi_full_src_register color_src
=
6324 make_src_temp_reg(fs_color_tmp_index
);
6325 struct tgsi_full_src_register color_src_w
=
6326 scalar_src(&color_src
, TGSI_SWIZZLE_W
);
6327 struct tgsi_full_src_register ref_src
=
6328 make_src_immediate_reg(emit
->fs
.alpha_ref_index
);
6329 struct tgsi_full_dst_register color_dst
=
6330 make_dst_output_reg(emit
->fs
.color_out_index
[0]);
6332 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
6334 /* dst = src0 'alpha_func' src1 */
6335 emit_comparison(emit
, emit
->key
.fs
.alpha_func
, &tmp_dst
,
6336 &color_src_w
, &ref_src
);
6338 /* DISCARD if dst.x == 0 */
6339 begin_emit_instruction(emit
);
6340 emit_discard_opcode(emit
, FALSE
); /* discard if src0.x is zero */
6341 emit_src_register(emit
, &tmp_src_x
);
6342 end_emit_instruction(emit
);
6344 /* If we don't need to broadcast the color below, emit final color here */
6345 if (emit
->key
.fs
.write_color0_to_n_cbufs
<= 1) {
6346 /* MOV output.color, tempcolor */
6347 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &color_dst
,
6348 &color_src
, FALSE
); /* XXX saturate? */
6351 free_temp_indexes(emit
);
6356 * Emit instructions for writing a single color output to multiple
6358 * This is used when the TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS
6359 * property is set and the number of render targets is greater than one.
6360 * \param fs_color_tmp_index index of the temp register that holds the
6361 * color to broadcast.
6364 emit_broadcast_color_instructions(struct svga_shader_emitter_v10
*emit
,
6365 unsigned fs_color_tmp_index
)
6367 const unsigned n
= emit
->key
.fs
.write_color0_to_n_cbufs
;
6369 struct tgsi_full_src_register color_src
=
6370 make_src_temp_reg(fs_color_tmp_index
);
6372 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
6375 for (i
= 0; i
< n
; i
++) {
6376 unsigned output_reg
= emit
->fs
.color_out_index
[i
];
6377 struct tgsi_full_dst_register color_dst
=
6378 make_dst_output_reg(output_reg
);
6380 /* Fill in this semantic here since we'll use it later in
6381 * emit_dst_register().
6383 emit
->info
.output_semantic_name
[output_reg
] = TGSI_SEMANTIC_COLOR
;
6385 /* MOV output.color[i], tempcolor */
6386 emit_instruction_op1(emit
, VGPU10_OPCODE_MOV
, &color_dst
,
6387 &color_src
, FALSE
); /* XXX saturate? */
6393 * Emit extra helper code after the original shader code, but before the
6394 * last END/RET instruction.
6395 * For vertex shaders this means emitting the extra code to apply the
6396 * prescale scale/translation.
6399 emit_post_helpers(struct svga_shader_emitter_v10
*emit
)
6401 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
6402 emit_vertex_instructions(emit
);
6404 else if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
6405 const unsigned fs_color_tmp_index
= emit
->fs
.color_tmp_index
;
6407 /* We no longer want emit_dst_register() to substitute the
6408 * temporary fragment color register for the real color output.
6410 emit
->fs
.color_tmp_index
= INVALID_INDEX
;
6412 if (emit
->key
.fs
.alpha_func
!= SVGA3D_CMP_ALWAYS
) {
6413 emit_alpha_test_instructions(emit
, fs_color_tmp_index
);
6415 if (emit
->key
.fs
.write_color0_to_n_cbufs
> 1) {
6416 emit_broadcast_color_instructions(emit
, fs_color_tmp_index
);
6425 * Translate the TGSI tokens into VGPU10 tokens.
6428 emit_vgpu10_instructions(struct svga_shader_emitter_v10
*emit
,
6429 const struct tgsi_token
*tokens
)
6431 struct tgsi_parse_context parse
;
6433 boolean pre_helpers_emitted
= FALSE
;
6434 unsigned inst_number
= 0;
6436 tgsi_parse_init(&parse
, tokens
);
6438 while (!tgsi_parse_end_of_tokens(&parse
)) {
6439 tgsi_parse_token(&parse
);
6441 switch (parse
.FullToken
.Token
.Type
) {
6442 case TGSI_TOKEN_TYPE_IMMEDIATE
:
6443 ret
= emit_vgpu10_immediate(emit
, &parse
.FullToken
.FullImmediate
);
6448 case TGSI_TOKEN_TYPE_DECLARATION
:
6449 ret
= emit_vgpu10_declaration(emit
, &parse
.FullToken
.FullDeclaration
);
6454 case TGSI_TOKEN_TYPE_INSTRUCTION
:
6455 if (!pre_helpers_emitted
) {
6456 ret
= emit_pre_helpers(emit
);
6459 pre_helpers_emitted
= TRUE
;
6461 ret
= emit_vgpu10_instruction(emit
, inst_number
++,
6462 &parse
.FullToken
.FullInstruction
);
6467 case TGSI_TOKEN_TYPE_PROPERTY
:
6468 ret
= emit_vgpu10_property(emit
, &parse
.FullToken
.FullProperty
);
6479 tgsi_parse_free(&parse
);
6485 * Emit the first VGPU10 shader tokens.
6488 emit_vgpu10_header(struct svga_shader_emitter_v10
*emit
)
6490 VGPU10ProgramToken ptoken
;
6492 /* First token: VGPU10ProgramToken (version info, program type (VS,GS,PS)) */
6493 ptoken
.majorVersion
= 4;
6494 ptoken
.minorVersion
= 0;
6495 ptoken
.programType
= translate_shader_type(emit
->unit
);
6496 if (!emit_dword(emit
, ptoken
.value
))
6499 /* Second token: total length of shader, in tokens. We can't fill this
6500 * in until we're all done. Emit zero for now.
6502 return emit_dword(emit
, 0);
6507 emit_vgpu10_tail(struct svga_shader_emitter_v10
*emit
)
6509 VGPU10ProgramToken
*tokens
;
6511 /* Replace the second token with total shader length */
6512 tokens
= (VGPU10ProgramToken
*) emit
->buf
;
6513 tokens
[1].value
= emit_get_num_tokens(emit
);
/**
 * Modify the FS to read the BCOLORs and use the FACE register
 * to choose between the front/back colors.
 */
static const struct tgsi_token *
transform_fs_twoside(const struct tgsi_token *tokens)
{
   const struct tgsi_token *two_sided;

   if (0) {
      debug_printf("Before tgsi_add_two_side ------------------\n");
      tgsi_dump(tokens,0);
   }

   two_sided = tgsi_add_two_side(tokens);

   if (0) {
      debug_printf("After tgsi_add_two_side ------------------\n");
      tgsi_dump(two_sided, 0);
   }

   return two_sided;
}
6540 * Modify the FS to do polygon stipple.
6542 static const struct tgsi_token
*
6543 transform_fs_pstipple(struct svga_shader_emitter_v10
*emit
,
6544 const struct tgsi_token
*tokens
)
6546 const struct tgsi_token
*new_tokens
;
6550 debug_printf("Before pstipple ------------------\n");
6551 tgsi_dump(tokens
,0);
6554 new_tokens
= util_pstipple_create_fragment_shader(tokens
, &unit
, 0);
6556 emit
->fs
.pstipple_sampler_unit
= unit
;
6558 /* Setup texture state for stipple */
6559 emit
->key
.tex
[unit
].texture_target
= PIPE_TEXTURE_2D
;
6560 emit
->key
.tex
[unit
].swizzle_r
= TGSI_SWIZZLE_X
;
6561 emit
->key
.tex
[unit
].swizzle_g
= TGSI_SWIZZLE_Y
;
6562 emit
->key
.tex
[unit
].swizzle_b
= TGSI_SWIZZLE_Z
;
6563 emit
->key
.tex
[unit
].swizzle_a
= TGSI_SWIZZLE_W
;
6566 debug_printf("After pstipple ------------------\n");
6567 tgsi_dump(new_tokens
, 0);
/**
 * Modify the FS to support anti-aliasing point.
 */
static const struct tgsi_token *
transform_fs_aapoint(const struct tgsi_token *tokens,
                     int aa_coord_index)
{
   const struct tgsi_token *aa_tokens;

   if (0) {
      debug_printf("Before tgsi_add_aa_point ------------------\n");
      tgsi_dump(tokens,0);
   }

   aa_tokens = tgsi_add_aa_point(tokens, aa_coord_index);

   if (0) {
      debug_printf("After tgsi_add_aa_point ------------------\n");
      tgsi_dump(aa_tokens, 0);
   }

   return aa_tokens;
}
6593 * This is the main entrypoint for the TGSI -> VPGU10 translator.
6595 struct svga_shader_variant
*
6596 svga_tgsi_vgpu10_translate(struct svga_context
*svga
,
6597 const struct svga_shader
*shader
,
6598 const struct svga_compile_key
*key
,
6601 struct svga_shader_variant
*variant
= NULL
;
6602 struct svga_shader_emitter_v10
*emit
;
6603 const struct tgsi_token
*tokens
= shader
->tokens
;
6604 struct svga_vertex_shader
*vs
= svga
->curr
.vs
;
6605 struct svga_geometry_shader
*gs
= svga
->curr
.gs
;
6607 assert(unit
== PIPE_SHADER_VERTEX
||
6608 unit
== PIPE_SHADER_GEOMETRY
||
6609 unit
== PIPE_SHADER_FRAGMENT
);
6611 /* These two flags cannot be used together */
6612 assert(key
->vs
.need_prescale
+ key
->vs
.undo_viewport
<= 1);
6615 * Setup the code emitter
6617 emit
= alloc_emitter();
6624 emit
->vposition
.need_prescale
= (emit
->key
.vs
.need_prescale
||
6625 emit
->key
.gs
.need_prescale
);
6626 emit
->vposition
.tmp_index
= INVALID_INDEX
;
6627 emit
->vposition
.so_index
= INVALID_INDEX
;
6628 emit
->vposition
.out_index
= INVALID_INDEX
;
6630 emit
->fs
.color_tmp_index
= INVALID_INDEX
;
6631 emit
->fs
.face_input_index
= INVALID_INDEX
;
6632 emit
->fs
.fragcoord_input_index
= INVALID_INDEX
;
6634 emit
->gs
.prim_id_index
= INVALID_INDEX
;
6636 emit
->clip_dist_out_index
= INVALID_INDEX
;
6637 emit
->clip_dist_tmp_index
= INVALID_INDEX
;
6638 emit
->clip_dist_so_index
= INVALID_INDEX
;
6639 emit
->clip_vertex_out_index
= INVALID_INDEX
;
6641 if (emit
->key
.fs
.alpha_func
== SVGA3D_CMP_INVALID
) {
6642 emit
->key
.fs
.alpha_func
= SVGA3D_CMP_ALWAYS
;
6645 if (unit
== PIPE_SHADER_FRAGMENT
) {
6646 if (key
->fs
.light_twoside
) {
6647 tokens
= transform_fs_twoside(tokens
);
6649 if (key
->fs
.pstipple
) {
6650 const struct tgsi_token
*new_tokens
=
6651 transform_fs_pstipple(emit
, tokens
);
6652 if (tokens
!= shader
->tokens
) {
6653 /* free the two-sided shader tokens */
6654 tgsi_free_tokens(tokens
);
6656 tokens
= new_tokens
;
6658 if (key
->fs
.aa_point
) {
6659 tokens
= transform_fs_aapoint(tokens
, key
->fs
.aa_point_coord_index
);
6663 if (SVGA_DEBUG
& DEBUG_TGSI
) {
6664 debug_printf("#####################################\n");
6665 debug_printf("### TGSI Shader %u\n", shader
->id
);
6666 tgsi_dump(tokens
, 0);
6670 * Rescan the header if the token string is different from the one
6671 * included in the shader; otherwise, the header info is already up-to-date
6673 if (tokens
!= shader
->tokens
) {
6674 tgsi_scan_shader(tokens
, &emit
->info
);
6676 emit
->info
= shader
->info
;
6679 emit
->num_outputs
= emit
->info
.num_outputs
;
6681 if (unit
== PIPE_SHADER_FRAGMENT
) {
6682 /* Compute FS input remapping to match the output from VS/GS */
6684 svga_link_shaders(&gs
->base
.info
, &emit
->info
, &emit
->linkage
);
6687 svga_link_shaders(&vs
->base
.info
, &emit
->info
, &emit
->linkage
);
6689 } else if (unit
== PIPE_SHADER_GEOMETRY
) {
6691 svga_link_shaders(&vs
->base
.info
, &emit
->info
, &emit
->linkage
);
6694 determine_clipping_mode(emit
);
6696 if (unit
== PIPE_SHADER_GEOMETRY
|| unit
== PIPE_SHADER_VERTEX
) {
6697 if (shader
->stream_output
!= NULL
|| emit
->clip_mode
== CLIP_DISTANCE
) {
6698 /* if there is stream output declarations associated
6699 * with this shader or the shader writes to ClipDistance
6700 * then reserve extra registers for the non-adjusted vertex position
6701 * and the ClipDistance shadow copy
6703 emit
->vposition
.so_index
= emit
->num_outputs
++;
6705 if (emit
->clip_mode
== CLIP_DISTANCE
) {
6706 emit
->clip_dist_so_index
= emit
->num_outputs
++;
6707 if (emit
->info
.num_written_clipdistance
> 4)
6708 emit
->num_outputs
++;
6714 * Do actual shader translation.
6716 if (!emit_vgpu10_header(emit
)) {
6717 debug_printf("svga: emit VGPU10 header failed\n");
6721 if (!emit_vgpu10_instructions(emit
, tokens
)) {
6722 debug_printf("svga: emit VGPU10 instructions failed\n");
6726 if (!emit_vgpu10_tail(emit
)) {
6727 debug_printf("svga: emit VGPU10 tail failed\n");
6731 if (emit
->register_overflow
) {
6736 * Create, initialize the 'variant' object.
6738 variant
= CALLOC_STRUCT(svga_shader_variant
);
6742 variant
->shader
= shader
;
6743 variant
->nr_tokens
= emit_get_num_tokens(emit
);
6744 variant
->tokens
= (const unsigned *)emit
->buf
;
6745 emit
->buf
= NULL
; /* buffer is no longer owed by emitter context */
6746 memcpy(&variant
->key
, key
, sizeof(*key
));
6747 variant
->id
= UTIL_BITMASK_INVALID_INDEX
;
6749 /* The extra constant starting offset starts with the number of
6750 * shader constants declared in the shader.
6752 variant
->extra_const_start
= emit
->num_shader_consts
[0];
6753 if (key
->gs
.wide_point
) {
6755 * The extra constant added in the transformed shader
6756 * for inverse viewport scale is to be supplied by the driver.
6757 * So the extra constant starting offset needs to be reduced by 1.
6759 assert(variant
->extra_const_start
> 0);
6760 variant
->extra_const_start
--;
6763 variant
->pstipple_sampler_unit
= emit
->fs
.pstipple_sampler_unit
;
6765 /** keep track in the variant if flat interpolation is used
6766 * for any of the varyings.
6768 variant
->uses_flat_interp
= emit
->uses_flat_interp
;
6770 if (tokens
!= shader
->tokens
) {
6771 tgsi_free_tokens(tokens
);