1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 **********************************************************/
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_dump.h"
29 #include "tgsi/tgsi_parse.h"
30 #include "util/u_memory.h"
31 #include "util/u_math.h"
33 #include "svga_tgsi_emit.h"
34 #include "svga_context.h"
37 static boolean
emit_vs_postamble( struct svga_shader_emitter
*emit
);
38 static boolean
emit_ps_postamble( struct svga_shader_emitter
*emit
);
42 translate_opcode(uint opcode
)
45 case TGSI_OPCODE_ABS
: return SVGA3DOP_ABS
;
46 case TGSI_OPCODE_ADD
: return SVGA3DOP_ADD
;
47 case TGSI_OPCODE_DP2A
: return SVGA3DOP_DP2ADD
;
48 case TGSI_OPCODE_DP3
: return SVGA3DOP_DP3
;
49 case TGSI_OPCODE_DP4
: return SVGA3DOP_DP4
;
50 case TGSI_OPCODE_FRC
: return SVGA3DOP_FRC
;
51 case TGSI_OPCODE_MAD
: return SVGA3DOP_MAD
;
52 case TGSI_OPCODE_MAX
: return SVGA3DOP_MAX
;
53 case TGSI_OPCODE_MIN
: return SVGA3DOP_MIN
;
54 case TGSI_OPCODE_MOV
: return SVGA3DOP_MOV
;
55 case TGSI_OPCODE_MUL
: return SVGA3DOP_MUL
;
56 case TGSI_OPCODE_NOP
: return SVGA3DOP_NOP
;
57 case TGSI_OPCODE_NRM4
: return SVGA3DOP_NRM
;
59 debug_printf("Unkown opcode %u\n", opcode
);
61 return SVGA3DOP_LAST_INST
;
67 translate_file(unsigned file
)
70 case TGSI_FILE_TEMPORARY
: return SVGA3DREG_TEMP
;
71 case TGSI_FILE_INPUT
: return SVGA3DREG_INPUT
;
72 case TGSI_FILE_OUTPUT
: return SVGA3DREG_OUTPUT
; /* VS3.0+ only */
73 case TGSI_FILE_IMMEDIATE
: return SVGA3DREG_CONST
;
74 case TGSI_FILE_CONSTANT
: return SVGA3DREG_CONST
;
75 case TGSI_FILE_SAMPLER
: return SVGA3DREG_SAMPLER
;
76 case TGSI_FILE_ADDRESS
: return SVGA3DREG_ADDR
;
79 return SVGA3DREG_TEMP
;
84 static SVGA3dShaderDestToken
85 translate_dst_register( struct svga_shader_emitter
*emit
,
86 const struct tgsi_full_instruction
*insn
,
89 const struct tgsi_full_dst_register
*reg
= &insn
->Dst
[idx
];
90 SVGA3dShaderDestToken dest
;
92 switch (reg
->Register
.File
) {
93 case TGSI_FILE_OUTPUT
:
94 /* Output registers encode semantic information in their name.
95 * Need to lookup a table built at decl time:
97 dest
= emit
->output_map
[reg
->Register
.Index
];
102 unsigned index
= reg
->Register
.Index
;
103 assert(index
< SVGA3D_TEMPREG_MAX
);
104 index
= MIN2(index
, SVGA3D_TEMPREG_MAX
- 1);
105 dest
= dst_register(translate_file(reg
->Register
.File
), index
);
110 dest
.mask
= reg
->Register
.WriteMask
;
113 if (insn
->Instruction
.Saturate
)
114 dest
.dstMod
= SVGA3DDSTMOD_SATURATE
;
121 * Apply a swizzle to a src_register, returning a new src_register
122 * Ex: swizzle(SRC.ZZYY, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y)
123 * would return SRC.YYZZ
125 static struct src_register
126 swizzle(struct src_register src
,
127 unsigned x
, unsigned y
, unsigned z
, unsigned w
)
133 x
= (src
.base
.swizzle
>> (x
* 2)) & 0x3;
134 y
= (src
.base
.swizzle
>> (y
* 2)) & 0x3;
135 z
= (src
.base
.swizzle
>> (z
* 2)) & 0x3;
136 w
= (src
.base
.swizzle
>> (w
* 2)) & 0x3;
138 src
.base
.swizzle
= TRANSLATE_SWIZZLE(x
, y
, z
, w
);
145 * Apply a "scalar" swizzle to a src_register returning a new
146 * src_register where all the swizzle terms are the same.
147 * Ex: scalar(SRC.WZYX, SWIZZLE_Y) would return SRC.ZZZZ
149 static struct src_register
150 scalar(struct src_register src
, unsigned comp
)
153 return swizzle( src
, comp
, comp
, comp
, comp
);
158 svga_arl_needs_adjustment( const struct svga_shader_emitter
*emit
)
162 for (i
= 0; i
< emit
->num_arl_consts
; ++i
) {
163 if (emit
->arl_consts
[i
].arl_num
== emit
->current_arl
)
171 svga_arl_adjustment( const struct svga_shader_emitter
*emit
)
175 for (i
= 0; i
< emit
->num_arl_consts
; ++i
) {
176 if (emit
->arl_consts
[i
].arl_num
== emit
->current_arl
)
177 return emit
->arl_consts
[i
].number
;
183 static struct src_register
184 translate_src_register( const struct svga_shader_emitter
*emit
,
185 const struct tgsi_full_src_register
*reg
)
187 struct src_register src
;
189 switch (reg
->Register
.File
) {
190 case TGSI_FILE_INPUT
:
191 /* Input registers are referred to by their semantic name rather
192 * than by index. Use the mapping build up from the decls:
194 src
= emit
->input_map
[reg
->Register
.Index
];
197 case TGSI_FILE_IMMEDIATE
:
198 /* Immediates are appended after TGSI constants in the D3D
201 src
= src_register( translate_file( reg
->Register
.File
),
202 reg
->Register
.Index
+ emit
->imm_start
);
206 src
= src_register( translate_file( reg
->Register
.File
),
207 reg
->Register
.Index
);
211 /* Indirect addressing.
213 if (reg
->Register
.Indirect
) {
214 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
215 /* Pixel shaders have only loop registers for relative
216 * addressing into inputs. Ignore the redundant address
217 * register, the contents of aL should be in sync with it.
219 if (reg
->Register
.File
== TGSI_FILE_INPUT
) {
220 src
.base
.relAddr
= 1;
221 src
.indirect
= src_token(SVGA3DREG_LOOP
, 0);
225 /* Constant buffers only.
227 if (reg
->Register
.File
== TGSI_FILE_CONSTANT
) {
228 /* we shift the offset towards the minimum */
229 if (svga_arl_needs_adjustment( emit
)) {
230 src
.base
.num
-= svga_arl_adjustment( emit
);
232 src
.base
.relAddr
= 1;
234 /* Not really sure what should go in the second token:
236 src
.indirect
= src_token( SVGA3DREG_ADDR
,
237 reg
->Indirect
.Index
);
239 src
.indirect
.swizzle
= SWIZZLE_XXXX
;
245 reg
->Register
.SwizzleX
,
246 reg
->Register
.SwizzleY
,
247 reg
->Register
.SwizzleZ
,
248 reg
->Register
.SwizzleW
);
250 /* src.mod isn't a bitfield, unfortunately:
251 * See tgsi_util_get_full_src_register_sign_mode for implementation details.
253 if (reg
->Register
.Absolute
) {
254 if (reg
->Register
.Negate
)
255 src
.base
.srcMod
= SVGA3DSRCMOD_ABSNEG
;
257 src
.base
.srcMod
= SVGA3DSRCMOD_ABS
;
260 if (reg
->Register
.Negate
)
261 src
.base
.srcMod
= SVGA3DSRCMOD_NEG
;
263 src
.base
.srcMod
= SVGA3DSRCMOD_NONE
;
271 * Get a temporary register.
272 * Note: if we exceed the temporary register limit we just use
273 * register SVGA3D_TEMPREG_MAX - 1.
275 static SVGA3dShaderDestToken
276 get_temp( struct svga_shader_emitter
*emit
)
278 int i
= emit
->nr_hw_temp
+ emit
->internal_temp_count
++;
279 assert(i
< SVGA3D_TEMPREG_MAX
);
280 i
= MIN2(i
, SVGA3D_TEMPREG_MAX
- 1);
281 return dst_register( SVGA3DREG_TEMP
, i
);
286 * Release a single temp. Currently only effective if it was the last
287 * allocated temp, otherwise release will be delayed until the next
288 * call to reset_temp_regs().
291 release_temp( struct svga_shader_emitter
*emit
,
292 SVGA3dShaderDestToken temp
)
294 if (temp
.num
== emit
->internal_temp_count
- 1)
295 emit
->internal_temp_count
--;
300 reset_temp_regs(struct svga_shader_emitter
*emit
)
302 emit
->internal_temp_count
= 0;
306 /** Emit bytecode for a src_register */
308 emit_src(struct svga_shader_emitter
*emit
, const struct src_register src
)
310 if (src
.base
.relAddr
) {
311 assert(src
.base
.reserved0
);
312 assert(src
.indirect
.reserved0
);
313 return (svga_shader_emit_dword( emit
, src
.base
.value
) &&
314 svga_shader_emit_dword( emit
, src
.indirect
.value
));
317 assert(src
.base
.reserved0
);
318 return svga_shader_emit_dword( emit
, src
.base
.value
);
323 /** Emit bytecode for a dst_register */
325 emit_dst(struct svga_shader_emitter
*emit
, SVGA3dShaderDestToken dest
)
327 assert(dest
.reserved0
);
329 return svga_shader_emit_dword( emit
, dest
.value
);
333 /** Emit bytecode for a 1-operand instruction */
335 emit_op1(struct svga_shader_emitter
*emit
,
336 SVGA3dShaderInstToken inst
,
337 SVGA3dShaderDestToken dest
,
338 struct src_register src0
)
340 return (emit_instruction(emit
, inst
) &&
341 emit_dst(emit
, dest
) &&
342 emit_src(emit
, src0
));
346 /** Emit bytecode for a 2-operand instruction */
348 emit_op2(struct svga_shader_emitter
*emit
,
349 SVGA3dShaderInstToken inst
,
350 SVGA3dShaderDestToken dest
,
351 struct src_register src0
,
352 struct src_register src1
)
354 return (emit_instruction(emit
, inst
) &&
355 emit_dst(emit
, dest
) &&
356 emit_src(emit
, src0
) &&
357 emit_src(emit
, src1
));
361 /** Emit bytecode for a 3-operand instruction */
363 emit_op3(struct svga_shader_emitter
*emit
,
364 SVGA3dShaderInstToken inst
,
365 SVGA3dShaderDestToken dest
,
366 struct src_register src0
,
367 struct src_register src1
,
368 struct src_register src2
)
370 return (emit_instruction(emit
, inst
) &&
371 emit_dst(emit
, dest
) &&
372 emit_src(emit
, src0
) &&
373 emit_src(emit
, src1
) &&
374 emit_src(emit
, src2
));
378 /** Emit bytecode for a 4-operand instruction */
380 emit_op4(struct svga_shader_emitter
*emit
,
381 SVGA3dShaderInstToken inst
,
382 SVGA3dShaderDestToken dest
,
383 struct src_register src0
,
384 struct src_register src1
,
385 struct src_register src2
,
386 struct src_register src3
)
388 return (emit_instruction(emit
, inst
) &&
389 emit_dst(emit
, dest
) &&
390 emit_src(emit
, src0
) &&
391 emit_src(emit
, src1
) &&
392 emit_src(emit
, src2
) &&
393 emit_src(emit
, src3
));
398 * Apply the absolute value modifier to the given src_register, returning
399 * a new src_register.
401 static struct src_register
402 absolute(struct src_register src
)
404 src
.base
.srcMod
= SVGA3DSRCMOD_ABS
;
410 * Apply the negation modifier to the given src_register, returning
411 * a new src_register.
413 static struct src_register
414 negate(struct src_register src
)
416 switch (src
.base
.srcMod
) {
417 case SVGA3DSRCMOD_ABS
:
418 src
.base
.srcMod
= SVGA3DSRCMOD_ABSNEG
;
420 case SVGA3DSRCMOD_ABSNEG
:
421 src
.base
.srcMod
= SVGA3DSRCMOD_ABS
;
423 case SVGA3DSRCMOD_NEG
:
424 src
.base
.srcMod
= SVGA3DSRCMOD_NONE
;
426 case SVGA3DSRCMOD_NONE
:
427 src
.base
.srcMod
= SVGA3DSRCMOD_NEG
;
435 /* Replace the src with the temporary specified in the dst, but copying
436 * only the necessary channels, and preserving the original swizzle (which is
437 * important given that several opcodes have constraints in the allowed
441 emit_repl(struct svga_shader_emitter
*emit
,
442 SVGA3dShaderDestToken dst
,
443 struct src_register
*src0
)
445 unsigned src0_swizzle
;
448 assert(SVGA3dShaderGetRegType(dst
.value
) == SVGA3DREG_TEMP
);
450 src0_swizzle
= src0
->base
.swizzle
;
453 for (chan
= 0; chan
< 4; ++chan
) {
454 unsigned swizzle
= (src0_swizzle
>> (chan
*2)) & 0x3;
455 dst
.mask
|= 1 << swizzle
;
459 src0
->base
.swizzle
= SVGA3DSWIZZLE_NONE
;
461 if (!emit_op1( emit
, inst_token( SVGA3DOP_MOV
), dst
, *src0
))
465 src0
->base
.swizzle
= src0_swizzle
;
472 submit_op0(struct svga_shader_emitter
*emit
,
473 SVGA3dShaderInstToken inst
,
474 SVGA3dShaderDestToken dest
)
476 return (emit_instruction( emit
, inst
) &&
477 emit_dst( emit
, dest
));
482 submit_op1(struct svga_shader_emitter
*emit
,
483 SVGA3dShaderInstToken inst
,
484 SVGA3dShaderDestToken dest
,
485 struct src_register src0
)
487 return emit_op1( emit
, inst
, dest
, src0
);
492 * SVGA shaders may not refer to >1 constant register in a single
493 * instruction. This function checks for that usage and inserts a
494 * move to temporary if detected.
496 * The same applies to input registers -- at most a single input
497 * register may be read by any instruction.
500 submit_op2(struct svga_shader_emitter
*emit
,
501 SVGA3dShaderInstToken inst
,
502 SVGA3dShaderDestToken dest
,
503 struct src_register src0
,
504 struct src_register src1
)
506 SVGA3dShaderDestToken temp
;
507 SVGA3dShaderRegType type0
, type1
;
508 boolean need_temp
= FALSE
;
511 type0
= SVGA3dShaderGetRegType( src0
.base
.value
);
512 type1
= SVGA3dShaderGetRegType( src1
.base
.value
);
514 if (type0
== SVGA3DREG_CONST
&&
515 type1
== SVGA3DREG_CONST
&&
516 src0
.base
.num
!= src1
.base
.num
)
519 if (type0
== SVGA3DREG_INPUT
&&
520 type1
== SVGA3DREG_INPUT
&&
521 src0
.base
.num
!= src1
.base
.num
)
525 temp
= get_temp( emit
);
527 if (!emit_repl( emit
, temp
, &src0
))
531 if (!emit_op2( emit
, inst
, dest
, src0
, src1
))
535 release_temp( emit
, temp
);
542 * SVGA shaders may not refer to >1 constant register in a single
543 * instruction. This function checks for that usage and inserts a
544 * move to temporary if detected.
547 submit_op3(struct svga_shader_emitter
*emit
,
548 SVGA3dShaderInstToken inst
,
549 SVGA3dShaderDestToken dest
,
550 struct src_register src0
,
551 struct src_register src1
,
552 struct src_register src2
)
554 SVGA3dShaderDestToken temp0
;
555 SVGA3dShaderDestToken temp1
;
556 boolean need_temp0
= FALSE
;
557 boolean need_temp1
= FALSE
;
558 SVGA3dShaderRegType type0
, type1
, type2
;
562 type0
= SVGA3dShaderGetRegType( src0
.base
.value
);
563 type1
= SVGA3dShaderGetRegType( src1
.base
.value
);
564 type2
= SVGA3dShaderGetRegType( src2
.base
.value
);
566 if (inst
.op
!= SVGA3DOP_SINCOS
) {
567 if (type0
== SVGA3DREG_CONST
&&
568 ((type1
== SVGA3DREG_CONST
&& src0
.base
.num
!= src1
.base
.num
) ||
569 (type2
== SVGA3DREG_CONST
&& src0
.base
.num
!= src2
.base
.num
)))
572 if (type1
== SVGA3DREG_CONST
&&
573 (type2
== SVGA3DREG_CONST
&& src1
.base
.num
!= src2
.base
.num
))
577 if (type0
== SVGA3DREG_INPUT
&&
578 ((type1
== SVGA3DREG_INPUT
&& src0
.base
.num
!= src1
.base
.num
) ||
579 (type2
== SVGA3DREG_INPUT
&& src0
.base
.num
!= src2
.base
.num
)))
582 if (type1
== SVGA3DREG_INPUT
&&
583 (type2
== SVGA3DREG_INPUT
&& src1
.base
.num
!= src2
.base
.num
))
587 temp0
= get_temp( emit
);
589 if (!emit_repl( emit
, temp0
, &src0
))
594 temp1
= get_temp( emit
);
596 if (!emit_repl( emit
, temp1
, &src1
))
600 if (!emit_op3( emit
, inst
, dest
, src0
, src1
, src2
))
604 release_temp( emit
, temp1
);
606 release_temp( emit
, temp0
);
612 * SVGA shaders may not refer to >1 constant register in a single
613 * instruction. This function checks for that usage and inserts a
614 * move to temporary if detected.
617 submit_op4(struct svga_shader_emitter
*emit
,
618 SVGA3dShaderInstToken inst
,
619 SVGA3dShaderDestToken dest
,
620 struct src_register src0
,
621 struct src_register src1
,
622 struct src_register src2
,
623 struct src_register src3
)
625 SVGA3dShaderDestToken temp0
;
626 SVGA3dShaderDestToken temp3
;
627 boolean need_temp0
= FALSE
;
628 boolean need_temp3
= FALSE
;
629 SVGA3dShaderRegType type0
, type1
, type2
, type3
;
633 type0
= SVGA3dShaderGetRegType( src0
.base
.value
);
634 type1
= SVGA3dShaderGetRegType( src1
.base
.value
);
635 type2
= SVGA3dShaderGetRegType( src2
.base
.value
);
636 type3
= SVGA3dShaderGetRegType( src2
.base
.value
);
638 /* Make life a little easier - this is only used by the TXD
639 * instruction which is guaranteed not to have a constant/input reg
640 * in one slot at least:
642 assert(type1
== SVGA3DREG_SAMPLER
);
644 if (type0
== SVGA3DREG_CONST
&&
645 ((type3
== SVGA3DREG_CONST
&& src0
.base
.num
!= src3
.base
.num
) ||
646 (type2
== SVGA3DREG_CONST
&& src0
.base
.num
!= src2
.base
.num
)))
649 if (type3
== SVGA3DREG_CONST
&&
650 (type2
== SVGA3DREG_CONST
&& src3
.base
.num
!= src2
.base
.num
))
653 if (type0
== SVGA3DREG_INPUT
&&
654 ((type3
== SVGA3DREG_INPUT
&& src0
.base
.num
!= src3
.base
.num
) ||
655 (type2
== SVGA3DREG_INPUT
&& src0
.base
.num
!= src2
.base
.num
)))
658 if (type3
== SVGA3DREG_INPUT
&&
659 (type2
== SVGA3DREG_INPUT
&& src3
.base
.num
!= src2
.base
.num
))
663 temp0
= get_temp( emit
);
665 if (!emit_repl( emit
, temp0
, &src0
))
670 temp3
= get_temp( emit
);
672 if (!emit_repl( emit
, temp3
, &src3
))
676 if (!emit_op4( emit
, inst
, dest
, src0
, src1
, src2
, src3
))
680 release_temp( emit
, temp3
);
682 release_temp( emit
, temp0
);
688 * Do the src and dest registers refer to the same register?
691 alias_src_dst(struct src_register src
,
692 SVGA3dShaderDestToken dst
)
694 if (src
.base
.num
!= dst
.num
)
697 if (SVGA3dShaderGetRegType(dst
.value
) !=
698 SVGA3dShaderGetRegType(src
.base
.value
))
706 submit_lrp(struct svga_shader_emitter
*emit
,
707 SVGA3dShaderDestToken dst
,
708 struct src_register src0
,
709 struct src_register src1
,
710 struct src_register src2
)
712 SVGA3dShaderDestToken tmp
;
713 boolean need_dst_tmp
= FALSE
;
715 /* The dst reg must be a temporary, and not be the same as src0 or src2 */
716 if (SVGA3dShaderGetRegType(dst
.value
) != SVGA3DREG_TEMP
||
717 alias_src_dst(src0
, dst
) ||
718 alias_src_dst(src2
, dst
))
722 tmp
= get_temp( emit
);
729 if (!submit_op3(emit
, inst_token( SVGA3DOP_LRP
), tmp
, src0
, src1
, src2
))
733 if (!submit_op1(emit
, inst_token( SVGA3DOP_MOV
), dst
, src( tmp
)))
742 emit_def_const(struct svga_shader_emitter
*emit
,
743 SVGA3dShaderConstType type
,
744 unsigned idx
, float a
, float b
, float c
, float d
)
747 SVGA3dShaderInstToken opcode
;
750 case SVGA3D_CONST_TYPE_FLOAT
:
751 opcode
= inst_token( SVGA3DOP_DEF
);
752 def
.dst
= dst_register( SVGA3DREG_CONST
, idx
);
753 def
.constValues
[0] = a
;
754 def
.constValues
[1] = b
;
755 def
.constValues
[2] = c
;
756 def
.constValues
[3] = d
;
758 case SVGA3D_CONST_TYPE_INT
:
759 opcode
= inst_token( SVGA3DOP_DEFI
);
760 def
.dst
= dst_register( SVGA3DREG_CONSTINT
, idx
);
761 def
.constIValues
[0] = (int)a
;
762 def
.constIValues
[1] = (int)b
;
763 def
.constIValues
[2] = (int)c
;
764 def
.constIValues
[3] = (int)d
;
768 opcode
= inst_token( SVGA3DOP_NOP
);
772 if (!emit_instruction(emit
, opcode
) ||
773 !svga_shader_emit_dwords( emit
, def
.values
, Elements(def
.values
)))
781 create_zero_immediate( struct svga_shader_emitter
*emit
)
783 unsigned idx
= emit
->nr_hw_float_const
++;
785 /* Emit the constant (0, 0.5, -1, 1) and use swizzling to generate
786 * other useful vectors.
788 if (!emit_def_const( emit
, SVGA3D_CONST_TYPE_FLOAT
,
789 idx
, 0, 0.5, -1, 1 ))
792 emit
->zero_immediate_idx
= idx
;
793 emit
->created_zero_immediate
= TRUE
;
800 create_loop_const( struct svga_shader_emitter
*emit
)
802 unsigned idx
= emit
->nr_hw_int_const
++;
804 if (!emit_def_const( emit
, SVGA3D_CONST_TYPE_INT
, idx
,
805 255, /* iteration count */
806 0, /* initial value */
808 0 /* not used, must be 0 */))
811 emit
->loop_const_idx
= idx
;
812 emit
->created_loop_const
= TRUE
;
818 create_arl_consts( struct svga_shader_emitter
*emit
)
822 for (i
= 0; i
< emit
->num_arl_consts
; i
+= 4) {
824 unsigned idx
= emit
->nr_hw_float_const
++;
826 for (j
= 0; j
< 4 && (j
+ i
) < emit
->num_arl_consts
; ++j
) {
827 vals
[j
] = (float) emit
->arl_consts
[i
+ j
].number
;
828 emit
->arl_consts
[i
+ j
].idx
= idx
;
831 emit
->arl_consts
[i
+ 0].swizzle
= TGSI_SWIZZLE_X
;
834 emit
->arl_consts
[i
+ 0].swizzle
= TGSI_SWIZZLE_Y
;
837 emit
->arl_consts
[i
+ 0].swizzle
= TGSI_SWIZZLE_Z
;
840 emit
->arl_consts
[i
+ 0].swizzle
= TGSI_SWIZZLE_W
;
847 if (!emit_def_const( emit
, SVGA3D_CONST_TYPE_FLOAT
, idx
,
858 * Return the register which holds the pixel shaders front/back-
861 static struct src_register
862 get_vface( struct svga_shader_emitter
*emit
)
864 assert(emit
->emitted_vface
);
865 return src_register(SVGA3DREG_MISCTYPE
, SVGA3DMISCREG_FACE
);
870 * returns {0, 0, 0, 1} immediate
872 static struct src_register
873 get_zero_immediate( struct svga_shader_emitter
*emit
)
875 assert(emit
->created_zero_immediate
);
876 assert(emit
->zero_immediate_idx
>= 0);
877 return swizzle(src_register( SVGA3DREG_CONST
,
878 emit
->zero_immediate_idx
),
884 * returns {1, 1, 1, -1} immediate
886 static struct src_register
887 get_pos_neg_one_immediate( struct svga_shader_emitter
*emit
)
889 assert(emit
->created_zero_immediate
);
890 assert(emit
->zero_immediate_idx
>= 0);
891 return swizzle(src_register( SVGA3DREG_CONST
,
892 emit
->zero_immediate_idx
),
898 * returns {0.5, 0.5, 0.5, 0.5} immediate
900 static struct src_register
901 get_half_immediate( struct svga_shader_emitter
*emit
)
903 assert(emit
->created_zero_immediate
);
904 assert(emit
->zero_immediate_idx
>= 0);
905 return swizzle(src_register(SVGA3DREG_CONST
, emit
->zero_immediate_idx
),
911 * returns the loop const
913 static struct src_register
914 get_loop_const( struct svga_shader_emitter
*emit
)
916 assert(emit
->created_loop_const
);
917 assert(emit
->loop_const_idx
>= 0);
918 return src_register( SVGA3DREG_CONSTINT
,
919 emit
->loop_const_idx
);
923 static struct src_register
924 get_fake_arl_const( struct svga_shader_emitter
*emit
)
926 struct src_register reg
;
927 int idx
= 0, swizzle
= 0, i
;
929 for (i
= 0; i
< emit
->num_arl_consts
; ++ i
) {
930 if (emit
->arl_consts
[i
].arl_num
== emit
->current_arl
) {
931 idx
= emit
->arl_consts
[i
].idx
;
932 swizzle
= emit
->arl_consts
[i
].swizzle
;
936 reg
= src_register( SVGA3DREG_CONST
, idx
);
937 return scalar(reg
, swizzle
);
942 * Return the register which holds the current dimenions of the
943 * texture bound to the given sampler
945 static struct src_register
946 get_tex_dimensions( struct svga_shader_emitter
*emit
, int sampler_num
)
949 struct src_register reg
;
951 /* the width/height indexes start right after constants */
952 idx
= emit
->key
.fkey
.tex
[sampler_num
].width_height_idx
+
953 emit
->info
.file_max
[TGSI_FILE_CONSTANT
] + 1;
955 reg
= src_register( SVGA3DREG_CONST
, idx
);
961 emit_fake_arl(struct svga_shader_emitter
*emit
,
962 const struct tgsi_full_instruction
*insn
)
964 const struct src_register src0
=
965 translate_src_register(emit
, &insn
->Src
[0] );
966 struct src_register src1
= get_fake_arl_const( emit
);
967 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
968 SVGA3dShaderDestToken tmp
= get_temp( emit
);
970 if (!submit_op1(emit
, inst_token( SVGA3DOP_MOV
), tmp
, src0
))
973 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
), tmp
, src( tmp
),
977 /* replicate the original swizzle */
979 src1
.base
.swizzle
= src0
.base
.swizzle
;
981 return submit_op1( emit
, inst_token( SVGA3DOP_MOVA
),
987 emit_if(struct svga_shader_emitter
*emit
,
988 const struct tgsi_full_instruction
*insn
)
990 struct src_register src0
=
991 translate_src_register(emit
, &insn
->Src
[0]);
992 struct src_register zero
= get_zero_immediate( emit
);
993 SVGA3dShaderInstToken if_token
= inst_token( SVGA3DOP_IFC
);
995 if_token
.control
= SVGA3DOPCOMPC_NE
;
996 zero
= scalar(zero
, TGSI_SWIZZLE_X
);
998 if (SVGA3dShaderGetRegType(src0
.base
.value
) == SVGA3DREG_CONST
) {
1000 * Max different constant registers readable per IFC instruction is 1.
1002 SVGA3dShaderDestToken tmp
= get_temp( emit
);
1004 if (!submit_op1(emit
, inst_token( SVGA3DOP_MOV
), tmp
, src0
))
1007 src0
= scalar(src( tmp
), TGSI_SWIZZLE_X
);
1010 emit
->dynamic_branching_level
++;
1012 return (emit_instruction( emit
, if_token
) &&
1013 emit_src( emit
, src0
) &&
1014 emit_src( emit
, zero
) );
1019 emit_endif(struct svga_shader_emitter
*emit
,
1020 const struct tgsi_full_instruction
*insn
)
1022 emit
->dynamic_branching_level
--;
1024 return emit_instruction(emit
, inst_token(SVGA3DOP_ENDIF
));
1029 emit_else(struct svga_shader_emitter
*emit
,
1030 const struct tgsi_full_instruction
*insn
)
1032 return emit_instruction(emit
, inst_token(SVGA3DOP_ELSE
));
1037 * Translate the following TGSI FLR instruction.
1039 * To the following SVGA3D instruction sequence.
1044 emit_floor(struct svga_shader_emitter
*emit
,
1045 const struct tgsi_full_instruction
*insn
)
1047 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1048 const struct src_register src0
=
1049 translate_src_register(emit
, &insn
->Src
[0] );
1050 SVGA3dShaderDestToken temp
= get_temp( emit
);
1053 if (!submit_op1( emit
, inst_token( SVGA3DOP_FRC
), temp
, src0
))
1056 /* SUB DST, SRC, TMP */
1057 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
), dst
, src0
,
1058 negate( src( temp
) ) ))
1066 * Translate the following TGSI CEIL instruction.
1068 * To the following SVGA3D instruction sequence.
1073 emit_ceil(struct svga_shader_emitter
*emit
,
1074 const struct tgsi_full_instruction
*insn
)
1076 SVGA3dShaderDestToken dst
= translate_dst_register(emit
, insn
, 0);
1077 const struct src_register src0
=
1078 translate_src_register(emit
, &insn
->Src
[0]);
1079 SVGA3dShaderDestToken temp
= get_temp(emit
);
1082 if (!submit_op1(emit
, inst_token(SVGA3DOP_FRC
), temp
, negate(src0
)))
1085 /* ADD DST, SRC, TMP */
1086 if (!submit_op2(emit
, inst_token(SVGA3DOP_ADD
), dst
, src0
, src(temp
)))
1094 * Translate the following TGSI DIV instruction.
1095 * DIV DST.xy, SRC0, SRC1
1096 * To the following SVGA3D instruction sequence.
1097 * RCP TMP.x, SRC1.xxxx
1098 * RCP TMP.y, SRC1.yyyy
1099 * MUL DST.xy, SRC0, TMP
1102 emit_div(struct svga_shader_emitter
*emit
,
1103 const struct tgsi_full_instruction
*insn
)
1105 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1106 const struct src_register src0
=
1107 translate_src_register(emit
, &insn
->Src
[0] );
1108 const struct src_register src1
=
1109 translate_src_register(emit
, &insn
->Src
[1] );
1110 SVGA3dShaderDestToken temp
= get_temp( emit
);
1113 /* For each enabled element, perform a RCP instruction. Note that
1114 * RCP is scalar in SVGA3D:
1116 for (i
= 0; i
< 4; i
++) {
1117 unsigned channel
= 1 << i
;
1118 if (dst
.mask
& channel
) {
1119 /* RCP TMP.?, SRC1.???? */
1120 if (!submit_op1( emit
, inst_token( SVGA3DOP_RCP
),
1121 writemask(temp
, channel
),
1128 * MUL DST, SRC0, TMP
1130 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
), dst
, src0
,
1139 * Translate the following TGSI DP2 instruction.
1140 * DP2 DST, SRC1, SRC2
1141 * To the following SVGA3D instruction sequence.
1142 * MUL TMP, SRC1, SRC2
1143 * ADD DST, TMP.xxxx, TMP.yyyy
1146 emit_dp2(struct svga_shader_emitter
*emit
,
1147 const struct tgsi_full_instruction
*insn
)
1149 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1150 const struct src_register src0
=
1151 translate_src_register(emit
, &insn
->Src
[0]);
1152 const struct src_register src1
=
1153 translate_src_register(emit
, &insn
->Src
[1]);
1154 SVGA3dShaderDestToken temp
= get_temp( emit
);
1155 struct src_register temp_src0
, temp_src1
;
1157 /* MUL TMP, SRC1, SRC2 */
1158 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
), temp
, src0
, src1
))
1161 temp_src0
= scalar(src( temp
), TGSI_SWIZZLE_X
);
1162 temp_src1
= scalar(src( temp
), TGSI_SWIZZLE_Y
);
1164 /* ADD DST, TMP.xxxx, TMP.yyyy */
1165 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
), dst
,
1166 temp_src0
, temp_src1
))
1174 * Translate the following TGSI DPH instruction.
1175 * DPH DST, SRC1, SRC2
1176 * To the following SVGA3D instruction sequence.
1177 * DP3 TMP, SRC1, SRC2
1178 * ADD DST, TMP, SRC2.wwww
1181 emit_dph(struct svga_shader_emitter
*emit
,
1182 const struct tgsi_full_instruction
*insn
)
1184 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1185 const struct src_register src0
= translate_src_register(
1186 emit
, &insn
->Src
[0] );
1187 struct src_register src1
=
1188 translate_src_register(emit
, &insn
->Src
[1]);
1189 SVGA3dShaderDestToken temp
= get_temp( emit
);
1191 /* DP3 TMP, SRC1, SRC2 */
1192 if (!submit_op2( emit
, inst_token( SVGA3DOP_DP3
), temp
, src0
, src1
))
1195 src1
= scalar(src1
, TGSI_SWIZZLE_W
);
1197 /* ADD DST, TMP, SRC2.wwww */
1198 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
), dst
,
1199 src( temp
), src1
))
1207 * Translate the following TGSI DST instruction.
1209 * To the following SVGA3D instruction sequence.
1215 emit_nrm(struct svga_shader_emitter
*emit
,
1216 const struct tgsi_full_instruction
*insn
)
1218 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1219 const struct src_register src0
=
1220 translate_src_register(emit
, &insn
->Src
[0]);
1221 SVGA3dShaderDestToken temp
= get_temp( emit
);
1223 /* DP3 TMP, SRC, SRC */
1224 if (!submit_op2( emit
, inst_token( SVGA3DOP_DP3
), temp
, src0
, src0
))
1228 if (!submit_op1( emit
, inst_token( SVGA3DOP_RSQ
), temp
, src( temp
)))
1231 /* MUL DST, SRC, TMP */
1232 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
), dst
,
1241 do_emit_sincos(struct svga_shader_emitter
*emit
,
1242 SVGA3dShaderDestToken dst
,
1243 struct src_register src0
)
1245 src0
= scalar(src0
, TGSI_SWIZZLE_X
);
1246 return submit_op1(emit
, inst_token(SVGA3DOP_SINCOS
), dst
, src0
);
1251 emit_sincos(struct svga_shader_emitter
*emit
,
1252 const struct tgsi_full_instruction
*insn
)
1254 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1255 struct src_register src0
= translate_src_register(emit
, &insn
->Src
[0]);
1256 SVGA3dShaderDestToken temp
= get_temp( emit
);
1259 if (!do_emit_sincos(emit
, writemask(temp
, TGSI_WRITEMASK_XY
), src0
))
1263 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), dst
, src( temp
) ))
1275 emit_sin(struct svga_shader_emitter
*emit
,
1276 const struct tgsi_full_instruction
*insn
)
1278 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1279 struct src_register src0
=
1280 translate_src_register(emit
, &insn
->Src
[0] );
1281 SVGA3dShaderDestToken temp
= get_temp( emit
);
1284 if (!do_emit_sincos(emit
, writemask(temp
, TGSI_WRITEMASK_Y
), src0
))
1287 src0
= scalar(src( temp
), TGSI_SWIZZLE_Y
);
1289 /* MOV DST TMP.yyyy */
1290 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), dst
, src0
))
1301 emit_cos(struct svga_shader_emitter
*emit
,
1302 const struct tgsi_full_instruction
*insn
)
1304 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1305 struct src_register src0
=
1306 translate_src_register(emit
, &insn
->Src
[0] );
1307 SVGA3dShaderDestToken temp
= get_temp( emit
);
1310 if (!do_emit_sincos( emit
, writemask(temp
, TGSI_WRITEMASK_X
), src0
))
1313 src0
= scalar(src( temp
), TGSI_SWIZZLE_X
);
1315 /* MOV DST TMP.xxxx */
1316 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), dst
, src0
))
1324 emit_ssg(struct svga_shader_emitter
*emit
,
1325 const struct tgsi_full_instruction
*insn
)
1327 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1328 struct src_register src0
=
1329 translate_src_register(emit
, &insn
->Src
[0] );
1330 SVGA3dShaderDestToken temp0
= get_temp( emit
);
1331 SVGA3dShaderDestToken temp1
= get_temp( emit
);
1332 struct src_register zero
, one
;
1334 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
1335 /* SGN DST, SRC0, TMP0, TMP1 */
1336 return submit_op3( emit
, inst_token( SVGA3DOP_SGN
), dst
, src0
,
1337 src( temp0
), src( temp1
) );
1340 zero
= get_zero_immediate( emit
);
1341 one
= scalar( zero
, TGSI_SWIZZLE_W
);
1342 zero
= scalar( zero
, TGSI_SWIZZLE_X
);
1344 /* CMP TMP0, SRC0, one, zero */
1345 if (!submit_op3( emit
, inst_token( SVGA3DOP_CMP
),
1346 writemask( temp0
, dst
.mask
), src0
, one
, zero
))
1349 /* CMP TMP1, negate(SRC0), negate(one), zero */
1350 if (!submit_op3( emit
, inst_token( SVGA3DOP_CMP
),
1351 writemask( temp1
, dst
.mask
), negate( src0
), negate( one
),
1355 /* ADD DST, TMP0, TMP1 */
1356 return submit_op2( emit
, inst_token( SVGA3DOP_ADD
), dst
, src( temp0
),
1362 * ADD DST SRC0, negate(SRC0)
1365 emit_sub(struct svga_shader_emitter
*emit
,
1366 const struct tgsi_full_instruction
*insn
)
1368 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1369 struct src_register src0
= translate_src_register(
1370 emit
, &insn
->Src
[0] );
1371 struct src_register src1
= translate_src_register(
1372 emit
, &insn
->Src
[1] );
1374 src1
= negate(src1
);
1376 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
), dst
,
1385 emit_kill_if(struct svga_shader_emitter
*emit
,
1386 const struct tgsi_full_instruction
*insn
)
1388 const struct tgsi_full_src_register
*reg
= &insn
->Src
[0];
1389 struct src_register src0
, srcIn
;
1390 /* is the W component tested in another position? */
1391 const boolean w_tested
= (reg
->Register
.SwizzleW
== reg
->Register
.SwizzleX
||
1392 reg
->Register
.SwizzleW
== reg
->Register
.SwizzleY
||
1393 reg
->Register
.SwizzleW
== reg
->Register
.SwizzleZ
);
1394 const boolean special
= (reg
->Register
.Absolute
||
1395 reg
->Register
.Negate
||
1396 reg
->Register
.Indirect
||
1397 reg
->Register
.SwizzleX
!= 0 ||
1398 reg
->Register
.SwizzleY
!= 1 ||
1399 reg
->Register
.SwizzleZ
!= 2 ||
1400 reg
->Register
.File
!= TGSI_FILE_TEMPORARY
);
1401 SVGA3dShaderDestToken temp
;
1403 src0
= srcIn
= translate_src_register( emit
, reg
);
1405 if (special
|| !w_tested
) {
1406 /* need a temp reg */
1407 temp
= get_temp( emit
);
1411 /* move the source into a temp register */
1412 submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1413 writemask( temp
, TGSI_WRITEMASK_XYZ
),
1419 /* do the texkill (on the xyz components) */
1420 if (!submit_op0( emit
, inst_token( SVGA3DOP_TEXKILL
), dst(src0
) ))
1424 /* need to emit a second texkill to test the W component */
1425 /* put src.wwww into temp register */
1426 if (!submit_op1(emit
,
1427 inst_token( SVGA3DOP_MOV
),
1428 writemask( temp
, TGSI_WRITEMASK_XYZ
),
1429 scalar(srcIn
, TGSI_SWIZZLE_W
)))
1432 /* second texkill */
1433 if (!submit_op0( emit
, inst_token( SVGA3DOP_TEXKILL
), temp
))
1442 * unconditional kill
1445 emit_kill(struct svga_shader_emitter
*emit
,
1446 const struct tgsi_full_instruction
*insn
)
1448 SVGA3dShaderDestToken temp
;
1449 struct src_register one
= scalar( get_zero_immediate( emit
),
1451 SVGA3dShaderInstToken inst
= inst_token( SVGA3DOP_TEXKILL
);
1453 /* texkill doesn't allow negation on the operand so lets move
1454 * negation of {1} to a temp register */
1455 temp
= get_temp( emit
);
1456 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), temp
,
1460 return submit_op0( emit
, inst
, temp
);
1465 * Test if r1 and r2 are the same register.
1468 same_register(struct src_register r1
, struct src_register r2
)
1470 return (r1
.base
.num
== r2
.base
.num
&&
1471 r1
.base
.type_upper
== r2
.base
.type_upper
&&
1472 r1
.base
.type_lower
== r2
.base
.type_lower
);
1477 /* Implement conditionals by initializing destination reg to 'fail',
1478 * then set predicate reg with UFOP_SETP, then move 'pass' to dest
1479 * based on predicate reg.
1481 * SETP src0, cmp, src1 -- do this first to avoid aliasing problems.
1486 emit_conditional(struct svga_shader_emitter
*emit
,
1487 unsigned compare_func
,
1488 SVGA3dShaderDestToken dst
,
1489 struct src_register src0
,
1490 struct src_register src1
,
1491 struct src_register pass
,
1492 struct src_register fail
)
1494 SVGA3dShaderDestToken pred_reg
= dst_register( SVGA3DREG_PREDICATE
, 0 );
1495 SVGA3dShaderInstToken setp_token
, mov_token
;
1496 setp_token
= inst_token( SVGA3DOP_SETP
);
1498 switch (compare_func
) {
1499 case PIPE_FUNC_NEVER
:
1500 return submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1503 case PIPE_FUNC_LESS
:
1504 setp_token
.control
= SVGA3DOPCOMP_LT
;
1506 case PIPE_FUNC_EQUAL
:
1507 setp_token
.control
= SVGA3DOPCOMP_EQ
;
1509 case PIPE_FUNC_LEQUAL
:
1510 setp_token
.control
= SVGA3DOPCOMP_LE
;
1512 case PIPE_FUNC_GREATER
:
1513 setp_token
.control
= SVGA3DOPCOMP_GT
;
1515 case PIPE_FUNC_NOTEQUAL
:
1516 setp_token
.control
= SVGA3DOPCOMPC_NE
;
1518 case PIPE_FUNC_GEQUAL
:
1519 setp_token
.control
= SVGA3DOPCOMP_GE
;
1521 case PIPE_FUNC_ALWAYS
:
1522 return submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1527 if (same_register(src(dst
), pass
)) {
1528 /* We'll get bad results if the dst and pass registers are the same
1529 * so use a temp register containing pass.
1531 SVGA3dShaderDestToken temp
= get_temp(emit
);
1532 if (!submit_op1(emit
, inst_token(SVGA3DOP_MOV
), temp
, pass
))
1537 /* SETP src0, COMPOP, src1 */
1538 if (!submit_op2( emit
, setp_token
, pred_reg
,
1542 mov_token
= inst_token( SVGA3DOP_MOV
);
1545 if (!submit_op1( emit
, mov_token
, dst
,
1549 /* MOV dst, pass (predicated)
1551 * Note that the predicate reg (and possible modifiers) is passed
1552 * as the first source argument.
1554 mov_token
.predicated
= 1;
1555 if (!submit_op2( emit
, mov_token
, dst
,
1556 src( pred_reg
), pass
))
1564 emit_select(struct svga_shader_emitter
*emit
,
1565 unsigned compare_func
,
1566 SVGA3dShaderDestToken dst
,
1567 struct src_register src0
,
1568 struct src_register src1
)
1570 /* There are some SVGA instructions which implement some selects
1571 * directly, but they are only available in the vertex shader.
1573 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
1574 switch (compare_func
) {
1575 case PIPE_FUNC_GEQUAL
:
1576 return submit_op2( emit
, inst_token( SVGA3DOP_SGE
), dst
, src0
, src1
);
1577 case PIPE_FUNC_LEQUAL
:
1578 return submit_op2( emit
, inst_token( SVGA3DOP_SGE
), dst
, src1
, src0
);
1579 case PIPE_FUNC_GREATER
:
1580 return submit_op2( emit
, inst_token( SVGA3DOP_SLT
), dst
, src1
, src0
);
1581 case PIPE_FUNC_LESS
:
1582 return submit_op2( emit
, inst_token( SVGA3DOP_SLT
), dst
, src0
, src1
);
1588 /* Otherwise, need to use the setp approach:
1591 struct src_register one
, zero
;
1592 /* zero immediate is 0,0,0,1 */
1593 zero
= get_zero_immediate( emit
);
1594 one
= scalar( zero
, TGSI_SWIZZLE_W
);
1595 zero
= scalar( zero
, TGSI_SWIZZLE_X
);
1597 return emit_conditional(
1609 emit_select_op(struct svga_shader_emitter
*emit
,
1611 const struct tgsi_full_instruction
*insn
)
1613 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1614 struct src_register src0
= translate_src_register(
1615 emit
, &insn
->Src
[0] );
1616 struct src_register src1
= translate_src_register(
1617 emit
, &insn
->Src
[1] );
1619 return emit_select( emit
, compare
, dst
, src0
, src1
);
1624 * Translate TGSI CMP instruction.
1627 emit_cmp(struct svga_shader_emitter
*emit
,
1628 const struct tgsi_full_instruction
*insn
)
1630 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1631 const struct src_register src0
=
1632 translate_src_register(emit
, &insn
->Src
[0] );
1633 const struct src_register src1
=
1634 translate_src_register(emit
, &insn
->Src
[1] );
1635 const struct src_register src2
=
1636 translate_src_register(emit
, &insn
->Src
[2] );
1638 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
1639 struct src_register zero
=
1640 scalar(get_zero_immediate(emit
), TGSI_SWIZZLE_X
);
1641 /* We used to simulate CMP with SLT+LRP. But that didn't work when
1642 * src1 or src2 was Inf/NaN. In particular, GLSL sqrt(0) failed
1643 * because it involves a CMP to handle the 0 case.
1644 * Use a conditional expression instead.
1646 return emit_conditional(emit
, PIPE_FUNC_LESS
, dst
,
1647 src0
, zero
, src1
, src2
);
1650 assert(emit
->unit
== PIPE_SHADER_FRAGMENT
);
1652 /* CMP DST, SRC0, SRC2, SRC1 */
1653 return submit_op3( emit
, inst_token( SVGA3DOP_CMP
), dst
,
1660 * Translate texture instructions to SVGA3D representation.
1663 emit_tex2(struct svga_shader_emitter
*emit
,
1664 const struct tgsi_full_instruction
*insn
,
1665 SVGA3dShaderDestToken dst
)
1667 SVGA3dShaderInstToken inst
;
1668 struct src_register texcoord
;
1669 struct src_register sampler
;
1670 SVGA3dShaderDestToken tmp
;
1674 switch (insn
->Instruction
.Opcode
) {
1675 case TGSI_OPCODE_TEX
:
1676 inst
.op
= SVGA3DOP_TEX
;
1678 case TGSI_OPCODE_TXP
:
1679 inst
.op
= SVGA3DOP_TEX
;
1680 inst
.control
= SVGA3DOPCONT_PROJECT
;
1682 case TGSI_OPCODE_TXB
:
1683 inst
.op
= SVGA3DOP_TEX
;
1684 inst
.control
= SVGA3DOPCONT_BIAS
;
1686 case TGSI_OPCODE_TXL
:
1687 inst
.op
= SVGA3DOP_TEXLDL
;
1694 texcoord
= translate_src_register( emit
, &insn
->Src
[0] );
1695 sampler
= translate_src_register( emit
, &insn
->Src
[1] );
1697 if (emit
->key
.fkey
.tex
[sampler
.base
.num
].unnormalized
||
1698 emit
->dynamic_branching_level
> 0)
1699 tmp
= get_temp( emit
);
1701 /* Can't do mipmapping inside dynamic branch constructs. Force LOD
1702 * zero in that case.
1704 if (emit
->dynamic_branching_level
> 0 &&
1705 inst
.op
== SVGA3DOP_TEX
&&
1706 SVGA3dShaderGetRegType(texcoord
.base
.value
) == SVGA3DREG_TEMP
) {
1707 struct src_register zero
= get_zero_immediate( emit
);
1709 /* MOV tmp, texcoord */
1710 if (!submit_op1( emit
,
1711 inst_token( SVGA3DOP_MOV
),
1716 /* MOV tmp.w, zero */
1717 if (!submit_op1( emit
,
1718 inst_token( SVGA3DOP_MOV
),
1719 writemask( tmp
, TGSI_WRITEMASK_W
),
1720 scalar( zero
, TGSI_SWIZZLE_X
)))
1723 texcoord
= src( tmp
);
1724 inst
.op
= SVGA3DOP_TEXLDL
;
1727 /* Explicit normalization of texcoords:
1729 if (emit
->key
.fkey
.tex
[sampler
.base
.num
].unnormalized
) {
1730 struct src_register wh
= get_tex_dimensions( emit
, sampler
.base
.num
);
1732 /* MUL tmp, SRC0, WH */
1733 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
),
1734 tmp
, texcoord
, wh
))
1737 texcoord
= src( tmp
);
1740 return submit_op2( emit
, inst
, dst
, texcoord
, sampler
);
1745 * Translate texture instructions to SVGA3D representation.
1748 emit_tex4(struct svga_shader_emitter
*emit
,
1749 const struct tgsi_full_instruction
*insn
,
1750 SVGA3dShaderDestToken dst
)
1752 SVGA3dShaderInstToken inst
;
1753 struct src_register texcoord
;
1754 struct src_register ddx
;
1755 struct src_register ddy
;
1756 struct src_register sampler
;
1758 texcoord
= translate_src_register( emit
, &insn
->Src
[0] );
1759 ddx
= translate_src_register( emit
, &insn
->Src
[1] );
1760 ddy
= translate_src_register( emit
, &insn
->Src
[2] );
1761 sampler
= translate_src_register( emit
, &insn
->Src
[3] );
1765 switch (insn
->Instruction
.Opcode
) {
1766 case TGSI_OPCODE_TXD
:
1767 inst
.op
= SVGA3DOP_TEXLDD
; /* 4 args! */
1774 return submit_op4( emit
, inst
, dst
, texcoord
, sampler
, ddx
, ddy
);
1779 * Emit texture swizzle code.
1782 emit_tex_swizzle(struct svga_shader_emitter
*emit
,
1783 SVGA3dShaderDestToken dst
,
1784 struct src_register src
,
1790 const unsigned swizzleIn
[4] = {swizzle_x
, swizzle_y
, swizzle_z
, swizzle_w
};
1791 unsigned srcSwizzle
[4];
1792 unsigned srcWritemask
= 0x0, zeroWritemask
= 0x0, oneWritemask
= 0x0;
1795 /* build writemasks and srcSwizzle terms */
1796 for (i
= 0; i
< 4; i
++) {
1797 if (swizzleIn
[i
] == PIPE_SWIZZLE_ZERO
) {
1798 srcSwizzle
[i
] = TGSI_SWIZZLE_X
+ i
;
1799 zeroWritemask
|= (1 << i
);
1801 else if (swizzleIn
[i
] == PIPE_SWIZZLE_ONE
) {
1802 srcSwizzle
[i
] = TGSI_SWIZZLE_X
+ i
;
1803 oneWritemask
|= (1 << i
);
1806 srcSwizzle
[i
] = swizzleIn
[i
];
1807 srcWritemask
|= (1 << i
);
1811 /* write x/y/z/w comps */
1812 if (dst
.mask
& srcWritemask
) {
1813 if (!submit_op1(emit
,
1814 inst_token(SVGA3DOP_MOV
),
1815 writemask(dst
, srcWritemask
),
1825 if (dst
.mask
& zeroWritemask
) {
1826 if (!submit_op1(emit
,
1827 inst_token(SVGA3DOP_MOV
),
1828 writemask(dst
, zeroWritemask
),
1829 scalar(get_zero_immediate(emit
), TGSI_SWIZZLE_X
)))
1834 if (dst
.mask
& oneWritemask
) {
1835 if (!submit_op1(emit
,
1836 inst_token(SVGA3DOP_MOV
),
1837 writemask(dst
, oneWritemask
),
1838 scalar(get_zero_immediate(emit
), TGSI_SWIZZLE_W
)))
1847 emit_tex(struct svga_shader_emitter
*emit
,
1848 const struct tgsi_full_instruction
*insn
)
1850 SVGA3dShaderDestToken dst
=
1851 translate_dst_register( emit
, insn
, 0 );
1852 struct src_register src0
=
1853 translate_src_register( emit
, &insn
->Src
[0] );
1854 struct src_register src1
=
1855 translate_src_register( emit
, &insn
->Src
[1] );
1857 SVGA3dShaderDestToken tex_result
;
1858 const unsigned unit
= src1
.base
.num
;
1860 /* check for shadow samplers */
1861 boolean compare
= (emit
->key
.fkey
.tex
[unit
].compare_mode
==
1862 PIPE_TEX_COMPARE_R_TO_TEXTURE
);
1864 /* texture swizzle */
1865 boolean swizzle
= (emit
->key
.fkey
.tex
[unit
].swizzle_r
!= PIPE_SWIZZLE_RED
||
1866 emit
->key
.fkey
.tex
[unit
].swizzle_g
!= PIPE_SWIZZLE_GREEN
||
1867 emit
->key
.fkey
.tex
[unit
].swizzle_b
!= PIPE_SWIZZLE_BLUE
||
1868 emit
->key
.fkey
.tex
[unit
].swizzle_a
!= PIPE_SWIZZLE_ALPHA
);
1870 boolean saturate
= insn
->Instruction
.Saturate
!= TGSI_SAT_NONE
;
1872 /* If doing compare processing or tex swizzle or saturation, we need to put
1873 * the fetched color into a temporary so it can be used as a source later on.
1875 if (compare
|| swizzle
|| saturate
) {
1876 tex_result
= get_temp( emit
);
1882 switch(insn
->Instruction
.Opcode
) {
1883 case TGSI_OPCODE_TEX
:
1884 case TGSI_OPCODE_TXB
:
1885 case TGSI_OPCODE_TXP
:
1886 case TGSI_OPCODE_TXL
:
1887 if (!emit_tex2( emit
, insn
, tex_result
))
1890 case TGSI_OPCODE_TXD
:
1891 if (!emit_tex4( emit
, insn
, tex_result
))
1899 SVGA3dShaderDestToken dst2
;
1901 if (swizzle
|| saturate
)
1906 if (dst
.mask
& TGSI_WRITEMASK_XYZ
) {
1907 SVGA3dShaderDestToken src0_zdivw
= get_temp( emit
);
1908 /* When sampling a depth texture, the result of the comparison is in
1911 struct src_register tex_src_x
= scalar(src(tex_result
), TGSI_SWIZZLE_Y
);
1912 struct src_register r_coord
;
1914 if (insn
->Instruction
.Opcode
== TGSI_OPCODE_TXP
) {
1915 /* Divide texcoord R by Q */
1916 if (!submit_op1( emit
, inst_token( SVGA3DOP_RCP
),
1917 writemask(src0_zdivw
, TGSI_WRITEMASK_X
),
1918 scalar(src0
, TGSI_SWIZZLE_W
) ))
1921 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
),
1922 writemask(src0_zdivw
, TGSI_WRITEMASK_X
),
1923 scalar(src0
, TGSI_SWIZZLE_Z
),
1924 scalar(src(src0_zdivw
), TGSI_SWIZZLE_X
) ))
1927 r_coord
= scalar(src(src0_zdivw
), TGSI_SWIZZLE_X
);
1930 r_coord
= scalar(src0
, TGSI_SWIZZLE_Z
);
1933 /* Compare texture sample value against R component of texcoord */
1934 if (!emit_select(emit
,
1935 emit
->key
.fkey
.tex
[unit
].compare_func
,
1936 writemask( dst2
, TGSI_WRITEMASK_XYZ
),
1942 if (dst
.mask
& TGSI_WRITEMASK_W
) {
1943 struct src_register one
=
1944 scalar( get_zero_immediate( emit
), TGSI_SWIZZLE_W
);
1946 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1947 writemask( dst2
, TGSI_WRITEMASK_W
),
1953 if (saturate
&& !swizzle
) {
1954 /* MOV_SAT real_dst, dst */
1955 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), dst
, src(tex_result
) ))
1959 /* swizzle from tex_result to dst (handles saturation too, if any) */
1960 emit_tex_swizzle(emit
,
1961 dst
, src(tex_result
),
1962 emit
->key
.fkey
.tex
[unit
].swizzle_r
,
1963 emit
->key
.fkey
.tex
[unit
].swizzle_g
,
1964 emit
->key
.fkey
.tex
[unit
].swizzle_b
,
1965 emit
->key
.fkey
.tex
[unit
].swizzle_a
);
1973 emit_bgnloop2(struct svga_shader_emitter
*emit
,
1974 const struct tgsi_full_instruction
*insn
)
1976 SVGA3dShaderInstToken inst
= inst_token( SVGA3DOP_LOOP
);
1977 struct src_register loop_reg
= src_register( SVGA3DREG_LOOP
, 0 );
1978 struct src_register const_int
= get_loop_const( emit
);
1980 emit
->dynamic_branching_level
++;
1982 return (emit_instruction( emit
, inst
) &&
1983 emit_src( emit
, loop_reg
) &&
1984 emit_src( emit
, const_int
) );
1989 emit_endloop2(struct svga_shader_emitter
*emit
,
1990 const struct tgsi_full_instruction
*insn
)
1992 SVGA3dShaderInstToken inst
= inst_token( SVGA3DOP_ENDLOOP
);
1994 emit
->dynamic_branching_level
--;
1996 return emit_instruction( emit
, inst
);
2001 emit_brk(struct svga_shader_emitter
*emit
,
2002 const struct tgsi_full_instruction
*insn
)
2004 SVGA3dShaderInstToken inst
= inst_token( SVGA3DOP_BREAK
);
2005 return emit_instruction( emit
, inst
);
2010 emit_scalar_op1(struct svga_shader_emitter
*emit
,
2012 const struct tgsi_full_instruction
*insn
)
2014 SVGA3dShaderInstToken inst
;
2015 SVGA3dShaderDestToken dst
;
2016 struct src_register src
;
2018 inst
= inst_token( opcode
);
2019 dst
= translate_dst_register( emit
, insn
, 0 );
2020 src
= translate_src_register( emit
, &insn
->Src
[0] );
2021 src
= scalar( src
, TGSI_SWIZZLE_X
);
2023 return submit_op1( emit
, inst
, dst
, src
);
2028 emit_simple_instruction(struct svga_shader_emitter
*emit
,
2030 const struct tgsi_full_instruction
*insn
)
2032 const struct tgsi_full_src_register
*src
= insn
->Src
;
2033 SVGA3dShaderInstToken inst
;
2034 SVGA3dShaderDestToken dst
;
2036 inst
= inst_token( opcode
);
2037 dst
= translate_dst_register( emit
, insn
, 0 );
2039 switch (insn
->Instruction
.NumSrcRegs
) {
2041 return submit_op0( emit
, inst
, dst
);
2043 return submit_op1( emit
, inst
, dst
,
2044 translate_src_register( emit
, &src
[0] ));
2046 return submit_op2( emit
, inst
, dst
,
2047 translate_src_register( emit
, &src
[0] ),
2048 translate_src_register( emit
, &src
[1] ) );
2050 return submit_op3( emit
, inst
, dst
,
2051 translate_src_register( emit
, &src
[0] ),
2052 translate_src_register( emit
, &src
[1] ),
2053 translate_src_register( emit
, &src
[2] ) );
2062 emit_deriv(struct svga_shader_emitter
*emit
,
2063 const struct tgsi_full_instruction
*insn
)
2065 if (emit
->dynamic_branching_level
> 0 &&
2066 insn
->Src
[0].Register
.File
== TGSI_FILE_TEMPORARY
)
2068 struct src_register zero
= get_zero_immediate( emit
);
2069 SVGA3dShaderDestToken dst
=
2070 translate_dst_register( emit
, insn
, 0 );
2072 /* Deriv opcodes not valid inside dynamic branching, workaround
2073 * by zeroing out the destination.
2075 if (!submit_op1(emit
,
2076 inst_token( SVGA3DOP_MOV
),
2078 scalar(zero
, TGSI_SWIZZLE_X
)))
2085 const struct tgsi_full_src_register
*reg
= &insn
->Src
[0];
2086 SVGA3dShaderInstToken inst
;
2087 SVGA3dShaderDestToken dst
;
2088 struct src_register src0
;
2090 switch (insn
->Instruction
.Opcode
) {
2091 case TGSI_OPCODE_DDX
:
2092 opcode
= SVGA3DOP_DSX
;
2094 case TGSI_OPCODE_DDY
:
2095 opcode
= SVGA3DOP_DSY
;
2101 inst
= inst_token( opcode
);
2102 dst
= translate_dst_register( emit
, insn
, 0 );
2103 src0
= translate_src_register( emit
, reg
);
2105 /* We cannot use negate or abs on source to dsx/dsy instruction.
2107 if (reg
->Register
.Absolute
||
2108 reg
->Register
.Negate
) {
2109 SVGA3dShaderDestToken temp
= get_temp( emit
);
2111 if (!emit_repl( emit
, temp
, &src0
))
2115 return submit_op1( emit
, inst
, dst
, src0
);
2121 emit_arl(struct svga_shader_emitter
*emit
,
2122 const struct tgsi_full_instruction
*insn
)
2124 ++emit
->current_arl
;
2125 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
2126 /* MOVA not present in pixel shader instruction set.
2127 * Ignore this instruction altogether since it is
2128 * only used for loop counters -- and for that
2129 * we reference aL directly.
2133 if (svga_arl_needs_adjustment( emit
)) {
2134 return emit_fake_arl( emit
, insn
);
2136 /* no need to adjust, just emit straight arl */
2137 return emit_simple_instruction(emit
, SVGA3DOP_MOVA
, insn
);
2143 emit_pow(struct svga_shader_emitter
*emit
,
2144 const struct tgsi_full_instruction
*insn
)
2146 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
2147 struct src_register src0
= translate_src_register(
2148 emit
, &insn
->Src
[0] );
2149 struct src_register src1
= translate_src_register(
2150 emit
, &insn
->Src
[1] );
2151 boolean need_tmp
= FALSE
;
2153 /* POW can only output to a temporary */
2154 if (insn
->Dst
[0].Register
.File
!= TGSI_FILE_TEMPORARY
)
2157 /* POW src1 must not be the same register as dst */
2158 if (alias_src_dst( src1
, dst
))
2161 /* it's a scalar op */
2162 src0
= scalar( src0
, TGSI_SWIZZLE_X
);
2163 src1
= scalar( src1
, TGSI_SWIZZLE_X
);
2166 SVGA3dShaderDestToken tmp
=
2167 writemask(get_temp( emit
), TGSI_WRITEMASK_X
);
2169 if (!submit_op2(emit
, inst_token( SVGA3DOP_POW
), tmp
, src0
, src1
))
2172 return submit_op1(emit
, inst_token( SVGA3DOP_MOV
),
2173 dst
, scalar(src(tmp
), 0) );
2176 return submit_op2(emit
, inst_token( SVGA3DOP_POW
), dst
, src0
, src1
);
2182 emit_xpd(struct svga_shader_emitter
*emit
,
2183 const struct tgsi_full_instruction
*insn
)
2185 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
2186 const struct src_register src0
= translate_src_register(
2187 emit
, &insn
->Src
[0] );
2188 const struct src_register src1
= translate_src_register(
2189 emit
, &insn
->Src
[1] );
2190 boolean need_dst_tmp
= FALSE
;
2192 /* XPD can only output to a temporary */
2193 if (SVGA3dShaderGetRegType(dst
.value
) != SVGA3DREG_TEMP
)
2194 need_dst_tmp
= TRUE
;
2196 /* The dst reg must not be the same as src0 or src1*/
2197 if (alias_src_dst(src0
, dst
) ||
2198 alias_src_dst(src1
, dst
))
2199 need_dst_tmp
= TRUE
;
2202 SVGA3dShaderDestToken tmp
= get_temp( emit
);
2204 /* Obey DX9 restrictions on mask:
2206 tmp
.mask
= dst
.mask
& TGSI_WRITEMASK_XYZ
;
2208 if (!submit_op2(emit
, inst_token( SVGA3DOP_CRS
), tmp
, src0
, src1
))
2211 if (!submit_op1(emit
, inst_token( SVGA3DOP_MOV
), dst
, src( tmp
)))
2215 if (!submit_op2(emit
, inst_token( SVGA3DOP_CRS
), dst
, src0
, src1
))
2219 /* Need to emit 1.0 to dst.w?
2221 if (dst
.mask
& TGSI_WRITEMASK_W
) {
2222 struct src_register zero
= get_zero_immediate( emit
);
2224 if (!submit_op1(emit
,
2225 inst_token( SVGA3DOP_MOV
),
2226 writemask(dst
, TGSI_WRITEMASK_W
),
2236 emit_lrp(struct svga_shader_emitter
*emit
,
2237 const struct tgsi_full_instruction
*insn
)
2239 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
2240 const struct src_register src0
= translate_src_register(
2241 emit
, &insn
->Src
[0] );
2242 const struct src_register src1
= translate_src_register(
2243 emit
, &insn
->Src
[1] );
2244 const struct src_register src2
= translate_src_register(
2245 emit
, &insn
->Src
[2] );
2247 return submit_lrp(emit
, dst
, src0
, src1
, src2
);
2252 emit_dst_insn(struct svga_shader_emitter
*emit
,
2253 const struct tgsi_full_instruction
*insn
)
2255 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
2256 /* SVGA/DX9 has a DST instruction, but only for vertex shaders:
2258 return emit_simple_instruction(emit
, SVGA3DOP_DST
, insn
);
2261 /* result[0] = 1 * 1;
2262 * result[1] = a[1] * b[1];
2263 * result[2] = a[2] * 1;
2264 * result[3] = 1 * b[3];
2266 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
2267 SVGA3dShaderDestToken tmp
;
2268 const struct src_register src0
= translate_src_register(
2269 emit
, &insn
->Src
[0] );
2270 const struct src_register src1
= translate_src_register(
2271 emit
, &insn
->Src
[1] );
2272 struct src_register zero
= get_zero_immediate( emit
);
2273 boolean need_tmp
= FALSE
;
2275 if (SVGA3dShaderGetRegType(dst
.value
) != SVGA3DREG_TEMP
||
2276 alias_src_dst(src0
, dst
) ||
2277 alias_src_dst(src1
, dst
))
2281 tmp
= get_temp( emit
);
2289 if (tmp
.mask
& TGSI_WRITEMASK_XW
) {
2290 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
2291 writemask(tmp
, TGSI_WRITEMASK_XW
),
2298 if (tmp
.mask
& TGSI_WRITEMASK_YZ
) {
2299 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
2300 writemask(tmp
, TGSI_WRITEMASK_YZ
),
2305 /* tmp.yw = tmp * src1
2307 if (tmp
.mask
& TGSI_WRITEMASK_YW
) {
2308 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
),
2309 writemask(tmp
, TGSI_WRITEMASK_YW
),
2318 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
2330 emit_exp(struct svga_shader_emitter
*emit
,
2331 const struct tgsi_full_instruction
*insn
)
2333 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
2334 struct src_register src0
=
2335 translate_src_register( emit
, &insn
->Src
[0] );
2336 struct src_register zero
= get_zero_immediate( emit
);
2337 SVGA3dShaderDestToken fraction
;
2339 if (dst
.mask
& TGSI_WRITEMASK_Y
)
2341 else if (dst
.mask
& TGSI_WRITEMASK_X
)
2342 fraction
= get_temp( emit
);
2346 /* If y is being written, fill it with src0 - floor(src0).
2348 if (dst
.mask
& TGSI_WRITEMASK_XY
) {
2349 if (!submit_op1( emit
, inst_token( SVGA3DOP_FRC
),
2350 writemask( fraction
, TGSI_WRITEMASK_Y
),
2355 /* If x is being written, fill it with 2 ^ floor(src0).
2357 if (dst
.mask
& TGSI_WRITEMASK_X
) {
2358 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
),
2359 writemask( dst
, TGSI_WRITEMASK_X
),
2361 scalar( negate( src( fraction
) ), TGSI_SWIZZLE_Y
) ) )
2364 if (!submit_op1( emit
, inst_token( SVGA3DOP_EXP
),
2365 writemask( dst
, TGSI_WRITEMASK_X
),
2366 scalar( src( dst
), TGSI_SWIZZLE_X
) ) )
2369 if (!(dst
.mask
& TGSI_WRITEMASK_Y
))
2370 release_temp( emit
, fraction
);
2373 /* If z is being written, fill it with 2 ^ src0 (partial precision).
2375 if (dst
.mask
& TGSI_WRITEMASK_Z
) {
2376 if (!submit_op1( emit
, inst_token( SVGA3DOP_EXPP
),
2377 writemask( dst
, TGSI_WRITEMASK_Z
),
2382 /* If w is being written, fill it with one.
2384 if (dst
.mask
& TGSI_WRITEMASK_W
) {
2385 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
2386 writemask(dst
, TGSI_WRITEMASK_W
),
2387 scalar( zero
, TGSI_SWIZZLE_W
) ))
2396 emit_lit(struct svga_shader_emitter
*emit
,
2397 const struct tgsi_full_instruction
*insn
)
2399 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
2400 /* SVGA/DX9 has a LIT instruction, but only for vertex shaders:
2402 return emit_simple_instruction(emit
, SVGA3DOP_LIT
, insn
);
2405 /* D3D vs. GL semantics can be fairly easily accomodated by
2406 * variations on this sequence.
2410 * tmp.z = pow(src.y,src.w)
2411 * p0 = src0.xxxx > 0
2412 * result = zero.wxxw
2413 * (p0) result.yz = tmp
2417 * tmp.z = pow(src.y,src.w)
2418 * p0 = src0.xxyy > 0
2419 * result = zero.wxxw
2420 * (p0) result.yz = tmp
2422 * Will implement the GL version for now.
2424 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
2425 SVGA3dShaderDestToken tmp
= get_temp( emit
);
2426 const struct src_register src0
= translate_src_register(
2427 emit
, &insn
->Src
[0] );
2428 struct src_register zero
= get_zero_immediate( emit
);
2430 /* tmp = pow(src.y, src.w)
2432 if (dst
.mask
& TGSI_WRITEMASK_Z
) {
2433 if (!submit_op2(emit
, inst_token( SVGA3DOP_POW
),
2442 if (dst
.mask
& TGSI_WRITEMASK_Y
) {
2443 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
2444 writemask(tmp
, TGSI_WRITEMASK_Y
),
2449 /* Can't quite do this with emit conditional due to the extra
2450 * writemask on the predicated mov:
2453 SVGA3dShaderDestToken pred_reg
= dst_register( SVGA3DREG_PREDICATE
, 0 );
2454 SVGA3dShaderInstToken setp_token
, mov_token
;
2455 struct src_register predsrc
;
2457 setp_token
= inst_token( SVGA3DOP_SETP
);
2458 mov_token
= inst_token( SVGA3DOP_MOV
);
2460 setp_token
.control
= SVGA3DOPCOMP_GT
;
2462 /* D3D vs GL semantics:
2465 predsrc
= swizzle(src0
, 0, 0, 1, 1); /* D3D */
2467 predsrc
= swizzle(src0
, 0, 0, 0, 0); /* GL */
2469 /* SETP src0.xxyy, GT, {0}.x */
2470 if (!submit_op2( emit
, setp_token
, pred_reg
,
2472 swizzle(zero
, 0, 0, 0, 0) ))
2476 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), dst
,
2477 swizzle(zero
, 3, 0, 0, 3 )))
2480 /* MOV dst.yz, tmp (predicated)
2482 * Note that the predicate reg (and possible modifiers) is passed
2483 * as the first source argument.
2485 if (dst
.mask
& TGSI_WRITEMASK_YZ
) {
2486 mov_token
.predicated
= 1;
2487 if (!submit_op2( emit
, mov_token
,
2488 writemask(dst
, TGSI_WRITEMASK_YZ
),
2489 src( pred_reg
), src( tmp
) ))
2500 emit_ex2(struct svga_shader_emitter
*emit
,
2501 const struct tgsi_full_instruction
*insn
)
2503 SVGA3dShaderInstToken inst
;
2504 SVGA3dShaderDestToken dst
;
2505 struct src_register src0
;
2507 inst
= inst_token( SVGA3DOP_EXP
);
2508 dst
= translate_dst_register( emit
, insn
, 0 );
2509 src0
= translate_src_register( emit
, &insn
->Src
[0] );
2510 src0
= scalar( src0
, TGSI_SWIZZLE_X
);
2512 if (dst
.mask
!= TGSI_WRITEMASK_XYZW
) {
2513 SVGA3dShaderDestToken tmp
= get_temp( emit
);
2515 if (!submit_op1( emit
, inst
, tmp
, src0
))
2518 return submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
2520 scalar( src( tmp
), TGSI_SWIZZLE_X
) );
2523 return submit_op1( emit
, inst
, dst
, src0
);
2528 emit_log(struct svga_shader_emitter
*emit
,
2529 const struct tgsi_full_instruction
*insn
)
2531 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
2532 struct src_register src0
=
2533 translate_src_register( emit
, &insn
->Src
[0] );
2534 struct src_register zero
= get_zero_immediate( emit
);
2535 SVGA3dShaderDestToken abs_tmp
;
2536 struct src_register abs_src0
;
2537 SVGA3dShaderDestToken log2_abs
;
2541 if (dst
.mask
& TGSI_WRITEMASK_Z
)
2543 else if (dst
.mask
& TGSI_WRITEMASK_XY
)
2544 log2_abs
= get_temp( emit
);
2548 /* If z is being written, fill it with log2( abs( src0 ) ).
2550 if (dst
.mask
& TGSI_WRITEMASK_XYZ
) {
2551 if (!src0
.base
.srcMod
|| src0
.base
.srcMod
== SVGA3DSRCMOD_ABS
)
2554 abs_tmp
= get_temp( emit
);
2556 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
2561 abs_src0
= src( abs_tmp
);
2564 abs_src0
= absolute( scalar( abs_src0
, TGSI_SWIZZLE_X
) );
2566 if (!submit_op1( emit
, inst_token( SVGA3DOP_LOG
),
2567 writemask( log2_abs
, TGSI_WRITEMASK_Z
),
2572 if (dst
.mask
& TGSI_WRITEMASK_XY
) {
2573 SVGA3dShaderDestToken floor_log2
;
2575 if (dst
.mask
& TGSI_WRITEMASK_X
)
2578 floor_log2
= get_temp( emit
);
2580 /* If x is being written, fill it with floor( log2( abs( src0 ) ) ).
2582 if (!submit_op1( emit
, inst_token( SVGA3DOP_FRC
),
2583 writemask( floor_log2
, TGSI_WRITEMASK_X
),
2584 scalar( src( log2_abs
), TGSI_SWIZZLE_Z
) ) )
2587 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
),
2588 writemask( floor_log2
, TGSI_WRITEMASK_X
),
2589 scalar( src( log2_abs
), TGSI_SWIZZLE_Z
),
2590 negate( src( floor_log2
) ) ) )
2593 /* If y is being written, fill it with
2594 * abs ( src0 ) / ( 2 ^ floor( log2( abs( src0 ) ) ) ).
2596 if (dst
.mask
& TGSI_WRITEMASK_Y
) {
2597 if (!submit_op1( emit
, inst_token( SVGA3DOP_EXP
),
2598 writemask( dst
, TGSI_WRITEMASK_Y
),
2599 negate( scalar( src( floor_log2
),
2600 TGSI_SWIZZLE_X
) ) ) )
2603 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
),
2604 writemask( dst
, TGSI_WRITEMASK_Y
),
2610 if (!(dst
.mask
& TGSI_WRITEMASK_X
))
2611 release_temp( emit
, floor_log2
);
2613 if (!(dst
.mask
& TGSI_WRITEMASK_Z
))
2614 release_temp( emit
, log2_abs
);
2617 if (dst
.mask
& TGSI_WRITEMASK_XYZ
&& src0
.base
.srcMod
&&
2618 src0
.base
.srcMod
!= SVGA3DSRCMOD_ABS
)
2619 release_temp( emit
, abs_tmp
);
2621 /* If w is being written, fill it with one.
2623 if (dst
.mask
& TGSI_WRITEMASK_W
) {
2624 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
2625 writemask(dst
, TGSI_WRITEMASK_W
),
2626 scalar( zero
, TGSI_SWIZZLE_W
) ))
2635 * Translate TGSI TRUNC or ROUND instruction.
2636 * We need to truncate toward zero. Ex: trunc(-1.9) = -1
2637 * Different approaches are needed for VS versus PS.
2640 emit_trunc_round(struct svga_shader_emitter
*emit
,
2641 const struct tgsi_full_instruction
*insn
,
2644 SVGA3dShaderDestToken dst
= translate_dst_register(emit
, insn
, 0);
2645 const struct src_register src0
=
2646 translate_src_register(emit
, &insn
->Src
[0] );
2647 SVGA3dShaderDestToken t1
= get_temp(emit
);
2650 SVGA3dShaderDestToken t0
= get_temp(emit
);
2651 struct src_register half
= get_half_immediate(emit
);
2653 /* t0 = abs(src0) + 0.5 */
2654 if (!submit_op2(emit
, inst_token(SVGA3DOP_ADD
), t0
,
2655 absolute(src0
), half
))
2658 /* t1 = fract(t0) */
2659 if (!submit_op1(emit
, inst_token(SVGA3DOP_FRC
), t1
, src(t0
)))
2663 if (!submit_op2(emit
, inst_token(SVGA3DOP_ADD
), t1
, src(t0
),
2670 /* t1 = fract(abs(src0)) */
2671 if (!submit_op1(emit
, inst_token(SVGA3DOP_FRC
), t1
, absolute(src0
)))
2674 /* t1 = abs(src0) - t1 */
2675 if (!submit_op2(emit
, inst_token(SVGA3DOP_ADD
), t1
, absolute(src0
),
2681 * Now we need to multiply t1 by the sign of the original value.
2683 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
2684 /* For VS: use SGN instruction */
2685 /* Need two extra/dummy registers: */
2686 SVGA3dShaderDestToken t2
= get_temp(emit
), t3
= get_temp(emit
),
2687 t4
= get_temp(emit
);
2689 /* t2 = sign(src0) */
2690 if (!submit_op3(emit
, inst_token(SVGA3DOP_SGN
), t2
, src0
,
2695 if (!submit_op2(emit
, inst_token(SVGA3DOP_MUL
), dst
, src(t1
), src(t2
)))
2699 /* For FS: Use CMP instruction */
2700 return submit_op3(emit
, inst_token( SVGA3DOP_CMP
), dst
,
2701 src0
, src(t1
), negate(src(t1
)));
/* emit_bgnsub(): begin a TGSI subroutine.  Marks the end of the main
 * function (in_main_func = FALSE) and, for a previously recorded label
 * matching 'position', emits RET (terminating the preceding code) followed
 * by a LABEL instruction naming the subroutine.
 * NOTE(review): mangled extraction; the 'position' parameter line and the
 * fallthrough return appear to be missing -- confirm against the original.
 */
2709 emit_bgnsub(struct svga_shader_emitter
*emit
,
2711 const struct tgsi_full_instruction
*insn
)
2715 /* Note that we've finished the main function and are now emitting
2716 * subroutines. This affects how we terminate the generated
2719 emit
->in_main_func
= FALSE
;
2721 for (i
= 0; i
< emit
->nr_labels
; i
++) {
2722 if (emit
->label
[i
] == position
) {
2723 return (emit_instruction( emit
, inst_token( SVGA3DOP_RET
) ) &&
2724 emit_instruction( emit
, inst_token( SVGA3DOP_LABEL
) ) &&
2725 emit_src( emit
, src_register( SVGA3DREG_LABEL
, i
)));
/* emit_call(): translate TGSI CAL.  Looks up the call target 'position'
 * in emit->label[]; fails when the label table (Elements(emit->label)) is
 * full; records a new label when not found; then emits CALL plus a LABEL
 * source register naming the target index.
 * NOTE(review): mangled extraction; loop 'break', early returns and the
 * nr_labels increment are missing from this view.
 */
2735 emit_call(struct svga_shader_emitter
*emit
,
2736 const struct tgsi_full_instruction
*insn
)
2738 unsigned position
= insn
->Label
.Label
;
2741 for (i
= 0; i
< emit
->nr_labels
; i
++) {
2742 if (emit
->label
[i
] == position
)
2746 if (emit
->nr_labels
== Elements(emit
->label
))
2749 if (i
== emit
->nr_labels
) {
2750 emit
->label
[i
] = position
;
2754 return (emit_instruction( emit
, inst_token( SVGA3DOP_CALL
) ) &&
2755 emit_src( emit
, src_register( SVGA3DREG_LABEL
, i
)));
2760 * Called at the end of the shader. Actually, emit special "fix-up"
2761 * code for the vertex/fragment shader.
2764 emit_end(struct svga_shader_emitter
*emit
)
2766 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
2767 return emit_vs_postamble( emit
);
2770 return emit_ps_postamble( emit
);
/* svga_emit_instruction(): translate one TGSI instruction into SVGA3D
 * bytecode.  Opcodes needing expansion dispatch to dedicated emit_*
 * helpers; everything else falls through to translate_opcode() +
 * emit_simple_instruction(), failing when the opcode maps to
 * SVGA3DOP_LAST_INST (no direct translation).
 * NOTE(review): mangled extraction -- the 'position' argument passed to
 * emit_bgnsub() implies a parameter dropped from the visible signature;
 * some 'return TRUE/FALSE' lines are also missing.  Code tokens untouched.
 */
2777 svga_emit_instruction(struct svga_shader_emitter
*emit
,
2779 const struct tgsi_full_instruction
*insn
)
2781 switch (insn
->Instruction
.Opcode
) {
2783 case TGSI_OPCODE_ARL
:
2784 return emit_arl( emit
, insn
);
2786 case TGSI_OPCODE_TEX
:
2787 case TGSI_OPCODE_TXB
:
2788 case TGSI_OPCODE_TXP
:
2789 case TGSI_OPCODE_TXL
:
2790 case TGSI_OPCODE_TXD
:
2791 return emit_tex( emit
, insn
);
2793 case TGSI_OPCODE_DDX
:
2794 case TGSI_OPCODE_DDY
:
2795 return emit_deriv( emit
, insn
);
2797 case TGSI_OPCODE_BGNSUB
:
2798 return emit_bgnsub( emit
, position
, insn
);
2800 case TGSI_OPCODE_ENDSUB
:
2803 case TGSI_OPCODE_CAL
:
2804 return emit_call( emit
, insn
);
2806 case TGSI_OPCODE_FLR
:
2807 return emit_floor( emit
, insn
);
2809 case TGSI_OPCODE_TRUNC
:
2810 return emit_trunc_round( emit
, insn
, FALSE
);
2812 case TGSI_OPCODE_ROUND
:
2813 return emit_trunc_round( emit
, insn
, TRUE
);
2815 case TGSI_OPCODE_CEIL
:
2816 return emit_ceil( emit
, insn
);
2818 case TGSI_OPCODE_CMP
:
2819 return emit_cmp( emit
, insn
);
2821 case TGSI_OPCODE_DIV
:
2822 return emit_div( emit
, insn
);
2824 case TGSI_OPCODE_DP2
:
2825 return emit_dp2( emit
, insn
);
2827 case TGSI_OPCODE_DPH
:
2828 return emit_dph( emit
, insn
);
2830 case TGSI_OPCODE_NRM
:
2831 return emit_nrm( emit
, insn
);
2833 case TGSI_OPCODE_COS
:
2834 return emit_cos( emit
, insn
);
2836 case TGSI_OPCODE_SIN
:
2837 return emit_sin( emit
, insn
);
2839 case TGSI_OPCODE_SCS
:
2840 return emit_sincos( emit
, insn
);
2842 case TGSI_OPCODE_END
:
2843 /* TGSI always finishes the main func with an END */
2844 return emit_end( emit
);
2846 case TGSI_OPCODE_KILL_IF
:
2847 return emit_kill_if( emit
, insn
);
2849 /* Selection opcodes. The underlying language is fairly
2850 * non-orthogonal about these.
2852 case TGSI_OPCODE_SEQ
:
2853 return emit_select_op( emit
, PIPE_FUNC_EQUAL
, insn
);
2855 case TGSI_OPCODE_SNE
:
2856 return emit_select_op( emit
, PIPE_FUNC_NOTEQUAL
, insn
);
2858 case TGSI_OPCODE_SGT
:
2859 return emit_select_op( emit
, PIPE_FUNC_GREATER
, insn
);
2861 case TGSI_OPCODE_SGE
:
2862 return emit_select_op( emit
, PIPE_FUNC_GEQUAL
, insn
);
2864 case TGSI_OPCODE_SLT
:
2865 return emit_select_op( emit
, PIPE_FUNC_LESS
, insn
);
2867 case TGSI_OPCODE_SLE
:
2868 return emit_select_op( emit
, PIPE_FUNC_LEQUAL
, insn
);
2870 case TGSI_OPCODE_SUB
:
2871 return emit_sub( emit
, insn
);
2873 case TGSI_OPCODE_POW
:
2874 return emit_pow( emit
, insn
);
2876 case TGSI_OPCODE_EX2
:
2877 return emit_ex2( emit
, insn
);
2879 case TGSI_OPCODE_EXP
:
2880 return emit_exp( emit
, insn
);
2882 case TGSI_OPCODE_LOG
:
2883 return emit_log( emit
, insn
);
2885 case TGSI_OPCODE_LG2
:
2886 return emit_scalar_op1( emit
, SVGA3DOP_LOG
, insn
);
2888 case TGSI_OPCODE_RSQ
:
2889 return emit_scalar_op1( emit
, SVGA3DOP_RSQ
, insn
);
2891 case TGSI_OPCODE_RCP
:
2892 return emit_scalar_op1( emit
, SVGA3DOP_RCP
, insn
);
2894 case TGSI_OPCODE_CONT
:
2895 case TGSI_OPCODE_RET
:
2896 /* This is a noop -- we tell mesa that we can't support RET
2897 * within a function (early return), so this will always be
2898 * followed by an ENDSUB.
2902 /* These aren't actually used by any of the frontends we care
2905 case TGSI_OPCODE_CLAMP
:
2906 case TGSI_OPCODE_AND
:
2907 case TGSI_OPCODE_OR
:
2908 case TGSI_OPCODE_I2F
:
2909 case TGSI_OPCODE_NOT
:
2910 case TGSI_OPCODE_SHL
:
2911 case TGSI_OPCODE_ISHR
:
2912 case TGSI_OPCODE_XOR
:
2915 case TGSI_OPCODE_IF
:
2916 return emit_if( emit
, insn
);
2917 case TGSI_OPCODE_ELSE
:
2918 return emit_else( emit
, insn
);
2919 case TGSI_OPCODE_ENDIF
:
2920 return emit_endif( emit
, insn
);
2922 case TGSI_OPCODE_BGNLOOP
:
2923 return emit_bgnloop2( emit
, insn
);
2924 case TGSI_OPCODE_ENDLOOP
:
2925 return emit_endloop2( emit
, insn
);
2926 case TGSI_OPCODE_BRK
:
2927 return emit_brk( emit
, insn
);
2929 case TGSI_OPCODE_XPD
:
2930 return emit_xpd( emit
, insn
);
2932 case TGSI_OPCODE_KILL
:
2933 return emit_kill( emit
, insn
);
2935 case TGSI_OPCODE_DST
:
2936 return emit_dst_insn( emit
, insn
);
2938 case TGSI_OPCODE_LIT
:
2939 return emit_lit( emit
, insn
);
2941 case TGSI_OPCODE_LRP
:
2942 return emit_lrp( emit
, insn
);
2944 case TGSI_OPCODE_SSG
:
2945 return emit_ssg( emit
, insn
);
/* Default path: table-driven 1:1 translation for simple opcodes. */
2949 unsigned opcode
= translate_opcode(insn
->Instruction
.Opcode
);
2951 if (opcode
== SVGA3DOP_LAST_INST
)
2954 if (!emit_simple_instruction( emit
, opcode
, insn
))
/* svga_emit_immediate(): define a float constant for a TGSI immediate.
 * Copies up to four floats from imm->u[] (Inf/NaN squashed to 0.0f via
 * util_is_inf_or_nan), pads missing components -- presumably from
 * id[] = {0,0,0,1} -- and emits the vector at the next internal immediate
 * slot (imm_start + internal_imm_count++).
 * NOTE(review): mangled extraction; the declaration of value[] and the
 * padding assignment body are missing from this view.
 */
2964 svga_emit_immediate(struct svga_shader_emitter
*emit
,
2965 struct tgsi_full_immediate
*imm
)
2967 static const float id
[4] = {0,0,0,1};
2971 assert(1 <= imm
->Immediate
.NrTokens
&& imm
->Immediate
.NrTokens
<= 5);
2972 for (i
= 0; i
< imm
->Immediate
.NrTokens
- 1; i
++) {
2973 float f
= imm
->u
[i
].Float
;
2974 value
[i
] = util_is_inf_or_nan(f
) ? 0.0f
: f
;
2977 for ( ; i
< 4; i
++ )
2980 return emit_def_const( emit
, SVGA3D_CONST_TYPE_FLOAT
,
2981 emit
->imm_start
+ emit
->internal_imm_count
++,
2982 value
[0], value
[1], value
[2], value
[3]);
/* make_immediate(): allocate the next hardware float-constant slot
 * (nr_hw_float_const++), define (a,b,c,d) there via emit_def_const(),
 * and return a CONST source register for it through *out.
 * NOTE(review): mangled extraction; emit_def_const()'s remaining argument
 * list and the return statements are missing from this view.
 */
2987 make_immediate(struct svga_shader_emitter
*emit
,
2988 float a
, float b
, float c
, float d
,
2989 struct src_register
*out
)
2991 unsigned idx
= emit
->nr_hw_float_const
++;
2993 if (!emit_def_const( emit
, SVGA3D_CONST_TYPE_FLOAT
,
2997 *out
= src_register( SVGA3DREG_CONST
, idx
);
/* emit_vs_preamble(): when prescale is NOT needed, create the
 * {0, 0, .5, .5} immediate used later by emit_vs_postamble() for the
 * GL-to-D3D clip-space DP4 adjustment.
 * NOTE(review): mangled extraction; most of the body (the make_immediate
 * output argument and the return paths) is missing from this view.
 */
3004 emit_vs_preamble(struct svga_shader_emitter
*emit
)
3006 if (!emit
->key
.vkey
.need_prescale
) {
3007 if (!make_immediate( emit
, 0, 0, .5, .5,
/* emit_ps_preamble(): when the fragment shader reads position including Z,
 * reassemble a full position register, because D3D's vPos does not carry
 * Z or W: copy vPos.xy, reconstruct W as RCP(texcoord.w), then
 * Z = texcoord.z * recovered W (also fixing perspective interpolation).
 * NOTE(review): mangled extraction; error-return lines are missing.
 */
3017 emit_ps_preamble(struct svga_shader_emitter
*emit
)
3019 if (emit
->ps_reads_pos
&& emit
->info
.reads_z
) {
3021 * Assemble the position from various bits of inputs. Depth and W are
3022 * passed in a texcoord this is due to D3D's vPos not hold Z or W.
3023 * Also fixup the perspective interpolation.
3025 * temp_pos.xy = vPos.xy
3026 * temp_pos.w = rcp(texcoord1.w);
3027 * temp_pos.z = texcoord1.z * temp_pos.w;
3029 if (!submit_op1( emit
,
3030 inst_token(SVGA3DOP_MOV
),
3031 writemask( emit
->ps_temp_pos
, TGSI_WRITEMASK_XY
),
3032 emit
->ps_true_pos
))
3035 if (!submit_op1( emit
,
3036 inst_token(SVGA3DOP_RCP
),
3037 writemask( emit
->ps_temp_pos
, TGSI_WRITEMASK_W
),
3038 scalar( emit
->ps_depth_pos
, TGSI_SWIZZLE_W
) ))
3041 if (!submit_op2( emit
,
3042 inst_token(SVGA3DOP_MUL
),
3043 writemask( emit
->ps_temp_pos
, TGSI_WRITEMASK_Z
),
3044 scalar( emit
->ps_depth_pos
, TGSI_SWIZZLE_Z
),
3045 scalar( src(emit
->ps_temp_pos
), TGSI_SWIZZLE_W
) ))
/* emit_ps_postamble(): final fragment-shader fixups.  The main program
 * writes depth/colors to temporaries; this emits hand-crafted MOVs to the
 * real oDepth and color outputs (only for registers actually used, i.e.
 * SVGA3dShaderGetRegType(...) != 0).  Colors may be replaced with white
 * (scalar from the zero/one immediate) for the XOR-logicop workaround
 * when key.fkey.white_fragments is set.
 * NOTE(review): mangled extraction; destination operands of some MOVs and
 * the return lines are missing from this view.
 */
3054 emit_ps_postamble(struct svga_shader_emitter
*emit
)
3058 /* PS oDepth is incredibly fragile and it's very hard to catch the
3059 * types of usage that break it during shader emit. Easier just to
3060 * redirect the main program to a temporary and then only touch
3061 * oDepth with a hand-crafted MOV below.
3063 if (SVGA3dShaderGetRegType(emit
->true_pos
.value
) != 0) {
3064 if (!submit_op1( emit
,
3065 inst_token(SVGA3DOP_MOV
),
3067 scalar(src(emit
->temp_pos
), TGSI_SWIZZLE_Z
) ))
3071 for (i
= 0; i
< PIPE_MAX_COLOR_BUFS
; i
++) {
3072 if (SVGA3dShaderGetRegType(emit
->true_col
[i
].value
) != 0) {
3073 /* Potentially override output colors with white for XOR
3074 * logicop workaround.
3076 if (emit
->unit
== PIPE_SHADER_FRAGMENT
&&
3077 emit
->key
.fkey
.white_fragments
) {
3078 struct src_register one
= scalar( get_zero_immediate( emit
),
3081 if (!submit_op1( emit
,
3082 inst_token(SVGA3DOP_MOV
),
3088 if (!submit_op1( emit
,
3089 inst_token(SVGA3DOP_MOV
),
3091 src(emit
->temp_col
[i
]) ))
/* emit_vs_postamble(): final vertex-shader fixups.  Copies PSIZ from its
 * temporary (direct writes are fragile), then converts GL clip space to
 * D3D style: with need_prescale, applies per-viewport scale/translation
 * constants (MUL then MAD, prescale.trans.w == 0); otherwise folds Z into
 * [0,1] via DP4 with imm_0055 = {0,0,.5,.5} and MOVs out the result.
 * Also writes the manipulated depth into the extra texcoord register.
 * NOTE(review): mangled extraction; several operand lines and all error
 * returns are missing from this view.  Code tokens untouched.
 */
3102 emit_vs_postamble(struct svga_shader_emitter
*emit
)
3104 /* PSIZ output is incredibly fragile and it's very hard to catch
3105 * the types of usage that break it during shader emit. Easier
3106 * just to redirect the main program to a temporary and then only
3107 * touch PSIZ with a hand-crafted MOV below.
3109 if (SVGA3dShaderGetRegType(emit
->true_psiz
.value
) != 0) {
3110 if (!submit_op1( emit
,
3111 inst_token(SVGA3DOP_MOV
),
3113 scalar(src(emit
->temp_psiz
), TGSI_SWIZZLE_X
) ))
3117 /* Need to perform various manipulations on vertex position to cope
3118 * with the different GL and D3D clip spaces.
3120 if (emit
->key
.vkey
.need_prescale
) {
3121 SVGA3dShaderDestToken temp_pos
= emit
->temp_pos
;
3122 SVGA3dShaderDestToken depth
= emit
->depth_pos
;
3123 SVGA3dShaderDestToken pos
= emit
->true_pos
;
3124 unsigned offset
= emit
->info
.file_max
[TGSI_FILE_CONSTANT
] + 1;
3125 struct src_register prescale_scale
= src_register( SVGA3DREG_CONST
,
3127 struct src_register prescale_trans
= src_register( SVGA3DREG_CONST
,
3130 if (!submit_op1( emit
,
3131 inst_token(SVGA3DOP_MOV
),
3132 writemask(depth
, TGSI_WRITEMASK_W
),
3133 scalar(src(temp_pos
), TGSI_SWIZZLE_W
) ))
3136 /* MUL temp_pos.xyz, temp_pos, prescale.scale
3137 * MAD result.position, temp_pos.wwww, prescale.trans, temp_pos
3138 * --> Note that prescale.trans.w == 0
3140 if (!submit_op2( emit
,
3141 inst_token(SVGA3DOP_MUL
),
3142 writemask(temp_pos
, TGSI_WRITEMASK_XYZ
),
3147 if (!submit_op3( emit
,
3148 inst_token(SVGA3DOP_MAD
),
3150 swizzle(src(temp_pos
), 3, 3, 3, 3),
3155 /* Also write to depth value */
3156 if (!submit_op3( emit
,
3157 inst_token(SVGA3DOP_MAD
),
3158 writemask(depth
, TGSI_WRITEMASK_Z
),
3159 swizzle(src(temp_pos
), 3, 3, 3, 3),
3165 SVGA3dShaderDestToken temp_pos
= emit
->temp_pos
;
3166 SVGA3dShaderDestToken depth
= emit
->depth_pos
;
3167 SVGA3dShaderDestToken pos
= emit
->true_pos
;
3168 struct src_register imm_0055
= emit
->imm_0055
;
3170 /* Adjust GL clipping coordinate space to hardware (D3D-style):
3172 * DP4 temp_pos.z, {0,0,.5,.5}, temp_pos
3173 * MOV result.position, temp_pos
3175 if (!submit_op2( emit
,
3176 inst_token(SVGA3DOP_DP4
),
3177 writemask(temp_pos
, TGSI_WRITEMASK_Z
),
3182 if (!submit_op1( emit
,
3183 inst_token(SVGA3DOP_MOV
),
3188 /* Move the manipulated depth into the extra texcoord reg */
3189 if (!submit_op1( emit
,
3190 inst_token(SVGA3DOP_MOV
),
3191 writemask(depth
, TGSI_WRITEMASK_ZW
),
/* emit_light_twoside(): fragment-shader two-sided lighting.  Allocates
 * persistent temp color registers (deliberately NOT get_temp(), which is
 * reclaimed per-instruction by reset_temp_regs()), records the original
 * front-color inputs, derives the back-color inputs as the next input
 * register, repoints input_map at the temps, and emits
 * IFC(vface cmp 0) { color = front } ELSE { color = back } ENDIF.
 * The comparison direction (LT/GT) is chosen by key.fkey.front_ccw.
 * NOTE(review): mangled extraction; loop braces and error returns missing.
 */
3201 * For the pixel shader: emit the code which chooses the front
3202 * or back face color depending on triangle orientation.
3205 * 1: COLOR = FrontColor;
3207 * 3: COLOR = BackColor;
3211 emit_light_twoside(struct svga_shader_emitter
*emit
)
3213 struct src_register vface
, zero
;
3214 struct src_register front
[2];
3215 struct src_register back
[2];
3216 SVGA3dShaderDestToken color
[2];
3217 int count
= emit
->internal_color_count
;
3219 SVGA3dShaderInstToken if_token
;
3224 vface
= get_vface( emit
);
3225 zero
= get_zero_immediate( emit
);
3227 /* Can't use get_temp() to allocate the color reg as such
3228 * temporaries will be reclaimed after each instruction by the call
3229 * to reset_temp_regs().
3231 for (i
= 0; i
< count
; i
++) {
3232 color
[i
] = dst_register( SVGA3DREG_TEMP
, emit
->nr_hw_temp
++ );
3233 front
[i
] = emit
->input_map
[emit
->internal_color_idx
[i
]];
3235 /* Back is always the next input:
3238 back
[i
].base
.num
= front
[i
].base
.num
+ 1;
3240 /* Reassign the input_map to the actual front-face color:
3242 emit
->input_map
[emit
->internal_color_idx
[i
]] = src(color
[i
]);
3245 if_token
= inst_token( SVGA3DOP_IFC
);
3247 if (emit
->key
.fkey
.front_ccw
)
3248 if_token
.control
= SVGA3DOPCOMP_LT
;
3250 if_token
.control
= SVGA3DOPCOMP_GT
;
3252 zero
= scalar(zero
, TGSI_SWIZZLE_X
);
3254 if (!(emit_instruction( emit
, if_token
) &&
3255 emit_src( emit
, vface
) &&
3256 emit_src( emit
, zero
) ))
3259 for (i
= 0; i
< count
; i
++) {
3260 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), color
[i
], front
[i
] ))
3264 if (!(emit_instruction( emit
, inst_token( SVGA3DOP_ELSE
))))
3267 for (i
= 0; i
< count
; i
++) {
3268 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), color
[i
], back
[i
] ))
3272 if (!emit_instruction( emit
, inst_token( SVGA3DOP_ENDIF
) ))
/* emit_frontface(): synthesize a TGSI FACE input for the pixel shader.
 * Allocates a persistent temp (not get_temp(), which is reclaimed each
 * instruction), picks pass/fail values from the zero/one immediate
 * (X and W components, swapped when key.fkey.front_ccw), fills the temp
 * via emit_conditional(vface > 0), and repoints input_map's internal
 * frontface entry at the temp.
 * NOTE(review): mangled extraction; the emit_conditional() trailing
 * arguments and return lines are missing from this view.
 */
3280 * 0: SETP_GT TEMP, VFACE, 0
3281 * where TEMP is a fake frontface register
3284 emit_frontface(struct svga_shader_emitter
*emit
)
3286 struct src_register vface
, zero
;
3287 SVGA3dShaderDestToken temp
;
3288 struct src_register pass
, fail
;
3290 vface
= get_vface( emit
);
3291 zero
= get_zero_immediate( emit
);
3293 /* Can't use get_temp() to allocate the fake frontface reg as such
3294 * temporaries will be reclaimed after each instruction by the call
3295 * to reset_temp_regs().
3297 temp
= dst_register( SVGA3DREG_TEMP
,
3298 emit
->nr_hw_temp
++ );
3300 if (emit
->key
.fkey
.front_ccw
) {
3301 pass
= scalar( zero
, TGSI_SWIZZLE_X
);
3302 fail
= scalar( zero
, TGSI_SWIZZLE_W
);
3304 pass
= scalar( zero
, TGSI_SWIZZLE_W
);
3305 fail
= scalar( zero
, TGSI_SWIZZLE_X
);
3308 if (!emit_conditional(emit
, PIPE_FUNC_GREATER
,
3309 temp
, vface
, scalar( zero
, TGSI_SWIZZLE_X
),
3313 /* Reassign the input_map to the actual front-face color:
3315 emit
->input_map
[emit
->internal_frontface_idx
] = src(temp
);
/* emit_inverted_texcoords(): for each texcoord unit flagged in
 * emit->inverted_texcoords, emit one MAD computing
 * inverted = coord * (1,-1,1,1) + (0,1,0,0) (i.e. t' = 1 - t), then
 * repoint the corresponding input_map entry at the inverted register.
 * Used for point sprites with PIPE_SPRITE_COORD_LOWER_LEFT.
 * NOTE(review): mangled extraction; the MAD's error return is missing.
 */
3322 * Emit code to invert the T component of the incoming texture coordinate.
3323 * This is used for drawing point sprites when
3324 * pipe_rasterizer_state::sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT.
3327 emit_inverted_texcoords(struct svga_shader_emitter
*emit
)
3329 struct src_register zero
= get_zero_immediate(emit
);
3330 struct src_register pos_neg_one
= get_pos_neg_one_immediate( emit
);
3331 unsigned inverted_texcoords
= emit
->inverted_texcoords
;
3333 while (inverted_texcoords
) {
3334 const unsigned unit
= ffs(inverted_texcoords
) - 1;
3336 assert(emit
->inverted_texcoords
& (1 << unit
));
3338 assert(unit
< Elements(emit
->ps_true_texcoord
));
3340 assert(unit
< Elements(emit
->ps_inverted_texcoord_input
));
3342 assert(emit
->ps_inverted_texcoord_input
[unit
]
3343 < Elements(emit
->input_map
));
3345 /* inverted = coord * (1, -1, 1, 1) + (0, 1, 0, 0) */
3346 if (!submit_op3(emit
,
3347 inst_token(SVGA3DOP_MAD
),
3348 dst(emit
->ps_inverted_texcoord
[unit
]),
3349 emit
->ps_true_texcoord
[unit
],
3350 swizzle(pos_neg_one
, 0, 3, 0, 0), /* (1, -1, 1, 1) */
3351 swizzle(zero
, 0, 3, 0, 0))) /* (0, 1, 0, 0) */
3354 /* Reassign the input_map entry to the new texcoord register */
3355 emit
->input_map
[emit
->ps_inverted_texcoord_input
[unit
]] =
3356 emit
->ps_inverted_texcoord
[unit
];
3358 inverted_texcoords
&= ~(1 << unit
);
/* needs_to_create_zero(): decide whether the shared zero/one immediate
 * must be emitted before translation starts.  For fragment shaders: any of
 * two-sided lighting, white_fragments (XOR logicop), synthesized
 * frontface, inverted texcoords, texture swizzles using ZERO/ONE terms,
 * or shadow-compare samplers.  For vertex shaders: CMP usage.  For either:
 * a long list of opcodes whose expansions require 0/1 constants
 * (DST/SSG/LIT, IF, loops, derivatives, ROUND, the SGE..SEQ comparisons,
 * EXP, LOG, XPD, KILL).
 * NOTE(review): mangled extraction; the individual 'return TRUE' lines
 * after each test are missing from this view.
 */
3366 needs_to_create_zero( struct svga_shader_emitter
*emit
)
3370 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
3371 if (emit
->key
.fkey
.light_twoside
)
3374 if (emit
->key
.fkey
.white_fragments
)
3377 if (emit
->emit_frontface
)
3380 if (emit
->info
.opcode_count
[TGSI_OPCODE_DST
] >= 1 ||
3381 emit
->info
.opcode_count
[TGSI_OPCODE_SSG
] >= 1 ||
3382 emit
->info
.opcode_count
[TGSI_OPCODE_LIT
] >= 1)
3385 if (emit
->inverted_texcoords
)
3388 /* look for any PIPE_SWIZZLE_ZERO/ONE terms */
3389 for (i
= 0; i
< emit
->key
.fkey
.num_textures
; i
++) {
3390 if (emit
->key
.fkey
.tex
[i
].swizzle_r
> PIPE_SWIZZLE_ALPHA
||
3391 emit
->key
.fkey
.tex
[i
].swizzle_g
> PIPE_SWIZZLE_ALPHA
||
3392 emit
->key
.fkey
.tex
[i
].swizzle_b
> PIPE_SWIZZLE_ALPHA
||
3393 emit
->key
.fkey
.tex
[i
].swizzle_a
> PIPE_SWIZZLE_ALPHA
)
3397 for (i
= 0; i
< emit
->key
.fkey
.num_textures
; i
++) {
3398 if (emit
->key
.fkey
.tex
[i
].compare_mode
3399 == PIPE_TEX_COMPARE_R_TO_TEXTURE
)
3404 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
3405 if (emit
->info
.opcode_count
[TGSI_OPCODE_CMP
] >= 1)
3409 if (emit
->info
.opcode_count
[TGSI_OPCODE_IF
] >= 1 ||
3410 emit
->info
.opcode_count
[TGSI_OPCODE_BGNLOOP
] >= 1 ||
3411 emit
->info
.opcode_count
[TGSI_OPCODE_DDX
] >= 1 ||
3412 emit
->info
.opcode_count
[TGSI_OPCODE_DDY
] >= 1 ||
3413 emit
->info
.opcode_count
[TGSI_OPCODE_ROUND
] >= 1 ||
3414 emit
->info
.opcode_count
[TGSI_OPCODE_SGE
] >= 1 ||
3415 emit
->info
.opcode_count
[TGSI_OPCODE_SGT
] >= 1 ||
3416 emit
->info
.opcode_count
[TGSI_OPCODE_SLE
] >= 1 ||
3417 emit
->info
.opcode_count
[TGSI_OPCODE_SLT
] >= 1 ||
3418 emit
->info
.opcode_count
[TGSI_OPCODE_SNE
] >= 1 ||
3419 emit
->info
.opcode_count
[TGSI_OPCODE_SEQ
] >= 1 ||
3420 emit
->info
.opcode_count
[TGSI_OPCODE_EXP
] >= 1 ||
3421 emit
->info
.opcode_count
[TGSI_OPCODE_LOG
] >= 1 ||
3422 emit
->info
.opcode_count
[TGSI_OPCODE_XPD
] >= 1 ||
3423 emit
->info
.opcode_count
[TGSI_OPCODE_KILL
] >= 1)
3431 needs_to_create_loop_const( struct svga_shader_emitter
*emit
)
3433 return (emit
->info
.opcode_count
[TGSI_OPCODE_BGNLOOP
] >= 1);
3438 needs_to_create_arl_consts( struct svga_shader_emitter
*emit
)
3440 return (emit
->num_arl_consts
> 0);
/* pre_parse_add_indirect(): record an ARL-relative indirect access with
 * (negative) constant index 'num' under address-register generation
 * 'current_arl'.  Reuses an existing arl_consts[] entry with the same
 * arl_num, or appends a new one; the entry's 'number' is updated via a
 * comparison against 'num' (the kept operand is partly lost in this
 * extraction -- presumably the more-negative offset; confirm against the
 * original file).
 */
3445 pre_parse_add_indirect( struct svga_shader_emitter
*emit
,
3446 int num
, int current_arl
)
3451 for (i
= 0; i
< emit
->num_arl_consts
; ++i
) {
3452 if (emit
->arl_consts
[i
].arl_num
== current_arl
)
3456 if (emit
->num_arl_consts
== i
) {
3457 ++emit
->num_arl_consts
;
3459 emit
->arl_consts
[i
].number
= (emit
->arl_consts
[i
].number
> num
) ?
3461 emit
->arl_consts
[i
].number
;
3462 emit
->arl_consts
[i
].arl_num
= current_arl
;
/* pre_parse_instruction(): scan up to three source operands of one TGSI
 * instruction; for every ADDRESS-file indirect access whose constant index
 * is negative, record it via pre_parse_add_indirect() under the current
 * ARL counter (third parameter line lost in this extraction).
 */
3468 pre_parse_instruction( struct svga_shader_emitter
*emit
,
3469 const struct tgsi_full_instruction
*insn
,
3472 if (insn
->Src
[0].Register
.Indirect
&&
3473 insn
->Src
[0].Indirect
.File
== TGSI_FILE_ADDRESS
) {
3474 const struct tgsi_full_src_register
*reg
= &insn
->Src
[0];
3475 if (reg
->Register
.Index
< 0) {
3476 pre_parse_add_indirect(emit
, reg
->Register
.Index
, current_arl
);
3480 if (insn
->Src
[1].Register
.Indirect
&&
3481 insn
->Src
[1].Indirect
.File
== TGSI_FILE_ADDRESS
) {
3482 const struct tgsi_full_src_register
*reg
= &insn
->Src
[1];
3483 if (reg
->Register
.Index
< 0) {
3484 pre_parse_add_indirect(emit
, reg
->Register
.Index
, current_arl
);
3488 if (insn
->Src
[2].Register
.Indirect
&&
3489 insn
->Src
[2].Indirect
.File
== TGSI_FILE_ADDRESS
) {
3490 const struct tgsi_full_src_register
*reg
= &insn
->Src
[2];
3491 if (reg
->Register
.Index
< 0) {
3492 pre_parse_add_indirect(emit
, reg
->Register
.Index
, current_arl
);
/* pre_parse_tokens(): pre-pass over the TGSI token stream before real
 * emission: skips immediates/declarations, tracks an ARL counter
 * (the opcode comparison is truncated in this extraction -- presumably
 * incrementing current_arl on TGSI_OPCODE_ARL; confirm against original),
 * and feeds every instruction to pre_parse_instruction() to collect
 * indirect-addressing constants.
 */
3501 pre_parse_tokens( struct svga_shader_emitter
*emit
,
3502 const struct tgsi_token
*tokens
)
3504 struct tgsi_parse_context parse
;
3505 int current_arl
= 0;
3507 tgsi_parse_init( &parse
, tokens
);
3509 while (!tgsi_parse_end_of_tokens( &parse
)) {
3510 tgsi_parse_token( &parse
);
3511 switch (parse
.FullToken
.Token
.Type
) {
3512 case TGSI_TOKEN_TYPE_IMMEDIATE
:
3513 case TGSI_TOKEN_TYPE_DECLARATION
:
3515 case TGSI_TOKEN_TYPE_INSTRUCTION
:
3516 if (parse
.FullToken
.FullInstruction
.Instruction
.Opcode
==
3520 if (!pre_parse_instruction( emit
, &parse
.FullToken
.FullInstruction
,
/* svga_shader_emit_helpers(): emit shared setup before the first real
 * instruction: the zero/one immediate, loop constant and ARL constants
 * when the needs_to_create_* predicates say so; then, for fragment
 * shaders, the PS preamble plus the optional two-sided-lighting,
 * frontface-synthesis and inverted-texcoord fixups.
 * NOTE(review): mangled extraction; the failure returns after each
 * emit_* call are missing from this view.
 */
3534 svga_shader_emit_helpers(struct svga_shader_emitter
*emit
)
3536 if (needs_to_create_zero( emit
)) {
3537 create_zero_immediate( emit
);
3539 if (needs_to_create_loop_const( emit
)) {
3540 create_loop_const( emit
);
3542 if (needs_to_create_arl_consts( emit
)) {
3543 create_arl_consts( emit
);
3546 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
3547 if (!emit_ps_preamble( emit
))
3550 if (emit
->key
.fkey
.light_twoside
) {
3551 if (!emit_light_twoside( emit
))
3554 if (emit
->emit_frontface
) {
3555 if (!emit_frontface( emit
))
3558 if (emit
->inverted_texcoords
) {
3559 if (!emit_inverted_texcoords( emit
))
3569 svga_shader_emit_instructions(struct svga_shader_emitter
*emit
,
3570 const struct tgsi_token
*tokens
)
3572 struct tgsi_parse_context parse
;
3574 boolean helpers_emitted
= FALSE
;
3575 unsigned line_nr
= 0;
3577 tgsi_parse_init( &parse
, tokens
);
3578 emit
->internal_imm_count
= 0;
3580 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
3581 ret
= emit_vs_preamble( emit
);
3586 pre_parse_tokens(emit
, tokens
);
3588 while (!tgsi_parse_end_of_tokens( &parse
)) {
3589 tgsi_parse_token( &parse
);
3591 switch (parse
.FullToken
.Token
.Type
) {
3592 case TGSI_TOKEN_TYPE_IMMEDIATE
:
3593 ret
= svga_emit_immediate( emit
, &parse
.FullToken
.FullImmediate
);
3598 case TGSI_TOKEN_TYPE_DECLARATION
:
3599 ret
= svga_translate_decl_sm30( emit
, &parse
.FullToken
.FullDeclaration
);
3604 case TGSI_TOKEN_TYPE_INSTRUCTION
:
3605 if (!helpers_emitted
) {
3606 if (!svga_shader_emit_helpers( emit
))
3608 helpers_emitted
= TRUE
;
3610 ret
= svga_emit_instruction( emit
,
3612 &parse
.FullToken
.FullInstruction
);
3620 reset_temp_regs( emit
);
3623 /* Need to terminate the current subroutine. Note that the
3624 * hardware doesn't tolerate shaders without sub-routines
3625 * terminating with RET+END.
3627 if (!emit
->in_main_func
) {
3628 ret
= emit_instruction( emit
, inst_token( SVGA3DOP_RET
) );
3633 assert(emit
->dynamic_branching_level
== 0);
3635 /* Need to terminate the whole shader:
3637 ret
= emit_instruction( emit
, inst_token( SVGA3DOP_END
) );
3642 tgsi_parse_free( &parse
);