1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
24 **********************************************************/
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_parse.h"
29 #include "util/u_memory.h"
31 #include "svga_tgsi_emit.h"
32 #include "svga_context.h"
35 static boolean
emit_vs_postamble( struct svga_shader_emitter
*emit
);
36 static boolean
emit_ps_postamble( struct svga_shader_emitter
*emit
);
46 case TGSI_OPCODE_ABS
: return SVGA3DOP_ABS
;
47 case TGSI_OPCODE_ADD
: return SVGA3DOP_ADD
;
48 case TGSI_OPCODE_BREAKC
: return SVGA3DOP_BREAKC
;
49 case TGSI_OPCODE_DP2A
: return SVGA3DOP_DP2ADD
;
50 case TGSI_OPCODE_DP3
: return SVGA3DOP_DP3
;
51 case TGSI_OPCODE_DP4
: return SVGA3DOP_DP4
;
52 case TGSI_OPCODE_FRC
: return SVGA3DOP_FRC
;
53 case TGSI_OPCODE_MAD
: return SVGA3DOP_MAD
;
54 case TGSI_OPCODE_MAX
: return SVGA3DOP_MAX
;
55 case TGSI_OPCODE_MIN
: return SVGA3DOP_MIN
;
56 case TGSI_OPCODE_MOV
: return SVGA3DOP_MOV
;
57 case TGSI_OPCODE_MUL
: return SVGA3DOP_MUL
;
58 case TGSI_OPCODE_NOP
: return SVGA3DOP_NOP
;
59 case TGSI_OPCODE_NRM4
: return SVGA3DOP_NRM
;
60 case TGSI_OPCODE_SSG
: return SVGA3DOP_SGN
;
62 debug_printf("Unkown opcode %u\n", opcode
);
64 return SVGA3DOP_LAST_INST
;
69 static unsigned translate_file( unsigned file
)
72 case TGSI_FILE_TEMPORARY
: return SVGA3DREG_TEMP
;
73 case TGSI_FILE_INPUT
: return SVGA3DREG_INPUT
;
74 case TGSI_FILE_OUTPUT
: return SVGA3DREG_OUTPUT
; /* VS3.0+ only */
75 case TGSI_FILE_IMMEDIATE
: return SVGA3DREG_CONST
;
76 case TGSI_FILE_CONSTANT
: return SVGA3DREG_CONST
;
77 case TGSI_FILE_SAMPLER
: return SVGA3DREG_SAMPLER
;
78 case TGSI_FILE_ADDRESS
: return SVGA3DREG_ADDR
;
81 return SVGA3DREG_TEMP
;
90 static SVGA3dShaderDestToken
91 translate_dst_register( struct svga_shader_emitter
*emit
,
92 const struct tgsi_full_instruction
*insn
,
95 const struct tgsi_full_dst_register
*reg
= &insn
->Dst
[idx
];
96 SVGA3dShaderDestToken dest
;
98 switch (reg
->Register
.File
) {
99 case TGSI_FILE_OUTPUT
:
100 /* Output registers encode semantic information in their name.
101 * Need to lookup a table built at decl time:
103 dest
= emit
->output_map
[reg
->Register
.Index
];
107 dest
= dst_register( translate_file( reg
->Register
.File
),
108 reg
->Register
.Index
);
112 dest
.mask
= reg
->Register
.WriteMask
;
115 if (insn
->Instruction
.Saturate
)
116 dest
.dstMod
= SVGA3DDSTMOD_SATURATE
;
122 static struct src_register
123 swizzle( struct src_register src
,
129 x
= (src
.base
.swizzle
>> (x
* 2)) & 0x3;
130 y
= (src
.base
.swizzle
>> (y
* 2)) & 0x3;
131 z
= (src
.base
.swizzle
>> (z
* 2)) & 0x3;
132 w
= (src
.base
.swizzle
>> (w
* 2)) & 0x3;
134 src
.base
.swizzle
= TRANSLATE_SWIZZLE(x
,y
,z
,w
);
139 static struct src_register
140 scalar( struct src_register src
,
143 return swizzle( src
, comp
, comp
, comp
, comp
);
146 static INLINE boolean
147 svga_arl_needs_adjustment( const struct svga_shader_emitter
*emit
)
151 for (i
= 0; i
< emit
->num_arl_consts
; ++i
) {
152 if (emit
->arl_consts
[i
].arl_num
== emit
->current_arl
)
159 svga_arl_adjustment( const struct svga_shader_emitter
*emit
)
163 for (i
= 0; i
< emit
->num_arl_consts
; ++i
) {
164 if (emit
->arl_consts
[i
].arl_num
== emit
->current_arl
)
165 return emit
->arl_consts
[i
].number
;
170 static struct src_register
171 translate_src_register( const struct svga_shader_emitter
*emit
,
172 const struct tgsi_full_src_register
*reg
)
174 struct src_register src
;
176 switch (reg
->Register
.File
) {
177 case TGSI_FILE_INPUT
:
178 /* Input registers are referred to by their semantic name rather
179 * than by index. Use the mapping build up from the decls:
181 src
= emit
->input_map
[reg
->Register
.Index
];
184 case TGSI_FILE_IMMEDIATE
:
185 /* Immediates are appended after TGSI constants in the D3D
188 src
= src_register( translate_file( reg
->Register
.File
),
189 reg
->Register
.Index
+
194 src
= src_register( translate_file( reg
->Register
.File
),
195 reg
->Register
.Index
);
200 /* Indirect addressing.
202 if (reg
->Register
.Indirect
) {
203 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
204 /* Pixel shaders have only loop registers for relative
205 * addressing into inputs. Ignore the redundant address
206 * register, the contents of aL should be in sync with it.
208 if (reg
->Register
.File
== TGSI_FILE_INPUT
) {
209 src
.base
.relAddr
= 1;
210 src
.indirect
= src_token(SVGA3DREG_LOOP
, 0);
214 /* Constant buffers only.
216 if (reg
->Register
.File
== TGSI_FILE_CONSTANT
) {
217 /* we shift the offset towards the minimum */
218 if (svga_arl_needs_adjustment( emit
)) {
219 src
.base
.num
-= svga_arl_adjustment( emit
);
221 src
.base
.relAddr
= 1;
223 /* Not really sure what should go in the second token:
225 src
.indirect
= src_token( SVGA3DREG_ADDR
,
226 reg
->Indirect
.Index
);
228 src
.indirect
.swizzle
= SWIZZLE_XXXX
;
234 reg
->Register
.SwizzleX
,
235 reg
->Register
.SwizzleY
,
236 reg
->Register
.SwizzleZ
,
237 reg
->Register
.SwizzleW
);
239 /* src.mod isn't a bitfield, unfortunately:
240 * See tgsi_util_get_full_src_register_sign_mode for implementation details.
242 if (reg
->Register
.Absolute
) {
243 if (reg
->Register
.Negate
)
244 src
.base
.srcMod
= SVGA3DSRCMOD_ABSNEG
;
246 src
.base
.srcMod
= SVGA3DSRCMOD_ABS
;
249 if (reg
->Register
.Negate
)
250 src
.base
.srcMod
= SVGA3DSRCMOD_NEG
;
252 src
.base
.srcMod
= SVGA3DSRCMOD_NONE
;
260 * Get a temporary register, return -1 if none available
262 static INLINE SVGA3dShaderDestToken
263 get_temp( struct svga_shader_emitter
*emit
)
265 int i
= emit
->nr_hw_temp
+ emit
->internal_temp_count
++;
267 return dst_register( SVGA3DREG_TEMP
, i
);
270 /* Release a single temp. Currently only effective if it was the last
271 * allocated temp, otherwise release will be delayed until the next
272 * call to reset_temp_regs().
275 release_temp( struct svga_shader_emitter
*emit
,
276 SVGA3dShaderDestToken temp
)
278 if (temp
.num
== emit
->internal_temp_count
- 1)
279 emit
->internal_temp_count
--;
282 static void reset_temp_regs( struct svga_shader_emitter
*emit
)
284 emit
->internal_temp_count
= 0;
288 static boolean
submit_op0( struct svga_shader_emitter
*emit
,
289 SVGA3dShaderInstToken inst
,
290 SVGA3dShaderDestToken dest
)
292 return (emit_instruction( emit
, inst
) &&
293 emit_dst( emit
, dest
));
296 static boolean
submit_op1( struct svga_shader_emitter
*emit
,
297 SVGA3dShaderInstToken inst
,
298 SVGA3dShaderDestToken dest
,
299 struct src_register src0
)
301 return emit_op1( emit
, inst
, dest
, src0
);
305 /* SVGA shaders may not refer to >1 constant register in a single
306 * instruction. This function checks for that usage and inserts a
307 * move to temporary if detected.
309 * The same applies to input registers -- at most a single input
310 * register may be read by any instruction.
312 static boolean
submit_op2( struct svga_shader_emitter
*emit
,
313 SVGA3dShaderInstToken inst
,
314 SVGA3dShaderDestToken dest
,
315 struct src_register src0
,
316 struct src_register src1
)
318 SVGA3dShaderDestToken temp
;
319 SVGA3dShaderRegType type0
, type1
;
320 boolean need_temp
= FALSE
;
323 type0
= SVGA3dShaderGetRegType( src0
.base
.value
);
324 type1
= SVGA3dShaderGetRegType( src1
.base
.value
);
326 if (type0
== SVGA3DREG_CONST
&&
327 type1
== SVGA3DREG_CONST
&&
328 src0
.base
.num
!= src1
.base
.num
)
331 if (type0
== SVGA3DREG_INPUT
&&
332 type1
== SVGA3DREG_INPUT
&&
333 src0
.base
.num
!= src1
.base
.num
)
338 temp
= get_temp( emit
);
340 if (!emit_op1( emit
, inst_token( SVGA3DOP_MOV
), temp
, src0
))
346 if (!emit_op2( emit
, inst
, dest
, src0
, src1
))
350 release_temp( emit
, temp
);
356 /* SVGA shaders may not refer to >1 constant register in a single
357 * instruction. This function checks for that usage and inserts a
358 * move to temporary if detected.
360 static boolean
submit_op3( struct svga_shader_emitter
*emit
,
361 SVGA3dShaderInstToken inst
,
362 SVGA3dShaderDestToken dest
,
363 struct src_register src0
,
364 struct src_register src1
,
365 struct src_register src2
)
367 SVGA3dShaderDestToken temp0
;
368 SVGA3dShaderDestToken temp1
;
369 boolean need_temp0
= FALSE
;
370 boolean need_temp1
= FALSE
;
371 SVGA3dShaderRegType type0
, type1
, type2
;
375 type0
= SVGA3dShaderGetRegType( src0
.base
.value
);
376 type1
= SVGA3dShaderGetRegType( src1
.base
.value
);
377 type2
= SVGA3dShaderGetRegType( src2
.base
.value
);
379 if (inst
.op
!= SVGA3DOP_SINCOS
) {
380 if (type0
== SVGA3DREG_CONST
&&
381 ((type1
== SVGA3DREG_CONST
&& src0
.base
.num
!= src1
.base
.num
) ||
382 (type2
== SVGA3DREG_CONST
&& src0
.base
.num
!= src2
.base
.num
)))
385 if (type1
== SVGA3DREG_CONST
&&
386 (type2
== SVGA3DREG_CONST
&& src1
.base
.num
!= src2
.base
.num
))
390 if (type0
== SVGA3DREG_INPUT
&&
391 ((type1
== SVGA3DREG_INPUT
&& src0
.base
.num
!= src1
.base
.num
) ||
392 (type2
== SVGA3DREG_INPUT
&& src0
.base
.num
!= src2
.base
.num
)))
395 if (type1
== SVGA3DREG_INPUT
&&
396 (type2
== SVGA3DREG_INPUT
&& src1
.base
.num
!= src2
.base
.num
))
401 temp0
= get_temp( emit
);
403 if (!emit_op1( emit
, inst_token( SVGA3DOP_MOV
), temp0
, src0
))
411 temp1
= get_temp( emit
);
413 if (!emit_op1( emit
, inst_token( SVGA3DOP_MOV
), temp1
, src1
))
419 if (!emit_op3( emit
, inst
, dest
, src0
, src1
, src2
))
423 release_temp( emit
, temp1
);
425 release_temp( emit
, temp0
);
432 /* SVGA shaders may not refer to >1 constant register in a single
433 * instruction. This function checks for that usage and inserts a
434 * move to temporary if detected.
436 static boolean
submit_op4( struct svga_shader_emitter
*emit
,
437 SVGA3dShaderInstToken inst
,
438 SVGA3dShaderDestToken dest
,
439 struct src_register src0
,
440 struct src_register src1
,
441 struct src_register src2
,
442 struct src_register src3
)
444 SVGA3dShaderDestToken temp0
;
445 SVGA3dShaderDestToken temp3
;
446 boolean need_temp0
= FALSE
;
447 boolean need_temp3
= FALSE
;
448 SVGA3dShaderRegType type0
, type1
, type2
, type3
;
452 type0
= SVGA3dShaderGetRegType( src0
.base
.value
);
453 type1
= SVGA3dShaderGetRegType( src1
.base
.value
);
454 type2
= SVGA3dShaderGetRegType( src2
.base
.value
);
455 type3
= SVGA3dShaderGetRegType( src2
.base
.value
);
457 /* Make life a little easier - this is only used by the TXD
458 * instruction which is guaranteed not to have a constant/input reg
459 * in one slot at least:
461 assert(type1
== SVGA3DREG_SAMPLER
);
463 if (type0
== SVGA3DREG_CONST
&&
464 ((type3
== SVGA3DREG_CONST
&& src0
.base
.num
!= src3
.base
.num
) ||
465 (type2
== SVGA3DREG_CONST
&& src0
.base
.num
!= src2
.base
.num
)))
468 if (type3
== SVGA3DREG_CONST
&&
469 (type2
== SVGA3DREG_CONST
&& src3
.base
.num
!= src2
.base
.num
))
472 if (type0
== SVGA3DREG_INPUT
&&
473 ((type3
== SVGA3DREG_INPUT
&& src0
.base
.num
!= src3
.base
.num
) ||
474 (type2
== SVGA3DREG_INPUT
&& src0
.base
.num
!= src2
.base
.num
)))
477 if (type3
== SVGA3DREG_INPUT
&&
478 (type2
== SVGA3DREG_INPUT
&& src3
.base
.num
!= src2
.base
.num
))
483 temp0
= get_temp( emit
);
485 if (!emit_op1( emit
, inst_token( SVGA3DOP_MOV
), temp0
, src0
))
493 temp3
= get_temp( emit
);
495 if (!emit_op1( emit
, inst_token( SVGA3DOP_MOV
), temp3
, src3
))
501 if (!emit_op4( emit
, inst
, dest
, src0
, src1
, src2
, src3
))
505 release_temp( emit
, temp3
);
507 release_temp( emit
, temp0
);
512 static boolean
emit_def_const( struct svga_shader_emitter
*emit
,
513 SVGA3dShaderConstType type
,
521 SVGA3dShaderInstToken opcode
;
524 case SVGA3D_CONST_TYPE_FLOAT
:
525 opcode
= inst_token( SVGA3DOP_DEF
);
526 def
.dst
= dst_register( SVGA3DREG_CONST
, idx
);
527 def
.constValues
[0] = a
;
528 def
.constValues
[1] = b
;
529 def
.constValues
[2] = c
;
530 def
.constValues
[3] = d
;
532 case SVGA3D_CONST_TYPE_INT
:
533 opcode
= inst_token( SVGA3DOP_DEFI
);
534 def
.dst
= dst_register( SVGA3DREG_CONSTINT
, idx
);
535 def
.constIValues
[0] = (int)a
;
536 def
.constIValues
[1] = (int)b
;
537 def
.constIValues
[2] = (int)c
;
538 def
.constIValues
[3] = (int)d
;
542 opcode
= inst_token( SVGA3DOP_NOP
);
546 if (!emit_instruction(emit
, opcode
) ||
547 !svga_shader_emit_dwords( emit
, def
.values
, Elements(def
.values
)))
553 static INLINE boolean
554 create_zero_immediate( struct svga_shader_emitter
*emit
)
556 unsigned idx
= emit
->nr_hw_float_const
++;
558 if (!emit_def_const( emit
, SVGA3D_CONST_TYPE_FLOAT
,
562 emit
->zero_immediate_idx
= idx
;
563 emit
->created_zero_immediate
= TRUE
;
568 static INLINE boolean
569 create_loop_const( struct svga_shader_emitter
*emit
)
571 unsigned idx
= emit
->nr_hw_int_const
++;
573 if (!emit_def_const( emit
, SVGA3D_CONST_TYPE_INT
, idx
,
574 255, /* iteration count */
575 0, /* initial value */
577 0 /* not used, must be 0 */))
580 emit
->loop_const_idx
= idx
;
581 emit
->created_loop_const
= TRUE
;
586 static INLINE boolean
587 create_sincos_consts( struct svga_shader_emitter
*emit
)
589 unsigned idx
= emit
->nr_hw_float_const
++;
591 if (!emit_def_const( emit
, SVGA3D_CONST_TYPE_FLOAT
, idx
,
598 emit
->sincos_consts_idx
= idx
;
599 idx
= emit
->nr_hw_float_const
++;
601 if (!emit_def_const( emit
, SVGA3D_CONST_TYPE_FLOAT
, idx
,
608 emit
->created_sincos_consts
= TRUE
;
613 static INLINE boolean
614 create_arl_consts( struct svga_shader_emitter
*emit
)
618 for (i
= 0; i
< emit
->num_arl_consts
; i
+= 4) {
620 unsigned idx
= emit
->nr_hw_float_const
++;
622 for (j
= 0; j
< 4 && (j
+ i
) < emit
->num_arl_consts
; ++j
) {
623 vals
[j
] = emit
->arl_consts
[i
+ j
].number
;
624 emit
->arl_consts
[i
+ j
].idx
= idx
;
627 emit
->arl_consts
[i
+ 0].swizzle
= TGSI_SWIZZLE_X
;
630 emit
->arl_consts
[i
+ 0].swizzle
= TGSI_SWIZZLE_Y
;
633 emit
->arl_consts
[i
+ 0].swizzle
= TGSI_SWIZZLE_Z
;
636 emit
->arl_consts
[i
+ 0].swizzle
= TGSI_SWIZZLE_W
;
643 if (!emit_def_const( emit
, SVGA3D_CONST_TYPE_FLOAT
, idx
,
652 static INLINE
struct src_register
653 get_vface( struct svga_shader_emitter
*emit
)
655 assert(emit
->emitted_vface
);
656 return src_register(SVGA3DREG_MISCTYPE
,
660 /* returns {0, 0, 0, 1} immediate */
661 static INLINE
struct src_register
662 get_zero_immediate( struct svga_shader_emitter
*emit
)
664 assert(emit
->created_zero_immediate
);
665 assert(emit
->zero_immediate_idx
>= 0);
666 return src_register( SVGA3DREG_CONST
,
667 emit
->zero_immediate_idx
);
670 /* returns the loop const */
671 static INLINE
struct src_register
672 get_loop_const( struct svga_shader_emitter
*emit
)
674 assert(emit
->created_loop_const
);
675 assert(emit
->loop_const_idx
>= 0);
676 return src_register( SVGA3DREG_CONSTINT
,
677 emit
->loop_const_idx
);
680 /* returns a sincos const */
681 static INLINE
struct src_register
682 get_sincos_const( struct svga_shader_emitter
*emit
,
685 assert(emit
->created_sincos_consts
);
686 assert(emit
->sincos_consts_idx
>= 0);
687 assert(index
== 0 || index
== 1);
688 return src_register( SVGA3DREG_CONST
,
689 emit
->sincos_consts_idx
+ index
);
692 static INLINE
struct src_register
693 get_fake_arl_const( struct svga_shader_emitter
*emit
)
695 struct src_register reg
;
696 int idx
= 0, swizzle
= 0, i
;
698 for (i
= 0; i
< emit
->num_arl_consts
; ++ i
) {
699 if (emit
->arl_consts
[i
].arl_num
== emit
->current_arl
) {
700 idx
= emit
->arl_consts
[i
].idx
;
701 swizzle
= emit
->arl_consts
[i
].swizzle
;
705 reg
= src_register( SVGA3DREG_CONST
, idx
);
706 return scalar(reg
, swizzle
);
709 static INLINE
struct src_register
710 get_tex_dimensions( struct svga_shader_emitter
*emit
, int sampler_num
)
713 struct src_register reg
;
715 /* the width/height indexes start right after constants */
716 idx
= emit
->key
.fkey
.tex
[sampler_num
].width_height_idx
+
717 emit
->info
.file_max
[TGSI_FILE_CONSTANT
] + 1;
719 reg
= src_register( SVGA3DREG_CONST
, idx
);
723 static boolean
emit_fake_arl(struct svga_shader_emitter
*emit
,
724 const struct tgsi_full_instruction
*insn
)
726 const struct src_register src0
= translate_src_register(
727 emit
, &insn
->Src
[0] );
728 struct src_register src1
= get_fake_arl_const( emit
);
729 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
730 SVGA3dShaderDestToken tmp
= get_temp( emit
);
732 if (!submit_op1(emit
, inst_token( SVGA3DOP_MOV
), tmp
, src0
))
735 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
), tmp
, src( tmp
),
739 /* replicate the original swizzle */
741 src1
.base
.swizzle
= src0
.base
.swizzle
;
743 return submit_op1( emit
, inst_token( SVGA3DOP_MOVA
),
747 static boolean
emit_if(struct svga_shader_emitter
*emit
,
748 const struct tgsi_full_instruction
*insn
)
750 const struct src_register src
= translate_src_register(
751 emit
, &insn
->Src
[0] );
752 struct src_register zero
= get_zero_immediate( emit
);
753 SVGA3dShaderInstToken if_token
= inst_token( SVGA3DOP_IFC
);
755 if_token
.control
= SVGA3DOPCOMPC_NE
;
756 zero
= scalar(zero
, TGSI_SWIZZLE_X
);
758 emit
->dynamic_branching_level
++;
760 return (emit_instruction( emit
, if_token
) &&
761 emit_src( emit
, src
) &&
762 emit_src( emit
, zero
) );
765 static boolean
emit_endif(struct svga_shader_emitter
*emit
,
766 const struct tgsi_full_instruction
*insn
)
768 emit
->dynamic_branching_level
--;
770 return (emit_instruction( emit
,
771 inst_token( SVGA3DOP_ENDIF
)));
774 static boolean
emit_else(struct svga_shader_emitter
*emit
,
775 const struct tgsi_full_instruction
*insn
)
777 return (emit_instruction( emit
,
778 inst_token( SVGA3DOP_ELSE
)));
781 /* Translate the following TGSI FLR instruction.
783 * To the following SVGA3D instruction sequence.
787 static boolean
emit_floor(struct svga_shader_emitter
*emit
,
788 const struct tgsi_full_instruction
*insn
)
790 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
791 const struct src_register src0
= translate_src_register(
792 emit
, &insn
->Src
[0] );
793 SVGA3dShaderDestToken temp
= get_temp( emit
);
796 if (!submit_op1( emit
, inst_token( SVGA3DOP_FRC
), temp
, src0
))
799 /* SUB DST, SRC, TMP */
800 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
), dst
, src0
,
801 negate( src( temp
) ) ))
808 /* Translate the following TGSI CMP instruction.
809 * CMP DST, SRC0, SRC1, SRC2
810 * To the following SVGA3D instruction sequence.
811 * CMP DST, SRC0, SRC2, SRC1
813 static boolean
emit_cmp(struct svga_shader_emitter
*emit
,
814 const struct tgsi_full_instruction
*insn
)
816 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
817 const struct src_register src0
= translate_src_register(
818 emit
, &insn
->Src
[0] );
819 const struct src_register src1
= translate_src_register(
820 emit
, &insn
->Src
[1] );
821 const struct src_register src2
= translate_src_register(
822 emit
, &insn
->Src
[2] );
824 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
825 SVGA3dShaderDestToken temp
= get_temp(emit
);
826 struct src_register zero
= scalar(get_zero_immediate(emit
), TGSI_SWIZZLE_X
);
828 /* Since vertex shaders don't support the CMP instruction,
829 * simulate it with SLT and LRP instructions.
831 * LRP DST, TMP, SRC1, SRC2
833 if (!submit_op2(emit
, inst_token(SVGA3DOP_SLT
), temp
, src0
, zero
))
835 return submit_op3(emit
, inst_token(SVGA3DOP_LRP
), dst
, src(temp
), src1
, src2
);
838 /* CMP DST, SRC0, SRC2, SRC1 */
839 return submit_op3( emit
, inst_token( SVGA3DOP_CMP
), dst
, src0
, src2
, src1
);
844 /* Translate the following TGSI DIV instruction.
845 * DIV DST.xy, SRC0, SRC1
846 * To the following SVGA3D instruction sequence.
847 * RCP TMP.x, SRC1.xxxx
848 * RCP TMP.y, SRC1.yyyy
849 * MUL DST.xy, SRC0, TMP
851 static boolean
emit_div(struct svga_shader_emitter
*emit
,
852 const struct tgsi_full_instruction
*insn
)
854 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
855 const struct src_register src0
= translate_src_register(
856 emit
, &insn
->Src
[0] );
857 const struct src_register src1
= translate_src_register(
858 emit
, &insn
->Src
[1] );
859 SVGA3dShaderDestToken temp
= get_temp( emit
);
862 /* For each enabled element, perform a RCP instruction. Note that
863 * RCP is scalar in SVGA3D:
865 for (i
= 0; i
< 4; i
++) {
866 unsigned channel
= 1 << i
;
867 if (dst
.mask
& channel
) {
868 /* RCP TMP.?, SRC1.???? */
869 if (!submit_op1( emit
, inst_token( SVGA3DOP_RCP
),
870 writemask(temp
, channel
),
876 /* Then multiply them out with a single mul:
880 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
), dst
, src0
,
887 /* Translate the following TGSI DP2 instruction.
888 * DP2 DST, SRC1, SRC2
889 * To the following SVGA3D instruction sequence.
890 * MUL TMP, SRC1, SRC2
891 * ADD DST, TMP.xxxx, TMP.yyyy
893 static boolean
emit_dp2(struct svga_shader_emitter
*emit
,
894 const struct tgsi_full_instruction
*insn
)
896 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
897 const struct src_register src0
= translate_src_register(
898 emit
, &insn
->Src
[0] );
899 const struct src_register src1
= translate_src_register(
900 emit
, &insn
->Src
[1] );
901 SVGA3dShaderDestToken temp
= get_temp( emit
);
902 struct src_register temp_src0
, temp_src1
;
904 /* MUL TMP, SRC1, SRC2 */
905 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
), temp
, src0
, src1
))
908 temp_src0
= scalar(src( temp
), TGSI_SWIZZLE_X
);
909 temp_src1
= scalar(src( temp
), TGSI_SWIZZLE_Y
);
911 /* ADD DST, TMP.xxxx, TMP.yyyy */
912 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
), dst
,
913 temp_src0
, temp_src1
))
920 /* Translate the following TGSI DPH instruction.
921 * DPH DST, SRC1, SRC2
922 * To the following SVGA3D instruction sequence.
923 * DP3 TMP, SRC1, SRC2
924 * ADD DST, TMP, SRC2.wwww
926 static boolean
emit_dph(struct svga_shader_emitter
*emit
,
927 const struct tgsi_full_instruction
*insn
)
929 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
930 const struct src_register src0
= translate_src_register(
931 emit
, &insn
->Src
[0] );
932 struct src_register src1
= translate_src_register(
933 emit
, &insn
->Src
[1] );
934 SVGA3dShaderDestToken temp
= get_temp( emit
);
936 /* DP3 TMP, SRC1, SRC2 */
937 if (!submit_op2( emit
, inst_token( SVGA3DOP_DP3
), temp
, src0
, src1
))
940 src1
= scalar(src1
, TGSI_SWIZZLE_W
);
942 /* ADD DST, TMP, SRC2.wwww */
943 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
), dst
,
950 /* Translate the following TGSI DST instruction.
952 * To the following SVGA3D instruction sequence.
957 static boolean
emit_nrm(struct svga_shader_emitter
*emit
,
958 const struct tgsi_full_instruction
*insn
)
960 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
961 const struct src_register src0
= translate_src_register(
962 emit
, &insn
->Src
[0] );
963 SVGA3dShaderDestToken temp
= get_temp( emit
);
965 /* DP3 TMP, SRC, SRC */
966 if (!submit_op2( emit
, inst_token( SVGA3DOP_DP3
), temp
, src0
, src0
))
970 if (!submit_op1( emit
, inst_token( SVGA3DOP_RSQ
), temp
, src( temp
)))
973 /* MUL DST, SRC, TMP */
974 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
), dst
,
982 static boolean
do_emit_sincos(struct svga_shader_emitter
*emit
,
983 SVGA3dShaderDestToken dst
,
984 struct src_register src0
)
986 src0
= scalar(src0
, TGSI_SWIZZLE_X
);
988 if (emit
->use_sm30
) {
989 return submit_op1( emit
, inst_token( SVGA3DOP_SINCOS
),
992 struct src_register const1
= get_sincos_const( emit
, 0 );
993 struct src_register const2
= get_sincos_const( emit
, 1 );
995 return submit_op3( emit
, inst_token( SVGA3DOP_SINCOS
),
996 dst
, src0
, const1
, const2
);
1000 static boolean
emit_sincos(struct svga_shader_emitter
*emit
,
1001 const struct tgsi_full_instruction
*insn
)
1003 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1004 struct src_register src0
= translate_src_register(
1005 emit
, &insn
->Src
[0] );
1006 SVGA3dShaderDestToken temp
= get_temp( emit
);
1009 if (!do_emit_sincos(emit
, writemask(temp
, TGSI_WRITEMASK_XY
), src0
))
1013 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), dst
, src( temp
) ))
1023 static boolean
emit_sin(struct svga_shader_emitter
*emit
,
1024 const struct tgsi_full_instruction
*insn
)
1026 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1027 struct src_register src0
= translate_src_register(
1028 emit
, &insn
->Src
[0] );
1029 SVGA3dShaderDestToken temp
= get_temp( emit
);
1032 if (!do_emit_sincos(emit
, writemask(temp
, TGSI_WRITEMASK_Y
), src0
))
1035 src0
= scalar(src( temp
), TGSI_SWIZZLE_Y
);
1037 /* MOV DST TMP.yyyy */
1038 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), dst
, src0
))
1048 static boolean
emit_cos(struct svga_shader_emitter
*emit
,
1049 const struct tgsi_full_instruction
*insn
)
1051 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1052 struct src_register src0
= translate_src_register(
1053 emit
, &insn
->Src
[0] );
1054 SVGA3dShaderDestToken temp
= get_temp( emit
);
1057 if (!do_emit_sincos( emit
, writemask(temp
, TGSI_WRITEMASK_X
), src0
))
1060 src0
= scalar(src( temp
), TGSI_SWIZZLE_X
);
1062 /* MOV DST TMP.xxxx */
1063 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), dst
, src0
))
1071 * ADD DST SRC0, negate(SRC0)
1073 static boolean
emit_sub(struct svga_shader_emitter
*emit
,
1074 const struct tgsi_full_instruction
*insn
)
1076 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1077 struct src_register src0
= translate_src_register(
1078 emit
, &insn
->Src
[0] );
1079 struct src_register src1
= translate_src_register(
1080 emit
, &insn
->Src
[1] );
1082 src1
= negate(src1
);
1084 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
), dst
,
1092 static boolean
emit_kil(struct svga_shader_emitter
*emit
,
1093 const struct tgsi_full_instruction
*insn
)
1095 SVGA3dShaderInstToken inst
;
1096 const struct tgsi_full_src_register
*reg
= &insn
->Src
[0];
1097 struct src_register src0
;
1099 inst
= inst_token( SVGA3DOP_TEXKILL
);
1100 src0
= translate_src_register( emit
, reg
);
1102 if (reg
->Register
.Absolute
||
1103 reg
->Register
.Negate
||
1104 reg
->Register
.Indirect
||
1105 reg
->Register
.SwizzleX
!= 0 ||
1106 reg
->Register
.SwizzleY
!= 1 ||
1107 reg
->Register
.SwizzleZ
!= 2 ||
1108 reg
->Register
.File
!= TGSI_FILE_TEMPORARY
)
1110 SVGA3dShaderDestToken temp
= get_temp( emit
);
1112 submit_op1( emit
, inst_token( SVGA3DOP_MOV
), temp
, src0
);
1116 return submit_op0( emit
, inst
, dst(src0
) );
1120 /* mesa state tracker always emits kilp as an unconditional
1122 static boolean
emit_kilp(struct svga_shader_emitter
*emit
,
1123 const struct tgsi_full_instruction
*insn
)
1125 SVGA3dShaderInstToken inst
;
1126 SVGA3dShaderDestToken temp
;
1127 struct src_register one
= scalar( get_zero_immediate( emit
),
1130 inst
= inst_token( SVGA3DOP_TEXKILL
);
1132 /* texkill doesn't allow negation on the operand so lets move
1133 * negation of {1} to a temp register */
1134 temp
= get_temp( emit
);
1135 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), temp
,
1139 return submit_op0( emit
, inst
, temp
);
1142 /* Implement conditionals by initializing destination reg to 'fail',
1143 * then set predicate reg with UFOP_SETP, then move 'pass' to dest
1144 * based on predicate reg.
1146 * SETP src0, cmp, src1 -- do this first to avoid aliasing problems.
1151 emit_conditional(struct svga_shader_emitter
*emit
,
1152 unsigned compare_func
,
1153 SVGA3dShaderDestToken dst
,
1154 struct src_register src0
,
1155 struct src_register src1
,
1156 struct src_register pass
,
1157 struct src_register fail
)
1159 SVGA3dShaderDestToken pred_reg
= dst_register( SVGA3DREG_PREDICATE
, 0 );
1160 SVGA3dShaderInstToken setp_token
, mov_token
;
1161 setp_token
= inst_token( SVGA3DOP_SETP
);
1163 switch (compare_func
) {
1164 case PIPE_FUNC_NEVER
:
1165 return submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1168 case PIPE_FUNC_LESS
:
1169 setp_token
.control
= SVGA3DOPCOMP_LT
;
1171 case PIPE_FUNC_EQUAL
:
1172 setp_token
.control
= SVGA3DOPCOMP_EQ
;
1174 case PIPE_FUNC_LEQUAL
:
1175 setp_token
.control
= SVGA3DOPCOMP_LE
;
1177 case PIPE_FUNC_GREATER
:
1178 setp_token
.control
= SVGA3DOPCOMP_GT
;
1180 case PIPE_FUNC_NOTEQUAL
:
1181 setp_token
.control
= SVGA3DOPCOMPC_NE
;
1183 case PIPE_FUNC_GEQUAL
:
1184 setp_token
.control
= SVGA3DOPCOMP_GE
;
1186 case PIPE_FUNC_ALWAYS
:
1187 return submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1192 /* SETP src0, COMPOP, src1 */
1193 if (!submit_op2( emit
, setp_token
, pred_reg
,
1197 mov_token
= inst_token( SVGA3DOP_MOV
);
1200 if (!submit_op1( emit
, mov_token
, dst
,
1204 /* MOV dst, pass (predicated)
1206 * Note that the predicate reg (and possible modifiers) is passed
1207 * as the first source argument.
1209 mov_token
.predicated
= 1;
1210 if (!submit_op2( emit
, mov_token
, dst
,
1211 src( pred_reg
), pass
))
1219 emit_select(struct svga_shader_emitter
*emit
,
1220 unsigned compare_func
,
1221 SVGA3dShaderDestToken dst
,
1222 struct src_register src0
,
1223 struct src_register src1
)
1225 /* There are some SVGA instructions which implement some selects
1226 * directly, but they are only available in the vertex shader.
1228 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
1229 switch (compare_func
) {
1230 case PIPE_FUNC_GEQUAL
:
1231 return submit_op2( emit
, inst_token( SVGA3DOP_SGE
), dst
, src0
, src1
);
1232 case PIPE_FUNC_LEQUAL
:
1233 return submit_op2( emit
, inst_token( SVGA3DOP_SGE
), dst
, src1
, src0
);
1234 case PIPE_FUNC_GREATER
:
1235 return submit_op2( emit
, inst_token( SVGA3DOP_SLT
), dst
, src1
, src0
);
1236 case PIPE_FUNC_LESS
:
1237 return submit_op2( emit
, inst_token( SVGA3DOP_SLT
), dst
, src0
, src1
);
1244 /* Otherwise, need to use the setp approach:
1247 struct src_register one
, zero
;
1248 /* zero immediate is 0,0,0,1 */
1249 zero
= get_zero_immediate( emit
);
1250 one
= scalar( zero
, TGSI_SWIZZLE_W
);
1251 zero
= scalar( zero
, TGSI_SWIZZLE_X
);
1253 return emit_conditional(
1264 static boolean
emit_select_op(struct svga_shader_emitter
*emit
,
1266 const struct tgsi_full_instruction
*insn
)
1268 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1269 struct src_register src0
= translate_src_register(
1270 emit
, &insn
->Src
[0] );
1271 struct src_register src1
= translate_src_register(
1272 emit
, &insn
->Src
[1] );
1274 return emit_select( emit
, compare
, dst
, src0
, src1
);
1278 /* Translate texture instructions to SVGA3D representation.
1280 static boolean
emit_tex2(struct svga_shader_emitter
*emit
,
1281 const struct tgsi_full_instruction
*insn
,
1282 SVGA3dShaderDestToken dst
)
1284 SVGA3dShaderInstToken inst
;
1285 struct src_register texcoord
;
1286 struct src_register sampler
;
1287 SVGA3dShaderDestToken tmp
;
1291 switch (insn
->Instruction
.Opcode
) {
1292 case TGSI_OPCODE_TEX
:
1293 inst
.op
= SVGA3DOP_TEX
;
1295 case TGSI_OPCODE_TXP
:
1296 inst
.op
= SVGA3DOP_TEX
;
1297 inst
.control
= SVGA3DOPCONT_PROJECT
;
1299 case TGSI_OPCODE_TXB
:
1300 inst
.op
= SVGA3DOP_TEX
;
1301 inst
.control
= SVGA3DOPCONT_BIAS
;
1303 case TGSI_OPCODE_TXL
:
1304 inst
.op
= SVGA3DOP_TEXLDL
;
1311 texcoord
= translate_src_register( emit
, &insn
->Src
[0] );
1312 sampler
= translate_src_register( emit
, &insn
->Src
[1] );
1314 if (emit
->key
.fkey
.tex
[sampler
.base
.num
].unnormalized
||
1315 emit
->dynamic_branching_level
> 0)
1316 tmp
= get_temp( emit
);
1318 /* Can't do mipmapping inside dynamic branch constructs. Force LOD
1319 * zero in that case.
1321 if (emit
->dynamic_branching_level
> 0 &&
1322 inst
.op
== SVGA3DOP_TEX
&&
1323 SVGA3dShaderGetRegType(texcoord
.base
.value
) == SVGA3DREG_TEMP
) {
1324 struct src_register zero
= get_zero_immediate( emit
);
1326 /* MOV tmp, texcoord */
1327 if (!submit_op1( emit
,
1328 inst_token( SVGA3DOP_MOV
),
1333 /* MOV tmp.w, zero */
1334 if (!submit_op1( emit
,
1335 inst_token( SVGA3DOP_MOV
),
1336 writemask( tmp
, TGSI_WRITEMASK_W
),
1337 scalar( zero
, TGSI_SWIZZLE_X
)))
1340 texcoord
= src( tmp
);
1341 inst
.op
= SVGA3DOP_TEXLDL
;
1344 /* Explicit normalization of texcoords:
1346 if (emit
->key
.fkey
.tex
[sampler
.base
.num
].unnormalized
) {
1347 struct src_register wh
= get_tex_dimensions( emit
, sampler
.base
.num
);
1349 /* MUL tmp, SRC0, WH */
1350 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
),
1351 tmp
, texcoord
, wh
))
1354 texcoord
= src( tmp
);
1357 return submit_op2( emit
, inst
, dst
, texcoord
, sampler
);
1363 /* Translate texture instructions to SVGA3D representation.
1365 static boolean
emit_tex4(struct svga_shader_emitter
*emit
,
1366 const struct tgsi_full_instruction
*insn
,
1367 SVGA3dShaderDestToken dst
)
1369 SVGA3dShaderInstToken inst
;
1370 struct src_register texcoord
;
1371 struct src_register ddx
;
1372 struct src_register ddy
;
1373 struct src_register sampler
;
1375 texcoord
= translate_src_register( emit
, &insn
->Src
[0] );
1376 ddx
= translate_src_register( emit
, &insn
->Src
[1] );
1377 ddy
= translate_src_register( emit
, &insn
->Src
[2] );
1378 sampler
= translate_src_register( emit
, &insn
->Src
[3] );
1382 switch (insn
->Instruction
.Opcode
) {
1383 case TGSI_OPCODE_TXD
:
1384 inst
.op
= SVGA3DOP_TEXLDD
; /* 4 args! */
1391 return submit_op4( emit
, inst
, dst
, texcoord
, sampler
, ddx
, ddy
);
1395 static boolean
emit_tex(struct svga_shader_emitter
*emit
,
1396 const struct tgsi_full_instruction
*insn
)
1398 SVGA3dShaderDestToken dst
=
1399 translate_dst_register( emit
, insn
, 0 );
1400 struct src_register src0
=
1401 translate_src_register( emit
, &insn
->Src
[0] );
1402 struct src_register src1
=
1403 translate_src_register( emit
, &insn
->Src
[1] );
1405 SVGA3dShaderDestToken tex_result
;
1407 /* check for shadow samplers */
1408 boolean compare
= (emit
->key
.fkey
.tex
[src1
.base
.num
].compare_mode
==
1409 PIPE_TEX_COMPARE_R_TO_TEXTURE
);
1412 /* If doing compare processing, need to put this value into a
1413 * temporary so it can be used as a source later on.
1416 (!emit
->use_sm30
&& dst
.mask
!= TGSI_WRITEMASK_XYZW
) ) {
1417 tex_result
= get_temp( emit
);
1423 switch(insn
->Instruction
.Opcode
) {
1424 case TGSI_OPCODE_TEX
:
1425 case TGSI_OPCODE_TXB
:
1426 case TGSI_OPCODE_TXP
:
1427 case TGSI_OPCODE_TXL
:
1428 if (!emit_tex2( emit
, insn
, tex_result
))
1431 case TGSI_OPCODE_TXD
:
1432 if (!emit_tex4( emit
, insn
, tex_result
))
1441 if (dst
.mask
& TGSI_WRITEMASK_XYZ
) {
1442 SVGA3dShaderDestToken src0_zdivw
= get_temp( emit
);
1443 struct src_register tex_src_x
= scalar(src(tex_result
), TGSI_SWIZZLE_Y
);
1445 /* Divide texcoord R by Q */
1446 if (!submit_op1( emit
, inst_token( SVGA3DOP_RCP
),
1447 writemask(src0_zdivw
, TGSI_WRITEMASK_X
),
1448 scalar(src0
, TGSI_SWIZZLE_W
) ))
1451 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
),
1452 writemask(src0_zdivw
, TGSI_WRITEMASK_X
),
1453 scalar(src0
, TGSI_SWIZZLE_Z
),
1454 scalar(src(src0_zdivw
), TGSI_SWIZZLE_X
) ))
1459 emit
->key
.fkey
.tex
[src1
.base
.num
].compare_func
,
1460 writemask( dst
, TGSI_WRITEMASK_XYZ
),
1461 scalar(src(src0_zdivw
), TGSI_SWIZZLE_X
),
1466 if (dst
.mask
& TGSI_WRITEMASK_W
) {
1467 struct src_register one
=
1468 scalar( get_zero_immediate( emit
), TGSI_SWIZZLE_W
);
1470 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1471 writemask( dst
, TGSI_WRITEMASK_W
),
1478 else if (!emit
->use_sm30
&& dst
.mask
!= TGSI_WRITEMASK_XYZW
)
1480 if (!emit_op1( emit
, inst_token( SVGA3DOP_MOV
), dst
, src(tex_result
) ))
1487 static boolean
emit_bgnloop2( struct svga_shader_emitter
*emit
,
1488 const struct tgsi_full_instruction
*insn
)
1490 SVGA3dShaderInstToken inst
= inst_token( SVGA3DOP_LOOP
);
1491 struct src_register loop_reg
= src_register( SVGA3DREG_LOOP
, 0 );
1492 struct src_register const_int
= get_loop_const( emit
);
1494 emit
->dynamic_branching_level
++;
1496 return (emit_instruction( emit
, inst
) &&
1497 emit_src( emit
, loop_reg
) &&
1498 emit_src( emit
, const_int
) );
1501 static boolean
emit_endloop2( struct svga_shader_emitter
*emit
,
1502 const struct tgsi_full_instruction
*insn
)
1504 SVGA3dShaderInstToken inst
= inst_token( SVGA3DOP_ENDLOOP
);
1506 emit
->dynamic_branching_level
--;
1508 return emit_instruction( emit
, inst
);
1511 static boolean
emit_brk( struct svga_shader_emitter
*emit
,
1512 const struct tgsi_full_instruction
*insn
)
1514 SVGA3dShaderInstToken inst
= inst_token( SVGA3DOP_BREAK
);
1515 return emit_instruction( emit
, inst
);
1518 static boolean
emit_scalar_op1( struct svga_shader_emitter
*emit
,
1520 const struct tgsi_full_instruction
*insn
)
1522 SVGA3dShaderInstToken inst
;
1523 SVGA3dShaderDestToken dst
;
1524 struct src_register src
;
1526 inst
= inst_token( opcode
);
1527 dst
= translate_dst_register( emit
, insn
, 0 );
1528 src
= translate_src_register( emit
, &insn
->Src
[0] );
1529 src
= scalar( src
, TGSI_SWIZZLE_X
);
1531 return submit_op1( emit
, inst
, dst
, src
);
1535 static boolean
emit_simple_instruction(struct svga_shader_emitter
*emit
,
1537 const struct tgsi_full_instruction
*insn
)
1539 const struct tgsi_full_src_register
*src
= insn
->Src
;
1540 SVGA3dShaderInstToken inst
;
1541 SVGA3dShaderDestToken dst
;
1543 inst
= inst_token( opcode
);
1544 dst
= translate_dst_register( emit
, insn
, 0 );
1546 switch (insn
->Instruction
.NumSrcRegs
) {
1548 return submit_op0( emit
, inst
, dst
);
1550 return submit_op1( emit
, inst
, dst
,
1551 translate_src_register( emit
, &src
[0] ));
1553 return submit_op2( emit
, inst
, dst
,
1554 translate_src_register( emit
, &src
[0] ),
1555 translate_src_register( emit
, &src
[1] ) );
1557 return submit_op3( emit
, inst
, dst
,
1558 translate_src_register( emit
, &src
[0] ),
1559 translate_src_register( emit
, &src
[1] ),
1560 translate_src_register( emit
, &src
[2] ) );
1568 static boolean
emit_deriv(struct svga_shader_emitter
*emit
,
1569 const struct tgsi_full_instruction
*insn
)
1571 if (emit
->dynamic_branching_level
> 0 &&
1572 insn
->Src
[0].Register
.File
== TGSI_FILE_TEMPORARY
)
1574 struct src_register zero
= get_zero_immediate( emit
);
1575 SVGA3dShaderDestToken dst
=
1576 translate_dst_register( emit
, insn
, 0 );
1578 /* Deriv opcodes not valid inside dynamic branching, workaround
1579 * by zeroing out the destination.
1581 if (!submit_op1(emit
,
1582 inst_token( SVGA3DOP_MOV
),
1584 scalar(zero
, TGSI_SWIZZLE_X
)))
1592 switch (insn
->Instruction
.Opcode
) {
1593 case TGSI_OPCODE_DDX
:
1594 opcode
= SVGA3DOP_DSX
;
1596 case TGSI_OPCODE_DDY
:
1597 opcode
= SVGA3DOP_DSY
;
1603 return emit_simple_instruction( emit
, opcode
, insn
);
1607 static boolean
emit_arl(struct svga_shader_emitter
*emit
,
1608 const struct tgsi_full_instruction
*insn
)
1610 ++emit
->current_arl
;
1611 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
1612 /* MOVA not present in pixel shader instruction set.
1613 * Ignore this instruction altogether since it is
1614 * only used for loop counters -- and for that
1615 * we reference aL directly.
1619 if (svga_arl_needs_adjustment( emit
)) {
1620 return emit_fake_arl( emit
, insn
);
1622 /* no need to adjust, just emit straight arl */
1623 return emit_simple_instruction(emit
, SVGA3DOP_MOVA
, insn
);
1627 static boolean
alias_src_dst( struct src_register src
,
1628 SVGA3dShaderDestToken dst
)
1630 if (src
.base
.num
!= dst
.num
)
1633 if (SVGA3dShaderGetRegType(dst
.value
) !=
1634 SVGA3dShaderGetRegType(src
.base
.value
))
1640 static boolean
emit_pow(struct svga_shader_emitter
*emit
,
1641 const struct tgsi_full_instruction
*insn
)
1643 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1644 struct src_register src0
= translate_src_register(
1645 emit
, &insn
->Src
[0] );
1646 struct src_register src1
= translate_src_register(
1647 emit
, &insn
->Src
[1] );
1648 boolean need_tmp
= FALSE
;
1650 /* POW can only output to a temporary */
1651 if (insn
->Dst
[0].Register
.File
!= TGSI_FILE_TEMPORARY
)
1654 /* POW src1 must not be the same register as dst */
1655 if (alias_src_dst( src1
, dst
))
1658 /* it's a scalar op */
1659 src0
= scalar( src0
, TGSI_SWIZZLE_X
);
1660 src1
= scalar( src1
, TGSI_SWIZZLE_X
);
1663 SVGA3dShaderDestToken tmp
= writemask(get_temp( emit
), TGSI_WRITEMASK_X
);
1665 if (!submit_op2(emit
, inst_token( SVGA3DOP_POW
), tmp
, src0
, src1
))
1668 return submit_op1(emit
, inst_token( SVGA3DOP_MOV
), dst
, scalar(src(tmp
), 0) );
1671 return submit_op2(emit
, inst_token( SVGA3DOP_POW
), dst
, src0
, src1
);
1675 static boolean
emit_xpd(struct svga_shader_emitter
*emit
,
1676 const struct tgsi_full_instruction
*insn
)
1678 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1679 const struct src_register src0
= translate_src_register(
1680 emit
, &insn
->Src
[0] );
1681 const struct src_register src1
= translate_src_register(
1682 emit
, &insn
->Src
[1] );
1683 boolean need_dst_tmp
= FALSE
;
1685 /* XPD can only output to a temporary */
1686 if (SVGA3dShaderGetRegType(dst
.value
) != SVGA3DREG_TEMP
)
1687 need_dst_tmp
= TRUE
;
1689 /* The dst reg must not be the same as src0 or src1*/
1690 if (alias_src_dst(src0
, dst
) ||
1691 alias_src_dst(src1
, dst
))
1692 need_dst_tmp
= TRUE
;
1695 SVGA3dShaderDestToken tmp
= get_temp( emit
);
1697 /* Obey DX9 restrictions on mask:
1699 tmp
.mask
= dst
.mask
& TGSI_WRITEMASK_XYZ
;
1701 if (!submit_op2(emit
, inst_token( SVGA3DOP_CRS
), tmp
, src0
, src1
))
1704 if (!submit_op1(emit
, inst_token( SVGA3DOP_MOV
), dst
, src( tmp
)))
1708 if (!submit_op2(emit
, inst_token( SVGA3DOP_CRS
), dst
, src0
, src1
))
1712 /* Need to emit 1.0 to dst.w?
1714 if (dst
.mask
& TGSI_WRITEMASK_W
) {
1715 struct src_register zero
= get_zero_immediate( emit
);
1717 if (!submit_op1(emit
,
1718 inst_token( SVGA3DOP_MOV
),
1719 writemask(dst
, TGSI_WRITEMASK_W
),
1728 static boolean
emit_lrp(struct svga_shader_emitter
*emit
,
1729 const struct tgsi_full_instruction
*insn
)
1731 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1732 SVGA3dShaderDestToken tmp
;
1733 const struct src_register src0
= translate_src_register(
1734 emit
, &insn
->Src
[0] );
1735 const struct src_register src1
= translate_src_register(
1736 emit
, &insn
->Src
[1] );
1737 const struct src_register src2
= translate_src_register(
1738 emit
, &insn
->Src
[2] );
1739 boolean need_dst_tmp
= FALSE
;
1741 /* The dst reg must not be the same as src0 or src2 */
1742 if (alias_src_dst(src0
, dst
) ||
1743 alias_src_dst(src2
, dst
))
1744 need_dst_tmp
= TRUE
;
1747 tmp
= get_temp( emit
);
1748 tmp
.mask
= dst
.mask
;
1754 if (!submit_op3(emit
, inst_token( SVGA3DOP_LRP
), tmp
, src0
, src1
, src2
))
1758 if (!submit_op1(emit
, inst_token( SVGA3DOP_MOV
), dst
, src( tmp
)))
1766 static boolean
emit_dst_insn(struct svga_shader_emitter
*emit
,
1767 const struct tgsi_full_instruction
*insn
)
1769 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
1770 /* SVGA/DX9 has a DST instruction, but only for vertex shaders:
1772 return emit_simple_instruction(emit
, SVGA3DOP_DST
, insn
);
1776 /* result[0] = 1 * 1;
1777 * result[1] = a[1] * b[1];
1778 * result[2] = a[2] * 1;
1779 * result[3] = 1 * b[3];
1782 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1783 SVGA3dShaderDestToken tmp
;
1784 const struct src_register src0
= translate_src_register(
1785 emit
, &insn
->Src
[0] );
1786 const struct src_register src1
= translate_src_register(
1787 emit
, &insn
->Src
[1] );
1788 struct src_register zero
= get_zero_immediate( emit
);
1789 boolean need_tmp
= FALSE
;
1791 if (SVGA3dShaderGetRegType(dst
.value
) != SVGA3DREG_TEMP
||
1792 alias_src_dst(src0
, dst
) ||
1793 alias_src_dst(src1
, dst
))
1797 tmp
= get_temp( emit
);
1805 if (tmp
.mask
& TGSI_WRITEMASK_XW
) {
1806 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1807 writemask(tmp
, TGSI_WRITEMASK_XW
),
1814 if (tmp
.mask
& TGSI_WRITEMASK_YZ
) {
1815 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1816 writemask(tmp
, TGSI_WRITEMASK_YZ
),
1821 /* tmp.yw = tmp * src1
1823 if (tmp
.mask
& TGSI_WRITEMASK_YW
) {
1824 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
),
1825 writemask(tmp
, TGSI_WRITEMASK_YW
),
1834 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1845 static boolean
emit_exp(struct svga_shader_emitter
*emit
,
1846 const struct tgsi_full_instruction
*insn
)
1848 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1849 struct src_register src0
=
1850 translate_src_register( emit
, &insn
->Src
[0] );
1851 struct src_register zero
= get_zero_immediate( emit
);
1852 SVGA3dShaderDestToken fraction
;
1854 if (dst
.mask
& TGSI_WRITEMASK_Y
)
1856 else if (dst
.mask
& TGSI_WRITEMASK_X
)
1857 fraction
= get_temp( emit
);
1861 /* If y is being written, fill it with src0 - floor(src0).
1863 if (dst
.mask
& TGSI_WRITEMASK_XY
) {
1864 if (!submit_op1( emit
, inst_token( SVGA3DOP_FRC
),
1865 writemask( fraction
, TGSI_WRITEMASK_Y
),
1870 /* If x is being written, fill it with 2 ^ floor(src0).
1872 if (dst
.mask
& TGSI_WRITEMASK_X
) {
1873 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
),
1874 writemask( dst
, TGSI_WRITEMASK_X
),
1876 scalar( negate( src( fraction
) ), TGSI_SWIZZLE_Y
) ) )
1879 if (!submit_op1( emit
, inst_token( SVGA3DOP_EXP
),
1880 writemask( dst
, TGSI_WRITEMASK_X
),
1881 scalar( src( dst
), TGSI_SWIZZLE_X
) ) )
1884 if (!(dst
.mask
& TGSI_WRITEMASK_Y
))
1885 release_temp( emit
, fraction
);
1888 /* If z is being written, fill it with 2 ^ src0 (partial precision).
1890 if (dst
.mask
& TGSI_WRITEMASK_Z
) {
1891 if (!submit_op1( emit
, inst_token( SVGA3DOP_EXPP
),
1892 writemask( dst
, TGSI_WRITEMASK_Z
),
1897 /* If w is being written, fill it with one.
1899 if (dst
.mask
& TGSI_WRITEMASK_W
) {
1900 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1901 writemask(dst
, TGSI_WRITEMASK_W
),
1902 scalar( zero
, TGSI_SWIZZLE_W
) ))
1909 static boolean
emit_lit(struct svga_shader_emitter
*emit
,
1910 const struct tgsi_full_instruction
*insn
)
1912 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
1913 /* SVGA/DX9 has a LIT instruction, but only for vertex shaders:
1915 return emit_simple_instruction(emit
, SVGA3DOP_LIT
, insn
);
1919 /* D3D vs. GL semantics can be fairly easily accomodated by
1920 * variations on this sequence.
1924 * tmp.z = pow(src.y,src.w)
1925 * p0 = src0.xxxx > 0
1926 * result = zero.wxxw
1927 * (p0) result.yz = tmp
1931 * tmp.z = pow(src.y,src.w)
1932 * p0 = src0.xxyy > 0
1933 * result = zero.wxxw
1934 * (p0) result.yz = tmp
1936 * Will implement the GL version for now.
1939 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
1940 SVGA3dShaderDestToken tmp
= get_temp( emit
);
1941 const struct src_register src0
= translate_src_register(
1942 emit
, &insn
->Src
[0] );
1943 struct src_register zero
= get_zero_immediate( emit
);
1945 /* tmp = pow(src.y, src.w)
1947 if (dst
.mask
& TGSI_WRITEMASK_Z
) {
1948 if (!submit_op2(emit
, inst_token( SVGA3DOP_POW
),
1957 if (dst
.mask
& TGSI_WRITEMASK_Y
) {
1958 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
1959 writemask(tmp
, TGSI_WRITEMASK_Y
),
1964 /* Can't quite do this with emit conditional due to the extra
1965 * writemask on the predicated mov:
1968 SVGA3dShaderDestToken pred_reg
= dst_register( SVGA3DREG_PREDICATE
, 0 );
1969 SVGA3dShaderInstToken setp_token
, mov_token
;
1970 struct src_register predsrc
;
1972 setp_token
= inst_token( SVGA3DOP_SETP
);
1973 mov_token
= inst_token( SVGA3DOP_MOV
);
1975 setp_token
.control
= SVGA3DOPCOMP_GT
;
1977 /* D3D vs GL semantics:
1980 predsrc
= swizzle(src0
, 0, 0, 1, 1); /* D3D */
1982 predsrc
= swizzle(src0
, 0, 0, 0, 0); /* GL */
1984 /* SETP src0.xxyy, GT, {0}.x */
1985 if (!submit_op2( emit
, setp_token
, pred_reg
,
1987 swizzle(zero
, 0, 0, 0, 0) ))
1991 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), dst
,
1992 swizzle(zero
, 3, 0, 0, 3 )))
1995 /* MOV dst.yz, tmp (predicated)
1997 * Note that the predicate reg (and possible modifiers) is passed
1998 * as the first source argument.
2000 if (dst
.mask
& TGSI_WRITEMASK_YZ
) {
2001 mov_token
.predicated
= 1;
2002 if (!submit_op2( emit
, mov_token
,
2003 writemask(dst
, TGSI_WRITEMASK_YZ
),
2004 src( pred_reg
), src( tmp
) ))
2016 static boolean
emit_ex2( struct svga_shader_emitter
*emit
,
2017 const struct tgsi_full_instruction
*insn
)
2019 SVGA3dShaderInstToken inst
;
2020 SVGA3dShaderDestToken dst
;
2021 struct src_register src0
;
2023 inst
= inst_token( SVGA3DOP_EXP
);
2024 dst
= translate_dst_register( emit
, insn
, 0 );
2025 src0
= translate_src_register( emit
, &insn
->Src
[0] );
2026 src0
= scalar( src0
, TGSI_SWIZZLE_X
);
2028 if (dst
.mask
!= TGSI_WRITEMASK_XYZW
) {
2029 SVGA3dShaderDestToken tmp
= get_temp( emit
);
2031 if (!submit_op1( emit
, inst
, tmp
, src0
))
2034 return submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
2036 scalar( src( tmp
), TGSI_SWIZZLE_X
) );
2039 return submit_op1( emit
, inst
, dst
, src0
);
2043 static boolean
emit_log(struct svga_shader_emitter
*emit
,
2044 const struct tgsi_full_instruction
*insn
)
2046 SVGA3dShaderDestToken dst
= translate_dst_register( emit
, insn
, 0 );
2047 struct src_register src0
=
2048 translate_src_register( emit
, &insn
->Src
[0] );
2049 struct src_register zero
= get_zero_immediate( emit
);
2050 SVGA3dShaderDestToken abs_tmp
;
2051 struct src_register abs_src0
;
2052 SVGA3dShaderDestToken log2_abs
;
2056 if (dst
.mask
& TGSI_WRITEMASK_Z
)
2058 else if (dst
.mask
& TGSI_WRITEMASK_XY
)
2059 log2_abs
= get_temp( emit
);
2063 /* If z is being written, fill it with log2( abs( src0 ) ).
2065 if (dst
.mask
& TGSI_WRITEMASK_XYZ
) {
2066 if (!src0
.base
.srcMod
|| src0
.base
.srcMod
== SVGA3DSRCMOD_ABS
)
2069 abs_tmp
= get_temp( emit
);
2071 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
2076 abs_src0
= src( abs_tmp
);
2079 abs_src0
= absolute( scalar( abs_src0
, TGSI_SWIZZLE_X
) );
2081 if (!submit_op1( emit
, inst_token( SVGA3DOP_LOG
),
2082 writemask( log2_abs
, TGSI_WRITEMASK_Z
),
2087 if (dst
.mask
& TGSI_WRITEMASK_XY
) {
2088 SVGA3dShaderDestToken floor_log2
;
2090 if (dst
.mask
& TGSI_WRITEMASK_X
)
2093 floor_log2
= get_temp( emit
);
2095 /* If x is being written, fill it with floor( log2( abs( src0 ) ) ).
2097 if (!submit_op1( emit
, inst_token( SVGA3DOP_FRC
),
2098 writemask( floor_log2
, TGSI_WRITEMASK_X
),
2099 scalar( src( log2_abs
), TGSI_SWIZZLE_Z
) ) )
2102 if (!submit_op2( emit
, inst_token( SVGA3DOP_ADD
),
2103 writemask( floor_log2
, TGSI_WRITEMASK_X
),
2104 scalar( src( log2_abs
), TGSI_SWIZZLE_Z
),
2105 negate( src( floor_log2
) ) ) )
2108 /* If y is being written, fill it with
2109 * abs ( src0 ) / ( 2 ^ floor( log2( abs( src0 ) ) ) ).
2111 if (dst
.mask
& TGSI_WRITEMASK_Y
) {
2112 if (!submit_op1( emit
, inst_token( SVGA3DOP_EXP
),
2113 writemask( dst
, TGSI_WRITEMASK_Y
),
2114 negate( scalar( src( floor_log2
),
2115 TGSI_SWIZZLE_X
) ) ) )
2118 if (!submit_op2( emit
, inst_token( SVGA3DOP_MUL
),
2119 writemask( dst
, TGSI_WRITEMASK_Y
),
2125 if (!(dst
.mask
& TGSI_WRITEMASK_X
))
2126 release_temp( emit
, floor_log2
);
2128 if (!(dst
.mask
& TGSI_WRITEMASK_Z
))
2129 release_temp( emit
, log2_abs
);
2132 if (dst
.mask
& TGSI_WRITEMASK_XYZ
&& src0
.base
.srcMod
&&
2133 src0
.base
.srcMod
!= SVGA3DSRCMOD_ABS
)
2134 release_temp( emit
, abs_tmp
);
2136 /* If w is being written, fill it with one.
2138 if (dst
.mask
& TGSI_WRITEMASK_W
) {
2139 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
),
2140 writemask(dst
, TGSI_WRITEMASK_W
),
2141 scalar( zero
, TGSI_SWIZZLE_W
) ))
2149 static boolean
emit_bgnsub( struct svga_shader_emitter
*emit
,
2151 const struct tgsi_full_instruction
*insn
)
2155 /* Note that we've finished the main function and are now emitting
2156 * subroutines. This affects how we terminate the generated
2159 emit
->in_main_func
= FALSE
;
2161 for (i
= 0; i
< emit
->nr_labels
; i
++) {
2162 if (emit
->label
[i
] == position
) {
2163 return (emit_instruction( emit
, inst_token( SVGA3DOP_RET
) ) &&
2164 emit_instruction( emit
, inst_token( SVGA3DOP_LABEL
) ) &&
2165 emit_src( emit
, src_register( SVGA3DREG_LABEL
, i
)));
2173 static boolean
emit_call( struct svga_shader_emitter
*emit
,
2174 const struct tgsi_full_instruction
*insn
)
2176 unsigned position
= insn
->Label
.Label
;
2179 for (i
= 0; i
< emit
->nr_labels
; i
++) {
2180 if (emit
->label
[i
] == position
)
2184 if (emit
->nr_labels
== Elements(emit
->label
))
2187 if (i
== emit
->nr_labels
) {
2188 emit
->label
[i
] = position
;
2192 return (emit_instruction( emit
, inst_token( SVGA3DOP_CALL
) ) &&
2193 emit_src( emit
, src_register( SVGA3DREG_LABEL
, i
)));
2197 static boolean
emit_end( struct svga_shader_emitter
*emit
)
2199 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
2200 return emit_vs_postamble( emit
);
2203 return emit_ps_postamble( emit
);
2209 static boolean
svga_emit_instruction( struct svga_shader_emitter
*emit
,
2211 const struct tgsi_full_instruction
*insn
)
2213 switch (insn
->Instruction
.Opcode
) {
2215 case TGSI_OPCODE_ARL
:
2216 return emit_arl( emit
, insn
);
2218 case TGSI_OPCODE_TEX
:
2219 case TGSI_OPCODE_TXB
:
2220 case TGSI_OPCODE_TXP
:
2221 case TGSI_OPCODE_TXL
:
2222 case TGSI_OPCODE_TXD
:
2223 return emit_tex( emit
, insn
);
2225 case TGSI_OPCODE_DDX
:
2226 case TGSI_OPCODE_DDY
:
2227 return emit_deriv( emit
, insn
);
2229 case TGSI_OPCODE_BGNSUB
:
2230 return emit_bgnsub( emit
, position
, insn
);
2232 case TGSI_OPCODE_ENDSUB
:
2235 case TGSI_OPCODE_CAL
:
2236 return emit_call( emit
, insn
);
2238 case TGSI_OPCODE_FLR
:
2239 case TGSI_OPCODE_TRUNC
: /* should be TRUNC, not FLR */
2240 return emit_floor( emit
, insn
);
2242 case TGSI_OPCODE_CMP
:
2243 return emit_cmp( emit
, insn
);
2245 case TGSI_OPCODE_DIV
:
2246 return emit_div( emit
, insn
);
2248 case TGSI_OPCODE_DP2
:
2249 return emit_dp2( emit
, insn
);
2251 case TGSI_OPCODE_DPH
:
2252 return emit_dph( emit
, insn
);
2254 case TGSI_OPCODE_NRM
:
2255 return emit_nrm( emit
, insn
);
2257 case TGSI_OPCODE_COS
:
2258 return emit_cos( emit
, insn
);
2260 case TGSI_OPCODE_SIN
:
2261 return emit_sin( emit
, insn
);
2263 case TGSI_OPCODE_SCS
:
2264 return emit_sincos( emit
, insn
);
2266 case TGSI_OPCODE_END
:
2267 /* TGSI always finishes the main func with an END */
2268 return emit_end( emit
);
2270 case TGSI_OPCODE_KIL
:
2271 return emit_kil( emit
, insn
);
2273 /* Selection opcodes. The underlying language is fairly
2274 * non-orthogonal about these.
2276 case TGSI_OPCODE_SEQ
:
2277 return emit_select_op( emit
, PIPE_FUNC_EQUAL
, insn
);
2279 case TGSI_OPCODE_SNE
:
2280 return emit_select_op( emit
, PIPE_FUNC_NOTEQUAL
, insn
);
2282 case TGSI_OPCODE_SGT
:
2283 return emit_select_op( emit
, PIPE_FUNC_GREATER
, insn
);
2285 case TGSI_OPCODE_SGE
:
2286 return emit_select_op( emit
, PIPE_FUNC_GEQUAL
, insn
);
2288 case TGSI_OPCODE_SLT
:
2289 return emit_select_op( emit
, PIPE_FUNC_LESS
, insn
);
2291 case TGSI_OPCODE_SLE
:
2292 return emit_select_op( emit
, PIPE_FUNC_LEQUAL
, insn
);
2294 case TGSI_OPCODE_SUB
:
2295 return emit_sub( emit
, insn
);
2297 case TGSI_OPCODE_POW
:
2298 return emit_pow( emit
, insn
);
2300 case TGSI_OPCODE_EX2
:
2301 return emit_ex2( emit
, insn
);
2303 case TGSI_OPCODE_EXP
:
2304 return emit_exp( emit
, insn
);
2306 case TGSI_OPCODE_LOG
:
2307 return emit_log( emit
, insn
);
2309 case TGSI_OPCODE_LG2
:
2310 return emit_scalar_op1( emit
, SVGA3DOP_LOG
, insn
);
2312 case TGSI_OPCODE_RSQ
:
2313 return emit_scalar_op1( emit
, SVGA3DOP_RSQ
, insn
);
2315 case TGSI_OPCODE_RCP
:
2316 return emit_scalar_op1( emit
, SVGA3DOP_RCP
, insn
);
2318 case TGSI_OPCODE_CONT
:
2319 case TGSI_OPCODE_RET
:
2320 /* This is a noop -- we tell mesa that we can't support RET
2321 * within a function (early return), so this will always be
2322 * followed by an ENDSUB.
2326 /* These aren't actually used by any of the frontends we care
2329 case TGSI_OPCODE_CLAMP
:
2330 case TGSI_OPCODE_ROUND
:
2331 case TGSI_OPCODE_AND
:
2332 case TGSI_OPCODE_OR
:
2333 case TGSI_OPCODE_I2F
:
2334 case TGSI_OPCODE_NOT
:
2335 case TGSI_OPCODE_SHL
:
2336 case TGSI_OPCODE_ISHR
:
2337 case TGSI_OPCODE_XOR
:
2340 case TGSI_OPCODE_IF
:
2341 return emit_if( emit
, insn
);
2342 case TGSI_OPCODE_ELSE
:
2343 return emit_else( emit
, insn
);
2344 case TGSI_OPCODE_ENDIF
:
2345 return emit_endif( emit
, insn
);
2347 case TGSI_OPCODE_BGNLOOP
:
2348 return emit_bgnloop2( emit
, insn
);
2349 case TGSI_OPCODE_ENDLOOP
:
2350 return emit_endloop2( emit
, insn
);
2351 case TGSI_OPCODE_BRK
:
2352 return emit_brk( emit
, insn
);
2354 case TGSI_OPCODE_XPD
:
2355 return emit_xpd( emit
, insn
);
2357 case TGSI_OPCODE_KILP
:
2358 return emit_kilp( emit
, insn
);
2360 case TGSI_OPCODE_DST
:
2361 return emit_dst_insn( emit
, insn
);
2363 case TGSI_OPCODE_LIT
:
2364 return emit_lit( emit
, insn
);
2366 case TGSI_OPCODE_LRP
:
2367 return emit_lrp( emit
, insn
);
2370 unsigned opcode
= translate_opcode(insn
->Instruction
.Opcode
);
2372 if (opcode
== SVGA3DOP_LAST_INST
)
2375 if (!emit_simple_instruction( emit
, opcode
, insn
))
2384 static boolean
svga_emit_immediate( struct svga_shader_emitter
*emit
,
2385 struct tgsi_full_immediate
*imm
)
2387 static const float id
[4] = {0,0,0,1};
2391 assert(1 <= imm
->Immediate
.NrTokens
&& imm
->Immediate
.NrTokens
<= 5);
2392 for (i
= 0; i
< imm
->Immediate
.NrTokens
- 1; i
++)
2393 value
[i
] = imm
->u
[i
].Float
;
2395 for ( ; i
< 4; i
++ )
2398 return emit_def_const( emit
, SVGA3D_CONST_TYPE_FLOAT
,
2399 emit
->imm_start
+ emit
->internal_imm_count
++,
2400 value
[0], value
[1], value
[2], value
[3]);
2403 static boolean
make_immediate( struct svga_shader_emitter
*emit
,
2408 struct src_register
*out
)
2410 unsigned idx
= emit
->nr_hw_float_const
++;
2412 if (!emit_def_const( emit
, SVGA3D_CONST_TYPE_FLOAT
,
2416 *out
= src_register( SVGA3DREG_CONST
, idx
);
2421 static boolean
emit_vs_preamble( struct svga_shader_emitter
*emit
)
2423 if (!emit
->key
.vkey
.need_prescale
) {
2424 if (!make_immediate( emit
, 0, 0, .5, .5,
2432 static boolean
emit_ps_preamble( struct svga_shader_emitter
*emit
)
2436 /* For SM20, need to initialize the temporaries we're using to hold
2437 * color outputs to some value. Shaders which don't set all of
2438 * these values are likely to be rejected by the DX9 runtime.
2440 if (!emit
->use_sm30
) {
2441 struct src_register zero
= get_zero_immediate( emit
);
2442 for (i
= 0; i
< PIPE_MAX_COLOR_BUFS
; i
++) {
2443 if (SVGA3dShaderGetRegType(emit
->true_col
[i
].value
) != 0) {
2445 if (!submit_op1( emit
,
2446 inst_token(SVGA3DOP_MOV
),
2457 static boolean
emit_ps_postamble( struct svga_shader_emitter
*emit
)
2461 /* PS oDepth is incredibly fragile and it's very hard to catch the
2462 * types of usage that break it during shader emit. Easier just to
2463 * redirect the main program to a temporary and then only touch
2464 * oDepth with a hand-crafted MOV below.
2466 if (SVGA3dShaderGetRegType(emit
->true_pos
.value
) != 0) {
2468 if (!submit_op1( emit
,
2469 inst_token(SVGA3DOP_MOV
),
2471 scalar(src(emit
->temp_pos
), TGSI_SWIZZLE_Z
) ))
2475 /* Similarly for SM20 color outputs... Luckily SM30 isn't so
2478 for (i
= 0; i
< PIPE_MAX_COLOR_BUFS
; i
++) {
2479 if (SVGA3dShaderGetRegType(emit
->true_col
[i
].value
) != 0) {
2481 /* Potentially override output colors with white for XOR
2482 * logicop workaround.
2484 if (emit
->unit
== PIPE_SHADER_FRAGMENT
&&
2485 emit
->key
.fkey
.white_fragments
) {
2487 struct src_register one
= scalar( get_zero_immediate( emit
),
2490 if (!submit_op1( emit
,
2491 inst_token(SVGA3DOP_MOV
),
2497 if (!submit_op1( emit
,
2498 inst_token(SVGA3DOP_MOV
),
2500 src(emit
->temp_col
[i
]) ))
2509 static boolean
emit_vs_postamble( struct svga_shader_emitter
*emit
)
2511 /* PSIZ output is incredibly fragile and it's very hard to catch
2512 * the types of usage that break it during shader emit. Easier
2513 * just to redirect the main program to a temporary and then only
2514 * touch PSIZ with a hand-crafted MOV below.
2516 if (SVGA3dShaderGetRegType(emit
->true_psiz
.value
) != 0) {
2518 if (!submit_op1( emit
,
2519 inst_token(SVGA3DOP_MOV
),
2521 scalar(src(emit
->temp_psiz
), TGSI_SWIZZLE_X
) ))
2525 /* Need to perform various manipulations on vertex position to cope
2526 * with the different GL and D3D clip spaces.
2528 if (emit
->key
.vkey
.need_prescale
) {
2529 SVGA3dShaderDestToken temp_pos
= emit
->temp_pos
;
2530 SVGA3dShaderDestToken pos
= emit
->true_pos
;
2531 unsigned offset
= emit
->info
.file_max
[TGSI_FILE_CONSTANT
] + 1;
2532 struct src_register prescale_scale
= src_register( SVGA3DREG_CONST
,
2534 struct src_register prescale_trans
= src_register( SVGA3DREG_CONST
,
2537 /* MUL temp_pos.xyz, temp_pos, prescale.scale
2538 * MAD result.position, temp_pos.wwww, prescale.trans, temp_pos
2539 * --> Note that prescale.trans.w == 0
2541 if (!submit_op2( emit
,
2542 inst_token(SVGA3DOP_MUL
),
2543 writemask(temp_pos
, TGSI_WRITEMASK_XYZ
),
2548 if (!submit_op3( emit
,
2549 inst_token(SVGA3DOP_MAD
),
2551 swizzle(src(temp_pos
), 3, 3, 3, 3),
2557 SVGA3dShaderDestToken temp_pos
= emit
->temp_pos
;
2558 SVGA3dShaderDestToken pos
= emit
->true_pos
;
2559 struct src_register imm_0055
= emit
->imm_0055
;
2561 /* Adjust GL clipping coordinate space to hardware (D3D-style):
2563 * DP4 temp_pos.z, {0,0,.5,.5}, temp_pos
2564 * MOV result.position, temp_pos
2566 if (!submit_op2( emit
,
2567 inst_token(SVGA3DOP_DP4
),
2568 writemask(temp_pos
, TGSI_WRITEMASK_Z
),
2573 if (!submit_op1( emit
,
2574 inst_token(SVGA3DOP_MOV
),
2585 1: COLOR = FrontColor;
2587 3: COLOR = BackColor;
2590 static boolean
emit_light_twoside( struct svga_shader_emitter
*emit
)
2592 struct src_register vface
, zero
;
2593 struct src_register front
[2];
2594 struct src_register back
[2];
2595 SVGA3dShaderDestToken color
[2];
2596 int count
= emit
->internal_color_count
;
2598 SVGA3dShaderInstToken if_token
;
2603 vface
= get_vface( emit
);
2604 zero
= get_zero_immediate( emit
);
2606 /* Can't use get_temp() to allocate the color reg as such
2607 * temporaries will be reclaimed after each instruction by the call
2608 * to reset_temp_regs().
2610 for (i
= 0; i
< count
; i
++) {
2611 color
[i
] = dst_register( SVGA3DREG_TEMP
,
2612 emit
->nr_hw_temp
++ );
2614 front
[i
] = emit
->input_map
[emit
->internal_color_idx
[i
]];
2616 /* Back is always the next input:
2619 back
[i
].base
.num
= front
[i
].base
.num
+ 1;
2621 /* Reassign the input_map to the actual front-face color:
2623 emit
->input_map
[emit
->internal_color_idx
[i
]] = src(color
[i
]);
2626 if_token
= inst_token( SVGA3DOP_IFC
);
2628 if (emit
->key
.fkey
.front_ccw
)
2629 if_token
.control
= SVGA3DOPCOMP_LT
;
2631 if_token
.control
= SVGA3DOPCOMP_GT
;
2633 zero
= scalar(zero
, TGSI_SWIZZLE_X
);
2635 if (!(emit_instruction( emit
, if_token
) &&
2636 emit_src( emit
, vface
) &&
2637 emit_src( emit
, zero
) ))
2640 for (i
= 0; i
< count
; i
++) {
2641 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), color
[i
], front
[i
] ))
2645 if (!(emit_instruction( emit
, inst_token( SVGA3DOP_ELSE
))))
2648 for (i
= 0; i
< count
; i
++) {
2649 if (!submit_op1( emit
, inst_token( SVGA3DOP_MOV
), color
[i
], back
[i
] ))
2653 if (!emit_instruction( emit
, inst_token( SVGA3DOP_ENDIF
) ))
2660 0: SETP_GT TEMP, VFACE, 0
2661 where TEMP is a fake frontface register
2663 static boolean
emit_frontface( struct svga_shader_emitter
*emit
)
2665 struct src_register vface
, zero
;
2666 SVGA3dShaderDestToken temp
;
2667 struct src_register pass
, fail
;
2669 vface
= get_vface( emit
);
2670 zero
= get_zero_immediate( emit
);
2672 /* Can't use get_temp() to allocate the fake frontface reg as such
2673 * temporaries will be reclaimed after each instruction by the call
2674 * to reset_temp_regs().
2676 temp
= dst_register( SVGA3DREG_TEMP
,
2677 emit
->nr_hw_temp
++ );
2679 if (emit
->key
.fkey
.front_ccw
) {
2680 pass
= scalar( zero
, TGSI_SWIZZLE_X
);
2681 fail
= scalar( zero
, TGSI_SWIZZLE_W
);
2683 pass
= scalar( zero
, TGSI_SWIZZLE_W
);
2684 fail
= scalar( zero
, TGSI_SWIZZLE_X
);
2687 if (!emit_conditional(emit
, PIPE_FUNC_GREATER
,
2688 temp
, vface
, scalar( zero
, TGSI_SWIZZLE_X
),
2692 /* Reassign the input_map to the actual front-face color:
2694 emit
->input_map
[emit
->internal_frontface_idx
] = src(temp
);
2699 static INLINE boolean
2700 needs_to_create_zero( struct svga_shader_emitter
*emit
)
2704 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
2705 if (!emit
->use_sm30
)
2708 if (emit
->key
.fkey
.light_twoside
)
2711 if (emit
->key
.fkey
.white_fragments
)
2714 if (emit
->emit_frontface
)
2717 if (emit
->info
.opcode_count
[TGSI_OPCODE_DST
] >= 1 ||
2718 emit
->info
.opcode_count
[TGSI_OPCODE_LIT
] >= 1)
2722 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
2723 if (emit
->info
.opcode_count
[TGSI_OPCODE_CMP
] >= 1)
2727 if (emit
->info
.opcode_count
[TGSI_OPCODE_IF
] >= 1 ||
2728 emit
->info
.opcode_count
[TGSI_OPCODE_BGNLOOP
] >= 1 ||
2729 emit
->info
.opcode_count
[TGSI_OPCODE_DDX
] >= 1 ||
2730 emit
->info
.opcode_count
[TGSI_OPCODE_DDY
] >= 1 ||
2731 emit
->info
.opcode_count
[TGSI_OPCODE_SGE
] >= 1 ||
2732 emit
->info
.opcode_count
[TGSI_OPCODE_SGT
] >= 1 ||
2733 emit
->info
.opcode_count
[TGSI_OPCODE_SLE
] >= 1 ||
2734 emit
->info
.opcode_count
[TGSI_OPCODE_SLT
] >= 1 ||
2735 emit
->info
.opcode_count
[TGSI_OPCODE_SNE
] >= 1 ||
2736 emit
->info
.opcode_count
[TGSI_OPCODE_SEQ
] >= 1 ||
2737 emit
->info
.opcode_count
[TGSI_OPCODE_EXP
] >= 1 ||
2738 emit
->info
.opcode_count
[TGSI_OPCODE_LOG
] >= 1 ||
2739 emit
->info
.opcode_count
[TGSI_OPCODE_XPD
] >= 1 ||
2740 emit
->info
.opcode_count
[TGSI_OPCODE_KILP
] >= 1)
2743 for (i
= 0; i
< emit
->key
.fkey
.num_textures
; i
++) {
2744 if (emit
->key
.fkey
.tex
[i
].compare_mode
== PIPE_TEX_COMPARE_R_TO_TEXTURE
)
2751 static INLINE boolean
2752 needs_to_create_loop_const( struct svga_shader_emitter
*emit
)
2754 return (emit
->info
.opcode_count
[TGSI_OPCODE_BGNLOOP
] >= 1);
2757 static INLINE boolean
2758 needs_to_create_sincos_consts( struct svga_shader_emitter
*emit
)
2760 return !emit
->use_sm30
&& (emit
->info
.opcode_count
[TGSI_OPCODE_SIN
] >= 1 ||
2761 emit
->info
.opcode_count
[TGSI_OPCODE_COS
] >= 1 ||
2762 emit
->info
.opcode_count
[TGSI_OPCODE_SCS
] >= 1);
2765 static INLINE boolean
2766 needs_to_create_arl_consts( struct svga_shader_emitter
*emit
)
2768 return (emit
->num_arl_consts
> 0);
2771 static INLINE boolean
2772 pre_parse_add_indirect( struct svga_shader_emitter
*emit
,
2773 int num
, int current_arl
)
2778 for (i
= 0; i
< emit
->num_arl_consts
; ++i
) {
2779 if (emit
->arl_consts
[i
].arl_num
== current_arl
)
2783 if (emit
->num_arl_consts
== i
) {
2784 ++emit
->num_arl_consts
;
2786 emit
->arl_consts
[i
].number
= (emit
->arl_consts
[i
].number
> num
) ?
2788 emit
->arl_consts
[i
].number
;
2789 emit
->arl_consts
[i
].arl_num
= current_arl
;
2794 pre_parse_instruction( struct svga_shader_emitter
*emit
,
2795 const struct tgsi_full_instruction
*insn
,
2798 if (insn
->Src
[0].Register
.Indirect
&&
2799 insn
->Src
[0].Indirect
.File
== TGSI_FILE_ADDRESS
) {
2800 const struct tgsi_full_src_register
*reg
= &insn
->Src
[0];
2801 if (reg
->Register
.Index
< 0) {
2802 pre_parse_add_indirect(emit
, reg
->Register
.Index
, current_arl
);
2806 if (insn
->Src
[1].Register
.Indirect
&&
2807 insn
->Src
[1].Indirect
.File
== TGSI_FILE_ADDRESS
) {
2808 const struct tgsi_full_src_register
*reg
= &insn
->Src
[1];
2809 if (reg
->Register
.Index
< 0) {
2810 pre_parse_add_indirect(emit
, reg
->Register
.Index
, current_arl
);
2814 if (insn
->Src
[2].Register
.Indirect
&&
2815 insn
->Src
[2].Indirect
.File
== TGSI_FILE_ADDRESS
) {
2816 const struct tgsi_full_src_register
*reg
= &insn
->Src
[2];
2817 if (reg
->Register
.Index
< 0) {
2818 pre_parse_add_indirect(emit
, reg
->Register
.Index
, current_arl
);
2826 pre_parse_tokens( struct svga_shader_emitter
*emit
,
2827 const struct tgsi_token
*tokens
)
2829 struct tgsi_parse_context parse
;
2830 int current_arl
= 0;
2832 tgsi_parse_init( &parse
, tokens
);
2834 while (!tgsi_parse_end_of_tokens( &parse
)) {
2835 tgsi_parse_token( &parse
);
2836 switch (parse
.FullToken
.Token
.Type
) {
2837 case TGSI_TOKEN_TYPE_IMMEDIATE
:
2838 case TGSI_TOKEN_TYPE_DECLARATION
:
2840 case TGSI_TOKEN_TYPE_INSTRUCTION
:
2841 if (parse
.FullToken
.FullInstruction
.Instruction
.Opcode
==
2845 if (!pre_parse_instruction( emit
, &parse
.FullToken
.FullInstruction
,
2857 static boolean
svga_shader_emit_helpers( struct svga_shader_emitter
*emit
)
2860 if (needs_to_create_zero( emit
)) {
2861 create_zero_immediate( emit
);
2863 if (needs_to_create_loop_const( emit
)) {
2864 create_loop_const( emit
);
2866 if (needs_to_create_sincos_consts( emit
)) {
2867 create_sincos_consts( emit
);
2869 if (needs_to_create_arl_consts( emit
)) {
2870 create_arl_consts( emit
);
2873 if (emit
->unit
== PIPE_SHADER_FRAGMENT
) {
2874 if (!emit_ps_preamble( emit
))
2877 if (emit
->key
.fkey
.light_twoside
) {
2878 if (!emit_light_twoside( emit
))
2881 if (emit
->emit_frontface
) {
2882 if (!emit_frontface( emit
))
2890 boolean
svga_shader_emit_instructions( struct svga_shader_emitter
*emit
,
2891 const struct tgsi_token
*tokens
)
2893 struct tgsi_parse_context parse
;
2895 boolean helpers_emitted
= FALSE
;
2896 unsigned line_nr
= 0;
2898 tgsi_parse_init( &parse
, tokens
);
2899 emit
->internal_imm_count
= 0;
2901 if (emit
->unit
== PIPE_SHADER_VERTEX
) {
2902 ret
= emit_vs_preamble( emit
);
2907 pre_parse_tokens(emit
, tokens
);
2909 while (!tgsi_parse_end_of_tokens( &parse
)) {
2910 tgsi_parse_token( &parse
);
2912 switch (parse
.FullToken
.Token
.Type
) {
2913 case TGSI_TOKEN_TYPE_IMMEDIATE
:
2914 ret
= svga_emit_immediate( emit
, &parse
.FullToken
.FullImmediate
);
2919 case TGSI_TOKEN_TYPE_DECLARATION
:
2921 ret
= svga_translate_decl_sm30( emit
, &parse
.FullToken
.FullDeclaration
);
2923 ret
= svga_translate_decl_sm20( emit
, &parse
.FullToken
.FullDeclaration
);
2928 case TGSI_TOKEN_TYPE_INSTRUCTION
:
2929 if (!helpers_emitted
) {
2930 if (!svga_shader_emit_helpers( emit
))
2932 helpers_emitted
= TRUE
;
2934 ret
= svga_emit_instruction( emit
,
2936 &parse
.FullToken
.FullInstruction
);
2944 reset_temp_regs( emit
);
2947 /* Need to terminate the current subroutine. Note that the
2948 * hardware doesn't tolerate shaders without sub-routines
2949 * terminating with RET+END.
2951 if (!emit
->in_main_func
) {
2952 ret
= emit_instruction( emit
, inst_token( SVGA3DOP_RET
) );
2957 assert(emit
->dynamic_branching_level
== 0);
2959 /* Need to terminate the whole shader:
2961 ret
= emit_instruction( emit
, inst_token( SVGA3DOP_END
) );
2967 tgsi_parse_free( &parse
);